patch (string, lengths 17–31.2k) | y (int64, 1–1) | oldf (string, lengths 0–2.21M) | idx (int64, 1–1) | id (int64, 4.29k–68.4k) | msg (string, lengths 8–843) | proj (string, 212 distinct values) | lang (string, 9 distinct values) |
---|---|---|---|---|---|---|---|
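Each row that follows pairs a reviewer comment (`msg`) with the diff it was left on (`patch`) and the full pre-change source file (`oldf`). As a minimal sketch, a Java record mirroring one row might look like this — the record itself is hypothetical; only the column names and types come from the header above:

```java
/** Hypothetical container for one dataset row; columns follow the header above. */
public record ReviewExample(
    String patch, // unified diff the review comment was left on
    long y,       // label column (1 in every row shown here)
    String oldf,  // full pre-change source file
    long idx,     // per-row index (1 in every row shown here)
    long id,      // numeric id of the review comment
    String msg,   // the reviewer's comment text
    String proj,  // project slug, e.g. "google-flatbuffers"
    String lang   // language label, e.g. "java"
) {}
```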
@@ -97,6 +97,7 @@ std::string FlatCompiler::GetUsageString(const char *program_name) const {
" Default value is \"T\"\n"
" --no-js-exports Removes Node.js style export lines in JS.\n"
" --goog-js-export Uses goog.exports* for closure compiler exporting in JS.\n"
+ " --es6-js-export Uses ECMAScript 6 export style lines in JS.\n"
" --go-namespace Generate the overridden namespace in Golang.\n"
" --go-import Generate the overridden import for flatbuffers in Golang.\n"
" (default is \"github.com/google/flatbuffers/go\")\n" | 1 | /*
* Copyright 2014 Google Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
#include "flatbuffers/flatc.h"
#include <list>
#define FLATC_VERSION "1.9.0 (" __DATE__ " " __TIME__ ")"
namespace flatbuffers {
void FlatCompiler::ParseFile(
flatbuffers::Parser &parser, const std::string &filename,
const std::string &contents,
std::vector<const char *> &include_directories) const {
auto local_include_directory = flatbuffers::StripFileName(filename);
include_directories.push_back(local_include_directory.c_str());
include_directories.push_back(nullptr);
if (!parser.Parse(contents.c_str(), &include_directories[0],
filename.c_str()))
Error(parser.error_, false, false);
include_directories.pop_back();
include_directories.pop_back();
}
void FlatCompiler::Warn(const std::string &warn, bool show_exe_name) const {
params_.warn_fn(this, warn, show_exe_name);
}
void FlatCompiler::Error(const std::string &err, bool usage,
bool show_exe_name) const {
params_.error_fn(this, err, usage, show_exe_name);
}
std::string FlatCompiler::GetUsageString(const char *program_name) const {
std::stringstream ss;
ss << "Usage: " << program_name << " [OPTION]... FILE... [-- FILE...]\n";
for (size_t i = 0; i < params_.num_generators; ++i) {
const Generator &g = params_.generators[i];
std::stringstream full_name;
full_name << std::setw(12) << std::left << g.generator_opt_long;
const char *name = g.generator_opt_short ? g.generator_opt_short : " ";
const char *help = g.generator_help;
ss << " " << full_name.str() << " " << name << " " << help << ".\n";
}
// clang-format off
ss <<
" -o PATH Prefix PATH to all generated files.\n"
" -I PATH Search for includes in the specified path.\n"
" -M Print make rules for generated files.\n"
" --version Print the version number of flatc and exit.\n"
" --strict-json Strict JSON: field names must be / will be quoted,\n"
" no trailing commas in tables/vectors.\n"
" --allow-non-utf8 Pass non-UTF-8 input through parser and emit nonstandard\n"
" \\x escapes in JSON. (Default is to raise parse error on\n"
" non-UTF-8 input.)\n"
" --natural-utf8 Output strings with UTF-8 as human-readable strings.\n"
" By default, UTF-8 characters are printed as \\uXXXX escapes.\n"
" --defaults-json Output fields whose value is the default when\n"
" writing JSON\n"
" --unknown-json Allow fields in JSON that are not defined in the\n"
" schema. These fields will be discarded when generating\n"
" binaries.\n"
" --no-prefix Don\'t prefix enum values with the enum type in C++.\n"
" --scoped-enums Use C++11 style scoped and strongly typed enums.\n"
" also implies --no-prefix.\n"
" --gen-includes (deprecated), this is the default behavior.\n"
" If the original behavior is required (no include\n"
" statements) use --no-includes.\n"
" --no-includes Don\'t generate include statements for included\n"
" schemas the generated file depends on (C++).\n"
" --gen-mutable Generate accessors that can mutate buffers in-place.\n"
" --gen-onefile Generate single output file for C# and Go.\n"
" --gen-name-strings Generate type name functions for C++.\n"
" --gen-object-api Generate an additional object-based API.\n"
" --cpp-ptr-type T Set object API pointer type (default std::unique_ptr)\n"
" --cpp-str-type T Set object API string type (default std::string)\n"
" T::c_str() and T::length() must be supported\n"
" --gen-nullable Add Clang _Nullable for C++ pointer, or @Nullable for Java\n"
" --object-prefix Customise class prefix for C++ object-based API.\n"
" --object-suffix Customise class suffix for C++ object-based API.\n"
" Default value is \"T\"\n"
" --no-js-exports Removes Node.js style export lines in JS.\n"
" --goog-js-export Uses goog.exports* for closure compiler exporting in JS.\n"
" --go-namespace Generate the overridden namespace in Golang.\n"
" --go-import Generate the overridden import for flatbuffers in Golang.\n"
" (default is \"github.com/google/flatbuffers/go\")\n"
" --raw-binary Allow binaries without file_identifier to be read.\n"
" This may crash flatc given a mismatched schema.\n"
" --size-prefixed Input binaries are size prefixed buffers.\n"
" --proto Input is a .proto, translate to .fbs.\n"
" --oneof-union Translate .proto oneofs to flatbuffer unions.\n"
" --grpc Generate GRPC interfaces for the specified languages\n"
" --schema Serialize schemas instead of JSON (use with -b)\n"
" --bfbs-comments Add doc comments to the binary schema files.\n"
" --bfbs-builtins Add builtin attributes to the binary schema files.\n"
" --conform FILE Specify a schema the following schemas should be\n"
" an evolution of. Gives errors if not.\n"
" --conform-includes Include path for the schema given with --conform\n"
" PATH \n"
" --include-prefix Prefix this path to any generated include statements.\n"
" PATH\n"
" --keep-prefix Keep original prefix of schema include statement.\n"
" --no-fb-import Don't include flatbuffers import statement for TypeScript.\n"
" --no-ts-reexport Don't re-export imported dependencies for TypeScript.\n"
" --reflect-types Add minimal type reflection to code generation.\n"
" --reflect-names Add minimal type/name reflection.\n"
" --root-type T Select or override the default root_type\n"
"FILEs may be schemas (must end in .fbs), or JSON files (conforming to preceding\n"
"schema). FILEs after the -- must be binary flatbuffer format files.\n"
"Output files are named using the base file name of the input,\n"
"and written to the current directory or the path given by -o.\n"
"example: " << program_name << " -c -b schema1.fbs schema2.fbs data.json\n";
// clang-format on
return ss.str();
}
int FlatCompiler::Compile(int argc, const char **argv) {
if (params_.generators == nullptr || params_.num_generators == 0) {
return 0;
}
flatbuffers::IDLOptions opts;
std::string output_path;
bool any_generator = false;
bool print_make_rules = false;
bool raw_binary = false;
bool schema_binary = false;
bool grpc_enabled = false;
std::vector<std::string> filenames;
std::list<std::string> include_directories_storage;
std::vector<const char *> include_directories;
std::vector<const char *> conform_include_directories;
std::vector<bool> generator_enabled(params_.num_generators, false);
size_t binary_files_from = std::numeric_limits<size_t>::max();
std::string conform_to_schema;
for (int argi = 0; argi < argc; argi++) {
std::string arg = argv[argi];
if (arg[0] == '-') {
if (filenames.size() && arg[1] != '-')
Error("invalid option location: " + arg, true);
if (arg == "-o") {
if (++argi >= argc) Error("missing path following: " + arg, true);
output_path = flatbuffers::ConCatPathFileName(
flatbuffers::PosixPath(argv[argi]), "");
} else if (arg == "-I") {
if (++argi >= argc) Error("missing path following: " + arg, true);
include_directories_storage.push_back(
flatbuffers::PosixPath(argv[argi]));
include_directories.push_back(
include_directories_storage.back().c_str());
} else if (arg == "--conform") {
if (++argi >= argc) Error("missing path following: " + arg, true);
conform_to_schema = flatbuffers::PosixPath(argv[argi]);
} else if (arg == "--conform-includes") {
if (++argi >= argc) Error("missing path following: " + arg, true);
include_directories_storage.push_back(
flatbuffers::PosixPath(argv[argi]));
conform_include_directories.push_back(
include_directories_storage.back().c_str());
} else if (arg == "--include-prefix") {
if (++argi >= argc) Error("missing path following: " + arg, true);
opts.include_prefix = flatbuffers::ConCatPathFileName(
flatbuffers::PosixPath(argv[argi]), "");
} else if (arg == "--keep-prefix") {
opts.keep_include_path = true;
} else if (arg == "--strict-json") {
opts.strict_json = true;
} else if (arg == "--allow-non-utf8") {
opts.allow_non_utf8 = true;
} else if (arg == "--natural-utf8") {
opts.natural_utf8 = true;
} else if (arg == "--no-js-exports") {
opts.skip_js_exports = true;
} else if (arg == "--goog-js-export") {
opts.use_goog_js_export_format = true;
} else if (arg == "--go-namespace") {
if (++argi >= argc) Error("missing golang namespace: " + arg, true);
opts.go_namespace = argv[argi];
} else if (arg == "--go-import") {
if (++argi >= argc) Error("missing golang import: " + arg, true);
opts.go_import = argv[argi];
} else if (arg == "--defaults-json") {
opts.output_default_scalars_in_json = true;
} else if (arg == "--unknown-json") {
opts.skip_unexpected_fields_in_json = true;
} else if (arg == "--no-prefix") {
opts.prefixed_enums = false;
} else if (arg == "--scoped-enums") {
opts.prefixed_enums = false;
opts.scoped_enums = true;
} else if (arg == "--no-union-value-namespacing") {
opts.union_value_namespacing = false;
} else if (arg == "--gen-mutable") {
opts.mutable_buffer = true;
} else if (arg == "--gen-name-strings") {
opts.generate_name_strings = true;
} else if (arg == "--gen-object-api") {
opts.generate_object_based_api = true;
} else if (arg == "--cpp-ptr-type") {
if (++argi >= argc) Error("missing type following: " + arg, true);
opts.cpp_object_api_pointer_type = argv[argi];
} else if (arg == "--cpp-str-type") {
if (++argi >= argc) Error("missing type following: " + arg, true);
opts.cpp_object_api_string_type = argv[argi];
} else if (arg == "--gen-nullable") {
opts.gen_nullable = true;
} else if (arg == "--object-prefix") {
if (++argi >= argc) Error("missing prefix following: " + arg, true);
opts.object_prefix = argv[argi];
} else if (arg == "--object-suffix") {
if (++argi >= argc) Error("missing suffix following: " + arg, true);
opts.object_suffix = argv[argi];
} else if (arg == "--gen-all") {
opts.generate_all = true;
opts.include_dependence_headers = false;
} else if (arg == "--gen-includes") {
// Deprecated, remove this option some time in the future.
printf("warning: --gen-includes is deprecated (it is now default)\n");
} else if (arg == "--no-includes") {
opts.include_dependence_headers = false;
} else if (arg == "--gen-onefile") {
opts.one_file = true;
} else if (arg == "--raw-binary") {
raw_binary = true;
} else if (arg == "--size-prefixed") {
opts.size_prefixed = true;
} else if (arg == "--") { // Separator between text and binary inputs.
binary_files_from = filenames.size();
} else if (arg == "--proto") {
opts.proto_mode = true;
} else if (arg == "--oneof-union") {
opts.proto_oneof_union = true;
} else if (arg == "--schema") {
schema_binary = true;
} else if (arg == "-M") {
print_make_rules = true;
} else if (arg == "--version") {
printf("flatc version %s\n", FLATC_VERSION);
exit(0);
} else if (arg == "--grpc") {
grpc_enabled = true;
} else if (arg == "--bfbs-comments") {
opts.binary_schema_comments = true;
} else if (arg == "--bfbs-builtins") {
opts.binary_schema_builtins = true;
} else if (arg == "--no-fb-import") {
opts.skip_flatbuffers_import = true;
} else if (arg == "--no-ts-reexport") {
opts.reexport_ts_modules = false;
} else if (arg == "--reflect-types") {
opts.mini_reflect = IDLOptions::kTypes;
} else if (arg == "--reflect-names") {
opts.mini_reflect = IDLOptions::kTypesAndNames;
} else if (arg == "--root-type") {
if (++argi >= argc) Error("missing type following: " + arg, true);
opts.root_type = argv[argi];
} else {
for (size_t i = 0; i < params_.num_generators; ++i) {
if (arg == params_.generators[i].generator_opt_long ||
(params_.generators[i].generator_opt_short &&
arg == params_.generators[i].generator_opt_short)) {
generator_enabled[i] = true;
any_generator = true;
opts.lang_to_generate |= params_.generators[i].lang;
goto found;
}
}
Error("unknown commandline argument: " + arg, true);
found:;
}
} else {
filenames.push_back(flatbuffers::PosixPath(argv[argi]));
}
}
if (!filenames.size()) Error("missing input files", false, true);
if (opts.proto_mode) {
if (any_generator)
Error("cannot generate code directly from .proto files", true);
} else if (!any_generator && conform_to_schema.empty()) {
Error("no options: specify at least one generator.", true);
}
flatbuffers::Parser conform_parser;
if (!conform_to_schema.empty()) {
std::string contents;
if (!flatbuffers::LoadFile(conform_to_schema.c_str(), true, &contents))
Error("unable to load schema: " + conform_to_schema);
ParseFile(conform_parser, conform_to_schema, contents,
conform_include_directories);
}
std::unique_ptr<flatbuffers::Parser> parser(new flatbuffers::Parser(opts));
for (auto file_it = filenames.begin(); file_it != filenames.end();
++file_it) {
auto &filename = *file_it;
std::string contents;
if (!flatbuffers::LoadFile(filename.c_str(), true, &contents))
Error("unable to load file: " + filename);
bool is_binary =
static_cast<size_t>(file_it - filenames.begin()) >= binary_files_from;
auto ext = flatbuffers::GetExtension(filename);
auto is_schema = ext == "fbs" || ext == "proto";
if (is_binary) {
parser->builder_.Clear();
parser->builder_.PushFlatBuffer(
reinterpret_cast<const uint8_t *>(contents.c_str()),
contents.length());
if (!raw_binary) {
// Generally reading binaries that do not correspond to the schema
// will crash, and sadly there's no way around that when the binary
// does not contain a file identifier.
// We'd expect that typically any binary used as a file would have
// such an identifier, so by default we require them to match.
if (!parser->file_identifier_.length()) {
Error("current schema has no file_identifier: cannot test if \"" +
filename +
"\" matches the schema, use --raw-binary to read this file"
" anyway.");
} else if (!flatbuffers::BufferHasIdentifier(
contents.c_str(), parser->file_identifier_.c_str(), opts.size_prefixed)) {
Error("binary \"" + filename +
"\" does not have expected file_identifier \"" +
parser->file_identifier_ +
"\", use --raw-binary to read this file anyway.");
}
}
} else {
// Check if the file contains embedded NUL (0) bytes.
if (contents.length() != strlen(contents.c_str())) {
Error("input file appears to be binary: " + filename, true);
}
if (is_schema) {
// If we're processing multiple schemas, make sure to start each
// one from scratch. If it depends on previous schemas it must do
// so explicitly using an include.
parser.reset(new flatbuffers::Parser(opts));
}
ParseFile(*parser.get(), filename, contents, include_directories);
if (!is_schema && !parser->builder_.GetSize()) {
// If a file doesn't end in .fbs, it must be json/binary. Ensure we
// didn't just parse a schema with a different extension.
Error(
"input file is neither json nor a .fbs (schema) file: " + filename,
true);
}
if (is_schema && !conform_to_schema.empty()) {
auto err = parser->ConformTo(conform_parser);
if (!err.empty()) Error("schemas don\'t conform: " + err);
}
if (schema_binary) {
parser->Serialize();
parser->file_extension_ = reflection::SchemaExtension();
}
}
std::string filebase =
flatbuffers::StripPath(flatbuffers::StripExtension(filename));
for (size_t i = 0; i < params_.num_generators; ++i) {
parser->opts.lang = params_.generators[i].lang;
if (generator_enabled[i]) {
if (!print_make_rules) {
flatbuffers::EnsureDirExists(output_path);
if ((!params_.generators[i].schema_only || is_schema) &&
!params_.generators[i].generate(*parser.get(), output_path,
filebase)) {
Error(std::string("Unable to generate ") +
params_.generators[i].lang_name + " for " + filebase);
}
} else {
std::string make_rule = params_.generators[i].make_rule(
*parser.get(), output_path, filename);
if (!make_rule.empty())
printf("%s\n",
flatbuffers::WordWrap(make_rule, 80, " ", " \\").c_str());
}
if (grpc_enabled) {
if (params_.generators[i].generateGRPC != nullptr) {
if (!params_.generators[i].generateGRPC(*parser.get(), output_path,
filebase)) {
Error(std::string("Unable to generate GRPC interface for ") +
params_.generators[i].lang_name);
}
} else {
Warn(std::string("GRPC interface generator not implemented for ") +
params_.generators[i].lang_name);
}
}
}
}
if (!opts.root_type.empty()) {
if (!parser->SetRootType(opts.root_type.c_str()))
Error("unknown root type: " + opts.root_type);
else if (parser->root_struct_def_->fixed)
Error("root type must be a table");
}
if (opts.proto_mode) GenerateFBS(*parser.get(), output_path, filebase);
// We do not want to generate code for the definitions in this file
// in any files coming up next.
parser->MarkGenerated();
}
return 0;
}
} // namespace flatbuffers
| 1 | 13,543 | Can you also add this to Compiler.md ? | google-flatbuffers | java |
@@ -144,8 +144,9 @@ class HttpClient {
* @param {function(!Error)} onError The function to call if the request fails.
* @param {?string=} opt_data The data to send with the request.
* @param {?RequestOptions=} opt_proxy The proxy server to use for the request.
+ * @param {!number=} retries The current number of retries.
*/
-function sendRequest(options, onOk, onError, opt_data, opt_proxy) {
+function sendRequest(options, onOk, onError, opt_data, opt_proxy, retries) {
var hostname = options.hostname;
var port = options.port;
| 1 | // Licensed to the Software Freedom Conservancy (SFC) under one
// or more contributor license agreements. See the NOTICE file
// distributed with this work for additional information
// regarding copyright ownership. The SFC licenses this file
// to you under the Apache License, Version 2.0 (the
// "License"); you may not use this file except in compliance
// with the License. You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing,
// software distributed under the License is distributed on an
// "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
// KIND, either express or implied. See the License for the
// specific language governing permissions and limitations
// under the License.
/**
* @fileoverview Defines an {@linkplain cmd.Executor command executor} that
* communicates with a remote end using HTTP + JSON.
*/
'use strict';
const http = require('http');
const https = require('https');
const url = require('url');
const httpLib = require('../lib/http');
/**
* @typedef {{protocol: (?string|undefined),
* auth: (?string|undefined),
* hostname: (?string|undefined),
* host: (?string|undefined),
* port: (?string|undefined),
* path: (?string|undefined),
* pathname: (?string|undefined)}}
*/
var RequestOptions;
/**
* @param {string} aUrl The request URL to parse.
* @return {RequestOptions} The request options.
* @throws {Error} if the URL does not include a hostname.
*/
function getRequestOptions(aUrl) {
let options = url.parse(aUrl);
if (!options.hostname) {
throw new Error('Invalid URL: ' + aUrl);
}
// Delete the search and hash portions as they are not used.
options.search = null;
options.hash = null;
options.path = options.pathname;
return options;
}
/**
* A basic HTTP client used to send messages to a remote end.
*
* @implements {httpLib.Client}
*/
class HttpClient {
/**
* @param {string} serverUrl URL for the WebDriver server to send commands to.
* @param {http.Agent=} opt_agent The agent to use for each request.
* Defaults to `http.globalAgent`.
* @param {?string=} opt_proxy The proxy to use for the connection to the
* server. Default is to use no proxy.
*/
constructor(serverUrl, opt_agent, opt_proxy) {
/** @private {http.Agent} */
this.agent_ = opt_agent || null;
/**
* Base options for each request.
* @private {RequestOptions}
*/
this.options_ = getRequestOptions(serverUrl);
/**
* @private {?RequestOptions}
*/
this.proxyOptions_ = opt_proxy ? getRequestOptions(opt_proxy) : null;
}
/** @override */
send(httpRequest) {
let data;
let headers = {};
httpRequest.headers.forEach(function(value, name) {
headers[name] = value;
});
headers['Content-Length'] = 0;
if (httpRequest.method == 'POST' || httpRequest.method == 'PUT') {
data = JSON.stringify(httpRequest.data);
headers['Content-Length'] = Buffer.byteLength(data, 'utf8');
headers['Content-Type'] = 'application/json;charset=UTF-8';
}
let path = this.options_.path;
if (path.endsWith('/') && httpRequest.path.startsWith('/')) {
path += httpRequest.path.substring(1);
} else {
path += httpRequest.path;
}
let parsedPath = url.parse(path);
let options = {
agent: this.agent_ || null,
method: httpRequest.method,
auth: this.options_.auth,
hostname: this.options_.hostname,
port: this.options_.port,
protocol: this.options_.protocol,
path: parsedPath.path,
pathname: parsedPath.pathname,
search: parsedPath.search,
hash: parsedPath.hash,
headers,
};
return new Promise((fulfill, reject) => {
sendRequest(options, fulfill, reject, data, this.proxyOptions_);
});
}
}
/**
* Sends a single HTTP request.
* @param {!Object} options The request options.
* @param {function(!httpLib.Response)} onOk The function to call if the
* request succeeds.
* @param {function(!Error)} onError The function to call if the request fails.
* @param {?string=} opt_data The data to send with the request.
* @param {?RequestOptions=} opt_proxy The proxy server to use for the request.
*/
function sendRequest(options, onOk, onError, opt_data, opt_proxy) {
var hostname = options.hostname;
var port = options.port;
if (opt_proxy) {
let proxy = /** @type {RequestOptions} */(opt_proxy);
// RFC 2616, section 5.1.2:
// The absoluteURI form is REQUIRED when the request is being made to a
// proxy.
let absoluteUri = url.format(options);
// RFC 2616, section 14.23:
// An HTTP/1.1 proxy MUST ensure that any request message it forwards does
// contain an appropriate Host header field that identifies the service
// being requested by the proxy.
let targetHost = options.hostname;
if (options.port) {
targetHost += ':' + options.port;
}
// Update the request options with our proxy info.
options.headers['Host'] = targetHost;
options.path = absoluteUri;
options.host = proxy.host;
options.hostname = proxy.hostname;
options.port = proxy.port;
if (proxy.auth) {
options.headers['Proxy-Authorization'] =
'Basic ' + new Buffer(proxy.auth).toString('base64');
}
}
let requestFn = options.protocol === 'https:' ? https.request : http.request;
var request = requestFn(options, function onResponse(response) {
if (response.statusCode == 302 || response.statusCode == 303) {
try {
var location = url.parse(response.headers['location']);
} catch (ex) {
onError(Error(
'Failed to parse "Location" header for server redirect: ' +
ex.message + '\nResponse was: \n' +
new httpLib.Response(response.statusCode, response.headers, '')));
return;
}
if (!location.hostname) {
location.hostname = hostname;
location.port = port;
}
request.abort();
sendRequest({
method: 'GET',
protocol: location.protocol || options.protocol,
hostname: location.hostname,
port: location.port,
path: location.path,
pathname: location.pathname,
search: location.search,
hash: location.hash,
headers: {
'Accept': 'application/json; charset=utf-8'
}
}, onOk, onError, undefined, opt_proxy);
return;
}
var body = [];
response.on('data', body.push.bind(body));
response.on('end', function() {
var resp = new httpLib.Response(
/** @type {number} */(response.statusCode),
/** @type {!Object<string>} */(response.headers),
body.join('').replace(/\0/g, ''));
onOk(resp);
});
});
request.on('error', function(e) {
if (e.code === 'ECONNRESET') {
setTimeout(function() {
sendRequest(options, onOk, onError, opt_data, opt_proxy);
}, 15);
} else {
var message = e.message;
if (e.code) {
message = e.code + ' ' + message;
}
onError(new Error(message));
}
});
if (opt_data) {
request.write(opt_data);
}
request.end();
}
// PUBLIC API
exports.Executor = httpLib.Executor;
exports.HttpClient = HttpClient;
exports.Request = httpLib.Request;
exports.Response = httpLib.Response;
| 1 | 14,969 | !numbers -> numbers | SeleniumHQ-selenium | java |
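For context on the one-character fix requested above: in Closure Compiler type annotations, `number` is a primitive type and primitives are non-nullable by default, so the `!` (non-null) modifier in `@param {!number=} retries` is redundant; the corrected tag reads `@param {number=} retries`.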
@@ -413,9 +413,9 @@ public class JavaSurfaceNamer extends SurfaceNamer {
* <p>All Google cloud java libraries have package names like "com.google.cloud.library.v1" and
* the respective examples have package names like
* "com.google.cloud.examples.library.v1.snippets". This method assumes {@code packageName} has
- * the format of "com.google.(.+).artifact.name". This works for both existing libraries and the
- * baseline test. We will need to adjust this if in the future there are libraries that do not
- * follow the package name format assumed here.
+ * the format of "com.google.orgname.artifact.name", where "artifact.name" could be empty. This
+ * works for both existing libraries and the baseline test. We will need to adjust this if in the
+ * future there are libraries that do not follow the package name format assumed here.
*/
@Override
public String getExamplePackageName() { | 1 | /* Copyright 2016 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer.java;
import static com.google.common.base.Preconditions.checkArgument;
import com.google.api.codegen.ReleaseLevel;
import com.google.api.codegen.config.FieldConfig;
import com.google.api.codegen.config.FieldModel;
import com.google.api.codegen.config.InterfaceConfig;
import com.google.api.codegen.config.InterfaceModel;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.config.MethodModel;
import com.google.api.codegen.config.ProtoInterfaceModel;
import com.google.api.codegen.config.ResourceNameConfig;
import com.google.api.codegen.config.ResourceNameType;
import com.google.api.codegen.config.TransportProtocol;
import com.google.api.codegen.config.TypeModel;
import com.google.api.codegen.metacode.InitFieldConfig;
import com.google.api.codegen.transformer.ImportTypeTable;
import com.google.api.codegen.transformer.MethodContext;
import com.google.api.codegen.transformer.ModelTypeFormatterImpl;
import com.google.api.codegen.transformer.ModelTypeTable;
import com.google.api.codegen.transformer.SchemaTypeFormatterImpl;
import com.google.api.codegen.transformer.SurfaceNamer;
import com.google.api.codegen.util.CommonRenderingUtil;
import com.google.api.codegen.util.Inflector;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.util.NamePath;
import com.google.api.codegen.util.StringUtil;
import com.google.api.codegen.util.java.JavaCommentReformatter;
import com.google.api.codegen.util.java.JavaNameFormatter;
import com.google.api.codegen.util.java.JavaRenderingUtil;
import com.google.api.codegen.util.java.JavaTypeTable;
import com.google.api.codegen.viewmodel.ServiceMethodType;
import com.google.common.base.Joiner;
import java.io.File;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
/** The SurfaceNamer for Java. */
public class JavaSurfaceNamer extends SurfaceNamer {
private final JavaNameFormatter nameFormatter;
public JavaSurfaceNamer(String rootPackageName, String packageName) {
super(
new JavaNameFormatter(),
new ModelTypeFormatterImpl(new JavaModelTypeNameConverter(packageName)),
new JavaTypeTable(packageName),
new JavaCommentReformatter(),
rootPackageName,
packageName);
nameFormatter = (JavaNameFormatter) super.getNameFormatter();
}
/** Create a JavaSurfaceNamer for a Discovery-based API. */
public JavaSurfaceNamer(String rootPackageName, String packageName, JavaNameFormatter formatter) {
super(
formatter,
new SchemaTypeFormatterImpl(new JavaSchemaTypeNameConverter(packageName, formatter)),
new JavaTypeTable(packageName),
new JavaCommentReformatter(),
rootPackageName,
packageName);
nameFormatter = formatter;
}
@Override
public SurfaceNamer cloneWithPackageName(String packageName) {
return new JavaSurfaceNamer(getRootPackageName(), packageName);
}
@Override
public SurfaceNamer cloneWithPackageNameForDiscovery(String packageName) {
return new JavaSurfaceNamer(getRootPackageName(), packageName, getNameFormatter());
}
@Override
public JavaNameFormatter getNameFormatter() {
return nameFormatter;
}
@Override
public String getApiSnippetsClassName(InterfaceConfig interfaceConfig) {
return publicClassName(
Name.upperCamel(interfaceConfig.getInterfaceModel().getSimpleName(), "ClientSnippets"));
}
@Override
public String getApiSampleFileName(String className) {
return className + ".java";
}
@Override
public String getSourceFilePath(String path, String publicClassName) {
return path + File.separator + publicClassName + ".java";
}
@Override
public boolean shouldImportRequestObjectParamElementType(FieldModel field) {
return !field.isMap();
}
@Override
public List<String> getDocLines(String text) {
return JavaRenderingUtil.getDocLines(text);
}
@Override
public List<String> getThrowsDocLines(MethodConfig methodConfig) {
return Arrays.asList("@throws com.google.api.gax.rpc.ApiException if the remote call fails");
}
@Override
public String getStaticLangReturnTypeName(MethodContext methodContext) {
MethodModel method = methodContext.getMethodModel();
if (method.isOutputTypeEmpty()) {
return "void";
}
return method.getOutputTypeName(methodContext.getTypeTable()).getFullName();
}
@Override
public String getAndSaveOperationResponseTypeName(
MethodModel method, ImportTypeTable typeTable, MethodConfig methodConfig) {
String responseTypeName =
typeTable.getFullNameFor(methodConfig.getLongRunningConfig().getReturnType());
String metadataTypeName =
typeTable.getFullNameFor(methodConfig.getLongRunningConfig().getMetadataType());
return typeTable.getAndSaveNicknameForContainer(
"com.google.api.gax.grpc.OperationFuture", responseTypeName, metadataTypeName);
}
@Override
public String getLongRunningOperationTypeName(ImportTypeTable typeTable, TypeModel type) {
return ((ModelTypeTable) typeTable).getAndSaveNicknameForElementType(type);
}
@Override
public String getGenericAwareResponseTypeName(MethodContext methodContext) {
MethodModel method = methodContext.getMethodModel();
if (method.isOutputTypeEmpty()) {
return "Void";
} else {
return method.getOutputTypeName(methodContext.getTypeTable()).getFullName();
}
}
@Override
public String getPagedResponseIterateMethod() {
return publicMethodName(Name.from("iterate_all"));
}
@Override
public String getResourceTypeParseMethodName(
ImportTypeTable typeTable, FieldConfig resourceFieldConfig) {
String concreteResourceTypeName = getConcreteResourceTypeName(typeTable, resourceFieldConfig);
return concreteResourceTypeName + "." + publicMethodName(Name.from("parse"));
}
@Override
public String getResourceTypeParseListMethodName(
ImportTypeTable typeTable, FieldConfig resourceFieldConfig) {
String concreteResourceTypeName = getConcreteResourceTypeName(typeTable, resourceFieldConfig);
return concreteResourceTypeName + "." + publicMethodName(Name.from("parse_list"));
}
@Override
public String getResourceTypeFormatListMethodName(
ImportTypeTable typeTable, FieldConfig resourceFieldConfig) {
String concreteResourceTypeName = getConcreteResourceTypeName(typeTable, resourceFieldConfig);
return concreteResourceTypeName + "." + publicMethodName(Name.from("to_string_list"));
}
private String getConcreteResourceTypeName(
ImportTypeTable typeTable, FieldConfig resourceFieldConfig) {
String resourceTypeName = getAndSaveElementResourceTypeName(typeTable, resourceFieldConfig);
if (resourceFieldConfig.getResourceNameType() == ResourceNameType.ANY) {
return publicClassName(Name.from("untyped_resource_name"));
} else {
return resourceTypeName;
}
}
/** The name of the create method for the resource one-of for the given field config */
public String getResourceTypeParentParseMethod(
ImportTypeTable typeTable, FieldConfig fieldConfig) {
return getAndSaveResourceTypeFactoryName(typeTable, fieldConfig.getMessageFieldConfig())
+ "."
+ publicMethodName(Name.from("parse"));
}
private String getAndSaveResourceTypeFactoryName(
ImportTypeTable typeTable, FieldConfig fieldConfig) {
String resourceClassName =
publicClassName(getResourceTypeNameObject(fieldConfig.getResourceNameConfig()));
return typeTable.getAndSaveNicknameForTypedResourceName(
fieldConfig, Inflector.pluralize(resourceClassName));
}
@Override
public String getAndSavePagedResponseTypeName(
MethodContext methodContext, FieldConfig resourceFieldConfig) {
// TODO(michaelbausor) make sure this uses the typeTable correctly
ImportTypeTable typeTable = methodContext.getTypeTable();
String fullPackageWrapperName =
typeTable.getImplicitPackageFullNameFor(
getApiWrapperClassName(methodContext.getInterfaceConfig()));
String pagedResponseShortName =
getPagedResponseTypeInnerName(
methodContext.getMethodModel(), typeTable, resourceFieldConfig.getField());
return typeTable.getAndSaveNicknameForInnerType(fullPackageWrapperName, pagedResponseShortName);
}
@Override
public String getPagedResponseTypeInnerName(
MethodModel method, ImportTypeTable typeTable, FieldModel resourceField) {
return publicClassName(Name.anyCamel(method.getSimpleName(), "PagedResponse"));
}
@Override
public String getPageTypeInnerName(
MethodModel method, ImportTypeTable typeTable, FieldModel resourceField) {
return publicClassName(Name.anyCamel(method.getSimpleName(), "Page"));
}
@Override
public String getFixedSizeCollectionTypeInnerName(
MethodModel method, ImportTypeTable typeTable, FieldModel resourceField) {
return publicClassName(Name.anyCamel(method.getSimpleName(), "FixedSizeCollection"));
}
@Override
public String getFullyQualifiedApiWrapperClassName(InterfaceConfig interfaceConfig) {
return getPackageName() + "." + getApiWrapperClassName(interfaceConfig);
}
@Override
public String getFullyQualifiedRpcStubType(
InterfaceModel interfaceModel, TransportProtocol transportProtocol) {
return getStubPackageName() + "." + getApiRpcStubClassName(interfaceModel, transportProtocol);
}
/**
* The type name of the Grpc service class. This needs to match what Grpc generates for the
* particular language.
*/
@Override
public String getGrpcServiceClassName(InterfaceModel apiInterface) {
String fullName =
JavaModelTypeNameConverter.getGrpcTypeName(
((ProtoInterfaceModel) apiInterface).getInterface())
.getFullName();
NamePath namePath = getTypeNameConverter().getNamePath(fullName);
String grpcContainerName =
publicClassName(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Grpc"));
String serviceClassName =
publicClassName(Name.upperCamelKeepUpperAcronyms(apiInterface.getSimpleName(), "ImplBase"));
return qualifiedName(namePath.withHead(grpcContainerName).append(serviceClassName));
}
/**
* The type name of the Grpc container class. This needs to match what Grpc generates for the
* particular language.
*/
public String getGrpcContainerTypeName(InterfaceModel apiInterface) {
String fullName =
JavaModelTypeNameConverter.getGrpcTypeName(
((ProtoInterfaceModel) apiInterface).getInterface())
.getFullName();
NamePath namePath = getTypeNameConverter().getNamePath(fullName);
String publicClassName =
publicClassName(Name.upperCamelKeepUpperAcronyms(namePath.getHead(), "Grpc"));
return qualifiedName(namePath.withHead(publicClassName));
}
@Override
protected Name getResourceTypeNameObject(ResourceNameConfig resourceNameConfig) {
String entityName = resourceNameConfig.getEntityName();
ResourceNameType resourceNameType = resourceNameConfig.getResourceNameType();
switch (resourceNameType) {
case ANY:
return getAnyResourceTypeName();
case FIXED:
return Name.anyLower(entityName).join("name_fixed");
case ONEOF:
// Remove suffix "_oneof". This allows the collection oneof config to "share" an entity name
// with a collection config.
entityName = StringUtil.removeSuffix(entityName, "_oneof");
return Name.anyLower(entityName).join("name");
case SINGLE:
return Name.anyLower(entityName).join("name");
case NONE:
default:
throw new UnsupportedOperationException("unexpected entity name type");
}
}
@Override
public String injectRandomStringGeneratorCode(String randomString) {
String delimiter = ",";
String[] split =
CommonRenderingUtil.stripQuotes(randomString)
.replace(
InitFieldConfig.RANDOM_TOKEN, delimiter + InitFieldConfig.RANDOM_TOKEN + delimiter)
.split(delimiter);
ArrayList<String> stringParts = new ArrayList<>();
for (String token : split) {
if (token.length() > 0) {
if (token.equals(InitFieldConfig.RANDOM_TOKEN)) {
stringParts.add("System.currentTimeMillis()");
} else {
stringParts.add("\"" + token + "\"");
}
}
}
return Joiner.on(" + ").join(stringParts);
}
@Override
public String getApiCallableTypeName(ServiceMethodType serviceMethodType) {
switch (serviceMethodType) {
case UnaryMethod:
return "UnaryCallable";
case GrpcBidiStreamingMethod:
return "BidiStreamingCallable";
case GrpcServerStreamingMethod:
return "ServerStreamingCallable";
case GrpcClientStreamingMethod:
return "ClientStreamingCallable";
case LongRunningMethod:
return "OperationCallable";
default:
return getNotImplementedString("getApiCallableTypeName() for " + serviceMethodType);
}
}
@Override
public String getReleaseAnnotation(ReleaseLevel releaseLevel) {
switch (releaseLevel) {
case UNSET_RELEASE_LEVEL:
case ALPHA:
return "@BetaApi";
case BETA:
return "@BetaApi";
case DEPRECATED:
return "@Deprecated";
default:
return "";
}
}
@Override
public String getBatchingDescriptorConstName(MethodModel method) {
return inittedConstantName(Name.upperCamel(method.getSimpleName()).join("batching_desc"));
}
@Override
/** The name of the settings member for the given method. */
public String getOperationSettingsMemberName(MethodModel method) {
return publicMethodName(Name.upperCamel(method.getSimpleName(), "OperationSettings"));
}
@Override
public String getAndSaveResourceTypeName(ImportTypeTable typeTable, FieldConfig fieldConfig) {
String commonResourceName = fieldConfig.getResourceNameConfig().getCommonResourceName();
String resourceClassName;
if (commonResourceName == null) {
resourceClassName =
publicClassName(getResourceTypeNameObject(fieldConfig.getResourceNameConfig()));
} else {
// Common resource name is fully-qualified.
resourceClassName = commonResourceName.substring(commonResourceName.lastIndexOf(".") + 1);
}
return typeTable.getAndSaveNicknameForTypedResourceName(fieldConfig, resourceClassName);
}
@Override
public String getToStringMethod() {
return "Objects.toString";
}
@Override
public String getPrintSpec(String spec) {
return spec;
}
@Override
public String getAndSaveTypeName(ImportTypeTable typeTable, TypeModel type) {
return typeTable.getAndSaveNicknameForElementType(type);
}
/**
* Returns the package name of standalone samples.
*
* <p>All Google cloud java libraries have package names like "com.google.cloud.library.v1" and
* the respective examples have package names like
* "com.google.cloud.examples.library.v1.snippets". This method assumes {@code packageName} has
* the format of "com.google.(.+).artifact.name". This works for both existing libraries and the
* baseline test. We will need to adjust this if in the future there are libraries that do not
* follow the package name format assumed here.
*/
@Override
public String getExamplePackageName() {
String packageName = getPackageName();
checkArgument(
packageName.startsWith("com.google."),
"We currently only support packages beginning with 'com.google'");
packageName = packageName.replaceFirst("com\\.google\\.", "");
String simpleOrgName = packageName.substring(0, packageName.indexOf('.'));
return "com.google."
+ simpleOrgName
+ ".examples"
+ packageName.replaceFirst(simpleOrgName, "")
+ ".snippets";
}
}
| 1 | 27,502 | This description doesn't seem quite right for the longrunning case - `longrunning` isn't an orgname. | googleapis-gapic-generator | java |
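To make the objection concrete, here is the transformation performed by `getExamplePackageName()` (shown in full above), traced step by step; the class is a standalone sketch, and the `com.google.longrunning.v2` input is a hypothetical illustration, not taken from the dataset:

```java
public class ExamplePackageTrace {
  public static void main(String[] args) {
    // Same steps as getExamplePackageName() in JavaSurfaceNamer above.
    String packageName = "com.google.cloud.library.v1";
    String rest = packageName.replaceFirst("com\\.google\\.", ""); // "cloud.library.v1"
    String org = rest.substring(0, rest.indexOf('.'));             // "cloud"
    String examples = "com.google." + org + ".examples"
        + rest.replaceFirst(org, "") + ".snippets";
    System.out.println(examples); // com.google.cloud.examples.library.v1.snippets

    // For a hypothetical "com.google.longrunning.v2" the same steps yield
    // "com.google.longrunning.examples.v2.snippets": "longrunning" lands in the
    // org-name position, which is exactly the reviewer's objection.
  }
}
```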
@@ -93,4 +93,11 @@ public interface LeafCollector {
*/
void collect(int doc) throws IOException;
+ /**
* Optionally returns an iterator over competitive documents.
*/
+ default DocIdSetIterator iterator() {
+ return null;
+ }
+
} | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.lucene.search;
import java.io.IOException;
/**
* <p>Collector decouples the score from the collected doc:
* the score computation is skipped entirely if it's not
* needed. Collectors that do need the score should
* implement the {@link #setScorer} method, to hold onto the
* passed {@link Scorer} instance, and call {@link
* Scorer#score()} within the collect method to compute the
* current hit's score. If your collector may request the
* score for a single hit multiple times, you should use
* {@link ScoreCachingWrappingScorer}. </p>
*
* <p><b>NOTE:</b> The doc that is passed to the collect
* method is relative to the current reader. If your
* collector needs to resolve this to the docID space of the
* Multi*Reader, you must re-base it by recording the
* docBase from the most recent setNextReader call. Here's
* a simple example showing how to collect docIDs into a
* BitSet:</p>
*
* <pre class="prettyprint">
* IndexSearcher searcher = new IndexSearcher(indexReader);
* final BitSet bits = new BitSet(indexReader.maxDoc());
* searcher.search(query, new Collector() {
*
* public LeafCollector getLeafCollector(LeafReaderContext context)
* throws IOException {
* final int docBase = context.docBase;
* return new LeafCollector() {
*
* <em>// ignore scorer</em>
* public void setScorer(Scorer scorer) throws IOException {
* }
*
* public void collect(int doc) throws IOException {
* bits.set(docBase + doc);
* }
*
* };
* }
*
* });
* </pre>
*
* <p>Not all collectors will need to rebase the docID. For
* example, a collector that simply counts the total number
* of hits would skip it.</p>
*
* @lucene.experimental
*/
public interface LeafCollector {
/**
* Called before successive calls to {@link #collect(int)}. Implementations
* that need the score of the current document (passed-in to
* {@link #collect(int)}), should save the passed-in Scorer and call
* scorer.score() when needed.
*/
void setScorer(Scorable scorer) throws IOException;
/**
* Called once for every document matching a query, with the unbased document
* number.
* <p>Note: The collection of the current segment can be terminated by throwing
* a {@link CollectionTerminatedException}. In this case, the last docs of the
* current {@link org.apache.lucene.index.LeafReaderContext} will be skipped and {@link IndexSearcher}
* will swallow the exception and continue collection with the next leaf.
* <p>
* Note: This is called in an inner search loop. For good search performance,
* implementations of this method should not call {@link IndexSearcher#doc(int)} or
* {@link org.apache.lucene.index.IndexReader#document(int)} on every hit.
* Doing so can slow searches by an order of magnitude or more.
*/
void collect(int doc) throws IOException;
}
| 1 | 33,181 | maybe give it a more descriptive name, e.g. `competitiveFilter` | apache-lucene-solr | java |
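A sketch of how the reviewer's naming suggestion could read on the interface — the method name is the reviewer's, while the Javadoc wording and the null contract are assumptions drawn from the patch's default body:

```java
/**
 * Optionally returns an iterator over the documents this collector still
 * considers competitive, or {@code null} if all documents remain competitive.
 */
default DocIdSetIterator competitiveFilter() {
  return null; // matches the patch's default: no filtering
}
```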
@@ -106,7 +106,7 @@ public class JWTVerificationkeyResolver implements VerificationKeyResolver {
}
}
- // Add all keys into a master list
+ // Add all keys into a primary list
if (issuerConfig.usesHttpsJwk()) {
keysSource = "[" + String.join(", ", issuerConfig.getJwksUrls()) + "]";
for (HttpsJwks hjwks : issuerConfig.getHttpsJwks()) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.security;
import java.io.IOException;
import java.lang.invoke.MethodHandles;
import java.security.Key;
import java.util.ArrayList;
import java.util.Collection;
import java.util.HashMap;
import java.util.HashSet;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.solr.common.SolrException;
import org.jose4j.jwk.HttpsJwks;
import org.jose4j.jwk.JsonWebKey;
import org.jose4j.jwk.VerificationJwkSelector;
import org.jose4j.jws.JsonWebSignature;
import org.jose4j.jwt.JwtClaims;
import org.jose4j.jwt.MalformedClaimException;
import org.jose4j.jwt.consumer.InvalidJwtException;
import org.jose4j.jwx.JsonWebStructure;
import org.jose4j.keys.resolvers.VerificationKeyResolver;
import org.jose4j.lang.JoseException;
import org.jose4j.lang.UnresolvableKeyException;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
* Resolves JWS signature verification keys from a set of {@link JWTIssuerConfig} objects, which
* may represent any valid configuration in Solr's security.json, i.e. a static list of JWKs
* or keys retrieved from HTTPS JWK endpoints.
*
* This implementation maintains a map of issuers, each with its own list of {@link JsonWebKey},
* and resolves the correct key from the correct issuer, similar to HttpsJwksVerificationKeyResolver.
* If the issuer claim is not required, we select the first IssuerConfig when there is exactly one such config.
*
* If a key is not found and the issuer is backed by HttpsJwks, we attempt one cache refresh before failing.
*/
public class JWTVerificationkeyResolver implements VerificationKeyResolver {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private VerificationJwkSelector verificationJwkSelector = new VerificationJwkSelector();
private Map<String, JWTIssuerConfig> issuerConfigs = new HashMap<>();
private final boolean requireIssuer;
/**
* Resolves key from a JWKs from one or more IssuerConfigs
* @param issuerConfigs Collection of configuration objects for the issuer(s)
* @param requireIssuer if true, will require 'iss' claim on jws
*/
public JWTVerificationkeyResolver(Collection<JWTIssuerConfig> issuerConfigs, boolean requireIssuer) {
this.requireIssuer = requireIssuer;
issuerConfigs.forEach(ic -> {
this.issuerConfigs.put(ic.getIss(), ic);
});
}
@Override
public Key resolveKey(JsonWebSignature jws, List<JsonWebStructure> nestingContext) throws UnresolvableKeyException {
JsonWebKey theChosenOne;
List<JsonWebKey> jsonWebKeys = new ArrayList<>();
String keysSource = "N/A";
try {
String tokenIssuer = JwtClaims.parse(jws.getUnverifiedPayload()).getIssuer();
JWTIssuerConfig issuerConfig;
if (tokenIssuer == null) {
if (requireIssuer) {
throw new UnresolvableKeyException("Token does not contain required issuer claim");
} else if (issuerConfigs.size() == 1) {
issuerConfig = issuerConfigs.values().iterator().next();
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Signature verification not supported for multiple issuers without 'iss' claim in token.");
}
} else {
issuerConfig = issuerConfigs.get(tokenIssuer);
if (issuerConfig == null) {
if (issuerConfigs.size() > 1) {
throw new UnresolvableKeyException("No issuers configured for iss='" + tokenIssuer + "', cannot validate signature");
} else if (issuerConfigs.size() == 1) {
issuerConfig = issuerConfigs.values().iterator().next();
log.debug("No issuer matching token's iss claim, but exactly one configured, selecting that one");
} else {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"Signature verification failed due to no configured issuer with id " + tokenIssuer);
}
}
}
// Add all keys into a master list
if (issuerConfig.usesHttpsJwk()) {
keysSource = "[" + String.join(", ", issuerConfig.getJwksUrls()) + "]";
for (HttpsJwks hjwks : issuerConfig.getHttpsJwks()) {
jsonWebKeys.addAll(hjwks.getJsonWebKeys());
}
} else {
keysSource = "static list of keys in security.json";
jsonWebKeys.addAll(issuerConfig.getJsonWebKeySet().getJsonWebKeys());
}
theChosenOne = verificationJwkSelector.select(jws, jsonWebKeys);
if (theChosenOne == null && issuerConfig.usesHttpsJwk()) {
if (log.isDebugEnabled()) {
log.debug("Refreshing JWKs from all {} locations, as no suitable verification key for JWS w/ header {} was found in {}",
issuerConfig.getHttpsJwks().size(), jws.getHeaders().getFullHeaderAsJsonString(), jsonWebKeys);
}
jsonWebKeys.clear();
for (HttpsJwks hjwks : issuerConfig.getHttpsJwks()) {
hjwks.refresh();
jsonWebKeys.addAll(hjwks.getJsonWebKeys());
}
theChosenOne = verificationJwkSelector.select(jws, jsonWebKeys);
}
} catch (JoseException | IOException | InvalidJwtException | MalformedClaimException e) {
StringBuilder sb = new StringBuilder();
sb.append("Unable to find a suitable verification key for JWS w/ header ").append(jws.getHeaders().getFullHeaderAsJsonString());
sb.append(" due to an unexpected exception (").append(e).append(") while obtaining or using keys from source ");
sb.append(keysSource);
throw new UnresolvableKeyException(sb.toString(), e);
}
if (theChosenOne == null) {
StringBuilder sb = new StringBuilder();
sb.append("Unable to find a suitable verification key for JWS w/ header ").append(jws.getHeaders().getFullHeaderAsJsonString());
sb.append(" from ").append(jsonWebKeys.size()).append(" keys from source ").append(keysSource);
throw new UnresolvableKeyException(sb.toString());
}
return theChosenOne.getKey();
}
Set<JWTIssuerConfig> getIssuerConfigs() {
return new HashSet<>(issuerConfigs.values());
}
}
| 1 | 35,962 | This occurrence of "master" is unrelated to master/slave replication. Maybe simply remove the word "master" or replace it with "reference" but "primary" doesn't really make sense. | apache-lucene-solr | java |
@@ -15,6 +15,7 @@ import (
"github.com/filecoin-project/go-filecoin/actor/builtin/miner"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/exec"
+ "github.com/filecoin-project/go-filecoin/proofs"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/vm/errors"
) | 1 | package storagemarket
import (
"context"
"fmt"
"math/big"
"github.com/ipfs/go-cid"
"github.com/ipfs/go-hamt-ipld"
cbor "github.com/ipfs/go-ipld-cbor"
"github.com/libp2p/go-libp2p-peer"
"github.com/filecoin-project/go-filecoin/abi"
"github.com/filecoin-project/go-filecoin/actor"
"github.com/filecoin-project/go-filecoin/actor/builtin/miner"
"github.com/filecoin-project/go-filecoin/address"
"github.com/filecoin-project/go-filecoin/exec"
"github.com/filecoin-project/go-filecoin/types"
"github.com/filecoin-project/go-filecoin/vm/errors"
)
// MinimumPledge is the minimum number of sectors a user can pledge.
var MinimumPledge = big.NewInt(10)
// MinimumCollateralPerSector is the minimum amount of collateral required per sector
var MinimumCollateralPerSector, _ = types.NewAttoFILFromFILString("0.001")
const (
// ErrPledgeTooLow is the error code for a pledge under the MinimumPledge.
ErrPledgeTooLow = 33
// ErrUnknownMiner indicates the miner is not known to the storage market.
ErrUnknownMiner = 34
// ErrInsufficientCollateral indicates the collateral is too low.
ErrInsufficientCollateral = 43
)
// Errors map error codes to revert errors this actor may return.
var Errors = map[uint8]error{
ErrPledgeTooLow: errors.NewCodedRevertErrorf(ErrPledgeTooLow, "pledge must be at least %s sectors", MinimumPledge),
ErrUnknownMiner: errors.NewCodedRevertErrorf(ErrUnknownMiner, "unknown miner"),
ErrInsufficientCollateral: errors.NewCodedRevertErrorf(ErrInsufficientCollateral, "collateral must be more than %s FIL per sector", MinimumCollateralPerSector),
}
func init() {
cbor.RegisterCborType(State{})
cbor.RegisterCborType(struct{}{})
}
// Actor implements the filecoin storage market. It is responsible
// for starting up new miners, and keeping track of the total storage power in the network.
type Actor struct{}
// State is the storage market's storage.
type State struct {
Miners cid.Cid `refmt:",omitempty"`
// TotalCommittedStorage is the number of sectors that are currently committed
// in the whole network.
TotalCommittedStorage *big.Int
}
// NewActor returns a new storage market actor.
func NewActor() (*actor.Actor, error) {
return actor.NewActor(types.StorageMarketActorCodeCid, types.NewZeroAttoFIL()), nil
}
// InitializeState stores the actor's initial data structure.
func (sma *Actor) InitializeState(storage exec.Storage, _ interface{}) error {
initStorage := &State{
TotalCommittedStorage: big.NewInt(0),
}
stateBytes, err := cbor.DumpObject(initStorage)
if err != nil {
return err
}
id, err := storage.Put(stateBytes)
if err != nil {
return err
}
return storage.Commit(id, cid.Undef)
}
var _ exec.ExecutableActor = (*Actor)(nil)
// Exports returns the actors exports.
func (sma *Actor) Exports() exec.Exports {
return storageMarketExports
}
var storageMarketExports = exec.Exports{
"createMiner": &exec.FunctionSignature{
Params: []abi.Type{abi.Integer, abi.Bytes, abi.PeerID},
Return: []abi.Type{abi.Address},
},
"updatePower": &exec.FunctionSignature{
Params: []abi.Type{abi.Integer},
Return: nil,
},
"getTotalStorage": &exec.FunctionSignature{
Params: []abi.Type{},
Return: []abi.Type{abi.Integer},
},
}
// CreateMiner creates a new miner with a pledge of the given number of sectors. The
// miner's collateral is set by the value in the message.
func (sma *Actor) CreateMiner(vmctx exec.VMContext, pledge *big.Int, publicKey []byte, pid peer.ID) (address.Address, uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return address.Undef, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
var state State
ret, err := actor.WithState(vmctx, &state, func() (interface{}, error) {
if pledge.Cmp(MinimumPledge) < 0 {
// TODO This should probably return a non-zero exit code instead of an error.
return nil, Errors[ErrPledgeTooLow]
}
addr, err := vmctx.AddressForNewActor()
if err != nil {
return nil, errors.FaultErrorWrap(err, "could not get address for new actor")
}
if vmctx.Message().Value.LessThan(MinimumCollateral(pledge)) {
return nil, Errors[ErrInsufficientCollateral]
}
minerInitializationParams := miner.NewState(vmctx.Message().From, publicKey, pledge, pid, vmctx.Message().Value)
actorCodeCid := types.MinerActorCodeCid
if vmctx.BlockHeight().Equal(types.NewBlockHeight(0)) {
actorCodeCid = types.BootstrapMinerActorCodeCid
}
if err := vmctx.CreateNewActor(addr, actorCodeCid, minerInitializationParams); err != nil {
return nil, err
}
_, _, err = vmctx.Send(addr, "", vmctx.Message().Value, nil)
if err != nil {
return nil, err
}
ctx := context.Background()
state.Miners, err = actor.SetKeyValue(ctx, vmctx.Storage(), state.Miners, addr.String(), true)
if err != nil {
return nil, errors.FaultErrorWrapf(err, "could not set miner key value for lookup with CID: %s", state.Miners)
}
return addr, nil
})
if err != nil {
return address.Undef, errors.CodeError(err), err
}
return ret.(address.Address), 0, nil
}
// UpdatePower is called to reflect a change in the overall power of the network.
// This occurs either when a miner adds a new commitment, or when one is removed
// (via slashing or willful removal). The delta is in number of sectors.
func (sma *Actor) UpdatePower(vmctx exec.VMContext, delta *big.Int) (uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
var state State
_, err := actor.WithState(vmctx, &state, func() (interface{}, error) {
miner := vmctx.Message().From
ctx := context.Background()
miners, err := actor.LoadLookup(ctx, vmctx.Storage(), state.Miners)
if err != nil {
return nil, errors.FaultErrorWrapf(err, "could not load lookup for miner with CID: %s", state.Miners)
}
_, err = miners.Find(ctx, miner.String())
if err != nil {
if err == hamt.ErrNotFound {
return nil, Errors[ErrUnknownMiner]
}
return nil, errors.FaultErrorWrapf(err, "could not load lookup for miner with address: %s", miner)
}
state.TotalCommittedStorage = state.TotalCommittedStorage.Add(state.TotalCommittedStorage, delta)
return nil, nil
})
if err != nil {
return errors.CodeError(err), err
}
return 0, nil
}
// GetTotalStorage returns the total amount of proven storage in the system.
func (sma *Actor) GetTotalStorage(vmctx exec.VMContext) (*big.Int, uint8, error) {
if err := vmctx.Charge(actor.DefaultGasCost); err != nil {
return nil, exec.ErrInsufficientGas, errors.RevertErrorWrap(err, "Insufficient gas")
}
var state State
ret, err := actor.WithState(vmctx, &state, func() (interface{}, error) {
return state.TotalCommittedStorage, nil
})
if err != nil {
return nil, errors.CodeError(err), err
}
count, ok := ret.(*big.Int)
if !ok {
return nil, 1, fmt.Errorf("expected *big.Int to be returned, but got %T instead", ret)
}
return count, 0, nil
}
// MinimumCollateral returns the minimum required amount of collateral for a given pledge
func MinimumCollateral(sectors *big.Int) *types.AttoFIL {
return MinimumCollateralPerSector.MulBigInt(sectors)
}
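
The review note below argues for a plain boolean in place of a dedicated proofs mode type, to keep the ABI type surface small. A minimal standalone sketch of that alternative follows; every identifier in it is an illustrative assumption, not taken from the project.

package main

import "fmt"

// Option A (what the review pushes back on): a dedicated mode type that
// would have to be exported through the ABI.
type ProofsMode int

const (
	LiveProofsMode ProofsMode = iota
	TestProofsMode
)

// Option B (the review's suggestion): a plain boolean parameter selects
// the proving code path, with no extra exported type.
func pickActorCode(useTestProofs bool) string {
	if useTestProofs {
		return "BootstrapMinerActorCodeCid"
	}
	return "MinerActorCodeCid"
}

func main() {
	fmt.Println(pickActorCode(true)) // prints BootstrapMinerActorCodeCid
}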
| 1 | 18,395 | BLOCKING: This is a problem. We shouldn't be introducing new dependencies on proofs into actors. The miner has some dependencies that should be removed (#2555). This could be accomplished either by moving `proofs.Mode` to `types.ProofsMode` or by forgoing a new type altogether and replacing it with a boolean. The later would have the additional advantage of reducing the number of types we have to export through ABI. I get why `proofs.Mode` has value as a type over a boolean, but I'm not sure it's worth it here. | filecoin-project-venus | go |
@@ -74,7 +74,9 @@ func verifyReceiveMessage(r *request.Request) {
for _, msg := range out.Messages {
err := checksumsMatch(msg.Body, msg.MD5OfBody)
if err != nil {
- ids = append(ids, *msg.MessageId)
+ if msg.MessageId != nil {
+ ids = append(ids, *msg.MessageId)
+ }
}
}
if len(ids) > 0 { | 1 | package sqs
import (
"crypto/md5"
"encoding/hex"
"fmt"
"strings"
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/aws/awserr"
"github.com/aws/aws-sdk-go/aws/request"
)
var (
errChecksumMissingBody = fmt.Errorf("cannot compute checksum. missing body")
errChecksumMissingMD5 = fmt.Errorf("cannot verify checksum. missing response MD5")
)
func setupChecksumValidation(r *request.Request) {
if aws.BoolValue(r.Config.DisableComputeChecksums) {
return
}
switch r.Operation.Name {
case opSendMessage:
r.Handlers.Unmarshal.PushBack(verifySendMessage)
case opSendMessageBatch:
r.Handlers.Unmarshal.PushBack(verifySendMessageBatch)
case opReceiveMessage:
r.Handlers.Unmarshal.PushBack(verifyReceiveMessage)
}
}
func verifySendMessage(r *request.Request) {
if r.DataFilled() && r.ParamsFilled() {
in := r.Params.(*SendMessageInput)
out := r.Data.(*SendMessageOutput)
err := checksumsMatch(in.MessageBody, out.MD5OfMessageBody)
if err != nil {
setChecksumError(r, err.Error())
}
}
}
func verifySendMessageBatch(r *request.Request) {
if r.DataFilled() && r.ParamsFilled() {
entries := map[string]*SendMessageBatchResultEntry{}
ids := []string{}
out := r.Data.(*SendMessageBatchOutput)
for _, entry := range out.Successful {
entries[*entry.Id] = entry
}
in := r.Params.(*SendMessageBatchInput)
for _, entry := range in.Entries {
if e := entries[*entry.Id]; e != nil {
err := checksumsMatch(entry.MessageBody, e.MD5OfMessageBody)
if err != nil {
ids = append(ids, *e.MessageId)
}
}
}
if len(ids) > 0 {
setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", "))
}
}
}
func verifyReceiveMessage(r *request.Request) {
if r.DataFilled() && r.ParamsFilled() {
ids := []string{}
out := r.Data.(*ReceiveMessageOutput)
for _, msg := range out.Messages {
err := checksumsMatch(msg.Body, msg.MD5OfBody)
if err != nil {
ids = append(ids, *msg.MessageId)
}
}
if len(ids) > 0 {
setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", "))
}
}
}
func checksumsMatch(body, expectedMD5 *string) error {
if body == nil {
return errChecksumMissingBody
} else if expectedMD5 == nil {
return errChecksumMissingMD5
}
msum := md5.Sum([]byte(*body))
sum := hex.EncodeToString(msum[:])
if sum != *expectedMD5 {
return fmt.Errorf("expected MD5 checksum '%s', got '%s'", *expectedMD5, sum)
}
return nil
}
func setChecksumError(r *request.Request, format string, args ...interface{}) {
r.Retryable = aws.Bool(true)
r.Error = awserr.New("InvalidChecksum", fmt.Sprintf(format, args...), nil)
}
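
The review note below asks for the missing-MessageId case to be logged, with the request ID included for context. A hedged sketch of such a variant, assuming the same package scope as the file above; the function name is hypothetical, and the optional aws.Logger on the request's Config is the assumed logging hook.

// verifyReceiveMessageLogged is a hypothetical variant of verifyReceiveMessage
// that skips messages without a MessageId instead of dereferencing nil, and
// logs the anomaly together with the request ID.
func verifyReceiveMessageLogged(r *request.Request) {
	if r.DataFilled() && r.ParamsFilled() {
		ids := []string{}
		out := r.Data.(*ReceiveMessageOutput)
		for _, msg := range out.Messages {
			if err := checksumsMatch(msg.Body, msg.MD5OfBody); err != nil {
				if msg.MessageId == nil {
					// Assumption: the optional logger configured on the request.
					if r.Config.Logger != nil {
						r.Config.Logger.Log(fmt.Sprintf(
							"sqs: response message missing MessageId (request id: %s)",
							r.RequestID))
					}
					continue
				}
				ids = append(ids, *msg.MessageId)
			}
		}
		if len(ids) > 0 {
			setChecksumError(r, "invalid messages: %s", strings.Join(ids, ", "))
		}
	}
}

This keeps the nil guard from the patch while making dropped messages visible in the SDK's logs.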
| 1 | 8,000 | We probably want to log the fact that a `MessageId` was not present in the response. In addition the `RequestID` from `request.Request` should be included in the message so that the user is aware of the issue. | aws-aws-sdk-go | go |
@@ -38,10 +38,11 @@ apiFetchMock.mockImplementation( ( ...args ) => {
const setupRegistry = ( { dispatch } ) => {
const { id, webPropertyId, accountId } = fixtures.propertiesProfiles.profiles[ 0 ];
+ const propertyID = webPropertyId;
dispatch( STORE_NAME ).setAccountID( accountId );
dispatch( STORE_NAME ).setPropertyID( webPropertyId );
dispatch( STORE_NAME ).setProfileID( id );
- dispatch( STORE_NAME ).receiveProfiles( fixtures.propertiesProfiles.profiles );
+ dispatch( STORE_NAME ).receiveProfiles( fixtures.propertiesProfiles.profiles, { propertyID } );
};
const setupRegistryWithExistingTag = ( { dispatch } ) => { | 1 | /**
* Profile Select component tests.
*
* Site Kit by Google, Copyright 2020 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* WordPress dependencies
*/
import apiFetchMock from '@wordpress/api-fetch';
/**
* Internal dependencies
*/
import ProfileSelect from './profile-select';
import { STORE_NAME, PROFILE_CREATE } from '../datastore/constants';
import * as fixtures from '../datastore/__fixtures__';
import { fireEvent, render, act } from '../../../../../tests/js/test-utils';
// Mock apiFetch so we know if it's called.
jest.mock( '@wordpress/api-fetch' );
apiFetchMock.mockImplementation( ( ...args ) => {
// eslint-disable-next-line no-console
console.warn( 'apiFetch', ...args );
} );
const setupRegistry = ( { dispatch } ) => {
const { id, webPropertyId, accountId } = fixtures.propertiesProfiles.profiles[ 0 ];
dispatch( STORE_NAME ).setAccountID( accountId );
dispatch( STORE_NAME ).setPropertyID( webPropertyId );
dispatch( STORE_NAME ).setProfileID( id );
dispatch( STORE_NAME ).receiveProfiles( fixtures.propertiesProfiles.profiles );
};
const setupRegistryWithExistingTag = ( { dispatch } ) => {
const existingTag = {
accountID: fixtures.accountsPropertiesProfiles.profiles[ 0 ].accountId,
propertyID: fixtures.accountsPropertiesProfiles.profiles[ 0 ].webPropertyId,
};
const { id } = fixtures.propertiesProfiles.profiles[ 0 ];
dispatch( STORE_NAME ).setAccountID( existingTag.accountID );
dispatch( STORE_NAME ).setPropertyID( existingTag.propertyID );
dispatch( STORE_NAME ).setProfileID( id );
dispatch( STORE_NAME ).receiveProfiles( fixtures.accountsPropertiesProfiles.profiles );
dispatch( STORE_NAME ).receiveExistingTag( existingTag.propertyID );
};
const setupEmptyRegistry = ( { dispatch } ) => {
dispatch( STORE_NAME ).setSettings( {} );
dispatch( STORE_NAME ).receiveProfiles( [] );
};
describe( 'ProfileSelect', () => {
afterEach( () => apiFetchMock.mockClear() );
afterAll( () => jest.restoreAllMocks() );
it( 'should render an option for each analytics profile of the currently selected account and property.', async () => {
const { getAllByRole } = render( <ProfileSelect />, { setupRegistry } );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
// Note: we do length + 1 here because there should also be an item for
// "Set up a new property".
expect( listItems ).toHaveLength( fixtures.accountsPropertiesProfiles.profiles.length + 1 );
} );
it( 'should display profile options of an existing account when present, and not be disabled.', async () => {
const { container, getAllByRole, registry } = render( <ProfileSelect />, { setupRegistry: setupRegistryWithExistingTag } );
const currentPropertyID = registry.select( STORE_NAME ).getPropertyID();
const existingTagPropertyID = registry.select( STORE_NAME ).getExistingTag();
expect( existingTagPropertyID ).toEqual( currentPropertyID );
const existingTagProfiles = fixtures.accountsPropertiesProfiles.profiles
.filter( ( { webPropertyId } ) => webPropertyId === existingTagPropertyID );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
expect( listItems ).toHaveLength( existingTagProfiles.length + 1 );
const selectedText = container.querySelector( '.mdc-select__selected-text' );
expect( selectedText ).toHaveAttribute( 'aria-disabled', 'false' );
expect( container.querySelector( '.googlesitekit-analytics__select-profile' ) )
.not.toHaveClass( 'mdc-select--disabled' );
expect( apiFetchMock ).not.toHaveBeenCalled();
} );
	it( 'should be disabled in the absence of a valid account or property ID.', async () => {
const { container, registry } = render( <ProfileSelect />, { setupRegistry } );
const validAccountID = registry.select( STORE_NAME ).getAccountID();
// A valid accountID is provided, so ensure it is not currently disabled.
expect( container.querySelector( '.googlesitekit-analytics__select-profile' ) )
.not.toHaveClass( 'mdc-select--disabled' );
await act( () => registry.dispatch( STORE_NAME ).setAccountID( '0' ) );
// An empty accountID is invalid, so ensure the select IS currently disabled.
expect( container.querySelector( '.googlesitekit-analytics__select-profile' ) )
.toHaveClass( 'mdc-select--disabled' );
await act( () => registry.dispatch( STORE_NAME ).setAccountID( validAccountID ) );
await act( () => registry.dispatch( STORE_NAME ).setPropertyID( '0' ) );
// The accountID is valid, but an empty propertyID is invalid, so ensure the select IS currently disabled.
expect( container.querySelector( '.googlesitekit-analytics__select-profile' ) )
.toHaveClass( 'mdc-select--disabled' );
} );
	it( 'should render a select box with only an option to create a new profile if no profiles are available.', async () => {
const { getAllByRole } = render( <ProfileSelect />, { setupRegistry: setupEmptyRegistry } );
const listItems = getAllByRole( 'menuitem', { hidden: true } );
expect( listItems ).toHaveLength( 1 );
expect( listItems[ 0 ].textContent ).toMatch( /set up a new profile/i );
} );
it( 'should update profileID in the store when a new item is selected', async () => {
const { getByText, container, registry } = render( <ProfileSelect />, { setupRegistry } );
const originalProfileID = registry.select( STORE_NAME ).getProfileID();
// Click the label to expose the elements in the menu.
fireEvent.click( container.querySelector( '.mdc-floating-label' ) );
// Click this element to select it and fire the onChange event.
fireEvent.click( getByText( /set up a new profile/i ) );
const newProfileID = registry.select( STORE_NAME ).getProfileID();
expect( originalProfileID ).not.toEqual( newProfileID );
expect( newProfileID ).toEqual( PROFILE_CREATE );
} );
} );
| 1 | 28,514 | It feels a bit strange to re-assign that here, as that line itself has no context on why it's reassigned. I'd prefer if we could pass `{ propertyID: webPropertyId }` below instead (that's how we do that elsewhere too). This also applies to other similar re-assignments below - let's rather pass the values within the objects as applicable, since then there is more clear context on why this "rename" / "reassignment" is necessary (because of an object expecting specific properties). | google-site-kit-wp | js |
@@ -172,6 +172,7 @@ module.exports = function(grunt) {
'frontend/express/public/core/events/javascripts/countly.overview.views.js',
'frontend/express/public/core/events/javascripts/countly.details.views.js',
'frontend/express/public/core/events/javascripts/countly.details.models.js',
+ 'frontend/express/public/core/events/javascripts/countly.compare.models.js',
'frontend/express/public/core/user-management/javascripts/countly.overview.models.js',
'frontend/express/public/core/user-management/javascripts/countly.overview.views.js'
], | 1 | module.exports = function(grunt) {
grunt.initConfig({
eslint: {
options: {
configFile: './.eslintrc.json'
},
target: ['./']
},
concat: {
options: {
separator: ';'
},
dom: {
src: [
'frontend/express/public/javascripts/dom/jquery/jquery.js',
'frontend/express/public/javascripts/dom/jquery.form.js',
'frontend/express/public/javascripts/dom/tipsy/jquery.tipsy.js',
'frontend/express/public/javascripts/dom/jquery.noisy.min.js',
'frontend/express/public/javascripts/dom/jquery.sticky.headers.js',
'frontend/express/public/javascripts/dom/jqueryui/jquery-ui.js',
'frontend/express/public/javascripts/dom/jqueryui/jquery-ui-i18n.js',
'frontend/express/public/javascripts/dom/slimScroll.min.js',
'frontend/express/public/javascripts/dom/jquery.easing.1.3.js',
'frontend/express/public/javascripts/dom/dataTables/js/jquery.dataTables.js',
'frontend/express/public/javascripts/dom/dataTables/js/ZeroClipboard.js',
'frontend/express/public/javascripts/dom/dataTables/js/TableTools.js',
'frontend/express/public/javascripts/dom/pace/pace.min.js',
'frontend/express/public/javascripts/dom/drop/tether.min.js',
'frontend/express/public/javascripts/dom/drop/drop.min.js'
],
dest: 'frontend/express/public/javascripts/min/countly.dom.concat.js'
},
utils: {
src: [
'frontend/express/public/javascripts/utils/polyfills.js',
'frontend/express/public/javascripts/utils/underscore-min.js',
'frontend/express/public/javascripts/utils/lodash.merge.js',
'frontend/express/public/javascripts/utils/prefixfree.min.js',
'frontend/express/public/javascripts/utils/moment/moment-with-locales.min.js',
'frontend/express/public/javascripts/utils/handlebars.js',
'frontend/express/public/javascripts/utils/backbone-min.js',
'frontend/express/public/javascripts/utils/jquery.i18n.properties.js',
'frontend/express/public/javascripts/utils/jstz.min.js',
'frontend/express/public/javascripts/utils/store+json2.min.js',
'frontend/express/public/javascripts/utils/jquery.idle-timer.js',
'frontend/express/public/javascripts/utils/textcounter.min.js',
'frontend/express/public/javascripts/utils/initialAvatar.js',
'frontend/express/public/javascripts/utils/jquery.amaran.min.js',
'frontend/express/public/javascripts/utils/jquery.titlealert.js',
'frontend/express/public/javascripts/utils/jquery.hoverIntent.minified.js',
'frontend/express/public/javascripts/utils/tooltipster/tooltipster.bundle.min.js',
'frontend/express/public/javascripts/utils/highlight/highlight.pack.js',
'frontend/express/public/javascripts/utils/dropzone.js',
'frontend/express/public/javascripts/utils/webfont.js',
'frontend/express/public/javascripts/utils/selectize.min.js',
'frontend/express/public/javascripts/utils/leaflet.js',
'frontend/express/public/javascripts/utils/polyfill/es6-promise.auto.min.js',
'frontend/express/public/javascripts/utils/polyfill/intersection-observer.js',
'frontend/express/public/javascripts/utils/vue/vue.min.js',
'frontend/express/public/javascripts/utils/vue/composition-api.min.js',
'frontend/express/public/javascripts/utils/vue/vuex.min.js',
'frontend/express/public/javascripts/utils/echarts.5.min.js',
'frontend/express/public/javascripts/utils/vue/vue-echarts.umd.min.js',
'frontend/express/public/javascripts/utils/vue/vue-color.min.js',
'frontend/express/public/javascripts/utils/vue/v-tooltip.min.js',
'frontend/express/public/javascripts/utils/vue/vee-validate.full.min.js',
'frontend/express/public/javascripts/utils/vue/vue-good-table.min.js',
'frontend/express/public/javascripts/utils/vue/vue2Dropzone.min.js',
'frontend/express/public/javascripts/utils/vue/element-ui.js',
'frontend/express/public/javascripts/utils/vue/vue2-leaflet.min.js',
'frontend/express/public/javascripts/utils/vue/inViewportMixin.js',
'frontend/express/public/javascripts/utils/vue/vuescroll.min.js',
'frontend/express/public/javascripts/utils/vue/vue-google-charts.js',
'frontend/express/public/javascripts/utils/jquery.xss.js',
'frontend/express/public/javascripts/countly/countly.common.js',
'frontend/express/public/javascripts/utils/simpleUpload.min.js',
'frontend/express/public/javascripts/utils/jsoneditor/codemirror.js',
'frontend/express/public/javascripts/utils/jsoneditor/javascript.js',
'frontend/express/public/javascripts/utils/jsoneditor/json2.js',
'frontend/express/public/javascripts/utils/jsoneditor/jsonlint.js',
'frontend/express/public/javascripts/utils/jsoneditor/minify.json.js',
'frontend/express/public/javascripts/utils/jsoneditor/jsoneditor.js',
'frontend/express/public/javascripts/utils/Sortable.min.js',
'frontend/express/public/javascripts/utils/vue/vuedraggable.umd.min.js',
'frontend/express/public/javascripts/utils/countly.checkbox.js'
],
dest: 'frontend/express/public/javascripts/min/countly.utils.concat.js'
},
visualization: {
src: [
'frontend/express/public/javascripts/visualization/jquery.peity.min.js',
'frontend/express/public/javascripts/visualization/jquery.sparkline.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.tickrotor.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.pie.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.resize.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.stack.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.spline.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.crosshair.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.orderBars.js',
'frontend/express/public/javascripts/visualization/flot/jquery.flot.navigate.js',
'frontend/express/public/javascripts/visualization/gauge.min.js',
'frontend/express/public/javascripts/visualization/d3/d3.min.js',
'frontend/express/public/javascripts/visualization/rickshaw/rickshaw.min.js',
'frontend/express/public/javascripts/visualization/rickshaw/rickshaw.x.axis.js'
],
dest: 'frontend/express/public/javascripts/min/countly.visualization.concat.js'
},
lib: {
src: [
'frontend/express/public/javascripts/countly/countly.auth.js',
'frontend/express/public/javascripts/countly/countly.helpers.js',
'frontend/express/public/javascripts/countly/countly.map.helper.js',
'frontend/express/public/javascripts/countly/countly.event.js',
'frontend/express/public/javascripts/countly/countly.session.js',
'frontend/express/public/javascripts/countly/countly.city.js',
'frontend/express/public/javascripts/countly/countly.location.js',
'frontend/express/public/javascripts/countly/countly.device.list.js',
'frontend/express/public/javascripts/countly/countly.device.osmapping.js',
'frontend/express/public/javascripts/countly/countly.device.js',
'frontend/express/public/javascripts/countly/countly.device.detail.js',
'frontend/express/public/javascripts/countly/countly.app.version.js',
'frontend/express/public/javascripts/countly/countly.carrier.js',
'frontend/express/public/javascripts/countly/countly.total.users.js',
'frontend/express/public/javascripts/countly/countly.task.manager.js',
'frontend/express/public/javascripts/countly/countly.app.users.js',
'frontend/express/public/javascripts/countly/countly.template.js',
'frontend/express/public/javascripts/countly/vue/core.js',
'frontend/express/public/javascripts/countly/vue/container.js',
'frontend/express/public/javascripts/countly/vue/helpers.js',
'frontend/express/public/javascripts/countly/vue/data/vuex.js',
'frontend/express/public/javascripts/countly/vue/imports.js',
'frontend/express/public/javascripts/countly/vue/components/nav.js',
'frontend/express/public/javascripts/countly/vue/components/layout.js',
'frontend/express/public/javascripts/countly/vue/components/form.js',
'frontend/express/public/javascripts/countly/vue/components/date.js',
'frontend/express/public/javascripts/countly/vue/components/dropdown.js',
'frontend/express/public/javascripts/countly/vue/components/input.js',
'frontend/express/public/javascripts/countly/vue/datatable-legacy.js',
'frontend/express/public/javascripts/countly/vue/components/datatable.js',
'frontend/express/public/javascripts/countly/vue/components/dialog.js',
'frontend/express/public/javascripts/countly/vue/components/drawer.js',
'frontend/express/public/javascripts/countly/vue/components/vis.js',
'frontend/express/public/javascripts/countly/vue/components/helpers.js',
'frontend/express/public/javascripts/countly/vue/components/sidebar.js',
'frontend/express/public/javascripts/countly/vue/components/progress.js',
'frontend/express/public/javascripts/countly/vue/directives/scroll-shadow.js',
'frontend/express/public/javascripts/countly/vue/legacy.js',
'frontend/express/public/javascripts/countly/countly.vue.legacy.js',
'frontend/express/public/javascripts/countly/countly.token.manager.js',
'frontend/express/public/javascripts/countly/countly.version.history.js',
'frontend/express/public/javascripts/countly/countly.analytics.js',
'frontend/express/public/javascripts/countly/countly.views.js',
'frontend/express/public/core/user-activity/javascripts/countly.models.js',
'frontend/express/public/core/user-activity/javascripts/countly.views.js',
'frontend/express/public/core/session-overview/javascripts/countly.models.js',
'frontend/express/public/core/session-overview/javascripts/countly.views.js',
'frontend/express/public/core/session-durations/javascripts/countly.models.js',
'frontend/express/public/core/session-durations/javascripts/countly.views.js',
'frontend/express/public/core/session-frequency/javascripts/countly.models.js',
'frontend/express/public/core/session-frequency/javascripts/countly.views.js',
'frontend/express/public/core/app-version/javascripts/countly.views.js',
'frontend/express/public/core/app-resolution/javascripts/countly.views.js',
'frontend/express/public/core/platform/javascripts/countly.views.js',
'frontend/express/public/core/browser/javascripts/countly.views.js',
'frontend/express/public/core/devices-and-types/javascripts/countly.views.js',
'frontend/express/public/core/devices-and-types/javascripts/countly.models.js',
'frontend/express/public/core/carrier/javascripts/countly.views.js',
'frontend/express/public/core/carrier/javascripts/countly.models.js',
'frontend/express/public/core/events/javascripts/countly.overview.models.js',
'frontend/express/public/core/events/javascripts/countly.overview.views.js',
'frontend/express/public/core/events/javascripts/countly.details.views.js',
'frontend/express/public/core/events/javascripts/countly.details.models.js',
'frontend/express/public/core/user-management/javascripts/countly.overview.models.js',
'frontend/express/public/core/user-management/javascripts/countly.overview.views.js'
],
dest: 'frontend/express/public/javascripts/min/countly.lib.concat.js'
}
},
uglify: {
options: {
banner: '/*! Countly <%= grunt.template.today("dd-mm-yyyy") %> */\n',
mangle: {
reserved: ["$super"]
}
},
dist: {
files: {
'frontend/express/public/javascripts/min/countly.dom.js': 'frontend/express/public/javascripts/min/countly.dom.concat.js',
'frontend/express/public/javascripts/min/countly.utils.js': 'frontend/express/public/javascripts/min/countly.utils.concat.js',
'frontend/express/public/javascripts/min/countly.visualization.js': 'frontend/express/public/javascripts/min/countly.visualization.concat.js',
'frontend/express/public/javascripts/min/countly.lib.js': 'frontend/express/public/javascripts/min/countly.lib.concat.js'
}
}
},
copy: {},
cssmin: {
dist: {
files: {
'frontend/express/public/stylesheets/main.min.css': [
'frontend/express/public/stylesheets/main.css',
'frontend/express/public/stylesheets/vue/element-ui.css',
'frontend/express/public/stylesheets/vue/clyvue.css',
'frontend/express/public/stylesheets/amaranjs/amaran.min.css',
'frontend/express/public/stylesheets/selectize/selectize.css',
'frontend/express/public/stylesheets/leaflet/leaflet.css',
'frontend/express/public/stylesheets/jsoneditor/codemirror.css',
'frontend/express/public/stylesheets/countly-checkbox/countly.checkbox.css',
'frontend/express/public/javascripts/dom/tipsy/tipsy.css',
'frontend/express/public/javascripts/visualization/rickshaw/rickshaw.min.css',
'frontend/express/public/javascripts/dom/pace/pace-theme-flash.css',
'frontend/express/public/javascripts/dom/drop/drop-theme-countly.min.css',
'frontend/express/public/javascripts/utils/tooltipster/tooltipster.bundle.min.css',
'frontend/express/public/stylesheets/bulma/bulma-custom.css',
'frontend/express/public/stylesheets/styles/manifest.css',
]
}
}
},
mochaTest: {
test: {
options: {
reporter: 'spec',
timeout: 50000
},
src: ['test/*/*.js']
}
},
mocha_nyc: {
coverage: {
src: ['test/*/*.js'], // a folder works nicely
options: {
coverage: true, // this will make the grunt.event.on('coverage') event listener to be triggered
mask: '*.js',
excludes: ['bin/*', 'frontend/*', 'extend/*', 'Gruntfile.js', 'test/*'],
mochaOptions: ['--harmony', '--async-only', '--reporter', 'spec', '--timeout', '50000', '--exit'],
nycOptions: ['--harmony', '--clean', 'false'], //,'--include-all-sources' '--all'
reportFormats: ['none']
}
}
},
istanbul_check_coverage: {
default: {
options: {
coverageFolder: 'coverage*', // will check both coverage folders and merge the coverage results
check: {
lines: 80,
statements: 80
}
}
}
},
sass: { // Task
dist: { // Target
options: { // Target options
style: 'compressed',
update: false // only compile when scss file is newer than css file
},
files: [
{ src: 'frontend/express/public/stylesheets/vue/clyvue.scss', dest: 'frontend/express/public/stylesheets/vue/clyvue.css' },
{ src: 'frontend/express/public/stylesheets/styles/manifest.scss', dest: 'frontend/express/public/stylesheets/styles/manifest.css' },
{ expand: true, src: ['plugins/*/frontend/public/stylesheets/**/*.scss'], ext: '.css', extDot: 'first' }
]
}
},
watch: {
scripts: {
files: ['plugins/*/frontend/public/stylesheets/**/*.scss',
"frontend/express/public/core/*/stylesheets/**/*.scss",
"frontend/express/public/stylesheets/styles/**/*.scss"
],
tasks: ['sass']
},
}
});
//code coverage
grunt.event.on('coverage', function(lcovFileContents, done) {
// Check below on the section "The coverage event"
done();
});
grunt.loadNpmTasks('grunt-mocha-nyc');
grunt.registerTask('coverage', ['mocha_nyc:coverage']);
//-----------code coverage-----------
grunt.loadNpmTasks('grunt-contrib-sass');
grunt.loadNpmTasks('grunt-contrib-concat');
grunt.loadNpmTasks('grunt-contrib-uglify');
grunt.loadNpmTasks('grunt-contrib-copy');
grunt.loadNpmTasks('grunt-contrib-cssmin');
grunt.loadNpmTasks('grunt-contrib-watch');
grunt.loadNpmTasks('grunt-eslint');
grunt.loadNpmTasks('grunt-mocha-test');
grunt.registerTask('default', ['mochaTest']);
grunt.registerTask('dist', ['sass', 'concat', 'uglify', 'cssmin']);
grunt.registerTask('plugins', 'Minify plugin JS / CSS files and copy images', function() {
var plugins = require('./plugins/plugins.json'), js = [], css = [], img = [], fs = require('fs'), path = require('path');
console.log('Preparing production files for following plugins: %j', plugins);
if (plugins.indexOf('push') !== -1) {
if (plugins.indexOf('geo') !== -1) {
plugins.splice(plugins.indexOf('geo'), 1);
plugins.push('geo');
}
if (plugins.indexOf('push_approver') !== -1) {
plugins.splice(plugins.indexOf('push_approver'), 1);
plugins.push('push_approver');
}
}
if (plugins.indexOf('drill') !== -1) {
if (plugins.indexOf('cohorts') !== -1) {
plugins.splice(plugins.indexOf('cohorts'), 1);
plugins.push('cohorts');
}
if (plugins.indexOf('funnels') !== -1) {
plugins.splice(plugins.indexOf('funnels'), 1);
plugins.push('funnels');
}
if (plugins.indexOf('formulas') !== -1) {
plugins.splice(plugins.indexOf('formulas'), 1);
plugins.push('formulas');
}
}
plugins.forEach(function(plugin) {
var files, pluginPath = path.join(__dirname, 'plugins', plugin),
javascripts = path.join(pluginPath, 'frontend/public/javascripts'),
stylesheets = path.join(pluginPath, 'frontend/public/stylesheets'),
images = path.join(pluginPath, 'frontend/public/images', plugin);
if (fs.existsSync(javascripts) && fs.statSync(javascripts).isDirectory()) {
files = fs.readdirSync(javascripts);
if (files.length) {
// move models to the top, then all dependencies, then views
for (var i = 0; i < files.length; i++) {
if (files[i].indexOf('countly.models.js') !== -1 && i !== 0) {
files.splice(0, 0, files.splice(i, 1)[0]);
}
else if (files[i].indexOf('countly.views.js') !== -1 && i !== files.length - 1) {
files.splice(files.length - 1, 0, files.splice(i, 1)[0]);
}
}
files.forEach(function(name) {
var file = path.join(javascripts, name);
if (fs.statSync(file).isFile() && name.indexOf('.') !== 0 && name.endsWith('.js')) {
js.push('plugins/' + plugin + '/frontend/public/javascripts/' + name);
}
});
}
}
if (fs.existsSync(stylesheets) && fs.statSync(stylesheets).isDirectory()) {
files = fs.readdirSync(stylesheets);
files.forEach(function(name) {
var file = path.join(stylesheets, name);
if (fs.statSync(file).isFile() && name !== 'pre-login.css' && name.indexOf('.') !== 0 && name.endsWith('.css')) {
css.push('plugins/' + plugin + '/frontend/public/stylesheets/' + name);
}
});
}
try {
if (fs.existsSync(images) && fs.statSync(images).isDirectory()) {
img.push({ expand: true, cwd: 'plugins/' + plugin + '/frontend/public/images/' + plugin + '/', filter: 'isFile', src: '**', dest: 'frontend/express/public/images/' + plugin + '/' });
}
}
catch (err) {
if (err.code !== 'ENOENT') {
throw err;
}
}
});
grunt.config('copy.plugins.files', img);
grunt.config('concat.plugins.src', js);
grunt.config('concat.plugins.dest', 'frontend/express/public/javascripts/min/countly.plugins.concat.js');
grunt.config('uglify.plugins.files.frontend/express/public/javascripts/min/countly\\.plugins\\.js', 'frontend/express/public/javascripts/min/countly.plugins.concat.js');
grunt.config('cssmin.plugins.files.frontend/express/public/stylesheets/plugins\\.min\\.css', css);
// grunt.task.loadTasks(['copy:plugins', 'concat:plugins', 'uglify:plugins']);
// grunt.task.run(['concat', 'uglify']);
grunt.task.run(['concat:plugins', 'uglify:plugins', 'copy:plugins', 'sass', 'cssmin:plugins']);
console.log('Done preparing production files');
});
grunt.registerTask('locales', 'Concat all locale files into one', function() {
var plugins = require('./plugins/plugins.json'), locales = {}, fs = require('fs'), path = require('path');
console.log('Preparing locale files for core & plugins: %j', plugins);
var pushLocaleFile = function(name, path) {
var lang = '';
name = name.replace('.properties', '');
if (name.indexOf('_') !== -1) {
lang = name.split('_').pop();
if (lang.length > 3 || lang === "en") {
lang = '';
}
}
if (!locales[lang]) {
locales[lang] = [];
}
locales[lang].push(path);
};
function processLocaleDir(dir) {
if (!fs.existsSync(dir)) {
return;
}
fs.readdirSync(dir).forEach(function(name) {
var file = path.join(dir, name);
if (fs.statSync(file).isFile() && name.indexOf('.') !== 0) {
pushLocaleFile(name, dir + '/' + name);
}
});
}
[path.join(__dirname, 'frontend/express/public/localization/dashboard'), path.join(__dirname, 'frontend/express/public/localization/help'), path.join(__dirname, 'frontend/express/public/localization/mail')].forEach(processLocaleDir);
plugins.forEach(function(plugin) {
var localization = path.join(__dirname, 'plugins', plugin, 'frontend/public/localization');
try {
if (fs.statSync(localization).isDirectory()) {
fs.readdirSync(localization).forEach(function(name) {
var file = path.join(localization, name);
if (fs.statSync(file).isFile() && name.indexOf('.') !== 0) {
pushLocaleFile(name, 'plugins/' + plugin + '/frontend/public/localization/' + name);
}
});
}
}
catch (err) {
if (err.code !== 'ENOENT') {
throw err;
}
}
});
processLocaleDir(path.join(__dirname, 'frontend/express/public/localization/custom'));
for (var lang in locales) {
grunt.config('concat.locales_' + lang + '.options.separator', '\n\n');
grunt.config('concat.locales_' + lang + '.src', locales[lang]);
grunt.config('concat.locales_' + lang + '.dest', 'frontend/express/public/localization/min/locale' + (lang.length ? '_' + lang : '') + '.properties');
grunt.task.run('concat:locales_' + lang);
}
console.log('Done preparing locale files');
});
grunt.registerTask('dist-all', ['dist', 'plugins', 'locales']);
}; | 1 | 14,257 | remove this in the new pr aswell when moving compare to plugins as discussed. | Countly-countly-server | js |
@@ -210,10 +210,16 @@ Status YieldClauseWrapper::prepare(
yieldColsHolder_ = std::make_unique<YieldColumns>();
for (auto *col : cols) {
if (col->expr()->isInputExpression()) {
+ if (inputs == nullptr) {
+ return Status::Error("Inputs nullptr.");
+ }
if (needAllPropsFromInput(col, inputs, yields)) {
continue;
}
} else if (col->expr()->isVariableExpression()) {
+ if (varHolder == nullptr) {
+ return Status::Error("VarHolder nullptr.");
+ }
auto ret = needAllPropsFromVar(col, varHolder, yields);
if (!ret.ok()) {
return std::move(ret).status(); | 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "graph/TraverseExecutor.h"
#include "parser/TraverseSentences.h"
#include "dataman/RowReader.h"
#include "dataman/RowWriter.h"
#include "graph/GoExecutor.h"
#include "graph/PipeExecutor.h"
#include "graph/OrderByExecutor.h"
#include "graph/FetchVerticesExecutor.h"
#include "graph/FetchEdgesExecutor.h"
#include "dataman/RowReader.h"
#include "dataman/RowWriter.h"
#include "graph/SetExecutor.h"
#include "graph/FindExecutor.h"
#include "graph/MatchExecutor.h"
#include "graph/FindPathExecutor.h"
#include "graph/LimitExecutor.h"
#include "graph/YieldExecutor.h"
#include "graph/GroupByExecutor.h"
namespace nebula {
namespace graph {
std::unique_ptr<TraverseExecutor> TraverseExecutor::makeTraverseExecutor(Sentence *sentence) {
return makeTraverseExecutor(sentence, ectx());
}
// static
std::unique_ptr<TraverseExecutor>
TraverseExecutor::makeTraverseExecutor(Sentence *sentence, ExecutionContext *ectx) {
auto kind = sentence->kind();
std::unique_ptr<TraverseExecutor> executor;
switch (kind) {
case Sentence::Kind::kGo:
executor = std::make_unique<GoExecutor>(sentence, ectx);
break;
case Sentence::Kind::kPipe:
executor = std::make_unique<PipeExecutor>(sentence, ectx);
break;
case Sentence::Kind::kOrderBy:
executor = std::make_unique<OrderByExecutor>(sentence, ectx);
break;
case Sentence::Kind::kFetchVertices:
executor = std::make_unique<FetchVerticesExecutor>(sentence, ectx);
break;
case Sentence::Kind::kFetchEdges:
executor = std::make_unique<FetchEdgesExecutor>(sentence, ectx);
break;
case Sentence::Kind::kSet:
executor = std::make_unique<SetExecutor>(sentence, ectx);
break;
case Sentence::Kind::kMatch:
executor = std::make_unique<MatchExecutor>(sentence, ectx);
break;
case Sentence::Kind::kFind:
executor = std::make_unique<FindExecutor>(sentence, ectx);
break;
case Sentence::Kind::kYield:
executor = std::make_unique<YieldExecutor>(sentence, ectx);
break;
case Sentence::Kind::kFindPath:
executor = std::make_unique<FindPathExecutor>(sentence, ectx);
break;
case Sentence::Kind::kLimit:
executor = std::make_unique<LimitExecutor>(sentence, ectx);
break;
case Sentence::Kind::KGroupBy:
executor = std::make_unique<GroupByExecutor>(sentence, ectx);
break;
case Sentence::Kind::kUnknown:
LOG(FATAL) << "Sentence kind unknown";
break;
default:
LOG(FATAL) << "Sentence kind illegal: " << kind;
break;
}
return executor;
}
Status Collector::collect(VariantType &var, RowWriter *writer) {
switch (var.which()) {
case VAR_INT64:
(*writer) << boost::get<int64_t>(var);
break;
case VAR_DOUBLE:
(*writer) << boost::get<double>(var);
break;
case VAR_BOOL:
(*writer) << boost::get<bool>(var);
break;
case VAR_STR:
(*writer) << boost::get<std::string>(var);
break;
default:
std::string errMsg =
folly::stringPrintf("Unknown VariantType: %d", var.which());
LOG(ERROR) << errMsg;
return Status::Error(errMsg);
}
return Status::OK();
}
OptVariantType Collector::getProp(const meta::SchemaProviderIf *schema,
const std::string &prop,
const RowReader *reader) {
DCHECK(reader != nullptr);
DCHECK(schema != nullptr);
using nebula::cpp2::SupportedType;
auto type = schema->getFieldType(prop).type;
switch (type) {
case SupportedType::BOOL: {
bool v;
reader->getBool(prop, v);
VLOG(3) << "get prop: " << prop << ", value: " << v;
return v;
}
case SupportedType::TIMESTAMP:
case SupportedType::INT: {
int64_t v;
reader->getInt(prop, v);
VLOG(3) << "get prop: " << prop << ", value: " << v;
return v;
}
case SupportedType::VID: {
VertexID v;
reader->getVid(prop, v);
VLOG(3) << "get prop: " << prop << ", value: " << v;
return v;
}
case SupportedType::FLOAT: {
float v;
reader->getFloat(prop, v);
VLOG(3) << "get prop: " << prop << ", value: " << v;
return static_cast<double>(v);
}
case SupportedType::DOUBLE: {
double v;
reader->getDouble(prop, v);
VLOG(3) << "get prop: " << prop << ", value: " << v;
return v;
}
case SupportedType::STRING: {
folly::StringPiece v;
reader->getString(prop, v);
VLOG(3) << "get prop: " << prop << ", value: " << v;
return v.toString();
}
default:
std::string errMsg =
folly::stringPrintf("Unknown type: %d", static_cast<int32_t>(type));
LOG(ERROR) << errMsg;
return Status::Error(errMsg);
}
}
Status Collector::getSchema(const std::vector<VariantType> &vals,
const std::vector<std::string> &colNames,
const std::vector<nebula::cpp2::SupportedType> &colTypes,
SchemaWriter *outputSchema) {
DCHECK(outputSchema != nullptr);
DCHECK_EQ(vals.size(), colNames.size());
DCHECK_EQ(vals.size(), colTypes.size());
using nebula::cpp2::SupportedType;
auto index = 0u;
for (auto &it : colTypes) {
SupportedType type;
if (it == SupportedType::UNKNOWN) {
switch (vals[index].which()) {
case VAR_INT64:
// all integers in InterimResult are regarded as type of INT
type = SupportedType::INT;
break;
case VAR_DOUBLE:
type = SupportedType::DOUBLE;
break;
case VAR_BOOL:
type = SupportedType::BOOL;
break;
case VAR_STR:
type = SupportedType::STRING;
break;
default:
std::string errMsg =
folly::stringPrintf("Unknown VariantType: %d", vals[index].which());
LOG(ERROR) << errMsg;
return Status::Error(errMsg);
}
} else {
type = it;
}
outputSchema->appendCol(colNames[index], type);
index++;
}
return Status::OK();
}
Status YieldClauseWrapper::prepare(
const InterimResult *inputs,
const VariableHolder *varHolder,
std::vector<YieldColumn*> &yields) {
auto cols = clause_->columns();
yieldColsHolder_ = std::make_unique<YieldColumns>();
for (auto *col : cols) {
if (col->expr()->isInputExpression()) {
if (needAllPropsFromInput(col, inputs, yields)) {
continue;
}
} else if (col->expr()->isVariableExpression()) {
auto ret = needAllPropsFromVar(col, varHolder, yields);
if (!ret.ok()) {
return std::move(ret).status();
}
if (ret.value()) {
continue;
}
}
yields.emplace_back(col);
}
return Status::OK();
}
bool YieldClauseWrapper::needAllPropsFromInput(const YieldColumn *col,
const InterimResult *inputs,
std::vector<YieldColumn*> &yields) {
auto *inputExpr = static_cast<InputPropertyExpression*>(col->expr());
auto *colName = inputExpr->prop();
if (*colName == "*") {
if (inputs != nullptr) {
auto schema = inputs->schema();
auto iter = schema->begin();
while (iter) {
auto *prop = iter->getName();
Expression *expr = new InputPropertyExpression(new std::string(prop));
YieldColumn *column = new YieldColumn(expr);
yieldColsHolder_->addColumn(column);
yields.emplace_back(column);
++iter;
}
}
return true;
}
return false;
}
StatusOr<bool> YieldClauseWrapper::needAllPropsFromVar(
const YieldColumn *col,
const VariableHolder *varHolder,
std::vector<YieldColumn*> &yields) {
auto *variableExpr = static_cast<VariablePropertyExpression*>(col->expr());
auto *colName = variableExpr->prop();
bool existing = false;
auto *varname = variableExpr->alias();
auto varInputs = varHolder->get(*varname, &existing);
if (varInputs == nullptr && !existing) {
return Status::Error("Variable `%s' not defined.", varname->c_str());
}
if (*colName == "*") {
auto schema = varInputs->schema();
auto iter = schema->begin();
while (iter) {
auto *alias = new std::string(*(variableExpr->alias()));
auto *prop = iter->getName();
Expression *expr =
new VariablePropertyExpression(alias, new std::string(prop));
YieldColumn *column = new YieldColumn(expr);
yieldColsHolder_->addColumn(column);
yields.emplace_back(column);
++iter;
}
return true;
}
return false;
}
} // namespace graph
} // namespace nebula
| 1 | 25,028 | check varHolder is nullptr is redundant, ExecutionContext ensure | vesoft-inc-nebula | cpp |
@@ -0,0 +1,8 @@
+module UnsubscribesHelper
+ def unsubscribe_token_verifier
+ @_unsubscribe_token_verifier ||= ActiveSupport::MessageVerifier.new(
+ ENV.fetch("UNSUBSCRIBE_SECRET_BASE"),
+ digest: "SHA256".freeze,
+ )
+ end
+end | 1 | 1 | 17,535 | Make these frozen constants? | thoughtbot-upcase | rb |
|
@@ -55,8 +55,9 @@ def browseableMessage(message,title=None,isHtml=False):
if not title:
# Translators: The title for the dialog used to present general NVDA messages in browse mode.
title = _("NVDA Message")
- isHtmlArgument = "true" if isHtml else "false"
- dialogString = u"{isHtml};{title};{message}".format( isHtml = isHtmlArgument , title=title , message=message )
+ if not isHtml:
+ message = f"<pre>{message.replace('<', '<')}</pre>"
+ dialogString = f"{title};{message}"
dialogArguements = automation.VARIANT( dialogString )
gui.mainFrame.prePopup()
windll.mshtml.ShowHTMLDialogEx( | 1 | # -*- coding: utf-8 -*-
# A part of NonVisual Desktop Access (NVDA)
# Copyright (C) 2008-2020 NV Access Limited, James Teh, Dinesh Kaushal, Davy Kager, André-Abush Clause,
# Babbage B.V., Leonard de Ruijter, Michael Curran, Accessolutions, Julien Cochuyt
# This file may be used under the terms of the GNU General Public License, version 2 or later.
# For more details see: https://www.gnu.org/licenses/gpl-2.0.html
"""User interface functionality.
This refers to the user interface presented by the screen reader alone, not the graphical user interface.
See L{gui} for the graphical user interface.
"""
import os
import sys
from ctypes import windll, byref, POINTER, addressof
from comtypes import IUnknown
from comtypes import automation
from logHandler import log
import gui
import speech
import braille
import globalVars
from typing import Optional
# From urlmon.h
URL_MK_UNIFORM = 1
# Dialog box properties
DIALOG_OPTIONS = "resizable:yes;help:no"
#dwDialogFlags for ShowHTMLDialogEx from mshtmhst.h
HTMLDLG_NOUI = 0x0010
HTMLDLG_MODAL = 0x0020
HTMLDLG_MODELESS = 0x0040
HTMLDLG_PRINT_TEMPLATE = 0x0080
HTMLDLG_VERIFY = 0x0100
def browseableMessage(message,title=None,isHtml=False):
"""Present a message to the user that can be read in browse mode.
The message will be presented in an HTML document.
@param message: The message in either html or text.
@type message: str
@param title: The title for the message.
@type title: str
@param isHtml: Whether the message is html
@type isHtml: boolean
"""
htmlFileName = os.path.join(globalVars.appDir, 'message.html')
if not os.path.isfile(htmlFileName ):
raise LookupError(htmlFileName )
moniker = POINTER(IUnknown)()
windll.urlmon.CreateURLMonikerEx(0, htmlFileName, byref(moniker), URL_MK_UNIFORM)
if not title:
# Translators: The title for the dialog used to present general NVDA messages in browse mode.
title = _("NVDA Message")
isHtmlArgument = "true" if isHtml else "false"
dialogString = u"{isHtml};{title};{message}".format( isHtml = isHtmlArgument , title=title , message=message )
dialogArguements = automation.VARIANT( dialogString )
gui.mainFrame.prePopup()
windll.mshtml.ShowHTMLDialogEx(
gui.mainFrame.Handle ,
moniker ,
HTMLDLG_MODELESS ,
addressof( dialogArguements ) ,
DIALOG_OPTIONS,
None
)
gui.mainFrame.postPopup()
def message(
text: str,
speechPriority: Optional[speech.Spri] = None,
brailleText: Optional[str] = None,
):
"""Present a message to the user.
The message will be presented in both speech and braille.
@param text: The text of the message.
@param speechPriority: The speech priority.
@param brailleText: If specified, present this alternative text on the braille display.
"""
speech.speakMessage(text, priority=speechPriority)
braille.handler.message(brailleText if brailleText is not None else text)
def reviewMessage(text: str, speechPriority: Optional[speech.Spri] = None):
"""Present a message from review or object navigation to the user.
The message will always be presented in speech, and also in braille if it is tethered to review or when auto tethering is on.
@param text: The text of the message.
@param speechPriority: The speech priority.
"""
speech.speakMessage(text, priority=speechPriority)
if braille.handler.shouldAutoTether or braille.handler.getTether() == braille.handler.TETHER_REVIEW:
braille.handler.message(text)
def reportTextCopiedToClipboard(text: Optional[str] = None):
"""Notify about the result of a "Copy to clipboard" operation.
@param text: The text that has been copied. Set to `None` to notify of a failed operation.
See: `api.copyToClip`
"""
if not text:
# Translators: Presented when unable to copy to the clipboard because of an error
# or the clipboard content did not match what was just copied.
message(_("Unable to copy"))
return
	# Depending on the speech synthesizer, a large amount of spoken text can freeze NVDA (#11843)
if len(text) < 1024:
spokenText = text
else:
# Translators: Spoken instead of a lengthy text when copied to clipboard.
spokenText = _("%d characters") % len(text)
message(
# Translators: Announced when a text has been copied to clipboard.
# {text} is replaced by the copied text.
text=_("Copied to clipboard: {text}").format(text=spokenText),
# Translators: Displayed in braille when a text has been copied to clipboard.
# {text} is replaced by the copied text.
brailleText=_("Copied: {text}").format(text=text)
)
| 1 | 31,586 | Please use something like html.escape() to ensure that the text is totally safe to include within html. < is not enough. | nvaccess-nvda | py |
@@ -130,7 +130,7 @@ func (s *Service) RetrieveChunk(ctx context.Context, addr swarm.Address, origin
var (
peerAttempt int
peersResults int
- resultC = make(chan retrievalResult, maxSelects)
+ resultC = make(chan retrievalResult, 2)
)
requestAttempt := 0 | 1 | // Copyright 2020 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
// Package retrieval provides the retrieval protocol
// implementation. The protocol is used to retrieve
// chunks over the network using forwarding-kademlia
// routing.
package retrieval
import (
"context"
"errors"
"fmt"
"strconv"
"time"
"github.com/ethersphere/bee/pkg/accounting"
"github.com/ethersphere/bee/pkg/cac"
"github.com/ethersphere/bee/pkg/logging"
"github.com/ethersphere/bee/pkg/p2p"
"github.com/ethersphere/bee/pkg/p2p/protobuf"
"github.com/ethersphere/bee/pkg/postage"
"github.com/ethersphere/bee/pkg/pricer"
pb "github.com/ethersphere/bee/pkg/retrieval/pb"
"github.com/ethersphere/bee/pkg/soc"
"github.com/ethersphere/bee/pkg/storage"
"github.com/ethersphere/bee/pkg/swarm"
"github.com/ethersphere/bee/pkg/topology"
"github.com/ethersphere/bee/pkg/tracing"
"github.com/opentracing/opentracing-go"
"resenje.org/singleflight"
)
type requestSourceContextKey struct{}
const (
protocolName = "retrieval"
protocolVersion = "1.0.0"
streamName = "retrieval"
)
var _ Interface = (*Service)(nil)
type Interface interface {
RetrieveChunk(ctx context.Context, addr swarm.Address, origin bool) (chunk swarm.Chunk, err error)
}
type retrievalResult struct {
chunk swarm.Chunk
peer swarm.Address
err error
retrieved bool
}
type Service struct {
addr swarm.Address
streamer p2p.Streamer
peerSuggester topology.EachPeerer
storer storage.Storer
singleflight singleflight.Group
logger logging.Logger
accounting accounting.Interface
metrics metrics
pricer pricer.Interface
tracer *tracing.Tracer
caching bool
validStamp postage.ValidStampFn
}
func New(addr swarm.Address, storer storage.Storer, streamer p2p.Streamer, chunkPeerer topology.EachPeerer, logger logging.Logger, accounting accounting.Interface, pricer pricer.Interface, tracer *tracing.Tracer, forwarderCaching bool, validStamp postage.ValidStampFn) *Service {
return &Service{
addr: addr,
streamer: streamer,
peerSuggester: chunkPeerer,
storer: storer,
logger: logger,
accounting: accounting,
pricer: pricer,
metrics: newMetrics(),
tracer: tracer,
caching: forwarderCaching,
validStamp: validStamp,
}
}
func (s *Service) Protocol() p2p.ProtocolSpec {
return p2p.ProtocolSpec{
Name: protocolName,
Version: protocolVersion,
StreamSpecs: []p2p.StreamSpec{
{
Name: streamName,
Handler: s.handler,
},
},
}
}
const (
retrieveChunkTimeout = 10 * time.Second
retrieveRetryIntervalDuration = 5 * time.Second
maxRequestRounds = 5
maxSelects = 8
originSuffix = "_origin"
)
func (s *Service) RetrieveChunk(ctx context.Context, addr swarm.Address, origin bool) (swarm.Chunk, error) {
s.metrics.RequestCounter.Inc()
flightRoute := addr.String()
if origin {
flightRoute = addr.String() + originSuffix
}
	// topCtx passes the tracing span to the first singleflight call
topCtx := ctx
v, _, err := s.singleflight.Do(ctx, flightRoute, func(ctx context.Context) (interface{}, error) {
maxPeers := 1
if origin {
maxPeers = maxSelects
}
sp := newSkipPeers()
ticker := time.NewTicker(retrieveRetryIntervalDuration)
defer ticker.Stop()
var (
peerAttempt int
peersResults int
resultC = make(chan retrievalResult, maxSelects)
)
requestAttempt := 0
lastTime := time.Now().Unix()
for requestAttempt < maxRequestRounds {
if peerAttempt < maxSelects {
// create a new context without cancelation but
// set the tracing span to the new context from the context of the first caller
ctx := tracing.WithContext(context.Background(), tracing.FromContext(topCtx))
// get the tracing span
span, _, ctx := s.tracer.StartSpanFromContext(ctx, "retrieve-chunk", s.logger, opentracing.Tag{Key: "address", Value: addr.String()})
defer span.Finish()
peerAttempt++
s.metrics.PeerRequestCounter.Inc()
go func() {
// cancel the goroutine just with the timeout
ctx, cancel := context.WithTimeout(ctx, retrieveChunkTimeout)
defer cancel()
chunk, peer, requested, err := s.retrieveChunk(ctx, addr, sp, origin)
select {
case resultC <- retrievalResult{
chunk: chunk,
peer: peer,
err: err,
retrieved: requested,
}:
case <-ctx.Done():
}
}()
} else {
select {
case resultC <- retrievalResult{}:
case <-ctx.Done():
}
}
select {
case <-ticker.C:
// break
case res := <-resultC:
if res.retrieved {
if res.err != nil {
if !res.peer.IsZero() {
s.logger.Debugf("retrieval: failed to get chunk %s from peer %s: %v", addr, res.peer, res.err)
}
peersResults++
} else {
return res.chunk, nil
}
}
case <-ctx.Done():
s.logger.Tracef("retrieval: failed to get chunk %s: %v", addr, ctx.Err())
return nil, fmt.Errorf("retrieval: %w", ctx.Err())
}
// all results received, only successfully attempted requests are counted
if peersResults >= maxPeers {
s.logger.Tracef("retrieval: failed to get chunk %s", addr)
return nil, storage.ErrNotFound
}
			// if we have not counted enough successful attempts but have run out of selections, reset
if peerAttempt >= maxSelects {
if !origin {
return nil, storage.ErrNotFound
}
requestAttempt++
timeNow := time.Now().Unix()
if timeNow > lastTime {
lastTime = timeNow
peerAttempt = 0
sp.Reset()
} else {
select {
case <-time.After(600 * time.Millisecond):
case <-ctx.Done():
s.logger.Tracef("retrieval: failed to get chunk %s: %v", addr, ctx.Err())
return nil, fmt.Errorf("retrieval: %w", ctx.Err())
}
}
}
}
// if we have not managed to get results after 5 (maxRequestRounds) rounds of peer selections, give up
return nil, storage.ErrNotFound
})
if err != nil {
return nil, err
}
return v.(swarm.Chunk), nil
}
func (s *Service) retrieveChunk(ctx context.Context, addr swarm.Address, sp *skipPeers, originated bool) (chunk swarm.Chunk, peer swarm.Address, requested bool, err error) {
startTimer := time.Now()
v := ctx.Value(requestSourceContextKey{})
sourcePeerAddr := swarm.Address{}
// allow upstream requests if this node is the source of the request
// i.e. the request was not forwarded, to improve retrieval
	// if this node is the closest to the chunk but still does not contain it
allowUpstream := true
if src, ok := v.(string); ok {
sourcePeerAddr, err = swarm.ParseHexAddress(src)
if err == nil {
sp.Add(sourcePeerAddr)
}
// do not allow upstream requests if the request was forwarded to this node
// to avoid the request loops
allowUpstream = false
}
ctx, cancel := context.WithTimeout(ctx, retrieveChunkTimeout)
defer cancel()
peer, err = s.closestPeer(addr, sp.All(), allowUpstream)
if err != nil {
return nil, peer, false, fmt.Errorf("get closest for address %s, allow upstream %v: %w", addr.String(), allowUpstream, err)
}
peerPO := swarm.Proximity(s.addr.Bytes(), peer.Bytes())
if !sourcePeerAddr.IsZero() {
// is forwarded request
sourceAddrPO := swarm.Proximity(sourcePeerAddr.Bytes(), addr.Bytes())
addrPO := swarm.Proximity(peer.Bytes(), addr.Bytes())
poGain := int(addrPO) - int(sourceAddrPO)
s.metrics.RetrieveChunkPOGainCounter.
WithLabelValues(strconv.Itoa(poGain)).
Inc()
}
// compute the peer's price for this chunk for price header
chunkPrice := s.pricer.PeerPrice(peer, addr)
// Reserve to see whether we can request the chunk
err = s.accounting.Reserve(ctx, peer, chunkPrice)
if err != nil {
sp.AddOverdraft(peer)
return nil, peer, false, err
}
defer s.accounting.Release(peer, chunkPrice)
sp.Add(peer)
s.logger.Tracef("retrieval: requesting chunk %s from peer %s", addr, peer)
stream, err := s.streamer.NewStream(ctx, peer, nil, protocolName, protocolVersion, streamName)
if err != nil {
s.metrics.TotalErrors.Inc()
return nil, peer, false, fmt.Errorf("new stream: %w", err)
}
defer func() {
if err != nil {
_ = stream.Reset()
} else {
go stream.FullClose()
}
}()
w, r := protobuf.NewWriterAndReader(stream)
if err := w.WriteMsgWithContext(ctx, &pb.Request{
Addr: addr.Bytes(),
}); err != nil {
s.metrics.TotalErrors.Inc()
return nil, peer, false, fmt.Errorf("write request: %w peer %s", err, peer.String())
}
var d pb.Delivery
if err := r.ReadMsgWithContext(ctx, &d); err != nil {
s.metrics.TotalErrors.Inc()
return nil, peer, true, fmt.Errorf("read delivery: %w peer %s", err, peer.String())
}
s.metrics.RetrieveChunkPeerPOTimer.
WithLabelValues(strconv.Itoa(int(peerPO))).
Observe(time.Since(startTimer).Seconds())
s.metrics.TotalRetrieved.Inc()
stamp := new(postage.Stamp)
err = stamp.UnmarshalBinary(d.Stamp)
if err != nil {
return nil, peer, true, fmt.Errorf("stamp unmarshal: %w", err)
}
chunk = swarm.NewChunk(addr, d.Data).WithStamp(stamp)
if !cac.Valid(chunk) {
if !soc.Valid(chunk) {
s.metrics.InvalidChunkRetrieved.Inc()
s.metrics.TotalErrors.Inc()
return nil, peer, true, swarm.ErrInvalidChunk
}
}
// credit the peer after successful delivery
err = s.accounting.Credit(peer, chunkPrice, originated)
if err != nil {
return nil, peer, true, err
}
s.metrics.ChunkPrice.Observe(float64(chunkPrice))
return chunk, peer, true, err
}
// closestPeer returns address of the peer that is closest to the chunk with
// provided address addr. This function will ignore peers with addresses
// provided in skipPeers and, if allowUpstream is true, peers that are further from
// the chunk than this node is could also be returned, allowing the upstream
// retrieve request.
func (s *Service) closestPeer(addr swarm.Address, skipPeers []swarm.Address, allowUpstream bool) (swarm.Address, error) {
closest := swarm.Address{}
err := s.peerSuggester.EachPeerRev(func(peer swarm.Address, po uint8) (bool, bool, error) {
for _, a := range skipPeers {
if a.Equal(peer) {
return false, false, nil
}
}
if closest.IsZero() {
closest = peer
return false, false, nil
}
dcmp, err := swarm.DistanceCmp(addr.Bytes(), closest.Bytes(), peer.Bytes())
if err != nil {
return false, false, fmt.Errorf("distance compare error. addr %s closest %s peer %s: %w", addr.String(), closest.String(), peer.String(), err)
}
switch dcmp {
case 0:
// do nothing
case -1:
// current peer is closer
closest = peer
case 1:
// closest is already closer to chunk
// do nothing
}
return false, false, nil
})
if err != nil {
return swarm.Address{}, err
}
// check if found
if closest.IsZero() {
return swarm.Address{}, topology.ErrNotFound
}
if allowUpstream {
return closest, nil
}
dcmp, err := swarm.DistanceCmp(addr.Bytes(), closest.Bytes(), s.addr.Bytes())
if err != nil {
return swarm.Address{}, fmt.Errorf("distance compare addr %s closest %s base address %s: %w", addr.String(), closest.String(), s.addr.String(), err)
}
if dcmp != 1 {
return swarm.Address{}, topology.ErrNotFound
}
return closest, nil
}
func (s *Service) handler(ctx context.Context, p p2p.Peer, stream p2p.Stream) (err error) {
w, r := protobuf.NewWriterAndReader(stream)
defer func() {
if err != nil {
_ = stream.Reset()
} else {
_ = stream.FullClose()
}
}()
var req pb.Request
if err := r.ReadMsgWithContext(ctx, &req); err != nil {
return fmt.Errorf("read request: %w peer %s", err, p.Address.String())
}
span, _, ctx := s.tracer.StartSpanFromContext(ctx, "handle-retrieve-chunk", s.logger, opentracing.Tag{Key: "address", Value: swarm.NewAddress(req.Addr).String()})
defer span.Finish()
ctx = context.WithValue(ctx, requestSourceContextKey{}, p.Address.String())
addr := swarm.NewAddress(req.Addr)
forwarded := false
chunk, err := s.storer.Get(ctx, storage.ModeGetRequest, addr)
if err != nil {
if errors.Is(err, storage.ErrNotFound) {
// forward the request
chunk, err = s.RetrieveChunk(ctx, addr, false)
if err != nil {
return fmt.Errorf("retrieve chunk: %w", err)
}
forwarded = true
} else {
return fmt.Errorf("get from store: %w", err)
}
}
stamp, err := chunk.Stamp().MarshalBinary()
if err != nil {
return fmt.Errorf("stamp marshal: %w", err)
}
chunkPrice := s.pricer.Price(chunk.Address())
debit, err := s.accounting.PrepareDebit(p.Address, chunkPrice)
if err != nil {
return fmt.Errorf("prepare debit to peer %s before writeback: %w", p.Address.String(), err)
}
defer debit.Cleanup()
if err := w.WriteMsgWithContext(ctx, &pb.Delivery{
Data: chunk.Data(),
Stamp: stamp,
}); err != nil {
return fmt.Errorf("write delivery: %w peer %s", err, p.Address.String())
}
s.logger.Tracef("retrieval protocol debiting peer %s", p.Address.String())
// debit price from p's balance
if err := debit.Apply(); err != nil {
return fmt.Errorf("apply debit: %w", err)
}
// cache the request last, so that putting to the localstore does not slow down the request flow
if s.caching && forwarded {
putMode := storage.ModePutRequest
cch, err := s.validStamp(chunk, stamp)
if err != nil {
// if a chunk with an invalid postage stamp was received
// we force it into the cache.
putMode = storage.ModePutRequestCache
cch = chunk
}
_, err = s.storer.Put(ctx, putMode, cch)
if err != nil {
return fmt.Errorf("retrieve cache put: %w", err)
}
}
return nil
}
| 1 | 15,073 | Why is a buffered channel larger than 1 needed here? The separate goroutines can just try to write to the channel with a select-default block, and then the larger buffer is not needed. I find the current implementation a bit convoluted; maybe we could simplify it a bit? I am not sure why it is needed, for example, to communicate an empty result. These patterns can be simplified using wait groups. | ethersphere-bee | go |
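A minimal sketch of the simplification the reviewer suggests, assuming the surrounding code fans retrieval attempts out to several goroutines and keeps only the first result; the names here (`resultC`, the peer loop) are hypothetical stand-ins, not the actual bee code:

```go
package main

import (
	"fmt"
	"sync"
)

func main() {
	// A buffer of 1 is all that is needed: the first goroutine to finish
	// publishes its result, and the select-default makes every later send
	// non-blocking, so no larger buffer has to absorb the losers.
	resultC := make(chan string, 1)

	var wg sync.WaitGroup
	for i := 0; i < 3; i++ {
		wg.Add(1)
		go func(peer int) {
			defer wg.Done()
			select {
			case resultC <- fmt.Sprintf("chunk from peer %d", peer):
			default: // a result was already delivered; drop this one
			}
		}(i)
	}

	// The wait group replaces any extra channel traffic whose only job
	// was to signal "all attempts finished, possibly with no result".
	wg.Wait()

	select {
	case res := <-resultC:
		fmt.Println("got:", res)
	default:
		fmt.Println("no result")
	}
}
```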
@@ -11,7 +11,7 @@
#
# It's strongly recommended that you check this file into your version control system.
-ActiveRecord::Schema.define(version: 20150410155813) do
+ActiveRecord::Schema.define(version: 20150415145819) do
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql" | 1 | # encoding: UTF-8
# This file is auto-generated from the current state of the database. Instead
# of editing this file, please use the migrations feature of Active Record to
# incrementally modify your database, and then regenerate this schema definition.
#
# Note that this schema.rb definition is the authoritative source for your
# database schema. If you need to create the application database on another
# system, you should be using db:schema:load, not running all the migrations
# from scratch. The latter is a flawed and unsustainable approach (the more migrations
# you'll amass, the slower it'll run and the greater likelihood for issues).
#
# It's strongly recommended that you check this file into your version control system.
ActiveRecord::Schema.define(version: 20150410155813) do
# These are extensions that must be enabled in order to support this database
enable_extension "plpgsql"
enable_extension "pg_stat_statements"
create_table "checkouts", force: :cascade do |t|
t.integer "user_id", null: false
t.integer "plan_id", null: false
t.string "stripe_coupon_id", limit: 255
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "checkouts", ["user_id"], name: "index_checkouts_on_user_id", using: :btree
create_table "classifications", force: :cascade do |t|
t.integer "topic_id"
t.string "classifiable_type", limit: 255
t.integer "classifiable_id"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
create_table "collaborations", force: :cascade do |t|
t.integer "repository_id", null: false
t.integer "user_id", null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
add_index "collaborations", ["repository_id", "user_id"], name: "index_collaborations_on_repository_id_and_user_id", unique: true, using: :btree
create_table "delayed_jobs", force: :cascade do |t|
t.integer "priority", default: 0
t.integer "attempts", default: 0
t.text "handler"
t.text "last_error"
t.datetime "run_at"
t.datetime "locked_at"
t.datetime "failed_at"
t.string "locked_by", limit: 255
t.string "queue", limit: 255
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
add_index "delayed_jobs", ["priority", "run_at"], name: "delayed_jobs_priority", using: :btree
create_table "downloads", force: :cascade do |t|
t.integer "purchaseable_id"
t.string "download_file_name", limit: 255
t.string "download_file_size", limit: 255
t.string "download_content_type", limit: 255
t.string "download_updated_at", limit: 255
t.string "description", limit: 255
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "purchaseable_type", limit: 255
end
create_table "exercises", force: :cascade do |t|
t.string "name", limit: 255, null: false
t.string "url", limit: 255, null: false
t.text "summary", null: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "uuid", limit: 255, null: false
t.boolean "public", default: false, null: false
t.string "edit_url", limit: 255
end
add_index "exercises", ["public"], name: "index_exercises_on_public", using: :btree
add_index "exercises", ["uuid"], name: "index_exercises_on_uuid", unique: true, using: :btree
create_table "invitations", force: :cascade do |t|
t.string "email", limit: 255, null: false
t.string "code", limit: 255, null: false
t.datetime "accepted_at"
t.integer "sender_id", null: false
t.integer "recipient_id"
t.integer "team_id", null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
add_index "invitations", ["code"], name: "index_invitations_on_code", using: :btree
add_index "invitations", ["team_id"], name: "index_invitations_on_team_id", using: :btree
create_table "mentors", force: :cascade do |t|
t.integer "user_id", null: false
t.string "availability", limit: 255, default: "11am to 5pm on Fridays", null: false
t.boolean "accepting_new_mentees", default: true, null: false
end
add_index "mentors", ["user_id"], name: "index_mentors_on_user_id", using: :btree
create_table "oauth_access_grants", force: :cascade do |t|
t.integer "resource_owner_id", null: false
t.integer "application_id", null: false
t.string "token", limit: 255, null: false
t.integer "expires_in", null: false
t.string "redirect_uri", limit: 255, null: false
t.datetime "created_at", null: false
t.datetime "revoked_at"
t.string "scopes", limit: 255
end
add_index "oauth_access_grants", ["token"], name: "index_oauth_access_grants_on_token", unique: true, using: :btree
create_table "oauth_access_tokens", force: :cascade do |t|
t.integer "resource_owner_id"
t.integer "application_id", null: false
t.string "token", limit: 255, null: false
t.string "refresh_token", limit: 255
t.integer "expires_in"
t.datetime "revoked_at"
t.datetime "created_at", null: false
t.string "scopes", limit: 255
end
add_index "oauth_access_tokens", ["refresh_token"], name: "index_oauth_access_tokens_on_refresh_token", unique: true, using: :btree
add_index "oauth_access_tokens", ["resource_owner_id"], name: "index_oauth_access_tokens_on_resource_owner_id", using: :btree
add_index "oauth_access_tokens", ["token"], name: "index_oauth_access_tokens_on_token", unique: true, using: :btree
create_table "oauth_applications", force: :cascade do |t|
t.string "name", limit: 255, null: false
t.string "uid", limit: 255, null: false
t.string "secret", limit: 255, null: false
t.string "redirect_uri", limit: 255, null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "scopes", limit: 255, default: "", null: false
end
add_index "oauth_applications", ["uid"], name: "index_oauth_applications_on_uid", unique: true, using: :btree
create_table "plans", force: :cascade do |t|
t.string "name", limit: 255, null: false
t.string "sku", limit: 255, null: false
t.string "short_description", limit: 255, null: false
t.text "description", null: false
t.boolean "active", default: true, null: false
t.integer "price_in_dollars", null: false
t.text "terms"
t.boolean "includes_mentor", default: false
t.boolean "featured", default: true, null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.boolean "includes_repositories", default: true, null: false
t.boolean "includes_forum", default: true, null: false
t.boolean "includes_shows", default: true, null: false
t.boolean "includes_team", default: false, null: false
t.boolean "annual", default: false
t.integer "annual_plan_id"
t.integer "minimum_quantity", default: 1, null: false
t.boolean "includes_trails", default: false, null: false
end
add_index "plans", ["annual_plan_id"], name: "index_plans_on_annual_plan_id", using: :btree
create_table "products", force: :cascade do |t|
t.string "name", limit: 255
t.string "sku", limit: 255
t.string "tagline", limit: 255
t.string "call_to_action", limit: 255
t.string "short_description", limit: 255
t.text "description"
t.string "type", limit: 255, null: false
t.boolean "active", default: true, null: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "github_url", limit: 255
t.text "questions"
t.text "terms"
t.text "alternative_description"
t.string "product_image_file_name", limit: 255
t.string "product_image_file_size", limit: 255
t.string "product_image_content_type", limit: 255
t.string "product_image_updated_at", limit: 255
t.boolean "promoted", default: false, null: false
t.string "slug", limit: 255, null: false
t.text "resources", default: "", null: false
t.integer "product_id"
t.string "github_repository"
t.integer "trail_id"
end
add_index "products", ["product_id"], name: "index_products_on_product_id", using: :btree
add_index "products", ["slug"], name: "index_products_on_slug", unique: true, using: :btree
add_index "products", ["trail_id"], name: "index_products_on_trail_id", using: :btree
create_table "rails_admin_histories", force: :cascade do |t|
t.text "message"
t.string "username", limit: 255
t.integer "item"
t.string "table", limit: 255
t.integer "month", limit: 2
t.integer "year", limit: 8
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
end
add_index "rails_admin_histories", ["item", "table", "month", "year"], name: "index_rails_admin_histories", using: :btree
create_table "statuses", force: :cascade do |t|
t.integer "completeable_id", null: false
t.integer "user_id", null: false
t.string "state", limit: 255, default: "In Progress", null: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "completeable_type", limit: 255, null: false
end
add_index "statuses", ["completeable_id"], name: "index_statuses_on_completeable_id", using: :btree
add_index "statuses", ["completeable_type"], name: "index_statuses_on_completeable_type", using: :btree
add_index "statuses", ["user_id"], name: "index_statuses_on_user_id", using: :btree
create_table "steps", force: :cascade do |t|
t.integer "trail_id", null: false
t.integer "completeable_id", null: false
t.integer "position", null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "completeable_type", null: false
end
add_index "steps", ["trail_id", "position"], name: "index_steps_on_trail_id_and_position", using: :btree
add_index "steps", ["trail_id"], name: "index_steps_on_trail_id", using: :btree
create_table "subscriptions", force: :cascade do |t|
t.integer "user_id"
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.date "deactivated_on"
t.date "scheduled_for_cancellation_on"
t.integer "plan_id", null: false
t.string "plan_type", limit: 255, default: "IndividualPlan", null: false
t.decimal "next_payment_amount", default: 0.0, null: false
t.date "next_payment_on"
t.string "stripe_id", limit: 255
end
add_index "subscriptions", ["plan_id", "plan_type"], name: "index_subscriptions_on_plan_id_and_plan_type", using: :btree
add_index "subscriptions", ["stripe_id"], name: "index_subscriptions_on_stripe_id", using: :btree
add_index "subscriptions", ["user_id"], name: "index_subscriptions_on_user_id", using: :btree
create_table "teachers", force: :cascade do |t|
t.integer "user_id"
t.integer "video_id"
end
add_index "teachers", ["user_id", "video_id"], name: "index_teachers_on_user_id_and_video_id", unique: true, using: :btree
create_table "teams", force: :cascade do |t|
t.string "name", limit: 255, null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.integer "subscription_id", null: false
end
create_table "topics", force: :cascade do |t|
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "keywords", limit: 255
t.string "name", limit: 255, null: false
t.string "slug", limit: 255, null: false
t.text "summary"
t.boolean "explorable", default: false, null: false
t.string "color", limit: 255, default: "", null: false
t.string "color_accent", limit: 255, default: "", null: false
end
add_index "topics", ["explorable"], name: "index_topics_on_explorable", using: :btree
add_index "topics", ["slug"], name: "index_topics_on_slug", unique: true, using: :btree
create_table "trails", force: :cascade do |t|
t.string "name", limit: 255, null: false
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "complete_text", limit: 255, null: false
t.boolean "published", default: false, null: false
t.string "slug", limit: 255, null: false
t.text "description", default: "", null: false
t.integer "topic_id"
end
add_index "trails", ["published"], name: "index_trails_on_published", using: :btree
add_index "trails", ["slug"], name: "index_trails_on_slug", unique: true, using: :btree
create_table "users", force: :cascade do |t|
t.string "email", limit: 255
t.string "encrypted_password", limit: 128
t.string "salt", limit: 128
t.string "confirmation_token", limit: 128
t.string "remember_token", limit: 128
t.boolean "email_confirmed", default: true, null: false
t.datetime "created_at"
t.datetime "updated_at"
t.string "reference", limit: 255
t.boolean "admin", default: false, null: false
t.string "stripe_customer_id", limit: 255, default: "", null: false
t.string "github_username", null: false
t.string "auth_provider", limit: 255
t.integer "auth_uid"
t.string "organization", limit: 255
t.string "address1", limit: 255
t.string "address2", limit: 255
t.string "city", limit: 255
t.string "state", limit: 255
t.string "zip_code", limit: 255
t.string "country", limit: 255
t.string "name", limit: 255
t.text "bio"
t.integer "mentor_id"
t.integer "team_id"
end
add_index "users", ["admin"], name: "index_users_on_admin", using: :btree
add_index "users", ["email"], name: "index_users_on_email", using: :btree
add_index "users", ["github_username"], name: "index_users_on_github_username", unique: true, using: :btree
add_index "users", ["id", "confirmation_token"], name: "index_users_on_id_and_confirmation_token", using: :btree
add_index "users", ["mentor_id"], name: "index_users_on_mentor_id", using: :btree
add_index "users", ["remember_token"], name: "index_users_on_remember_token", using: :btree
add_index "users", ["team_id"], name: "index_users_on_team_id", using: :btree
create_table "vanity_conversions", force: :cascade do |t|
t.integer "vanity_experiment_id"
t.integer "alternative"
t.integer "conversions"
end
add_index "vanity_conversions", ["vanity_experiment_id", "alternative"], name: "by_experiment_id_and_alternative", using: :btree
create_table "vanity_experiments", force: :cascade do |t|
t.string "experiment_id"
t.integer "outcome"
t.datetime "created_at"
t.datetime "completed_at"
end
add_index "vanity_experiments", ["experiment_id"], name: "index_vanity_experiments_on_experiment_id", using: :btree
create_table "vanity_metric_values", force: :cascade do |t|
t.integer "vanity_metric_id"
t.integer "index"
t.integer "value"
t.string "date"
end
add_index "vanity_metric_values", ["vanity_metric_id"], name: "index_vanity_metric_values_on_vanity_metric_id", using: :btree
create_table "vanity_metrics", force: :cascade do |t|
t.string "metric_id"
t.datetime "updated_at"
end
add_index "vanity_metrics", ["metric_id"], name: "index_vanity_metrics_on_metric_id", using: :btree
create_table "vanity_participants", force: :cascade do |t|
t.string "experiment_id"
t.string "identity"
t.integer "shown"
t.integer "seen"
t.integer "converted"
t.datetime "created_at"
t.datetime "updated_at"
end
add_index "vanity_participants", ["experiment_id", "converted"], name: "by_experiment_id_and_converted", using: :btree
add_index "vanity_participants", ["experiment_id", "identity"], name: "by_experiment_id_and_identity", using: :btree
add_index "vanity_participants", ["experiment_id", "seen"], name: "by_experiment_id_and_seen", using: :btree
add_index "vanity_participants", ["experiment_id", "shown"], name: "by_experiment_id_and_shown", using: :btree
add_index "vanity_participants", ["experiment_id"], name: "index_vanity_participants_on_experiment_id", using: :btree
create_table "videos", force: :cascade do |t|
t.integer "watchable_id"
t.string "wistia_id", limit: 255
t.string "name", limit: 255
t.datetime "created_at", null: false
t.datetime "updated_at", null: false
t.string "watchable_type", limit: 255
t.integer "position", default: 0, null: false
t.text "notes"
t.date "published_on"
t.string "preview_wistia_id", limit: 255
t.string "slug", limit: 255, null: false
t.text "summary"
end
add_index "videos", ["slug"], name: "index_videos_on_slug", unique: true, using: :btree
add_index "videos", ["watchable_type", "watchable_id"], name: "index_videos_on_watchable_type_and_watchable_id", using: :btree
end
| 1 | 14,481 | There seem to be more changes in here than I'd expect (mostly `limit: 255` additions). | thoughtbot-upcase | rb |
@@ -38,6 +38,10 @@ namespace OpenTelemetry.Context.Propagation
private static readonly int VersionAndTraceIdAndSpanIdLength = "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-".Length;
private static readonly int OptionsLength = "00".Length;
private static readonly int TraceparentLengthV0 = "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-00".Length;
+ private static readonly int TraceStateKeyMaxLength = 256;
+ private static readonly int TraceStateKeyTenentMaxLength = 241;
+ private static readonly int TraceStateKeyVendorMaxLength = 14;
+ private static readonly int TraceStateValueMaxLength = 256;
/// <inheritdoc/>
public override ISet<string> Fields => new HashSet<string> { TraceState, TraceParent }; | 1 | // <copyright file="TraceContextPropagator.cs" company="OpenTelemetry Authors">
// Copyright The OpenTelemetry Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// </copyright>
using System;
using System.Collections.Generic;
using System.Diagnostics;
using System.Linq;
using System.Text;
using OpenTelemetry.Internal;
namespace OpenTelemetry.Context.Propagation
{
/// <summary>
/// A text map propagator for W3C trace context. See https://w3c.github.io/trace-context/.
/// </summary>
public class TraceContextPropagator : TextMapPropagator
{
private const string TraceParent = "traceparent";
private const string TraceState = "tracestate";
private static readonly int VersionPrefixIdLength = "00-".Length;
private static readonly int TraceIdLength = "0af7651916cd43dd8448eb211c80319c".Length;
private static readonly int VersionAndTraceIdLength = "00-0af7651916cd43dd8448eb211c80319c-".Length;
private static readonly int SpanIdLength = "00f067aa0ba902b7".Length;
private static readonly int VersionAndTraceIdAndSpanIdLength = "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-".Length;
private static readonly int OptionsLength = "00".Length;
private static readonly int TraceparentLengthV0 = "00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-00".Length;
/// <inheritdoc/>
public override ISet<string> Fields => new HashSet<string> { TraceState, TraceParent };
/// <inheritdoc/>
public override PropagationContext Extract<T>(PropagationContext context, T carrier, Func<T, string, IEnumerable<string>> getter)
{
if (context.ActivityContext.IsValid())
{
// If a valid context has already been extracted, perform a noop.
return context;
}
if (carrier == null)
{
OpenTelemetryApiEventSource.Log.FailedToExtractActivityContext(nameof(TraceContextPropagator), "null carrier");
return context;
}
if (getter == null)
{
OpenTelemetryApiEventSource.Log.FailedToExtractActivityContext(nameof(TraceContextPropagator), "null getter");
return context;
}
try
{
var traceparentCollection = getter(carrier, TraceParent);
// There must be a single traceparent
if (traceparentCollection == null || traceparentCollection.Count() != 1)
{
return context;
}
var traceparent = traceparentCollection.First();
var traceparentParsed = TryExtractTraceparent(traceparent, out var traceId, out var spanId, out var traceoptions);
if (!traceparentParsed)
{
return context;
}
string tracestate = null;
var tracestateCollection = getter(carrier, TraceState);
if (tracestateCollection?.Any() ?? false)
{
TryExtractTracestate(tracestateCollection.ToArray(), out tracestate);
}
return new PropagationContext(
new ActivityContext(traceId, spanId, traceoptions, tracestate, isRemote: true),
context.Baggage);
}
catch (Exception ex)
{
OpenTelemetryApiEventSource.Log.ActivityContextExtractException(nameof(TraceContextPropagator), ex);
}
// in case of exception indicate to upstream that there is no parseable context from the top
return context;
}
/// <inheritdoc/>
public override void Inject<T>(PropagationContext context, T carrier, Action<T, string, string> setter)
{
if (context.ActivityContext.TraceId == default || context.ActivityContext.SpanId == default)
{
OpenTelemetryApiEventSource.Log.FailedToInjectActivityContext(nameof(TraceContextPropagator), "Invalid context");
return;
}
if (carrier == null)
{
OpenTelemetryApiEventSource.Log.FailedToInjectActivityContext(nameof(TraceContextPropagator), "null carrier");
return;
}
if (setter == null)
{
OpenTelemetryApiEventSource.Log.FailedToInjectActivityContext(nameof(TraceContextPropagator), "null setter");
return;
}
var traceparent = string.Concat("00-", context.ActivityContext.TraceId.ToHexString(), "-", context.ActivityContext.SpanId.ToHexString());
traceparent = string.Concat(traceparent, (context.ActivityContext.TraceFlags & ActivityTraceFlags.Recorded) != 0 ? "-01" : "-00");
setter(carrier, TraceParent, traceparent);
string tracestateStr = context.ActivityContext.TraceState;
if (tracestateStr?.Length > 0)
{
setter(carrier, TraceState, tracestateStr);
}
}
internal static bool TryExtractTraceparent(string traceparent, out ActivityTraceId traceId, out ActivitySpanId spanId, out ActivityTraceFlags traceOptions)
{
// from https://github.com/w3c/distributed-tracing/blob/master/trace_context/HTTP_HEADER_FORMAT.md
// traceparent: 00-0af7651916cd43dd8448eb211c80319c-00f067aa0ba902b7-01
traceId = default;
spanId = default;
traceOptions = default;
var bestAttempt = false;
if (string.IsNullOrWhiteSpace(traceparent) || traceparent.Length < TraceparentLengthV0)
{
return false;
}
// if version does not end with delimiter
if (traceparent[VersionPrefixIdLength - 1] != '-')
{
return false;
}
// or version is not a hex (will throw)
var version0 = HexCharToByte(traceparent[0]);
var version1 = HexCharToByte(traceparent[1]);
if (version0 == 0xf && version1 == 0xf)
{
return false;
}
if (version0 > 0)
{
// expected version is 00
// for higher versions - best attempt parsing of trace id, span id, etc.
bestAttempt = true;
}
if (traceparent[VersionAndTraceIdLength - 1] != '-')
{
return false;
}
try
{
traceId = ActivityTraceId.CreateFromString(traceparent.AsSpan().Slice(VersionPrefixIdLength, TraceIdLength));
}
catch (ArgumentOutOfRangeException)
{
// it's ok to still parse tracestate
return false;
}
if (traceparent[VersionAndTraceIdAndSpanIdLength - 1] != '-')
{
return false;
}
byte options1;
try
{
spanId = ActivitySpanId.CreateFromString(traceparent.AsSpan().Slice(VersionAndTraceIdLength, SpanIdLength));
options1 = HexCharToByte(traceparent[VersionAndTraceIdAndSpanIdLength + 1]);
}
catch (ArgumentOutOfRangeException)
{
// it's ok to still parse tracestate
return false;
}
if ((options1 & 1) == 1)
{
traceOptions |= ActivityTraceFlags.Recorded;
}
if ((!bestAttempt) && (traceparent.Length != VersionAndTraceIdAndSpanIdLength + OptionsLength))
{
return false;
}
if (bestAttempt)
{
if ((traceparent.Length > TraceparentLengthV0) && (traceparent[TraceparentLengthV0] != '-'))
{
return false;
}
}
return true;
}
internal static bool TryExtractTracestate(string[] tracestateCollection, out string tracestateResult)
{
tracestateResult = string.Empty;
if (tracestateCollection != null)
{
var result = new StringBuilder();
                // Iterate in reverse order because the builder prepends each
                // element to the front of the list.
for (int i = tracestateCollection.Length - 1; i >= 0; i--)
{
if (string.IsNullOrEmpty(tracestateCollection[i]))
{
return false;
}
result.Append(tracestateCollection[i]);
}
tracestateResult = result.ToString();
}
return true;
}
private static byte HexCharToByte(char c)
{
if (((c >= '0') && (c <= '9'))
|| ((c >= 'a') && (c <= 'f'))
|| ((c >= 'A') && (c <= 'F')))
{
return Convert.ToByte(c);
}
throw new ArgumentOutOfRangeException(nameof(c), c, $"Invalid character: {c}.");
}
}
}
| 1 | 18,697 | I'm not sure, but I think it's `Tenant` | open-telemetry-opentelemetry-dotnet | .cs |
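The constants in the diff above (including the `Tenent` spelling the comment flags) line up with the W3C trace-context limits: a tracestate key may be at most 256 characters, and a multi-tenant key of the form `tenant@vendor` allows up to 241 characters for the tenant and 14 for the vendor, which with the one-character `@` separator adds back up to 256. A minimal length-check sketch built on those limits; `HasValidKeyLength` is a hypothetical helper, not something the diff adds:

```csharp
internal static class TraceStateKeyLimits
{
    private const int KeyMaxLength = 256;    // simple key: up to 256 chars
    private const int TenantMaxLength = 241; // tenant part of "tenant@vendor"
    private const int VendorMaxLength = 14;  // vendor part of "tenant@vendor"

    // Checks only the length rules; the spec also restricts the allowed
    // character set, which this sketch ignores.
    public static bool HasValidKeyLength(string key)
    {
        if (string.IsNullOrEmpty(key) || key.Length > KeyMaxLength)
        {
            return false;
        }

        int at = key.IndexOf('@');
        if (at < 0)
        {
            return true; // simple key, already within 256
        }

        // 241 + 1 ('@') + 14 == 256, so the two parts re-derive the total.
        return at <= TenantMaxLength && key.Length - at - 1 <= VendorMaxLength;
    }
}
```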
@@ -15,8 +15,9 @@ namespace Datadog.Trace.Configuration
/// <paramref name="data"/>.
/// </summary>
/// <param name="data">A string containing key-value pairs which are comma-separated, and for which the key and value are colon-separated.</param>
+ /// <param name="allowOptionalMappings">Determines whether to create dictionary entries when the input has no value mapping</param>
/// <returns><see cref="IDictionary{TKey, TValue}"/> of key value pairs.</returns>
- public static IDictionary<string, string> ParseCustomKeyValues(string data)
+ public static IDictionary<string, string> ParseCustomKeyValues(string data, bool allowOptionalMappings = false)
{
var dictionary = new ConcurrentDictionary<string, string>();
| 1 | using System.Collections.Concurrent;
using System.Collections.Generic;
using Datadog.Trace.ExtensionMethods;
namespace Datadog.Trace.Configuration
{
/// <summary>
/// A base <see cref="IConfigurationSource"/> implementation
/// for string-only configuration sources.
/// </summary>
public abstract class StringConfigurationSource : IConfigurationSource
{
/// <summary>
/// Returns a <see cref="IDictionary{TKey, TValue}"/> from parsing
/// <paramref name="data"/>.
/// </summary>
/// <param name="data">A string containing key-value pairs which are comma-separated, and for which the key and value are colon-separated.</param>
/// <returns><see cref="IDictionary{TKey, TValue}"/> of key value pairs.</returns>
public static IDictionary<string, string> ParseCustomKeyValues(string data)
{
var dictionary = new ConcurrentDictionary<string, string>();
// A null return value means the key was not present,
// and CompositeConfigurationSource depends on this behavior
// (it returns the first non-null value it finds).
if (data == null)
{
return null;
}
if (string.IsNullOrWhiteSpace(data))
{
return dictionary;
}
var entries = data.Split(',');
foreach (var e in entries)
{
var kv = e.Split(':');
if (kv.Length != 2)
{
continue;
}
var key = kv[0];
var value = kv[1];
dictionary[key] = value;
}
return dictionary;
}
/// <inheritdoc />
public abstract string GetString(string key);
/// <inheritdoc />
public virtual int? GetInt32(string key)
{
string value = GetString(key);
return int.TryParse(value, out int result)
? result
: (int?)null;
}
/// <inheritdoc />
public double? GetDouble(string key)
{
string value = GetString(key);
return double.TryParse(value, out double result)
? result
: (double?)null;
}
/// <inheritdoc />
public virtual bool? GetBool(string key)
{
var value = GetString(key);
return value?.ToBoolean();
}
/// <summary>
        /// Gets a <see cref="ConcurrentDictionary{TKey, TValue}"/> by parsing the comma-separated key-value pairs stored under the given key.
/// </summary>
/// <param name="key">The key</param>
/// <returns><see cref="ConcurrentDictionary{TKey, TValue}"/> containing all of the key-value pairs.</returns>
public IDictionary<string, string> GetDictionary(string key)
{
return ParseCustomKeyValues(GetString(key));
}
}
}
| 1 | 19,501 | Not sure if it's an issue, but this is a breaking change in a public API. Maybe we should add an overload without the optional parameter instead and delegate the existing call to this one? | DataDog-dd-trace-dotnet | .cs |
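A minimal sketch of the overload-and-delegate shape the reviewer proposes. Adding an optional parameter to an already-shipped public method is binary-breaking: previously compiled callers bind to the old one-argument signature, which would no longer exist. Keeping that signature and forwarding to a new two-argument overload avoids the break; the parsing body below is a simplified reconstruction under that assumption, not the library's exact code:

```csharp
using System.Collections.Concurrent;
using System.Collections.Generic;

public static class ParseSketch
{
    // The original public signature is preserved, so existing compiled
    // callers keep binding here.
    public static IDictionary<string, string> ParseCustomKeyValues(string data)
    {
        return ParseCustomKeyValues(data, allowOptionalMappings: false);
    }

    // New behavior lives in an explicit overload instead of an optional
    // parameter on the old method.
    public static IDictionary<string, string> ParseCustomKeyValues(string data, bool allowOptionalMappings)
    {
        if (data == null)
        {
            return null;
        }

        var dictionary = new ConcurrentDictionary<string, string>();
        foreach (var entry in data.Split(','))
        {
            var kv = entry.Split(':');
            if (kv.Length == 2)
            {
                dictionary[kv[0]] = kv[1];
            }
            else if (kv.Length == 1 && allowOptionalMappings)
            {
                dictionary[kv[0]] = string.Empty; // key with no value mapping
            }
        }

        return dictionary;
    }
}
```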
@@ -147,6 +147,11 @@ func (o *Outbound) Transports() []transport.Transport {
return []transport.Transport{o.transport}
}
+// Chooser returns the outbound's peer chooser.
+func (o *Outbound) Chooser() peer.Chooser {
+ return o.chooser
+}
+
// Start the HTTP outbound
func (o *Outbound) Start() error {
return o.once.Start(o.chooser.Start) | 1 | // Copyright (c) 2017 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package http
import (
"context"
"fmt"
"io/ioutil"
"log"
"net/http"
"net/url"
"strings"
"time"
"go.uber.org/yarpc/api/peer"
"go.uber.org/yarpc/api/transport"
"go.uber.org/yarpc/internal/errors"
"go.uber.org/yarpc/internal/introspection"
"go.uber.org/yarpc/internal/sync"
peerchooser "go.uber.org/yarpc/peer"
"go.uber.org/yarpc/peer/hostport"
"github.com/opentracing/opentracing-go"
"github.com/opentracing/opentracing-go/ext"
)
// this ensures the HTTP outbound implements both transport.Outbound interfaces
var (
_ transport.UnaryOutbound = (*Outbound)(nil)
_ transport.OnewayOutbound = (*Outbound)(nil)
_ introspection.IntrospectableOutbound = (*Outbound)(nil)
)
var defaultURLTemplate, _ = url.Parse("http://localhost")
// OutboundOption customizes an HTTP Outbound.
type OutboundOption func(*Outbound)
func (OutboundOption) httpOption() {}
// URLTemplate specifies the URL this outbound makes requests to. For
// peer.Chooser-based outbounds, the peer (host:port) section of the URL may
// vary from call to call but the rest will remain unchanged. For single-peer
// outbounds, the URL will be used as-is.
func URLTemplate(template string) OutboundOption {
return func(o *Outbound) {
o.setURLTemplate(template)
}
}
// NewOutbound builds an HTTP outbound which sends requests to peers supplied
// by the given peer.Chooser. The URL template used for the different
// peers may be customized using the URLTemplate option.
//
// Peer Choosers used with the HTTP outbound MUST yield *hostport.Peer
// objects. Also note that the Chooser MUST have started before Outbound.Start
// is called.
func (t *Transport) NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
o := &Outbound{
once: sync.Once(),
chooser: chooser,
urlTemplate: defaultURLTemplate,
tracer: t.tracer,
transport: t,
}
for _, opt := range opts {
opt(o)
}
return o
}
// NewOutbound builds an HTTP outbound which sends requests to peers supplied
// by the given peer.Chooser. The URL template used for the different
// peers may be customized using the URLTemplate option.
//
// Peer Choosers used with the HTTP outbound MUST yield *hostport.Peer
// objects. Also note that the Chooser MUST have started before Outbound.Start
// is called.
func NewOutbound(chooser peer.Chooser, opts ...OutboundOption) *Outbound {
return NewTransport().NewOutbound(chooser, opts...)
}
// NewSingleOutbound builds an outbound which sends YARPC requests over HTTP
// to the specified URL.
//
// The URLTemplate option has no effect in this form.
func (t *Transport) NewSingleOutbound(uri string, opts ...OutboundOption) *Outbound {
parsedURL, err := url.Parse(uri)
if err != nil {
panic(err.Error())
}
chooser := peerchooser.NewSingle(hostport.PeerIdentifier(parsedURL.Host), t)
o := t.NewOutbound(chooser)
for _, opt := range opts {
opt(o)
}
o.setURLTemplate(uri)
return o
}
// Outbound sends YARPC requests over HTTP. It may be constructed using the
// NewOutbound function or the NewOutbound or NewSingleOutbound methods on the
// HTTP Transport. It is recommended that services use a single HTTP transport
// to construct all HTTP outbounds, ensuring efficient sharing of resources
// across the different outbounds.
type Outbound struct {
chooser peer.Chooser
urlTemplate *url.URL
tracer opentracing.Tracer
transport *Transport
once sync.LifecycleOnce
}
// setURLTemplate configures an alternate URL template.
// The host:port portion of the URL template gets replaced by the chosen peer's
// identifier for each outbound request.
func (o *Outbound) setURLTemplate(URL string) {
parsedURL, err := url.Parse(URL)
if err != nil {
log.Fatalf("failed to configure HTTP outbound: invalid URL template %q: %s", URL, err)
}
o.urlTemplate = parsedURL
}
// Transports returns the outbound's HTTP transport.
func (o *Outbound) Transports() []transport.Transport {
return []transport.Transport{o.transport}
}
// Start the HTTP outbound
func (o *Outbound) Start() error {
return o.once.Start(o.chooser.Start)
}
// Stop the HTTP outbound
func (o *Outbound) Stop() error {
return o.once.Stop(o.chooser.Stop)
}
// IsRunning returns whether the Outbound is running.
func (o *Outbound) IsRunning() bool {
return o.once.IsRunning()
}
// Call makes a HTTP request
func (o *Outbound) Call(ctx context.Context, treq *transport.Request) (*transport.Response, error) {
if err := o.once.WhenRunning(ctx); err != nil {
return nil, err
}
start := time.Now()
deadline, _ := ctx.Deadline()
ttl := deadline.Sub(start)
return o.call(ctx, treq, start, ttl)
}
// CallOneway makes a oneway request
func (o *Outbound) CallOneway(ctx context.Context, treq *transport.Request) (transport.Ack, error) {
if err := o.once.WhenRunning(ctx); err != nil {
return nil, err
}
start := time.Now()
var ttl time.Duration
_, err := o.call(ctx, treq, start, ttl)
if err != nil {
return nil, err
}
return time.Now(), nil
}
func (o *Outbound) call(ctx context.Context, treq *transport.Request, start time.Time, ttl time.Duration) (*transport.Response, error) {
p, onFinish, err := o.getPeerForRequest(ctx, treq)
if err != nil {
return nil, err
}
resp, err := o.callWithPeer(ctx, treq, start, ttl, p)
// Call the onFinish method right before returning (with the error from call with peer)
onFinish(err)
return resp, err
}
func (o *Outbound) callWithPeer(
ctx context.Context,
treq *transport.Request,
start time.Time,
ttl time.Duration,
p *hostport.Peer,
) (*transport.Response, error) {
req, err := o.createRequest(p, treq)
if err != nil {
return nil, err
}
req.Header = applicationHeaders.ToHTTPHeaders(treq.Headers, nil)
ctx, req, span, err := o.withOpentracingSpan(ctx, req, treq, start)
if err != nil {
return nil, err
}
defer span.Finish()
req = o.withCoreHeaders(req, treq, ttl)
client, err := o.getHTTPClient(p)
if err != nil {
return nil, err
}
response, err := client.Do(req.WithContext(ctx))
if err != nil {
// Workaround borrowed from ctxhttp until
// https://github.com/golang/go/issues/17711 is resolved.
select {
case <-ctx.Done():
err = ctx.Err()
default:
}
span.SetTag("error", true)
span.LogEvent(err.Error())
if err == context.DeadlineExceeded {
end := time.Now()
return nil, errors.ClientTimeoutError(treq.Service, treq.Procedure, end.Sub(start))
}
return nil, err
}
span.SetTag("http.status_code", response.StatusCode)
if response.StatusCode >= 200 && response.StatusCode < 300 {
appHeaders := applicationHeaders.FromHTTPHeaders(
response.Header, transport.NewHeaders())
appError := response.Header.Get(ApplicationStatusHeader) == ApplicationErrorStatus
return &transport.Response{
Headers: appHeaders,
Body: response.Body,
ApplicationError: appError,
}, nil
}
return nil, getErrFromResponse(response)
}
func (o *Outbound) getPeerForRequest(ctx context.Context, treq *transport.Request) (*hostport.Peer, func(error), error) {
p, onFinish, err := o.chooser.Choose(ctx, treq)
if err != nil {
return nil, nil, err
}
hpPeer, ok := p.(*hostport.Peer)
if !ok {
return nil, nil, peer.ErrInvalidPeerConversion{
Peer: p,
ExpectedType: "*hostport.Peer",
}
}
return hpPeer, onFinish, nil
}
func (o *Outbound) createRequest(p *hostport.Peer, treq *transport.Request) (*http.Request, error) {
newURL := *o.urlTemplate
newURL.Host = p.HostPort()
return http.NewRequest("POST", newURL.String(), treq.Body)
}
func (o *Outbound) withOpentracingSpan(ctx context.Context, req *http.Request, treq *transport.Request, start time.Time) (context.Context, *http.Request, opentracing.Span, error) {
// Apply HTTP Context headers for tracing and baggage carried by tracing.
tracer := o.tracer
var parent opentracing.SpanContext // ok to be nil
if parentSpan := opentracing.SpanFromContext(ctx); parentSpan != nil {
parent = parentSpan.Context()
}
span := tracer.StartSpan(
treq.Procedure,
opentracing.StartTime(start),
opentracing.ChildOf(parent),
opentracing.Tags{
"rpc.caller": treq.Caller,
"rpc.service": treq.Service,
"rpc.encoding": treq.Encoding,
"rpc.transport": "http",
},
)
ext.PeerService.Set(span, treq.Service)
ext.SpanKindRPCClient.Set(span)
ext.HTTPUrl.Set(span, req.URL.String())
ctx = opentracing.ContextWithSpan(ctx, span)
err := tracer.Inject(
span.Context(),
opentracing.HTTPHeaders,
opentracing.HTTPHeadersCarrier(req.Header),
)
return ctx, req, span, err
}
func (o *Outbound) withCoreHeaders(req *http.Request, treq *transport.Request, ttl time.Duration) *http.Request {
req.Header.Set(CallerHeader, treq.Caller)
req.Header.Set(ServiceHeader, treq.Service)
req.Header.Set(ProcedureHeader, treq.Procedure)
if ttl != 0 {
req.Header.Set(TTLMSHeader, fmt.Sprintf("%d", ttl/time.Millisecond))
}
if treq.ShardKey != "" {
req.Header.Set(ShardKeyHeader, treq.ShardKey)
}
if treq.RoutingKey != "" {
req.Header.Set(RoutingKeyHeader, treq.RoutingKey)
}
if treq.RoutingDelegate != "" {
req.Header.Set(RoutingDelegateHeader, treq.RoutingDelegate)
}
encoding := string(treq.Encoding)
if encoding != "" {
req.Header.Set(EncodingHeader, encoding)
}
return req
}
func (o *Outbound) getHTTPClient(p *hostport.Peer) (*http.Client, error) {
t, ok := p.Transport().(*Transport)
if !ok {
return nil, peer.ErrInvalidTransportConversion{
Transport: p.Transport(),
ExpectedType: "*http.Transport",
}
}
return t.client, nil
}
func getErrFromResponse(response *http.Response) error {
// TODO Behavior for 300-range status codes is undefined
contents, err := ioutil.ReadAll(response.Body)
if err != nil {
return err
}
if err := response.Body.Close(); err != nil {
return err
}
// Trim the trailing newline from HTTP error messages
message := strings.TrimSuffix(string(contents), "\n")
if response.StatusCode >= 400 && response.StatusCode < 500 {
return errors.RemoteBadRequestError(message)
}
if response.StatusCode == http.StatusGatewayTimeout {
return errors.RemoteTimeoutError(message)
}
return errors.RemoteUnexpectedError(message)
}
// Introspect returns basic status about this outbound.
func (o *Outbound) Introspect() introspection.OutboundStatus {
state := "Stopped"
if o.IsRunning() {
state = "Running"
}
var chooser introspection.ChooserStatus
if i, ok := o.chooser.(introspection.IntrospectableChooser); ok {
chooser = i.Introspect()
} else {
chooser = introspection.ChooserStatus{
Name: "Introspection not available",
}
}
return introspection.OutboundStatus{
Transport: "http",
Endpoint: o.urlTemplate.String(),
State: state,
Chooser: chooser,
}
}
| 1 | 13,360 | As opposed to exposing the chooser function and increasing our API exposure for the purpose of tests, can we move the HTTP transport config test into this package? | yarpc-yarpc-go | go |
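A minimal sketch of the alternative the reviewer suggests: a test file in the same package (a hypothetical `outbound_internal_test.go` declaring `package http`) can read the unexported `chooser` field directly, so no public `Chooser()` accessor has to be added just for test coverage:

```go
package http

import "testing"

// Same-package tests can inspect unexported state, keeping the exported
// API surface unchanged.
func TestSingleOutboundHasChooser(t *testing.T) {
	out := NewTransport().NewSingleOutbound("http://127.0.0.1:8080")
	if out.chooser == nil {
		t.Fatal("expected outbound to be configured with a peer chooser")
	}
}
```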
@@ -461,6 +461,17 @@ error_conn:
return 0;
}
+static void wlr_drm_connector_set_dpms(struct wlr_output *output,
+		enum dpms_enum level) {
+	struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
+	struct wlr_drm_backend *drm = (struct wlr_drm_backend *)output->backend;
+
+	if (drmModeObjectSetProperty(drm->fd, conn->id, DRM_MODE_OBJECT_CONNECTOR,
+			conn->props.dpms, level) < 0) {
+		wlr_log(L_ERROR, "Failed to set dpms state for %s", conn->output.name);
+	}
+}
+
static bool wlr_drm_connector_set_mode(struct wlr_output *output,
struct wlr_output_mode *mode) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output; | 1 | #include <assert.h>
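For context on the mechanism this hook relies on: legacy KMS exposes DPMS as a connector property literally named "DPMS", written with drmModeObjectSetProperty and taking the DRM_MODE_DPMS_ON/STANDBY/SUSPEND/OFF values from the kernel's drm_mode.h. The patch already caches the property id in conn->props.dpms; the standalone sketch below only shows where such an id comes from, and its function name and trimmed error handling are assumptions:

```c
#include <stdint.h>
#include <string.h>
#include <xf86drm.h>
#include <xf86drmMode.h>

// Find the "DPMS" property on a connector and set it to a new level
// (DRM_MODE_DPMS_ON/STANDBY/SUSPEND/OFF from drm_mode.h).
static int set_connector_dpms(int fd, uint32_t conn_id, uint64_t level) {
	drmModeObjectProperties *props =
		drmModeObjectGetProperties(fd, conn_id, DRM_MODE_OBJECT_CONNECTOR);
	if (!props) {
		return -1;
	}

	int ret = -1;
	for (uint32_t i = 0; i < props->count_props; ++i) {
		drmModePropertyRes *prop = drmModeGetProperty(fd, props->props[i]);
		if (!prop) {
			continue;
		}
		if (strcmp(prop->name, "DPMS") == 0) {
			ret = drmModeObjectSetProperty(fd, conn_id,
				DRM_MODE_OBJECT_CONNECTOR, prop->prop_id, level);
		}
		drmModeFreeProperty(prop);
	}

	drmModeFreeObjectProperties(props);
	return ret;
}
```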
#include <drm_mode.h>
#include <EGL/egl.h>
#include <EGL/eglext.h>
#include <errno.h>
#include <gbm.h>
#include <GLES2/gl2.h>
#include <GLES2/gl2ext.h>
#include <inttypes.h>
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <time.h>
#include <wayland-server.h>
#include <wayland-util.h>
#include <wlr/backend/interface.h>
#include <wlr/interfaces/wlr_output.h>
#include <wlr/render/gles2.h>
#include <wlr/render/wlr_renderer.h>
#include <wlr/types/wlr_matrix.h>
#include <wlr/util/log.h>
#include <xf86drm.h>
#include <xf86drmMode.h>
#include "backend/drm/drm.h"
#include "backend/drm/iface.h"
#include "backend/drm/util.h"
#include "util/signal.h"
bool wlr_drm_check_features(struct wlr_drm_backend *drm) {
if (drmSetClientCap(drm->fd, DRM_CLIENT_CAP_UNIVERSAL_PLANES, 1)) {
wlr_log(L_ERROR, "DRM universal planes unsupported");
return false;
}
if (getenv("WLR_DRM_NO_ATOMIC")) {
wlr_log(L_DEBUG, "WLR_DRM_NO_ATOMIC set, forcing legacy DRM interface");
drm->iface = &legacy_iface;
} else if (drmSetClientCap(drm->fd, DRM_CLIENT_CAP_ATOMIC, 1)) {
wlr_log(L_DEBUG, "Atomic modesetting unsupported, using legacy DRM interface");
drm->iface = &legacy_iface;
} else {
wlr_log(L_DEBUG, "Using atomic DRM interface");
drm->iface = &atomic_iface;
}
return true;
}
static int cmp_plane(const void *arg1, const void *arg2) {
const struct wlr_drm_plane *a = arg1;
const struct wlr_drm_plane *b = arg2;
return (int)a->type - (int)b->type;
}
static bool init_planes(struct wlr_drm_backend *drm) {
drmModePlaneRes *plane_res = drmModeGetPlaneResources(drm->fd);
if (!plane_res) {
wlr_log_errno(L_ERROR, "Failed to get DRM plane resources");
return false;
}
wlr_log(L_INFO, "Found %"PRIu32" DRM planes", plane_res->count_planes);
if (plane_res->count_planes == 0) {
drmModeFreePlaneResources(plane_res);
return true;
}
drm->num_planes = plane_res->count_planes;
drm->planes = calloc(drm->num_planes, sizeof(*drm->planes));
if (!drm->planes) {
wlr_log_errno(L_ERROR, "Allocation failed");
goto error_res;
}
for (size_t i = 0; i < drm->num_planes; ++i) {
struct wlr_drm_plane *p = &drm->planes[i];
drmModePlane *plane = drmModeGetPlane(drm->fd, plane_res->planes[i]);
if (!plane) {
wlr_log_errno(L_ERROR, "Failed to get DRM plane");
goto error_planes;
}
p->id = plane->plane_id;
p->possible_crtcs = plane->possible_crtcs;
uint64_t type;
if (!wlr_drm_get_plane_props(drm->fd, p->id, &p->props) ||
!wlr_drm_get_prop(drm->fd, p->id, p->props.type, &type)) {
drmModeFreePlane(plane);
goto error_planes;
}
p->type = type;
drm->num_type_planes[type]++;
drmModeFreePlane(plane);
}
wlr_log(L_INFO, "(%zu overlay, %zu primary, %zu cursor)",
drm->num_overlay_planes,
drm->num_primary_planes,
drm->num_cursor_planes);
qsort(drm->planes, drm->num_planes, sizeof(*drm->planes), cmp_plane);
drm->overlay_planes = drm->planes;
drm->primary_planes = drm->overlay_planes
+ drm->num_overlay_planes;
drm->cursor_planes = drm->primary_planes
+ drm->num_primary_planes;
drmModeFreePlaneResources(plane_res);
return true;
error_planes:
free(drm->planes);
error_res:
drmModeFreePlaneResources(plane_res);
return false;
}
bool wlr_drm_resources_init(struct wlr_drm_backend *drm) {
drmModeRes *res = drmModeGetResources(drm->fd);
if (!res) {
wlr_log_errno(L_ERROR, "Failed to get DRM resources");
return false;
}
wlr_log(L_INFO, "Found %d DRM CRTCs", res->count_crtcs);
drm->num_crtcs = res->count_crtcs;
drm->crtcs = calloc(drm->num_crtcs, sizeof(drm->crtcs[0]));
if (!drm->crtcs) {
wlr_log_errno(L_ERROR, "Allocation failed");
goto error_res;
}
for (size_t i = 0; i < drm->num_crtcs; ++i) {
struct wlr_drm_crtc *crtc = &drm->crtcs[i];
crtc->id = res->crtcs[i];
crtc->legacy_crtc = drmModeGetCrtc(drm->fd, crtc->id);
wlr_drm_get_crtc_props(drm->fd, crtc->id, &crtc->props);
}
if (!init_planes(drm)) {
goto error_crtcs;
}
drmModeFreeResources(res);
return true;
error_crtcs:
free(drm->crtcs);
error_res:
drmModeFreeResources(res);
return false;
}
void wlr_drm_resources_free(struct wlr_drm_backend *drm) {
if (!drm) {
return;
}
for (size_t i = 0; i < drm->num_crtcs; ++i) {
struct wlr_drm_crtc *crtc = &drm->crtcs[i];
drmModeAtomicFree(crtc->atomic);
drmModeFreeCrtc(crtc->legacy_crtc);
if (crtc->mode_id) {
drmModeDestroyPropertyBlob(drm->fd, crtc->mode_id);
}
if (crtc->gamma_lut) {
drmModeDestroyPropertyBlob(drm->fd, crtc->gamma_lut);
}
}
for (size_t i = 0; i < drm->num_planes; ++i) {
struct wlr_drm_plane *plane = &drm->planes[i];
if (plane->cursor_bo) {
gbm_bo_destroy(plane->cursor_bo);
}
}
free(drm->crtcs);
free(drm->planes);
}
static bool wlr_drm_connector_make_current(struct wlr_output *output,
int *buffer_age) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
return wlr_drm_surface_make_current(&conn->crtc->primary->surf, buffer_age);
}
static bool wlr_drm_connector_swap_buffers(struct wlr_output *output,
pixman_region32_t *damage) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)output->backend;
if (!drm->session->active) {
return false;
}
struct wlr_drm_crtc *crtc = conn->crtc;
if (!crtc) {
return false;
}
struct wlr_drm_plane *plane = crtc->primary;
struct gbm_bo *bo = wlr_drm_surface_swap_buffers(&plane->surf, damage);
if (drm->parent) {
bo = wlr_drm_surface_mgpu_copy(&plane->mgpu_surf, bo);
}
uint32_t fb_id = get_fb_for_bo(bo);
if (conn->pageflip_pending) {
wlr_log(L_ERROR, "Skipping pageflip");
return false;
}
if (!drm->iface->crtc_pageflip(drm, conn, crtc, fb_id, NULL)) {
return false;
}
conn->pageflip_pending = true;
wlr_output_update_enabled(output, true);
return true;
}
static void wlr_drm_connector_set_gamma(struct wlr_output *output,
uint32_t size, uint16_t *r, uint16_t *g, uint16_t *b) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)output->backend;
bool ok;
if (conn->crtc) {
ok = drm->iface->crtc_set_gamma(drm, conn->crtc, r, g, b, size);
if (ok) {
wlr_output_update_needs_swap(output);
}
}
}
static uint32_t wlr_drm_connector_get_gamma_size(struct wlr_output *output) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)output->backend;
if (conn->crtc) {
return drm->iface->crtc_get_gamma_size(drm, conn->crtc);
}
return 0;
}
void wlr_drm_connector_start_renderer(struct wlr_drm_connector *conn) {
if (conn->state != WLR_DRM_CONN_CONNECTED) {
return;
}
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)conn->output.backend;
struct wlr_drm_crtc *crtc = conn->crtc;
if (!crtc) {
return;
}
struct wlr_drm_plane *plane = crtc->primary;
struct gbm_bo *bo = wlr_drm_surface_get_front(
drm->parent ? &plane->mgpu_surf : &plane->surf);
uint32_t fb_id = get_fb_for_bo(bo);
struct wlr_drm_mode *mode = (struct wlr_drm_mode *)conn->output.current_mode;
if (drm->iface->crtc_pageflip(drm, conn, crtc, fb_id, &mode->drm_mode)) {
conn->pageflip_pending = true;
wlr_output_update_enabled(&conn->output, true);
} else {
wl_event_source_timer_update(conn->retry_pageflip,
1000000.0f / conn->output.current_mode->refresh);
}
}
void wlr_drm_connector_enable(struct wlr_output *output, bool enable) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
if (conn->state != WLR_DRM_CONN_CONNECTED) {
return;
}
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)output->backend;
bool ok = drm->iface->conn_enable(drm, conn, enable);
if (!ok) {
return;
}
if (enable) {
wlr_drm_connector_start_renderer(conn);
}
wlr_output_update_enabled(&conn->output, enable);
}
static void realloc_planes(struct wlr_drm_backend *drm, const uint32_t *crtc_in,
bool *changed_outputs) {
// overlay, primary, cursor
for (int type = 0; type < 3; ++type) {
if (drm->num_type_planes[type] == 0) {
continue;
}
uint32_t possible[drm->num_type_planes[type]];
uint32_t crtc[drm->num_crtcs];
uint32_t crtc_res[drm->num_crtcs];
for (size_t i = 0; i < drm->num_type_planes[type]; ++i) {
possible[i] = drm->type_planes[type][i].possible_crtcs;
}
for (size_t i = 0; i < drm->num_crtcs; ++i) {
if (crtc_in[i] == UNMATCHED) {
crtc[i] = SKIP;
} else if (drm->crtcs[i].planes[type]) {
crtc[i] = drm->crtcs[i].planes[type]
- drm->type_planes[type];
} else {
crtc[i] = UNMATCHED;
}
}
match_obj(drm->num_type_planes[type], possible,
drm->num_crtcs, crtc, crtc_res);
for (size_t i = 0; i < drm->num_crtcs; ++i) {
if (crtc_res[i] == UNMATCHED || crtc_res[i] == SKIP) {
continue;
}
struct wlr_drm_crtc *c = &drm->crtcs[i];
struct wlr_drm_plane **old = &c->planes[type];
struct wlr_drm_plane *new = &drm->type_planes[type][crtc_res[i]];
if (*old != new) {
changed_outputs[crtc_res[i]] = true;
if (*old) {
wlr_drm_surface_finish(&(*old)->surf);
}
wlr_drm_surface_finish(&new->surf);
*old = new;
}
}
}
}
static void realloc_crtcs(struct wlr_drm_backend *drm,
struct wlr_drm_connector *conn, bool *changed_outputs) {
uint32_t crtc[drm->num_crtcs];
uint32_t crtc_res[drm->num_crtcs];
ssize_t num_outputs = wl_list_length(&drm->outputs);
uint32_t possible_crtc[num_outputs];
for (size_t i = 0; i < drm->num_crtcs; ++i) {
crtc[i] = UNMATCHED;
}
memset(possible_crtc, 0, sizeof(possible_crtc));
ssize_t index = -1, i = -1;
struct wlr_drm_connector *c;
wl_list_for_each(c, &drm->outputs, link) {
i++;
if (c == conn) {
index = i;
}
if (c->crtc) {
crtc[c->crtc - drm->crtcs] = i;
}
if (c->state == WLR_DRM_CONN_CONNECTED) {
possible_crtc[i] = c->possible_crtc;
}
}
assert(index != -1);
possible_crtc[index] = conn->possible_crtc;
match_obj(wl_list_length(&drm->outputs), possible_crtc,
drm->num_crtcs, crtc, crtc_res);
bool matched[num_outputs];
memset(matched, false, sizeof(matched));
for (size_t i = 0; i < drm->num_crtcs; ++i) {
if (crtc_res[i] != UNMATCHED) {
matched[crtc_res[i]] = true;
}
}
// There is no point doing anything if this monitor doesn't get activated
if (!matched[index]) {
return;
}
for (size_t i = 0; i < drm->num_crtcs; ++i) {
// We don't want any of the current monitors to be deactivated.
if (crtc[i] != UNMATCHED && !matched[crtc[i]]) {
return;
}
}
changed_outputs[index] = true;
for (size_t i = 0; i < drm->num_crtcs; ++i) {
if (crtc_res[i] == UNMATCHED) {
continue;
}
if (crtc_res[i] != crtc[i]) {
changed_outputs[crtc_res[i]] = true;
struct wlr_drm_connector *c;
size_t pos = 0;
wl_list_for_each(c, &drm->outputs, link) {
if (pos == crtc_res[i]) {
break;
}
pos++;
}
c->crtc = &drm->crtcs[i];
}
}
realloc_planes(drm, crtc_res, changed_outputs);
}
static uint32_t get_possible_crtcs(int fd, uint32_t conn_id) {
drmModeConnector *conn = drmModeGetConnector(fd, conn_id);
if (!conn) {
wlr_log_errno(L_ERROR, "Failed to get DRM connector");
return 0;
}
if (conn->connection != DRM_MODE_CONNECTED || conn->count_modes == 0) {
wlr_log(L_ERROR, "Output is not connected");
goto error_conn;
}
drmModeEncoder *enc = NULL;
for (int i = 0; !enc && i < conn->count_encoders; ++i) {
enc = drmModeGetEncoder(fd, conn->encoders[i]);
}
if (!enc) {
wlr_log(L_ERROR, "Failed to get DRM encoder");
goto error_conn;
}
uint32_t ret = enc->possible_crtcs;
drmModeFreeEncoder(enc);
drmModeFreeConnector(conn);
return ret;
error_conn:
drmModeFreeConnector(conn);
return 0;
}
static bool wlr_drm_connector_set_mode(struct wlr_output *output,
struct wlr_output_mode *mode) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)output->backend;
bool changed_outputs[wl_list_length(&drm->outputs)];
wlr_log(L_INFO, "Modesetting '%s' with '%ux%u@%u mHz'", conn->output.name,
mode->width, mode->height, mode->refresh);
conn->possible_crtc = get_possible_crtcs(drm->fd, conn->id);
if (conn->possible_crtc == 0) {
goto error_conn;
}
memset(changed_outputs, false, sizeof(changed_outputs));
realloc_crtcs(drm, conn, changed_outputs);
if (!conn->crtc) {
wlr_log(L_ERROR, "Unable to match %s with a CRTC", conn->output.name);
goto error_conn;
}
struct wlr_drm_crtc *crtc = conn->crtc;
if (!crtc) {
return false;
}
wlr_log(L_DEBUG, "%s: crtc=%td ovr=%td pri=%td cur=%td", conn->output.name,
crtc - drm->crtcs,
crtc->overlay ? crtc->overlay - drm->overlay_planes : -1,
crtc->primary ? crtc->primary - drm->primary_planes : -1,
crtc->cursor ? crtc->cursor - drm->cursor_planes : -1);
conn->state = WLR_DRM_CONN_CONNECTED;
wlr_output_update_mode(&conn->output, mode);
// Since realloc_crtcs can deallocate planes on OTHER outputs,
	// we actually need to reinitialize any that have changed
ssize_t output_index = -1;
wl_list_for_each(conn, &drm->outputs, link) {
output_index += 1;
struct wlr_output_mode *mode = conn->output.current_mode;
struct wlr_drm_crtc *crtc = conn->crtc;
if (conn->state != WLR_DRM_CONN_CONNECTED ||
!changed_outputs[output_index]) {
continue;
}
if (!wlr_drm_plane_surfaces_init(crtc->primary, drm,
mode->width, mode->height, GBM_FORMAT_XRGB8888)) {
wlr_log(L_ERROR, "Failed to initialize renderer for plane");
goto error_conn;
}
wlr_drm_connector_start_renderer(conn);
}
return true;
error_conn:
wlr_drm_connector_cleanup(conn);
return false;
}
static void wlr_drm_connector_transform(struct wlr_output *output,
enum wl_output_transform transform) {
output->transform = transform;
}
static bool wlr_drm_connector_set_cursor(struct wlr_output *output,
const uint8_t *buf, int32_t stride, uint32_t width, uint32_t height,
int32_t hotspot_x, int32_t hotspot_y, bool update_pixels) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)output->backend;
struct wlr_drm_renderer *renderer = &drm->renderer;
struct wlr_drm_crtc *crtc = conn->crtc;
if (!crtc) {
return false;
}
struct wlr_drm_plane *plane = crtc->cursor;
if (!plane) {
// We don't have a real cursor plane, so we make a fake one
plane = calloc(1, sizeof(*plane));
if (!plane) {
wlr_log_errno(L_ERROR, "Allocation failed");
return false;
}
crtc->cursor = plane;
}
if (!plane->surf.gbm) {
int ret;
uint64_t w, h;
ret = drmGetCap(drm->fd, DRM_CAP_CURSOR_WIDTH, &w);
w = ret ? 64 : w;
ret = drmGetCap(drm->fd, DRM_CAP_CURSOR_HEIGHT, &h);
h = ret ? 64 : h;
if (width > w || height > h) {
wlr_log(L_INFO, "Cursor too large (max %dx%d)", (int)w, (int)h);
return false;
}
if (!wlr_drm_surface_init(&plane->surf, renderer, w, h,
GBM_FORMAT_ARGB8888, 0)) {
wlr_log(L_ERROR, "Cannot allocate cursor resources");
return false;
}
plane->cursor_bo = gbm_bo_create(renderer->gbm, w, h,
GBM_FORMAT_ARGB8888, GBM_BO_USE_CURSOR | GBM_BO_USE_WRITE);
if (!plane->cursor_bo) {
wlr_log_errno(L_ERROR, "Failed to create cursor bo");
return false;
}
enum wl_output_transform transform = output->transform;
wlr_matrix_projection(plane->matrix, plane->surf.width,
plane->surf.height, transform);
}
struct wlr_box hotspot = { .x = hotspot_x, .y = hotspot_y };
enum wl_output_transform transform =
wlr_output_transform_invert(output->transform);
wlr_box_transform(&hotspot, transform,
plane->surf.width, plane->surf.height, &hotspot);
if (plane->cursor_hotspot_x != hotspot.x ||
plane->cursor_hotspot_y != hotspot.y) {
// Update cursor hotspot
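// Shift the stored cursor position by the hotspot delta so that the
// visible image does not jump when only the hotspot changes.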
conn->cursor_x -= hotspot.x - plane->cursor_hotspot_x;
conn->cursor_y -= hotspot.y - plane->cursor_hotspot_y;
plane->cursor_hotspot_x = hotspot.x;
plane->cursor_hotspot_y = hotspot.y;
if (!drm->iface->crtc_move_cursor(drm, conn->crtc, conn->cursor_x,
conn->cursor_y)) {
return false;
}
wlr_output_update_needs_swap(output);
}
if (!update_pixels) {
// Don't update cursor image
return true;
}
plane->cursor_enabled = buf != NULL;
if (buf != NULL) {
uint32_t bo_width = gbm_bo_get_width(plane->cursor_bo);
uint32_t bo_height = gbm_bo_get_height(plane->cursor_bo);
uint32_t bo_stride;
void *bo_data;
if (!gbm_bo_map(plane->cursor_bo, 0, 0, bo_width, bo_height,
GBM_BO_TRANSFER_WRITE, &bo_stride, &bo_data)) {
wlr_log_errno(L_ERROR, "Unable to map buffer");
return false;
}
wlr_drm_surface_make_current(&plane->surf, NULL);
struct wlr_renderer *rend = plane->surf.renderer->wlr_rend;
struct wlr_texture *texture = wlr_texture_from_pixels(rend,
WL_SHM_FORMAT_ARGB8888, stride, width, height, buf);
if (texture == NULL) {
wlr_log(L_ERROR, "Unable to create texture");
return false;
}
wlr_renderer_begin(rend, plane->surf.width, plane->surf.height);
wlr_renderer_clear(rend, (float[]){ 0.0, 0.0, 0.0, 0.0 });
wlr_render_texture(rend, texture, plane->matrix, 0, 0, 1.0f);
wlr_renderer_end(rend);
wlr_renderer_read_pixels(rend, WL_SHM_FORMAT_ARGB8888, bo_stride,
plane->surf.width, plane->surf.height, 0, 0, 0, 0, bo_data);
wlr_drm_surface_swap_buffers(&plane->surf, NULL);
wlr_texture_destroy(texture);
gbm_bo_unmap(plane->cursor_bo, bo_data);
}
if (!drm->session->active) {
return true; // will be committed when session is resumed
}
struct gbm_bo *bo = plane->cursor_enabled ? plane->cursor_bo : NULL;
bool ok = drm->iface->crtc_set_cursor(drm, crtc, bo);
if (ok) {
wlr_output_update_needs_swap(output);
}
return ok;
}
static bool wlr_drm_connector_move_cursor(struct wlr_output *output,
int x, int y) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)output->backend;
if (!conn->crtc) {
return false;
}
struct wlr_drm_plane *plane = conn->crtc->cursor;
struct wlr_box box = { .x = x, .y = y };
int width, height;
wlr_output_transformed_resolution(output, &width, &height);
enum wl_output_transform transform =
wlr_output_transform_invert(output->transform);
wlr_box_transform(&box, transform, width, height, &box);
if (plane != NULL) {
box.x -= plane->cursor_hotspot_x;
box.y -= plane->cursor_hotspot_y;
}
conn->cursor_x = box.x;
conn->cursor_y = box.y;
if (!drm->session->active) {
return true; // will be committed when session is resumed
}
bool ok = drm->iface->crtc_move_cursor(drm, conn->crtc, box.x, box.y);
if (ok) {
wlr_output_update_needs_swap(output);
}
return ok;
}
static void wlr_drm_connector_destroy(struct wlr_output *output) {
struct wlr_drm_connector *conn = (struct wlr_drm_connector *)output;
wlr_drm_connector_cleanup(conn);
wl_event_source_remove(conn->retry_pageflip);
wl_list_remove(&conn->link);
free(conn);
}
static struct wlr_output_impl output_impl = {
.enable = wlr_drm_connector_enable,
.set_mode = wlr_drm_connector_set_mode,
.transform = wlr_drm_connector_transform,
.set_cursor = wlr_drm_connector_set_cursor,
.move_cursor = wlr_drm_connector_move_cursor,
.destroy = wlr_drm_connector_destroy,
.make_current = wlr_drm_connector_make_current,
.swap_buffers = wlr_drm_connector_swap_buffers,
.set_gamma = wlr_drm_connector_set_gamma,
.get_gamma_size = wlr_drm_connector_get_gamma_size,
};
bool wlr_output_is_drm(struct wlr_output *output) {
return output->impl == &output_impl;
}
static int retry_pageflip(void *data) {
struct wlr_drm_connector *conn = data;
wlr_log(L_INFO, "%s: Retrying pageflip", conn->output.name);
wlr_drm_connector_start_renderer(conn);
return 0;
}
static const int32_t subpixel_map[] = {
[DRM_MODE_SUBPIXEL_UNKNOWN] = WL_OUTPUT_SUBPIXEL_UNKNOWN,
[DRM_MODE_SUBPIXEL_HORIZONTAL_RGB] = WL_OUTPUT_SUBPIXEL_HORIZONTAL_RGB,
[DRM_MODE_SUBPIXEL_HORIZONTAL_BGR] = WL_OUTPUT_SUBPIXEL_HORIZONTAL_BGR,
[DRM_MODE_SUBPIXEL_VERTICAL_RGB] = WL_OUTPUT_SUBPIXEL_VERTICAL_RGB,
[DRM_MODE_SUBPIXEL_VERTICAL_BGR] = WL_OUTPUT_SUBPIXEL_VERTICAL_BGR,
[DRM_MODE_SUBPIXEL_NONE] = WL_OUTPUT_SUBPIXEL_NONE,
};
void wlr_drm_scan_connectors(struct wlr_drm_backend *drm) {
wlr_log(L_INFO, "Scanning DRM connectors");
drmModeRes *res = drmModeGetResources(drm->fd);
if (!res) {
wlr_log_errno(L_ERROR, "Failed to get DRM resources");
return;
}
size_t seen_len = wl_list_length(&drm->outputs);
// +1 so length can never be 0, which is undefined behaviour.
// Last element isn't used.
bool seen[seen_len + 1];
memset(seen, 0, sizeof(seen));
for (int i = 0; i < res->count_connectors; ++i) {
drmModeConnector *drm_conn = drmModeGetConnector(drm->fd,
res->connectors[i]);
if (!drm_conn) {
wlr_log_errno(L_ERROR, "Failed to get DRM connector");
continue;
}
drmModeEncoder *curr_enc = drmModeGetEncoder(drm->fd,
drm_conn->encoder_id);
int index = -1;
struct wlr_drm_connector *c, *wlr_conn = NULL;
wl_list_for_each(c, &drm->outputs, link) {
index++;
if (c->id == drm_conn->connector_id) {
wlr_conn = c;
break;
}
}
if (!wlr_conn) {
wlr_conn = calloc(1, sizeof(*wlr_conn));
if (!wlr_conn) {
wlr_log_errno(L_ERROR, "Allocation failed");
drmModeFreeEncoder(curr_enc);
drmModeFreeConnector(drm_conn);
continue;
}
wlr_output_init(&wlr_conn->output, &drm->backend, &output_impl,
drm->display);
struct wl_event_loop *ev = wl_display_get_event_loop(drm->display);
wlr_conn->retry_pageflip = wl_event_loop_add_timer(ev, retry_pageflip,
wlr_conn);
wlr_conn->state = WLR_DRM_CONN_DISCONNECTED;
wlr_conn->id = drm_conn->connector_id;
if (curr_enc) {
wlr_conn->old_crtc = drmModeGetCrtc(drm->fd, curr_enc->crtc_id);
}
snprintf(wlr_conn->output.name, sizeof(wlr_conn->output.name),
"%s-%"PRIu32,
conn_get_name(drm_conn->connector_type),
drm_conn->connector_type_id);
wl_list_insert(&drm->outputs, &wlr_conn->link);
wlr_log(L_INFO, "Found display '%s'", wlr_conn->output.name);
} else {
seen[index] = true;
}
if (curr_enc) {
for (size_t i = 0; i < drm->num_crtcs; ++i) {
if (drm->crtcs[i].id == curr_enc->crtc_id) {
wlr_conn->crtc = &drm->crtcs[i];
break;
}
}
} else {
wlr_conn->crtc = NULL;
}
if (wlr_conn->state == WLR_DRM_CONN_DISCONNECTED &&
drm_conn->connection == DRM_MODE_CONNECTED) {
wlr_log(L_INFO, "'%s' connected", wlr_conn->output.name);
wlr_conn->output.phys_width = drm_conn->mmWidth;
wlr_conn->output.phys_height = drm_conn->mmHeight;
wlr_log(L_INFO, "Physical size: %"PRId32"x%"PRId32,
wlr_conn->output.phys_width, wlr_conn->output.phys_height);
wlr_conn->output.subpixel = subpixel_map[drm_conn->subpixel];
wlr_drm_get_connector_props(drm->fd, wlr_conn->id, &wlr_conn->props);
size_t edid_len = 0;
uint8_t *edid = wlr_drm_get_prop_blob(drm->fd,
wlr_conn->id, wlr_conn->props.edid, &edid_len);
parse_edid(&wlr_conn->output, edid_len, edid);
free(edid);
wlr_log(L_INFO, "Detected modes:");
for (int i = 0; i < drm_conn->count_modes; ++i) {
struct wlr_drm_mode *mode = calloc(1, sizeof(*mode));
if (!mode) {
wlr_log_errno(L_ERROR, "Allocation failed");
continue;
}
mode->drm_mode = drm_conn->modes[i];
mode->wlr_mode.width = mode->drm_mode.hdisplay;
mode->wlr_mode.height = mode->drm_mode.vdisplay;
mode->wlr_mode.refresh = calculate_refresh_rate(&mode->drm_mode);
wlr_log(L_INFO, " %"PRId32"@%"PRId32"@%"PRId32,
mode->wlr_mode.width, mode->wlr_mode.height,
mode->wlr_mode.refresh);
wl_list_insert(&wlr_conn->output.modes, &mode->wlr_mode.link);
}
wlr_output_update_enabled(&wlr_conn->output, true);
wlr_conn->state = WLR_DRM_CONN_NEEDS_MODESET;
wlr_log(L_INFO, "Sending modesetting signal for '%s'",
wlr_conn->output.name);
wlr_signal_emit_safe(&drm->backend.events.new_output,
&wlr_conn->output);
} else if (wlr_conn->state == WLR_DRM_CONN_CONNECTED &&
drm_conn->connection != DRM_MODE_CONNECTED) {
wlr_log(L_INFO, "'%s' disconnected", wlr_conn->output.name);
wlr_output_update_enabled(&wlr_conn->output, false);
wlr_drm_connector_cleanup(wlr_conn);
}
drmModeFreeEncoder(curr_enc);
drmModeFreeConnector(drm_conn);
}
drmModeFreeResources(res);
struct wlr_drm_connector *conn, *tmp_conn;
size_t index = wl_list_length(&drm->outputs);
wl_list_for_each_safe(conn, tmp_conn, &drm->outputs, link) {
index--;
if (index >= seen_len || seen[index]) {
continue;
}
wlr_log(L_INFO, "'%s' disappeared", conn->output.name);
wlr_drm_connector_cleanup(conn);
drmModeFreeCrtc(conn->old_crtc);
wl_event_source_remove(conn->retry_pageflip);
wl_list_remove(&conn->link);
free(conn);
}
}
static void page_flip_handler(int fd, unsigned seq,
unsigned tv_sec, unsigned tv_usec, void *user) {
struct wlr_drm_connector *conn = user;
struct wlr_drm_backend *drm = (struct wlr_drm_backend *)conn->output.backend;
conn->pageflip_pending = false;
if (conn->state != WLR_DRM_CONN_CONNECTED) {
return;
}
wlr_drm_surface_post(&conn->crtc->primary->surf);
if (drm->parent) {
wlr_drm_surface_post(&conn->crtc->primary->mgpu_surf);
}
if (drm->session->active) {
wlr_output_send_frame(&conn->output);
}
}
int wlr_drm_event(int fd, uint32_t mask, void *data) {
drmEventContext event = {
.version = DRM_EVENT_CONTEXT_VERSION,
.page_flip_handler = page_flip_handler,
};
drmHandleEvent(fd, &event);
return 1;
}
void wlr_drm_restore_outputs(struct wlr_drm_backend *drm) {
uint64_t to_close = (1 << wl_list_length(&drm->outputs)) - 1;
struct wlr_drm_connector *conn;
wl_list_for_each(conn, &drm->outputs, link) {
if (conn->state == WLR_DRM_CONN_CONNECTED) {
conn->state = WLR_DRM_CONN_CLEANUP;
}
}
time_t timeout = time(NULL) + 5;
while (to_close && time(NULL) < timeout) {
wlr_drm_event(drm->fd, 0, NULL);
size_t i = 0;
struct wlr_drm_connector *conn;
wl_list_for_each(conn, &drm->outputs, link) {
if (conn->state != WLR_DRM_CONN_CLEANUP || !conn->pageflip_pending) {
to_close &= ~(1 << i);
}
i++;
}
}
if (to_close) {
wlr_log(L_ERROR, "Timed out stopping output renderers");
}
wl_list_for_each(conn, &drm->outputs, link) {
drmModeCrtc *crtc = conn->old_crtc;
if (!crtc) {
continue;
}
drmModeSetCrtc(drm->fd, crtc->crtc_id, crtc->buffer_id, crtc->x, crtc->y,
&conn->id, 1, &crtc->mode);
drmModeFreeCrtc(crtc);
}
}
void wlr_drm_connector_cleanup(struct wlr_drm_connector *conn) {
if (!conn) {
return;
}
switch (conn->state) {
case WLR_DRM_CONN_CONNECTED:
case WLR_DRM_CONN_CLEANUP:;
struct wlr_drm_crtc *crtc = conn->crtc;
for (int i = 0; i < 3; ++i) {
if (!crtc->planes[i]) {
continue;
}
wlr_drm_surface_finish(&crtc->planes[i]->surf);
wlr_drm_surface_finish(&crtc->planes[i]->mgpu_surf);
if (crtc->planes[i]->id == 0) {
free(crtc->planes[i]);
crtc->planes[i] = NULL;
}
}
struct wlr_drm_mode *mode;
struct wlr_drm_mode *tmp;
wl_list_for_each_safe(mode, tmp, &conn->output.modes, wlr_mode.link) {
wl_list_remove(&mode->wlr_mode.link);
free(mode);
}
memset(&conn->output.make, 0, sizeof(conn->output.make));
memset(&conn->output.model, 0, sizeof(conn->output.model));
memset(&conn->output.serial, 0, sizeof(conn->output.serial));
conn->crtc = NULL;
conn->possible_crtc = 0;
/* Fallthrough */
case WLR_DRM_CONN_NEEDS_MODESET:
wlr_log(L_INFO, "Emitting destruction signal for '%s'",
conn->output.name);
wlr_signal_emit_safe(&conn->output.events.destroy, &conn->output);
break;
case WLR_DRM_CONN_DISCONNECTED:
break;
}
conn->state = WLR_DRM_CONN_DISCONNECTED;
}
| 1 | 10,993 | This is using the legacy DRM interface. DPMS levels have been removed from the atomic interface IIRC. Should we handle DPMS levels at all? | swaywm-wlroots | c |
@@ -162,13 +162,13 @@ type confirmedTx struct {
watch *transactionWatch
}
-// potentiallyConfirmedWatches returns all watches with nonce less than what was specified
+// potentiallyConfirmedWatches returns all watches with nonce less than or equal to what was specified
func (tm *transactionMonitor) potentiallyConfirmedWatches(nonce uint64) (watches []*transactionWatch) {
tm.lock.Lock()
defer tm.lock.Unlock()
for watch := range tm.watches {
- if watch.nonce < nonce {
+ if watch.nonce <= nonce {
watches = append(watches, watch)
}
} | 1 | // Copyright 2021 The Swarm Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
package transaction
import (
"context"
"errors"
"io"
"math/big"
"sync"
"time"
"github.com/ethereum/go-ethereum"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/core/types"
"github.com/ethersphere/bee/pkg/logging"
)
var ErrTransactionCancelled = errors.New("transaction cancelled")
var ErrMonitorClosed = errors.New("monitor closed")
// Monitor is a nonce-based watcher for transaction confirmations.
// Instead of watching transactions individually, the sender's nonce is monitored and transactions are checked based on this.
// The idea is that if the nonce is still lower than that of a pending transaction, there is no point in actually checking the transaction for a receipt.
// At the same time, if the nonce was already used a few blocks ago, we can reasonably assume that the transaction will never confirm.
type Monitor interface {
io.Closer
// WatchTransaction watches the transaction until either there is 1 confirmation or a competing transaction with cancellationDepth confirmations.
WatchTransaction(txHash common.Hash, nonce uint64) (<-chan types.Receipt, <-chan error, error)
}
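// A minimal usage sketch (illustrative only: logger, backend, sender, txHash
// and nonce are assumed to exist in the caller):
//
//	monitor := NewMonitor(logger, backend, sender, 5*time.Second, 6)
//	receiptC, errC, _ := monitor.WatchTransaction(txHash, nonce)
//	select {
//	case receipt := <-receiptC:
//		_ = receipt // the transaction has at least one confirmation
//	case err := <-errC:
//		_ = err // ErrTransactionCancelled, ErrMonitorClosed, ...
//	}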
type transactionMonitor struct {
lock sync.Mutex
ctx context.Context // context which is used for all backend calls
cancelFunc context.CancelFunc // function to cancel the above context
wg sync.WaitGroup
logger logging.Logger
backend Backend
sender common.Address // sender of transactions which this instance can monitor
pollingInterval time.Duration // time between checking for new blocks
cancellationDepth uint64 // number of blocks until considering a tx cancellation final
watches map[*transactionWatch]struct{} // active watches
watchAdded chan struct{} // channel to trigger instant pending check
}
type transactionWatch struct {
receiptC chan types.Receipt // channel to which the receipt will be written once available
errC chan error // error channel (primarily for cancelled transactions)
txHash common.Hash // hash of the transaction to watch
nonce uint64 // nonce of the transaction to watch
}
func NewMonitor(logger logging.Logger, backend Backend, sender common.Address, pollingInterval time.Duration, cancellationDepth uint64) Monitor {
ctx, cancelFunc := context.WithCancel(context.Background())
t := &transactionMonitor{
ctx: ctx,
cancelFunc: cancelFunc,
logger: logger,
backend: backend,
sender: sender,
pollingInterval: pollingInterval,
cancellationDepth: cancellationDepth,
watches: make(map[*transactionWatch]struct{}),
watchAdded: make(chan struct{}, 1),
}
t.wg.Add(1)
go t.watchPending()
return t
}
func (tm *transactionMonitor) WatchTransaction(txHash common.Hash, nonce uint64) (<-chan types.Receipt, <-chan error, error) {
tm.lock.Lock()
defer tm.lock.Unlock()
// these channels will be written to at most once
// buffer size is 1 to avoid blocking in the watch loop
receiptC := make(chan types.Receipt, 1)
errC := make(chan error, 1)
tm.watches[&transactionWatch{
receiptC: receiptC,
errC: errC,
txHash: txHash,
nonce: nonce,
}] = struct{}{}
select {
case tm.watchAdded <- struct{}{}:
default:
}
tm.logger.Tracef("starting to watch transaction %x with nonce %d", txHash, nonce)
return receiptC, errC, nil
}
// main watch loop
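// It wakes up whenever a watch is added, on every polling interval and when
// the monitor context is cancelled, and only rechecks watches once a new
// block has been observed (or a watch was just added).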
func (tm *transactionMonitor) watchPending() {
defer tm.wg.Done()
defer func() {
tm.lock.Lock()
defer tm.lock.Unlock()
for watch := range tm.watches {
watch.errC <- ErrMonitorClosed
}
}()
var lastBlock uint64 = 0
var added bool // flag if this iteration was triggered by the watchAdded channel
for {
added = false
select {
// if a new watch has been added check again without waiting
case <-tm.watchAdded:
added = true
// otherwise wait
case <-time.After(tm.pollingInterval):
// if the main context is cancelled terminate
case <-tm.ctx.Done():
return
}
// if there are no watched transactions there is nothing to do
if !tm.hasWatches() {
continue
}
// switch to new head subscriptions once websockets are the norm
block, err := tm.backend.BlockNumber(tm.ctx)
if err != nil {
tm.logger.Errorf("could not get block number: %v", err)
continue
} else if block <= lastBlock && !added {
// if the block number is not higher than before there is nothing to do
// unless a watch was added in which case we will do the check anyway
// in the rare case where a block was reorged and the new one is the first to contain our tx we wait an extra block
continue
}
if err := tm.checkPending(block); err != nil {
tm.logger.Errorf("error while checking pending transactions: %v", err)
}
lastBlock = block
}
}
type confirmedTx struct {
receipt types.Receipt
watch *transactionWatch
}
// potentiallyConfirmedWatches returns all watches with nonce less than what was specified
func (tm *transactionMonitor) potentiallyConfirmedWatches(nonce uint64) (watches []*transactionWatch) {
tm.lock.Lock()
defer tm.lock.Unlock()
for watch := range tm.watches {
if watch.nonce < nonce {
watches = append(watches, watch)
}
}
return watches
}
func (tm *transactionMonitor) hasWatches() bool {
tm.lock.Lock()
defer tm.lock.Unlock()
return len(tm.watches) > 0
}
// check pending checks the given block (number) for confirmed or cancelled transactions
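// It first selects the watches whose nonce could already have been used at
// this block, then looks up receipts for them, and finally applies the
// cancellationDepth rule to those still without a receipt.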
func (tm *transactionMonitor) checkPending(block uint64) error {
nonce, err := tm.backend.NonceAt(tm.ctx, tm.sender, new(big.Int).SetUint64(block))
if err != nil {
return err
}
// transactions with a nonce lower than or equal to what is found on-chain are either confirmed or (at least temporarily) cancelled
checkWatches := tm.potentiallyConfirmedWatches(nonce)
var confirmedTxs []confirmedTx
var potentiallyCancelledTxs []*transactionWatch
for _, watch := range checkWatches {
receipt, err := tm.backend.TransactionReceipt(tm.ctx, watch.txHash)
if receipt != nil {
// if we have a receipt we have a confirmation
confirmedTxs = append(confirmedTxs, confirmedTx{
receipt: *receipt,
watch: watch,
})
} else if err == nil || errors.Is(err, ethereum.NotFound) {
// if both err and receipt are nil, there is no receipt
// we also match for the special error "not found" that some clients return
// the reason why we consider this only potentially cancelled is to catch cases where after a reorg the original transaction wins
potentiallyCancelledTxs = append(potentiallyCancelledTxs, watch)
} else {
// any other error is probably a real error
return err
}
}
// mark all transactions without receipt whose nonce was already used at least cancellationDepth blocks ago as cancelled
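// For example (illustrative numbers): with cancellationDepth = 6 at block
// 100 we query the account nonce as of block 94; a watched transaction
// whose nonce was already consumed back then has had at least 6 blocks to
// produce a receipt, so a still-missing receipt means it was most likely
// replaced by a competing transaction.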
var cancelledTxs []*transactionWatch
if len(potentiallyCancelledTxs) > 0 {
oldNonce, err := tm.backend.NonceAt(tm.ctx, tm.sender, new(big.Int).SetUint64(block-tm.cancellationDepth))
if err != nil {
return err
}
for _, watch := range potentiallyCancelledTxs {
if watch.nonce <= oldNonce {
cancelledTxs = append(cancelledTxs, watch)
}
}
}
// notify the subscribers and remove watches for confirmed or cancelled transactions
tm.lock.Lock()
defer tm.lock.Unlock()
for _, confirmedTx := range confirmedTxs {
confirmedTx.watch.receiptC <- confirmedTx.receipt
delete(tm.watches, confirmedTx.watch)
}
for _, watch := range cancelledTxs {
watch.errC <- ErrTransactionCancelled
delete(tm.watches, watch)
}
return nil
}
func (tm *transactionMonitor) Close() error {
tm.cancelFunc()
tm.wg.Wait()
return nil
}
| 1 | 14,915 | I think this was correct before. The nonce passed in here is the nonce at a block (which is the next nonce not yet used, e.g. 0 if the account was never used, 1 if only the tx with nonce 0 has been sent). So if the in-block nonce is 12, then a transaction with nonce 12 cannot have been included yet. | ethersphere-bee | go |
@@ -0,0 +1,13 @@
+const { NativeModules, Platform } = require('react-native');
+
+const ext = (Platform.OS === 'android' && NativeModules.ShareExtension) ?
+ {
+ data: () => NativeModules.ShareExtension.data(),
+ close: () => NativeModules.ShareExtension.close(),
+ } :
+ {
+ data: () => {},
+ close: () => {},
+ };
+
+export default ext; | 1 | 1 | 13,669 | Please name the file ShareExtension.js, as you import it under this name (also could you convert it to TypeScript please?) | laurent22-joplin | js |
|
@@ -45,6 +45,7 @@ type ClusterOpts struct {
TLSTimeout float64 `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSMap bool `json:"-"`
+ TLSInsecure bool `json:"tls_insecure"`
ListenStr string `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"` | 1 | // Copyright 2012-2019 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"context"
"crypto/tls"
"crypto/x509"
"errors"
"flag"
"fmt"
"io/ioutil"
"net"
"net/url"
"os"
"regexp"
"strconv"
"strings"
"time"
"github.com/nats-io/gnatsd/conf"
"github.com/nats-io/jwt"
"github.com/nats-io/nkeys"
)
// ClusterOpts are options for clusters.
type ClusterOpts struct {
Host string `json:"addr,omitempty"`
Port int `json:"cluster_port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
Permissions *RoutePermissions `json:"-"`
TLSTimeout float64 `json:"-"`
TLSConfig *tls.Config `json:"-"`
TLSMap bool `json:"-"`
ListenStr string `json:"-"`
Advertise string `json:"-"`
NoAdvertise bool `json:"-"`
ConnectRetries int `json:"-"`
}
// GatewayOpts are options for gateways.
type GatewayOpts struct {
Name string `json:"name"`
Host string `json:"addr,omitempty"`
Port int `json:"port,omitempty"`
Username string `json:"-"`
Password string `json:"-"`
AuthTimeout float64 `json:"auth_timeout,omitempty"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
TLSMap bool `json:"-"`
Advertise string `json:"advertise,omitempty"`
ConnectRetries int `json:"connect_retries,omitempty"`
Gateways []*RemoteGatewayOpts `json:"gateways,omitempty"`
RejectUnknown bool `json:"reject_unknown,omitempty"`
// Not exported, for tests.
resolver netResolver
sendQSubsBufSize int
}
// RemoteGatewayOpts are options for connecting to a remote gateway
type RemoteGatewayOpts struct {
Name string `json:"name"`
TLSConfig *tls.Config `json:"-"`
TLSTimeout float64 `json:"tls_timeout,omitempty"`
URLs []*url.URL `json:"urls,omitempty"`
}
// Options block for gnatsd server.
type Options struct {
ConfigFile string `json:"-"`
Host string `json:"addr"`
Port int `json:"port"`
ClientAdvertise string `json:"-"`
Trace bool `json:"-"`
Debug bool `json:"-"`
NoLog bool `json:"-"`
NoSigs bool `json:"-"`
Logtime bool `json:"-"`
MaxConn int `json:"max_connections"`
MaxSubs int `json:"max_subscriptions,omitempty"`
Nkeys []*NkeyUser `json:"-"`
Users []*User `json:"-"`
Accounts []*Account `json:"-"`
SystemAccount string `json:"-"`
AllowNewAccounts bool `json:"-"`
Username string `json:"-"`
Password string `json:"-"`
Authorization string `json:"-"`
PingInterval time.Duration `json:"ping_interval"`
MaxPingsOut int `json:"ping_max"`
HTTPHost string `json:"http_host"`
HTTPPort int `json:"http_port"`
HTTPSPort int `json:"https_port"`
AuthTimeout float64 `json:"auth_timeout"`
MaxControlLine int32 `json:"max_control_line"`
MaxPayload int32 `json:"max_payload"`
MaxPending int64 `json:"max_pending"`
Cluster ClusterOpts `json:"cluster,omitempty"`
Gateway GatewayOpts `json:"gateway,omitempty"`
ProfPort int `json:"-"`
PidFile string `json:"-"`
PortsFileDir string `json:"-"`
LogFile string `json:"-"`
Syslog bool `json:"-"`
RemoteSyslog string `json:"-"`
Routes []*url.URL `json:"-"`
RoutesStr string `json:"-"`
TLSTimeout float64 `json:"tls_timeout"`
TLS bool `json:"-"`
TLSVerify bool `json:"-"`
TLSMap bool `json:"-"`
TLSCert string `json:"-"`
TLSKey string `json:"-"`
TLSCaCert string `json:"-"`
TLSConfig *tls.Config `json:"-"`
WriteDeadline time.Duration `json:"-"`
RQSubsSweep time.Duration `json:"-"` // Deprecated
MaxClosedClients int `json:"-"`
LameDuckDuration time.Duration `json:"-"`
// Operating a trusted NATS server
TrustedKeys []string `json:"-"`
TrustedOperators []*jwt.OperatorClaims `json:"-"`
AccountResolver AccountResolver `json:"-"`
resolverPreloads map[string]string
CustomClientAuthentication Authentication `json:"-"`
CustomRouterAuthentication Authentication `json:"-"`
// CheckConfig configuration file syntax test was successful and exit.
CheckConfig bool `json:"-"`
// private fields, used to know if bool options are explicitly
// defined in config and/or command line params.
inConfig map[string]bool
inCmdLine map[string]bool
// private fields, used for testing
gatewaysSolicitDelay time.Duration
}
type netResolver interface {
LookupHost(ctx context.Context, host string) ([]string, error)
}
// Clone performs a deep copy of the Options struct, returning a new clone
// with all values copied.
func (o *Options) Clone() *Options {
if o == nil {
return nil
}
clone := &Options{}
*clone = *o
if o.Users != nil {
clone.Users = make([]*User, len(o.Users))
for i, user := range o.Users {
clone.Users[i] = user.clone()
}
}
if o.Nkeys != nil {
clone.Nkeys = make([]*NkeyUser, len(o.Nkeys))
for i, nkey := range o.Nkeys {
clone.Nkeys[i] = nkey.clone()
}
}
if o.Routes != nil {
clone.Routes = deepCopyURLs(o.Routes)
}
if o.TLSConfig != nil {
clone.TLSConfig = o.TLSConfig.Clone()
}
if o.Cluster.TLSConfig != nil {
clone.Cluster.TLSConfig = o.Cluster.TLSConfig.Clone()
}
if o.Gateway.TLSConfig != nil {
clone.Gateway.TLSConfig = o.Gateway.TLSConfig.Clone()
}
if len(o.Gateway.Gateways) > 0 {
clone.Gateway.Gateways = make([]*RemoteGatewayOpts, len(o.Gateway.Gateways))
for i, g := range o.Gateway.Gateways {
clone.Gateway.Gateways[i] = g.clone()
}
}
return clone
}
func deepCopyURLs(urls []*url.URL) []*url.URL {
if urls == nil {
return nil
}
curls := make([]*url.URL, len(urls))
for i, u := range urls {
cu := &url.URL{}
*cu = *u
curls[i] = cu
}
return curls
}
// Configuration file authorization section.
type authorization struct {
// Singles
user string
pass string
token string
// Multiple Nkeys/Users
nkeys []*NkeyUser
users []*User
timeout float64
defaultPermissions *Permissions
}
// TLSConfigOpts holds the parsed tls config information,
// used with flag parsing
type TLSConfigOpts struct {
CertFile string
KeyFile string
CaFile string
Verify bool
Map bool
Timeout float64
Ciphers []uint16
CurvePreferences []tls.CurveID
}
var tlsUsage = `
TLS configuration is specified in the tls section of a configuration file:
e.g.
tls {
cert_file: "./certs/server-cert.pem"
key_file: "./certs/server-key.pem"
ca_file: "./certs/ca.pem"
verify: true
verify_and_map: true
cipher_suites: [
"TLS_ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256"
]
curve_preferences: [
"CurveP256",
"CurveP384",
"CurveP521"
]
}
Available cipher suites include:
`
// ProcessConfigFile processes a configuration file.
// FIXME(dlc): A bit hacky
func ProcessConfigFile(configFile string) (*Options, error) {
opts := &Options{}
if err := opts.ProcessConfigFile(configFile); err != nil {
// If only warnings then continue and return the options.
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 {
return opts, nil
}
return nil, err
}
return opts, nil
}
// token is an item parsed from the configuration.
type token interface {
Value() interface{}
Line() int
IsUsedVariable() bool
SourceFile() string
Position() int
}
// unwrapValue can be used to get the token and value from an item
// to be able to report the line number in case of an incorrect
// configuration.
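// For example, a value parsed from `port: 4222` unwraps into its token
// (carrying file/line information for error reporting) and the plain
// int64 value 4222.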
func unwrapValue(v interface{}) (token, interface{}) {
switch tk := v.(type) {
case token:
return tk, tk.Value()
default:
return nil, v
}
}
// ProcessConfigFile updates the Options structure with options
// present in the given configuration file.
// This version is convenient if one wants to set some default
// options and then override them with what is in the config file.
// For instance, this version allows you to do something such as:
//
// opts := &Options{Debug: true}
// opts.ProcessConfigFile(myConfigFile)
//
// If the config file contains "debug: false", after this call,
// opts.Debug would really be false. It would be impossible to
// achieve that with the non receiver ProcessConfigFile() version,
// since one would not know after the call if "debug" was not present
// or was present but set to false.
func (o *Options) ProcessConfigFile(configFile string) error {
o.ConfigFile = configFile
if configFile == "" {
return nil
}
m, err := conf.ParseFileWithChecks(configFile)
if err != nil {
return err
}
// Collect all errors and warnings and report them all together.
errors := make([]error, 0)
warnings := make([]error, 0)
for k, v := range m {
tk, v := unwrapValue(v)
switch strings.ToLower(k) {
case "listen":
hp, err := parseListen(v)
if err != nil {
errors = append(errors, &configErr{tk, err.Error()})
continue
}
o.Host = hp.host
o.Port = hp.port
case "client_advertise":
o.ClientAdvertise = v.(string)
case "port":
o.Port = int(v.(int64))
case "host", "net":
o.Host = v.(string)
case "debug":
o.Debug = v.(bool)
trackExplicitVal(o, &o.inConfig, "Debug", o.Debug)
case "trace":
o.Trace = v.(bool)
trackExplicitVal(o, &o.inConfig, "Trace", o.Trace)
case "logtime":
o.Logtime = v.(bool)
trackExplicitVal(o, &o.inConfig, "Logtime", o.Logtime)
case "accounts":
err := parseAccounts(tk, o, &errors, &warnings)
if err != nil {
errors = append(errors, err)
continue
}
case "authorization":
auth, err := parseAuthorization(tk, o, &errors, &warnings)
if err != nil {
errors = append(errors, err)
continue
}
o.Username = auth.user
o.Password = auth.pass
o.Authorization = auth.token
if (auth.user != "" || auth.pass != "") && auth.token != "" {
err := &configErr{tk, fmt.Sprintf("Cannot have a user/pass and token")}
errors = append(errors, err)
continue
}
o.AuthTimeout = auth.timeout
// Check for multiple users defined
if auth.users != nil {
if auth.user != "" {
err := &configErr{tk, fmt.Sprintf("Can not have a single user/pass and a users array")}
errors = append(errors, err)
continue
}
if auth.token != "" {
err := &configErr{tk, fmt.Sprintf("Can not have a token and a users array")}
errors = append(errors, err)
continue
}
// Users may have been added from Accounts parsing, so do an append here
o.Users = append(o.Users, auth.users...)
}
// Check for nkeys
if auth.nkeys != nil {
// NKeys may have been added from Accounts parsing, so do an append here
o.Nkeys = append(o.Nkeys, auth.nkeys...)
}
case "http":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
o.HTTPHost = hp.host
o.HTTPPort = hp.port
case "https":
hp, err := parseListen(v)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
o.HTTPHost = hp.host
o.HTTPSPort = hp.port
case "http_port", "monitor_port":
o.HTTPPort = int(v.(int64))
case "https_port":
o.HTTPSPort = int(v.(int64))
case "cluster":
err := parseCluster(tk, o, &errors, &warnings)
if err != nil {
errors = append(errors, err)
continue
}
case "gateway":
if err := parseGateway(tk, o, &errors, &warnings); err != nil {
errors = append(errors, err)
continue
}
case "logfile", "log_file":
o.LogFile = v.(string)
case "syslog":
o.Syslog = v.(bool)
trackExplicitVal(o, &o.inConfig, "Syslog", o.Syslog)
case "remote_syslog":
o.RemoteSyslog = v.(string)
case "pidfile", "pid_file":
o.PidFile = v.(string)
case "ports_file_dir":
o.PortsFileDir = v.(string)
case "prof_port":
o.ProfPort = int(v.(int64))
case "max_control_line":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
errors = append(errors, err)
continue
}
o.MaxControlLine = int32(v.(int64))
case "max_payload":
if v.(int64) > 1<<31-1 {
err := &configErr{tk, fmt.Sprintf("%s value is too big", k)}
errors = append(errors, err)
continue
}
o.MaxPayload = int32(v.(int64))
case "max_pending":
o.MaxPending = v.(int64)
case "max_connections", "max_conn":
o.MaxConn = int(v.(int64))
case "max_subscriptions", "max_subs":
o.MaxSubs = int(v.(int64))
case "ping_interval":
o.PingInterval = time.Duration(int(v.(int64))) * time.Second
case "ping_max":
o.MaxPingsOut = int(v.(int64))
case "tls":
tc, err := parseTLS(tk)
if err != nil {
errors = append(errors, err)
continue
}
if o.TLSConfig, err = GenTLSConfig(tc); err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
o.TLSTimeout = tc.Timeout
o.TLSMap = tc.Map
case "write_deadline":
wd, ok := v.(string)
if ok {
dur, err := time.ParseDuration(wd)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing write_deadline: %v", err)}
errors = append(errors, err)
continue
}
o.WriteDeadline = dur
} else {
// Backward compatible with old type, assume this is the
// number of seconds.
o.WriteDeadline = time.Duration(v.(int64)) * time.Second
err := &configWarningErr{
field: k,
configErr: configErr{
token: tk,
reason: "write_deadline should be converted to a duration",
},
}
warnings = append(warnings, err)
}
case "lame_duck_duration":
dur, err := time.ParseDuration(v.(string))
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing lame_duck_duration: %v", err)}
errors = append(errors, err)
continue
}
if dur < 30*time.Second {
err := &configErr{tk, fmt.Sprintf("invalid lame_duck_duration of %v, minimum is 30 seconds", dur)}
errors = append(errors, err)
continue
}
o.LameDuckDuration = dur
case "operator", "operators", "roots", "root", "root_operators", "root_operator":
opFiles := []string{}
switch v := v.(type) {
case string:
opFiles = append(opFiles, v)
case []string:
opFiles = append(opFiles, v...)
default:
err := &configErr{tk, fmt.Sprintf("error parsing operators: unsupported type %T", v)}
errors = append(errors, err)
}
// Assume for now these are file names.
// TODO(dlc) - If we try to read the file and it fails we could treat the string
// as the JWT itself.
o.TrustedOperators = make([]*jwt.OperatorClaims, 0, len(opFiles))
for _, fname := range opFiles {
opc, err := readOperatorJWT(fname)
if err != nil {
err := &configErr{tk, fmt.Sprintf("error parsing operator JWT: %v", err)}
errors = append(errors, err)
continue
}
o.TrustedOperators = append(o.TrustedOperators, opc)
}
case "resolver", "account_resolver", "accounts_resolver":
var memResolverRe = regexp.MustCompile(`(MEM|MEMORY|mem|memory)\s*`)
var resolverRe = regexp.MustCompile(`(?:URL|url){1}(?:\({1}\s*"?([^\s"]*)"?\s*\){1})?\s*`)
str, ok := v.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("error parsing operator resolver, wrong type %T", v)}
errors = append(errors, err)
continue
}
if memResolverRe.MatchString(str) {
o.AccountResolver = &MemAccResolver{}
} else {
items := resolverRe.FindStringSubmatch(str)
if len(items) == 2 {
url := items[1]
_, err := parseURL(url, "account resolver")
if err != nil {
errors = append(errors, &configErr{tk, err.Error()})
continue
}
if ur, err := NewURLAccResolver(url); err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
} else {
o.AccountResolver = ur
}
}
}
if o.AccountResolver == nil {
err := &configErr{tk, fmt.Sprintf("error parsing account resolver, should be MEM or URL(\"url\")")}
errors = append(errors, err)
}
case "resolver_preload":
mp, ok := v.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("preload should be a map of key:jwt")}
errors = append(errors, err)
continue
}
o.resolverPreloads = make(map[string]string)
for key, val := range mp {
tk, val = unwrapValue(val)
if jwt, ok := val.(string); !ok {
err := &configErr{tk, fmt.Sprintf("preload map value should be a string jwt")}
errors = append(errors, err)
continue
} else {
o.resolverPreloads[key] = jwt
}
}
case "system_account", "system":
if sa, ok := v.(string); !ok {
err := &configErr{tk, fmt.Sprintf("system account name must be a string")}
errors = append(errors, err)
} else {
o.SystemAccount = sa
}
case "trusted", "trusted_keys":
switch v := v.(type) {
case string:
o.TrustedKeys = []string{v}
case []string:
o.TrustedKeys = v
case []interface{}:
keys := make([]string, 0, len(v))
for _, mv := range v {
tk, mv = unwrapValue(mv)
if key, ok := mv.(string); ok {
keys = append(keys, key)
} else {
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type in array %T", mv)}
errors = append(errors, err)
continue
}
}
o.TrustedKeys = keys
default:
err := &configErr{tk, fmt.Sprintf("error parsing trusted: unsupported type %T", v)}
errors = append(errors, err)
}
// Do a quick sanity check on keys
for _, key := range o.TrustedKeys {
if !nkeys.IsValidPublicOperatorKey(key) {
err := &configErr{tk, fmt.Sprintf("trust key %q required to be a valid public operator nkey", key)}
errors = append(errors, err)
}
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
errors = append(errors, err)
}
}
}
if len(errors) > 0 || len(warnings) > 0 {
return &processConfigErr{
errors: errors,
warnings: warnings,
}
}
return nil
}
func trackExplicitVal(opts *Options, pm *map[string]bool, name string, val bool) {
m := *pm
if m == nil {
m = make(map[string]bool)
*pm = m
}
m[name] = val
}
// hostPort is simple struct to hold parsed listen/addr strings.
type hostPort struct {
host string
port int
}
// parseListen will parse the listen option, which replaces host/net and port
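// Both of these forms are accepted (illustrative values):
//
//	listen: 4222
//	listen: "192.168.0.1:4222"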
func parseListen(v interface{}) (*hostPort, error) {
hp := &hostPort{}
switch vv := v.(type) {
// Only a port
case int64:
hp.port = int(vv)
case string:
host, port, err := net.SplitHostPort(vv)
if err != nil {
return nil, fmt.Errorf("could not parse address string %q", vv)
}
hp.port, err = strconv.Atoi(port)
if err != nil {
return nil, fmt.Errorf("could not parse port %q", port)
}
hp.host = host
}
return hp, nil
}
// parseCluster will parse the cluster config.
func parseCluster(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
tk, v := unwrapValue(v)
cm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected map to define cluster, got %T", v)}
}
for mk, mv := range cm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv)
switch strings.ToLower(mk) {
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
opts.Cluster.Host = hp.host
opts.Cluster.Port = hp.port
case "port":
opts.Cluster.Port = int(mv.(int64))
case "host", "net":
opts.Cluster.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
err := &configErr{tk, fmt.Sprintf("Cluster authorization does not allow multiple users")}
*errors = append(*errors, err)
continue
}
opts.Cluster.Username = auth.user
opts.Cluster.Password = auth.pass
opts.Cluster.AuthTimeout = auth.timeout
if auth.defaultPermissions != nil {
err := &configWarningErr{
field: mk,
configErr: configErr{
token: tk,
reason: `setting "permissions" within cluster authorization block is deprecated`,
},
}
*warnings = append(*warnings, err)
// Do not set permissions if they were specified in top-level cluster block.
if opts.Cluster.Permissions == nil {
setClusterPermissions(&opts.Cluster, auth.defaultPermissions)
}
}
case "routes":
ra := mv.([]interface{})
routes, errs := parseURLs(ra, "route")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
opts.Routes = routes
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
opts.Cluster.TLSConfig = config
opts.Cluster.TLSTimeout = tlsopts.Timeout
opts.Cluster.TLSMap = tlsopts.Map
case "cluster_advertise", "advertise":
opts.Cluster.Advertise = mv.(string)
case "no_advertise":
opts.Cluster.NoAdvertise = mv.(bool)
trackExplicitVal(opts, &opts.inConfig, "Cluster.NoAdvertise", opts.Cluster.NoAdvertise)
case "connect_retries":
opts.Cluster.ConnectRetries = int(mv.(int64))
case "permissions":
perms, err := parseUserPermissions(mv, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
// This will possibly override permissions that were defined in the auth block
setClusterPermissions(&opts.Cluster, perms)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
func parseURLs(a []interface{}, typ string) ([]*url.URL, []error) {
var (
errors []error
urls = make([]*url.URL, 0, len(a))
)
for _, u := range a {
tk, u := unwrapValue(u)
sURL := u.(string)
url, err := parseURL(sURL, typ)
if err != nil {
err := &configErr{tk, err.Error()}
errors = append(errors, err)
continue
}
urls = append(urls, url)
}
return urls, errors
}
func parseURL(u string, typ string) (*url.URL, error) {
urlStr := strings.TrimSpace(u)
url, err := url.Parse(urlStr)
if err != nil {
return nil, fmt.Errorf("error parsing %s url [%q]", typ, urlStr)
}
return url, nil
}
func parseGateway(v interface{}, o *Options, errors *[]error, warnings *[]error) error {
tk, v := unwrapValue(v)
gm, ok := v.(map[string]interface{})
if !ok {
return &configErr{tk, fmt.Sprintf("Expected gateway to be a map, got %T", v)}
}
for mk, mv := range gm {
// Again, unwrap token value if line check is required.
tk, mv = unwrapValue(mv)
switch strings.ToLower(mk) {
case "name":
o.Gateway.Name = mv.(string)
case "listen":
hp, err := parseListen(mv)
if err != nil {
err := &configErr{tk, err.Error()}
*errors = append(*errors, err)
continue
}
o.Gateway.Host = hp.host
o.Gateway.Port = hp.port
case "port":
o.Gateway.Port = int(mv.(int64))
case "host", "net":
o.Gateway.Host = mv.(string)
case "authorization":
auth, err := parseAuthorization(tk, o, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if auth.users != nil {
*errors = append(*errors, &configErr{tk, "Gateway authorization does not allow multiple users"})
continue
}
o.Gateway.Username = auth.user
o.Gateway.Password = auth.pass
o.Gateway.AuthTimeout = auth.timeout
case "tls":
config, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
o.Gateway.TLSConfig = config
o.Gateway.TLSTimeout = tlsopts.Timeout
o.Gateway.TLSMap = tlsopts.Map
case "advertise":
o.Gateway.Advertise = mv.(string)
case "connect_retries":
o.Gateway.ConnectRetries = int(mv.(int64))
case "gateways":
gateways, err := parseGateways(mv, errors, warnings)
if err != nil {
return err
}
o.Gateway.Gateways = gateways
case "reject_unknown":
o.Gateway.RejectUnknown = mv.(bool)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
return nil
}
// Parse TLS and returns a TLSConfig and TLSTimeout.
// Used by cluster and gateway parsing.
func getTLSConfig(tk token) (*tls.Config, *TLSConfigOpts, error) {
tc, err := parseTLS(tk)
if err != nil {
return nil, nil, err
}
config, err := GenTLSConfig(tc)
if err != nil {
err := &configErr{tk, err.Error()}
return nil, nil, err
}
// For clusters/gateways, we will force strict verification. We also act
// as both client and server, so will mirror the rootCA to the
// clientCA pool.
config.ClientAuth = tls.RequireAndVerifyClientCert
config.RootCAs = config.ClientCAs
return config, tc, nil
}
func parseGateways(v interface{}, errors *[]error, warnings *[]error) ([]*RemoteGatewayOpts, error) {
tk, v := unwrapValue(v)
// Make sure we have an array
ga, ok := v.([]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected gateways field to be an array, got %T", v)}
}
gateways := []*RemoteGatewayOpts{}
for _, g := range ga {
tk, g = unwrapValue(g)
// Check its a map/struct
gm, ok := g.(map[string]interface{})
if !ok {
*errors = append(*errors, &configErr{tk, fmt.Sprintf("Expected gateway entry to be a map/struct, got %v", g)})
continue
}
gateway := &RemoteGatewayOpts{}
for k, v := range gm {
tk, v = unwrapValue(v)
switch strings.ToLower(k) {
case "name":
gateway.Name = v.(string)
case "tls":
tls, tlsopts, err := getTLSConfig(tk)
if err != nil {
*errors = append(*errors, err)
continue
}
gateway.TLSConfig = tls
gateway.TLSTimeout = tlsopts.Timeout
case "url":
url, err := parseURL(v.(string), "gateway")
if err != nil {
*errors = append(*errors, &configErr{tk, err.Error()})
continue
}
gateway.URLs = append(gateway.URLs, url)
case "urls":
urls, errs := parseURLs(v.([]interface{}), "gateway")
if errs != nil {
*errors = append(*errors, errs...)
continue
}
gateway.URLs = urls
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
gateways = append(gateways, gateway)
}
return gateways, nil
}
// Sets cluster's permissions based on given pub/sub permissions,
// doing the appropriate translation.
func setClusterPermissions(opts *ClusterOpts, perms *Permissions) {
// Import is whether or not we will send a SUB for interest to the other side.
// Export is whether or not we will accept a SUB from the remote for a given subject.
// Both only affect interest registration.
// The parsing sets Import into Publish and Export into Subscribe, convert
// accordingly.
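// For example (illustrative): `permissions: { import: "foo.>" }` in a
// cluster block arrives here as perms.Publish and therefore becomes
// RoutePermissions.Import.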
opts.Permissions = &RoutePermissions{
Import: perms.Publish,
Export: perms.Subscribe,
}
}
// Temp structures to hold account import and export definitions since they need
// to be processed after being parsed.
type export struct {
acc *Account
sub string
accs []string
}
type importStream struct {
acc *Account
an string
sub string
pre string
}
type importService struct {
acc *Account
an string
sub string
to string
}
// Checks if an account name is reserved.
func isReservedAccount(name string) bool {
return name == globalAccountName
}
// parseAccounts will parse the different accounts syntax.
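// Two shapes are accepted (illustrative):
//
//	accounts: [A, B]
//	accounts: { A: { users: [{user: a, password: b}] } }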
func parseAccounts(v interface{}, opts *Options, errors *[]error, warnings *[]error) error {
var (
importStreams []*importStream
importServices []*importService
exportStreams []*export
exportServices []*export
)
tk, v := unwrapValue(v)
switch vv := v.(type) {
// Simple array of account names.
case []interface{}, []string:
m := make(map[string]struct{}, len(v.([]interface{})))
for _, n := range v.([]interface{}) {
tk, name := unwrapValue(n)
ns := name.(string)
// Check for reserved names.
if isReservedAccount(ns) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", ns)}
*errors = append(*errors, err)
continue
}
if _, ok := m[ns]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate Account Entry: %s", ns)}
*errors = append(*errors, err)
continue
}
opts.Accounts = append(opts.Accounts, NewAccount(ns))
m[ns] = struct{}{}
}
// More common map entry
case map[string]interface{}:
// Track users across accounts, must be unique across
// accounts and nkeys vs users.
uorn := make(map[string]struct{})
for aname, mv := range vv {
tk, amv := unwrapValue(mv)
// Skip referenced config vars within the account block.
if tk.IsUsedVariable() {
continue
}
// These should be maps.
mv, ok := amv.(map[string]interface{})
if !ok {
err := &configErr{tk, "Expected map entries for accounts"}
*errors = append(*errors, err)
continue
}
if isReservedAccount(aname) {
err := &configErr{tk, fmt.Sprintf("%q is a Reserved Account", aname)}
*errors = append(*errors, err)
continue
}
acc := NewAccount(aname)
opts.Accounts = append(opts.Accounts, acc)
for k, v := range mv {
tk, mv := unwrapValue(v)
switch strings.ToLower(k) {
case "nkey":
nk, ok := mv.(string)
if !ok || !nkeys.IsValidPublicAccountKey(nk) {
err := &configErr{tk, fmt.Sprintf("Not a valid public nkey for an account: %q", mv)}
*errors = append(*errors, err)
continue
}
acc.Nkey = nk
case "imports":
streams, services, err := parseAccountImports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
importStreams = append(importStreams, streams...)
importServices = append(importServices, services...)
case "exports":
streams, services, err := parseAccountExports(tk, acc, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
exportStreams = append(exportStreams, streams...)
exportServices = append(exportServices, services...)
case "users":
nkeys, users, err := parseUsers(mv, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
for _, u := range users {
if _, ok := uorn[u.Username]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate user %q detected", u.Username)}
*errors = append(*errors, err)
continue
}
uorn[u.Username] = struct{}{}
u.Account = acc
}
opts.Users = append(opts.Users, users...)
for _, u := range nkeys {
if _, ok := uorn[u.Nkey]; ok {
err := &configErr{tk, fmt.Sprintf("Duplicate nkey %q detected", u.Nkey)}
*errors = append(*errors, err)
continue
}
uorn[u.Nkey] = struct{}{}
u.Account = acc
}
opts.Nkeys = append(opts.Nkeys, nkeys...)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
}
}
// Bail already if there are previous errors.
if len(*errors) > 0 {
return nil
}
// Parse Imports and Exports here after all accounts are defined.
// Do exports first since they need to be defined for imports to succeed,
// because we do permissions checks.
// Create a lookup map for accounts lookups.
am := make(map[string]*Account, len(opts.Accounts))
for _, a := range opts.Accounts {
am[a.Name] = a
}
// Do stream exports
for _, stream := range exportStreams {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range stream.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := stream.acc.AddStreamExport(stream.sub, accounts); err != nil {
msg := fmt.Sprintf("Error adding stream export %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range exportServices {
// Make array of accounts if applicable.
var accounts []*Account
for _, an := range service.accs {
ta := am[an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service export", an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
accounts = append(accounts, ta)
}
if err := service.acc.AddServiceExport(service.sub, accounts); err != nil {
msg := fmt.Sprintf("Error adding service export %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, stream := range importStreams {
ta := am[stream.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for stream import", stream.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if err := stream.acc.AddStreamImport(ta, stream.sub, stream.pre); err != nil {
msg := fmt.Sprintf("Error adding stream import %q: %v", stream.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
for _, service := range importServices {
ta := am[service.an]
if ta == nil {
msg := fmt.Sprintf("%q account not defined for service import", service.an)
*errors = append(*errors, &configErr{tk, msg})
continue
}
if service.to == "" {
service.to = service.sub
}
if err := service.acc.AddServiceImport(ta, service.to, service.sub); err != nil {
msg := fmt.Sprintf("Error adding service import %q: %v", service.sub, err)
*errors = append(*errors, &configErr{tk, msg})
continue
}
}
return nil
}
// Parse the account imports
func parseAccountExports(v interface{}, acc *Account, errors, warnings *[]error) ([]*export, []*export, error) {
// This should be an array of objects/maps.
tk, v := unwrapValue(v)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Exports should be an array, got %T", v)}
}
var services []*export
var streams []*export
for _, v := range ims {
// Should have stream or service
stream, service, err := parseExportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Parse the account imports
func parseAccountImports(v interface{}, acc *Account, errors, warnings *[]error) ([]*importStream, []*importService, error) {
// This should be an array of objects/maps.
tk, v := unwrapValue(v)
ims, ok := v.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Imports should be an array, got %T", v)}
}
var services []*importService
var streams []*importStream
for _, v := range ims {
// Should have stream or service
stream, service, err := parseImportStreamOrService(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if service != nil {
service.acc = acc
services = append(services, service)
}
if stream != nil {
stream.acc = acc
streams = append(streams, stream)
}
}
return streams, services, nil
}
// Helper to parse an embedded account description for imported services or streams.
func parseAccount(v map[string]interface{}, errors, warnings *[]error) (string, string, error) {
var accountName, subject string
for mk, mv := range v {
tk, mv := unwrapValue(mv)
switch strings.ToLower(mk) {
case "account":
accountName = mv.(string)
case "subject":
subject = mv.(string)
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return accountName, subject, nil
}
// Parse an import stream or service.
// e.g.
// {stream: "public.>"} # No accounts means public.
// {stream: "synadia.private.>", accounts: [cncf, natsio]}
// {service: "pub.request"} # No accounts means public.
// {service: "pub.special.request", accounts: [nats.io]}
func parseExportStreamOrService(v interface{}, errors, warnings *[]error) (*export, *export, error) {
var (
curStream *export
curService *export
accounts []string
)
tk, v := unwrapValue(v)
vv, ok := v.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Export Items should be a map with type entry, got %T", v)}
}
for mk, mv := range vv {
tk, mv := unwrapValue(mv)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, fmt.Sprintf("Detected stream %q but already saw a service", mv)}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected stream name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curStream = &export{sub: mvs}
if accounts != nil {
curStream.accs = accounts
}
case "service":
if curStream != nil {
err := &configErr{tk, fmt.Sprintf("Detected service %q but already saw a stream", mv)}
*errors = append(*errors, err)
continue
}
mvs, ok := mv.(string)
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected service name to be string, got %T", mv)}
*errors = append(*errors, err)
continue
}
curService = &export{sub: mvs}
if accounts != nil {
curService.accs = accounts
}
case "accounts":
for _, iv := range mv.([]interface{}) {
_, mv := unwrapValue(iv)
accounts = append(accounts, mv.(string))
}
if curStream != nil {
curStream.accs = accounts
} else if curService != nil {
curService.accs = accounts
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// Parse an import stream or service.
// e.g.
// {stream: {account: "synadia", subject:"public.synadia"}, prefix: "imports.synadia"}
// {stream: {account: "synadia", subject:"synadia.private.*"}}
// {service: {account: "synadia", subject: "pub.special.request"}, to: "synadia.request"}
func parseImportStreamOrService(v interface{}, errors, warnings *[]error) (*importStream, *importService, error) {
var (
curStream *importStream
curService *importService
pre, to string
)
tk, mv := unwrapValue(v)
vv, ok := mv.(map[string]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Import Items should be a map with type entry, got %T", mv)}
}
for mk, mv := range vv {
tk, mv := unwrapValue(mv)
switch strings.ToLower(mk) {
case "stream":
if curService != nil {
err := &configErr{tk, fmt.Sprintf("Detected stream but already saw a service")}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Stream entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, fmt.Sprintf("Expect an account name and a subject")}
*errors = append(*errors, err)
continue
}
curStream = &importStream{an: accountName, sub: subject}
if pre != "" {
curStream.pre = pre
}
case "service":
if curStream != nil {
err := &configErr{tk, fmt.Sprintf("Detected service but already saw a stream")}
*errors = append(*errors, err)
continue
}
ac, ok := mv.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Service entry should be an account map, got %T", mv)}
*errors = append(*errors, err)
continue
}
// Make sure this is a map with account and subject
accountName, subject, err := parseAccount(ac, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
if accountName == "" || subject == "" {
err := &configErr{tk, fmt.Sprintf("Expect an account name and a subject")}
*errors = append(*errors, err)
continue
}
curService = &importService{an: accountName, sub: subject}
if to != "" {
curService.to = to
}
case "prefix":
pre = mv.(string)
if curStream != nil {
curStream.pre = pre
}
case "to":
to = mv.(string)
if curService != nil {
curService.to = to
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
}
}
return curStream, curService, nil
}
// Helper function to parse Authorization configs.
func parseAuthorization(v interface{}, opts *Options, errors *[]error, warnings *[]error) (*authorization, error) {
var (
am map[string]interface{}
tk token
auth = &authorization{}
)
_, v = unwrapValue(v)
am = v.(map[string]interface{})
for mk, mv := range am {
tk, mv = unwrapValue(mv)
switch strings.ToLower(mk) {
case "user", "username":
auth.user = mv.(string)
case "pass", "password":
auth.pass = mv.(string)
case "token":
auth.token = mv.(string)
case "timeout":
at := float64(1)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
auth.timeout = at
case "users":
nkeys, users, err := parseUsers(tk, opts, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.users = users
auth.nkeys = nkeys
case "default_permission", "default_permissions", "permissions":
permissions, err := parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
auth.defaultPermissions = permissions
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: mk,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
}
continue
}
// Now check for permission defaults with multiple users, etc.
if auth.users != nil && auth.defaultPermissions != nil {
for _, user := range auth.users {
if user.Permissions == nil {
user.Permissions = auth.defaultPermissions
}
}
}
}
return auth, nil
}
// Helper function to parse multiple users array with optional permissions.
func parseUsers(mv interface{}, opts *Options, errors *[]error, warnings *[]error) ([]*NkeyUser, []*User, error) {
var (
tk token
keys []*NkeyUser
users = []*User{}
)
tk, mv = unwrapValue(mv)
// Make sure we have an array
uv, ok := mv.([]interface{})
if !ok {
return nil, nil, &configErr{tk, fmt.Sprintf("Expected users field to be an array, got %v", mv)}
}
for _, u := range uv {
tk, u = unwrapValue(u)
		// Check it's a map/struct
um, ok := u.(map[string]interface{})
if !ok {
err := &configErr{tk, fmt.Sprintf("Expected user entry to be a map/struct, got %v", u)}
*errors = append(*errors, err)
continue
}
var (
user = &User{}
nkey = &NkeyUser{}
perms *Permissions
err error
)
for k, v := range um {
// Also needs to unwrap first
tk, v = unwrapValue(v)
switch strings.ToLower(k) {
case "nkey":
nkey.Nkey = v.(string)
case "user", "username":
user.Username = v.(string)
case "pass", "password":
user.Password = v.(string)
case "permission", "permissions", "authorization":
perms, err = parseUserPermissions(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
default:
if !tk.IsUsedVariable() {
err := &unknownConfigFieldErr{
field: k,
configErr: configErr{
token: tk,
},
}
*errors = append(*errors, err)
continue
}
}
}
// Place perms if we have them.
if perms != nil {
			// nkey takes precedence.
if nkey.Nkey != "" {
nkey.Permissions = perms
} else {
user.Permissions = perms
}
}
		// Check to make sure we have at least an nkey or username/password defined.
if nkey.Nkey == "" && user.Username == "" {
return nil, nil, &configErr{tk, fmt.Sprintf("User entry requires a user")}
} else if nkey.Nkey != "" {
			// Make sure the nkey is a proper public nkey for a user.
if !nkeys.IsValidPublicUserKey(nkey.Nkey) {
return nil, nil, &configErr{tk, fmt.Sprintf("Not a valid public nkey for a user")}
}
// If we have user or password defined here that is an error.
if user.Username != "" || user.Password != "" {
return nil, nil, &configErr{tk, fmt.Sprintf("Nkey users do not take usernames or passwords")}
}
keys = append(keys, nkey)
} else {
users = append(users, user)
}
}
return keys, users, nil
}
// Helper function to parse user/account permissions
func parseUserPermissions(mv interface{}, errors, warnings *[]error) (*Permissions, error) {
var (
tk token
p = &Permissions{}
)
tk, mv = unwrapValue(mv)
pm, ok := mv.(map[string]interface{})
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Expected permissions to be a map/struct, got %+v", mv)}
}
for k, v := range pm {
tk, v = unwrapValue(v)
switch strings.ToLower(k) {
// For routes:
// Import is Publish
// Export is Subscribe
case "pub", "publish", "import":
perms, err := parseVariablePermissions(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Publish = perms
case "sub", "subscribe", "export":
perms, err := parseVariablePermissions(v, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Subscribe = perms
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field %q parsing permissions", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Parses a permission entry, which may be given in the old style (a subject or
// list of subjects) or the new style (a map with allow and/or deny).
func parseVariablePermissions(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
switch vv := v.(type) {
case map[string]interface{}:
// New style with allow and/or deny properties.
return parseSubjectPermission(vv, errors, warnings)
default:
// Old style
return parseOldPermissionStyle(v, errors, warnings)
}
}
// Helper function to parse subject singletons and/or arrays
func parseSubjects(v interface{}, errors, warnings *[]error) ([]string, error) {
tk, v := unwrapValue(v)
var subjects []string
switch vv := v.(type) {
case string:
subjects = append(subjects, vv)
case []string:
subjects = vv
case []interface{}:
for _, i := range vv {
tk, i := unwrapValue(i)
subject, ok := i.(string)
if !ok {
return nil, &configErr{tk, fmt.Sprintf("Subject in permissions array cannot be cast to string")}
}
subjects = append(subjects, subject)
}
default:
return nil, &configErr{tk, fmt.Sprintf("Expected subject permissions to be a subject, or array of subjects, got %T", v)}
}
if err := checkSubjectArray(subjects); err != nil {
return nil, &configErr{tk, err.Error()}
}
return subjects, nil
}
// Helper function to parse old style authorization configs.
func parseOldPermissionStyle(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
subjects, err := parseSubjects(v, errors, warnings)
if err != nil {
return nil, err
}
return &SubjectPermission{Allow: subjects}, nil
}
// Helper function to parse new style authorization into a SubjectPermission with Allow and Deny.
func parseSubjectPermission(v interface{}, errors, warnings *[]error) (*SubjectPermission, error) {
m := v.(map[string]interface{})
if len(m) == 0 {
return nil, nil
}
p := &SubjectPermission{}
for k, v := range m {
tk, _ := unwrapValue(v)
switch strings.ToLower(k) {
case "allow":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Allow = subjects
case "deny":
subjects, err := parseSubjects(tk, errors, warnings)
if err != nil {
*errors = append(*errors, err)
continue
}
p.Deny = subjects
default:
if !tk.IsUsedVariable() {
err := &configErr{tk, fmt.Sprintf("Unknown field name %q parsing subject permissions, only 'allow' or 'deny' are permitted", k)}
*errors = append(*errors, err)
}
}
}
return p, nil
}
// Helper function to validate subjects, etc for account permissioning.
func checkSubjectArray(sa []string) error {
for _, s := range sa {
if !IsValidSubject(s) {
return fmt.Errorf("subject %q is not a valid subject", s)
}
}
return nil
}
// PrintTLSHelpAndDie prints TLS usage and exits.
func PrintTLSHelpAndDie() {
fmt.Printf("%s", tlsUsage)
for k := range cipherMap {
fmt.Printf(" %s\n", k)
}
fmt.Printf("\nAvailable curve preferences include:\n")
for k := range curvePreferenceMap {
fmt.Printf(" %s\n", k)
}
os.Exit(0)
}
func parseCipher(cipherName string) (uint16, error) {
cipher, exists := cipherMap[cipherName]
if !exists {
return 0, fmt.Errorf("unrecognized cipher %s", cipherName)
}
return cipher, nil
}
func parseCurvePreferences(curveName string) (tls.CurveID, error) {
curve, exists := curvePreferenceMap[curveName]
if !exists {
return 0, fmt.Errorf("unrecognized curve preference %s", curveName)
}
return curve, nil
}
// Helper function to parse TLS configs.
func parseTLS(v interface{}) (*TLSConfigOpts, error) {
var (
tlsm map[string]interface{}
tc = TLSConfigOpts{}
)
_, v = unwrapValue(v)
tlsm = v.(map[string]interface{})
for mk, mv := range tlsm {
tk, mv := unwrapValue(mv)
switch strings.ToLower(mk) {
case "cert_file":
certFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, expected 'cert_file' to be filename")}
}
tc.CertFile = certFile
case "key_file":
keyFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, expected 'key_file' to be filename")}
}
tc.KeyFile = keyFile
case "ca_file":
caFile, ok := mv.(string)
if !ok {
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, expected 'ca_file' to be filename")}
}
tc.CaFile = caFile
case "verify":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, expected 'verify' to be a boolean")}
}
tc.Verify = verify
case "verify_and_map":
verify, ok := mv.(bool)
if !ok {
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, expected 'verify_and_map' to be a boolean")}
}
tc.Verify = verify
tc.Map = verify
case "cipher_suites":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, 'cipher_suites' cannot be empty")}
}
tc.Ciphers = make([]uint16, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r)
cipher, err := parseCipher(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.Ciphers = append(tc.Ciphers, cipher)
}
case "curve_preferences":
ra := mv.([]interface{})
if len(ra) == 0 {
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, 'curve_preferences' cannot be empty")}
}
tc.CurvePreferences = make([]tls.CurveID, 0, len(ra))
for _, r := range ra {
tk, r := unwrapValue(r)
cps, err := parseCurvePreferences(r.(string))
if err != nil {
return nil, &configErr{tk, err.Error()}
}
tc.CurvePreferences = append(tc.CurvePreferences, cps)
}
case "timeout":
at := float64(0)
switch mv := mv.(type) {
case int64:
at = float64(mv)
case float64:
at = mv
}
tc.Timeout = at
default:
return nil, &configErr{tk, fmt.Sprintf("error parsing tls config, unknown field [%q]", mk)}
}
}
// If cipher suites were not specified then use the defaults
if tc.Ciphers == nil {
tc.Ciphers = defaultCipherSuites()
}
// If curve preferences were not specified, then use the defaults
if tc.CurvePreferences == nil {
tc.CurvePreferences = defaultCurvePreferences()
}
return &tc, nil
}
// GenTLSConfig loads TLS related configuration parameters.
func GenTLSConfig(tc *TLSConfigOpts) (*tls.Config, error) {
// Now load in cert and private key
cert, err := tls.LoadX509KeyPair(tc.CertFile, tc.KeyFile)
if err != nil {
return nil, fmt.Errorf("error parsing X509 certificate/key pair: %v", err)
}
cert.Leaf, err = x509.ParseCertificate(cert.Certificate[0])
if err != nil {
return nil, fmt.Errorf("error parsing certificate: %v", err)
}
// Create the tls.Config from our options.
// We will determine the cipher suites that we prefer.
// FIXME(dlc) change if ARM based.
config := tls.Config{
MinVersion: tls.VersionTLS12,
CipherSuites: tc.Ciphers,
PreferServerCipherSuites: true,
CurvePreferences: tc.CurvePreferences,
Certificates: []tls.Certificate{cert},
}
// Require client certificates as needed
if tc.Verify {
config.ClientAuth = tls.RequireAndVerifyClientCert
}
// Add in CAs if applicable.
if tc.CaFile != "" {
rootPEM, err := ioutil.ReadFile(tc.CaFile)
if err != nil || rootPEM == nil {
return nil, err
}
pool := x509.NewCertPool()
ok := pool.AppendCertsFromPEM(rootPEM)
if !ok {
return nil, fmt.Errorf("failed to parse root ca certificate")
}
config.ClientCAs = pool
}
return &config, nil
}
// MergeOptions will merge two options giving preference to the flagOpts
// if the item is present.
func MergeOptions(fileOpts, flagOpts *Options) *Options {
if fileOpts == nil {
return flagOpts
}
if flagOpts == nil {
return fileOpts
}
// Merge the two, flagOpts override
opts := *fileOpts
if flagOpts.Port != 0 {
opts.Port = flagOpts.Port
}
if flagOpts.Host != "" {
opts.Host = flagOpts.Host
}
if flagOpts.ClientAdvertise != "" {
opts.ClientAdvertise = flagOpts.ClientAdvertise
}
if flagOpts.Username != "" {
opts.Username = flagOpts.Username
}
if flagOpts.Password != "" {
opts.Password = flagOpts.Password
}
if flagOpts.Authorization != "" {
opts.Authorization = flagOpts.Authorization
}
if flagOpts.HTTPPort != 0 {
opts.HTTPPort = flagOpts.HTTPPort
}
if flagOpts.Debug {
opts.Debug = true
}
if flagOpts.Trace {
opts.Trace = true
}
if flagOpts.Logtime {
opts.Logtime = true
}
if flagOpts.LogFile != "" {
opts.LogFile = flagOpts.LogFile
}
if flagOpts.PidFile != "" {
opts.PidFile = flagOpts.PidFile
}
if flagOpts.PortsFileDir != "" {
opts.PortsFileDir = flagOpts.PortsFileDir
}
if flagOpts.ProfPort != 0 {
opts.ProfPort = flagOpts.ProfPort
}
if flagOpts.Cluster.ListenStr != "" {
opts.Cluster.ListenStr = flagOpts.Cluster.ListenStr
}
if flagOpts.Cluster.NoAdvertise {
opts.Cluster.NoAdvertise = true
}
if flagOpts.Cluster.ConnectRetries != 0 {
opts.Cluster.ConnectRetries = flagOpts.Cluster.ConnectRetries
}
if flagOpts.Cluster.Advertise != "" {
opts.Cluster.Advertise = flagOpts.Cluster.Advertise
}
if flagOpts.RoutesStr != "" {
mergeRoutes(&opts, flagOpts)
}
return &opts
}
// RoutesFromStr parses route URLs from a string
func RoutesFromStr(routesStr string) []*url.URL {
routes := strings.Split(routesStr, ",")
if len(routes) == 0 {
return nil
}
routeUrls := []*url.URL{}
for _, r := range routes {
r = strings.TrimSpace(r)
u, _ := url.Parse(r)
routeUrls = append(routeUrls, u)
}
return routeUrls
}
// This will merge the flag routes and override anything that was present.
func mergeRoutes(opts, flagOpts *Options) {
routeUrls := RoutesFromStr(flagOpts.RoutesStr)
if routeUrls == nil {
return
}
opts.Routes = routeUrls
opts.RoutesStr = flagOpts.RoutesStr
}
// RemoveSelfReference removes this server from an array of routes
func RemoveSelfReference(clusterPort int, routes []*url.URL) ([]*url.URL, error) {
var cleanRoutes []*url.URL
cport := strconv.Itoa(clusterPort)
selfIPs, err := getInterfaceIPs()
if err != nil {
return nil, err
}
for _, r := range routes {
host, port, err := net.SplitHostPort(r.Host)
if err != nil {
return nil, err
}
ipList, err := getURLIP(host)
if err != nil {
return nil, err
}
if cport == port && isIPInList(selfIPs, ipList) {
continue
}
cleanRoutes = append(cleanRoutes, r)
}
return cleanRoutes, nil
}
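// isIPInList returns true if any IP in list1 equals an IP in list2.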
func isIPInList(list1 []net.IP, list2 []net.IP) bool {
for _, ip1 := range list1 {
for _, ip2 := range list2 {
if ip1.Equal(ip2) {
return true
}
}
}
return false
}
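// getURLIP resolves the given host string, which may be an IP literal or a
// hostname, to the list of IP addresses it refers to.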
func getURLIP(ipStr string) ([]net.IP, error) {
ipList := []net.IP{}
ip := net.ParseIP(ipStr)
if ip != nil {
ipList = append(ipList, ip)
return ipList, nil
}
hostAddr, err := net.LookupHost(ipStr)
if err != nil {
return nil, fmt.Errorf("Error looking up host with route hostname: %v", err)
}
for _, addr := range hostAddr {
ip = net.ParseIP(addr)
if ip != nil {
ipList = append(ipList, ip)
}
}
return ipList, nil
}
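// getInterfaceIPs returns the IP addresses assigned to the local network interfaces.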
func getInterfaceIPs() ([]net.IP, error) {
var localIPs []net.IP
interfaceAddr, err := net.InterfaceAddrs()
if err != nil {
return nil, fmt.Errorf("Error getting self referencing address: %v", err)
}
	for i := 0; i < len(interfaceAddr); i++ {
		interfaceIP, _, err := net.ParseCIDR(interfaceAddr[i].String())
		if err != nil {
			return nil, fmt.Errorf("Error parsing self referencing address %q: %v", interfaceAddr[i].String(), err)
		}
		localIPs = append(localIPs, interfaceIP)
	}
return localIPs, nil
}
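// setBaselineOptions fills in default values for any options that were left unset.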
func setBaselineOptions(opts *Options) {
// Setup non-standard Go defaults
if opts.Host == "" {
opts.Host = DEFAULT_HOST
}
if opts.HTTPHost == "" {
// Default to same bind from server if left undefined
opts.HTTPHost = opts.Host
}
if opts.Port == 0 {
opts.Port = DEFAULT_PORT
} else if opts.Port == RANDOM_PORT {
// Choose randomly inside of net.Listen
opts.Port = 0
}
if opts.MaxConn == 0 {
opts.MaxConn = DEFAULT_MAX_CONNECTIONS
}
if opts.PingInterval == 0 {
opts.PingInterval = DEFAULT_PING_INTERVAL
}
if opts.MaxPingsOut == 0 {
opts.MaxPingsOut = DEFAULT_PING_MAX_OUT
}
if opts.TLSTimeout == 0 {
opts.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.AuthTimeout == 0 {
opts.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.Port != 0 {
if opts.Cluster.Host == "" {
opts.Cluster.Host = DEFAULT_HOST
}
if opts.Cluster.TLSTimeout == 0 {
opts.Cluster.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Cluster.AuthTimeout == 0 {
opts.Cluster.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
}
if opts.MaxControlLine == 0 {
opts.MaxControlLine = MAX_CONTROL_LINE_SIZE
}
if opts.MaxPayload == 0 {
opts.MaxPayload = MAX_PAYLOAD_SIZE
}
if opts.MaxPending == 0 {
opts.MaxPending = MAX_PENDING_SIZE
}
if opts.WriteDeadline == time.Duration(0) {
opts.WriteDeadline = DEFAULT_FLUSH_DEADLINE
}
if opts.RQSubsSweep == time.Duration(0) {
opts.RQSubsSweep = DEFAULT_REMOTE_QSUBS_SWEEPER
}
if opts.MaxClosedClients == 0 {
opts.MaxClosedClients = DEFAULT_MAX_CLOSED_CLIENTS
}
if opts.LameDuckDuration == 0 {
opts.LameDuckDuration = DEFAULT_LAME_DUCK_DURATION
}
if opts.Gateway.Port != 0 {
if opts.Gateway.Host == "" {
opts.Gateway.Host = DEFAULT_HOST
}
if opts.Gateway.TLSTimeout == 0 {
opts.Gateway.TLSTimeout = float64(TLS_TIMEOUT) / float64(time.Second)
}
if opts.Gateway.AuthTimeout == 0 {
opts.Gateway.AuthTimeout = float64(AUTH_TIMEOUT) / float64(time.Second)
}
}
}
// ConfigureOptions accepts a flag set and augments it with NATS Server
// specific flags. On success, an options structure is returned, configured
// based on the selected flags and/or configuration file.
// The command line options take precedence over the ones in the configuration file.
func ConfigureOptions(fs *flag.FlagSet, args []string, printVersion, printHelp, printTLSHelp func()) (*Options, error) {
opts := &Options{}
var (
showVersion bool
showHelp bool
showTLSHelp bool
signal string
configFile string
dbgAndTrace bool
err error
)
fs.BoolVar(&showHelp, "h", false, "Show this message.")
fs.BoolVar(&showHelp, "help", false, "Show this message.")
fs.IntVar(&opts.Port, "port", 0, "Port to listen on.")
fs.IntVar(&opts.Port, "p", 0, "Port to listen on.")
fs.StringVar(&opts.Host, "addr", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "a", "", "Network host to listen on.")
fs.StringVar(&opts.Host, "net", "", "Network host to listen on.")
fs.StringVar(&opts.ClientAdvertise, "client_advertise", "", "Client URL to advertise to other servers.")
fs.BoolVar(&opts.Debug, "D", false, "Enable Debug logging.")
fs.BoolVar(&opts.Debug, "debug", false, "Enable Debug logging.")
fs.BoolVar(&opts.Trace, "V", false, "Enable Trace logging.")
fs.BoolVar(&opts.Trace, "trace", false, "Enable Trace logging.")
fs.BoolVar(&dbgAndTrace, "DV", false, "Enable Debug and Trace logging.")
fs.BoolVar(&opts.Logtime, "T", true, "Timestamp log entries.")
fs.BoolVar(&opts.Logtime, "logtime", true, "Timestamp log entries.")
fs.StringVar(&opts.Username, "user", "", "Username required for connection.")
fs.StringVar(&opts.Password, "pass", "", "Password required for connection.")
fs.StringVar(&opts.Authorization, "auth", "", "Authorization token required for connection.")
fs.IntVar(&opts.HTTPPort, "m", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPPort, "http_port", 0, "HTTP Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "ms", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.IntVar(&opts.HTTPSPort, "https_port", 0, "HTTPS Port for /varz, /connz endpoints.")
fs.StringVar(&configFile, "c", "", "Configuration file.")
fs.StringVar(&configFile, "config", "", "Configuration file.")
fs.BoolVar(&opts.CheckConfig, "t", false, "Check configuration and exit.")
fs.StringVar(&signal, "sl", "", "Send signal to gnatsd process (stop, quit, reopen, reload)")
fs.StringVar(&signal, "signal", "", "Send signal to gnatsd process (stop, quit, reopen, reload)")
fs.StringVar(&opts.PidFile, "P", "", "File to store process pid.")
fs.StringVar(&opts.PidFile, "pid", "", "File to store process pid.")
fs.StringVar(&opts.PortsFileDir, "ports_file_dir", "", "Creates a ports file in the specified directory (<executable_name>_<pid>.ports)")
fs.StringVar(&opts.LogFile, "l", "", "File to store logging output.")
fs.StringVar(&opts.LogFile, "log", "", "File to store logging output.")
fs.BoolVar(&opts.Syslog, "s", false, "Enable syslog as log method.")
fs.BoolVar(&opts.Syslog, "syslog", false, "Enable syslog as log method..")
fs.StringVar(&opts.RemoteSyslog, "r", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.StringVar(&opts.RemoteSyslog, "remote_syslog", "", "Syslog server addr (udp://127.0.0.1:514).")
fs.BoolVar(&showVersion, "version", false, "Print version information.")
fs.BoolVar(&showVersion, "v", false, "Print version information.")
fs.IntVar(&opts.ProfPort, "profile", 0, "Profiling HTTP port")
fs.StringVar(&opts.RoutesStr, "routes", "", "Routes to actively solicit a connection.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.ListenStr, "cluster_listen", "", "Cluster url from which members can solicit routes.")
fs.StringVar(&opts.Cluster.Advertise, "cluster_advertise", "", "Cluster URL to advertise to other servers.")
fs.BoolVar(&opts.Cluster.NoAdvertise, "no_advertise", false, "Advertise known cluster IPs to clients.")
fs.IntVar(&opts.Cluster.ConnectRetries, "connect_retries", 0, "For implicit routes, number of connect retries")
fs.BoolVar(&showTLSHelp, "help_tls", false, "TLS help.")
fs.BoolVar(&opts.TLS, "tls", false, "Enable TLS.")
fs.BoolVar(&opts.TLSVerify, "tlsverify", false, "Enable TLS with client verification.")
fs.StringVar(&opts.TLSCert, "tlscert", "", "Server certificate file.")
fs.StringVar(&opts.TLSKey, "tlskey", "", "Private key for server certificate.")
fs.StringVar(&opts.TLSCaCert, "tlscacert", "", "Client certificate CA for verification.")
// The flags definition above set "default" values to some of the options.
// Calling Parse() here will override the default options with any value
// specified from the command line. This is ok. We will then update the
// options with the content of the configuration file (if present), and then,
// call Parse() again to override the default+config with command line values.
// Calling Parse() before processing config file is necessary since configFile
// itself is a command line argument, and also Parse() is required in order
// to know if user wants simply to show "help" or "version", etc...
if err := fs.Parse(args); err != nil {
return nil, err
}
if showVersion {
printVersion()
return nil, nil
}
if showHelp {
printHelp()
return nil, nil
}
if showTLSHelp {
printTLSHelp()
return nil, nil
}
// Process args looking for non-flag options,
// 'version' and 'help' only for now
showVersion, showHelp, err = ProcessCommandLineArgs(fs)
if err != nil {
return nil, err
} else if showVersion {
printVersion()
return nil, nil
} else if showHelp {
printHelp()
return nil, nil
}
// Snapshot flag options.
FlagSnapshot = opts.Clone()
// Keep track of the boolean flags that were explicitly set with their value.
fs.Visit(func(f *flag.Flag) {
switch f.Name {
case "DV":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", dbgAndTrace)
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", dbgAndTrace)
case "D":
fallthrough
case "debug":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Debug", FlagSnapshot.Debug)
case "V":
fallthrough
case "trace":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Trace", FlagSnapshot.Trace)
case "T":
fallthrough
case "logtime":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Logtime", FlagSnapshot.Logtime)
case "s":
fallthrough
case "syslog":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Syslog", FlagSnapshot.Syslog)
case "no_advertise":
trackExplicitVal(FlagSnapshot, &FlagSnapshot.inCmdLine, "Cluster.NoAdvertise", FlagSnapshot.Cluster.NoAdvertise)
}
})
// Process signal control.
if signal != "" {
if err := processSignal(signal); err != nil {
return nil, err
}
}
// Parse config if given
if configFile != "" {
// This will update the options with values from the config file.
err := opts.ProcessConfigFile(configFile)
if err != nil {
if opts.CheckConfig {
return nil, err
}
// If only warnings then can still continue.
if cerr, ok := err.(*processConfigErr); ok && len(cerr.Errors()) == 0 {
fmt.Fprint(os.Stderr, err)
return opts, nil
}
return nil, err
} else if opts.CheckConfig {
// Report configuration file syntax test was successful and exit.
return opts, nil
}
// Call this again to override config file options with options from command line.
// Note: We don't need to check error here since if there was an error, it would
// have been caught the first time this function was called (after setting up the
// flags).
fs.Parse(args)
} else if opts.CheckConfig {
return nil, fmt.Errorf("must specify [-c, --config] option to check configuration file syntax")
}
// Special handling of some flags
var (
flagErr error
tlsDisabled bool
tlsOverride bool
)
fs.Visit(func(f *flag.Flag) {
// short-circuit if an error was encountered
if flagErr != nil {
return
}
if strings.HasPrefix(f.Name, "tls") {
if f.Name == "tls" {
if !opts.TLS {
// User has specified "-tls=false", we need to disable TLS
opts.TLSConfig = nil
tlsDisabled = true
tlsOverride = false
return
}
tlsOverride = true
} else if !tlsDisabled {
tlsOverride = true
}
} else {
switch f.Name {
case "DV":
// Check value to support -DV=false
opts.Trace, opts.Debug = dbgAndTrace, dbgAndTrace
case "cluster", "cluster_listen":
// Override cluster config if explicitly set via flags.
flagErr = overrideCluster(opts)
case "routes":
// Keep in mind that the flag has updated opts.RoutesStr at this point.
if opts.RoutesStr == "" {
// Set routes array to nil since routes string is empty
opts.Routes = nil
return
}
routeUrls := RoutesFromStr(opts.RoutesStr)
opts.Routes = routeUrls
}
}
})
if flagErr != nil {
return nil, flagErr
}
// This will be true if some of the `-tls` params have been set and
// `-tls=false` has not been set.
if tlsOverride {
if err := overrideTLS(opts); err != nil {
return nil, err
}
}
// If we don't have cluster defined in the configuration
// file and no cluster listen string override, but we do
// have a routes override, we need to report misconfiguration.
if opts.RoutesStr != "" && opts.Cluster.ListenStr == "" && opts.Cluster.Host == "" && opts.Cluster.Port == 0 {
return nil, errors.New("solicited routes require cluster capabilities, e.g. --cluster")
}
return opts, nil
}
// overrideTLS is called when at least "-tls=true" has been set.
func overrideTLS(opts *Options) error {
if opts.TLSCert == "" {
return errors.New("TLS Server certificate must be present and valid")
}
if opts.TLSKey == "" {
return errors.New("TLS Server private key must be present and valid")
}
tc := TLSConfigOpts{}
tc.CertFile = opts.TLSCert
tc.KeyFile = opts.TLSKey
tc.CaFile = opts.TLSCaCert
tc.Verify = opts.TLSVerify
var err error
opts.TLSConfig, err = GenTLSConfig(&tc)
return err
}
// overrideCluster updates Options.Cluster if the flag "cluster" (or "cluster_listen")
// has explicitly been set on the command line. If it is set to the empty string, it
// clears the Cluster options.
func overrideCluster(opts *Options) error {
if opts.Cluster.ListenStr == "" {
// This one is enough to disable clustering.
opts.Cluster.Port = 0
return nil
}
clusterURL, err := url.Parse(opts.Cluster.ListenStr)
if err != nil {
return err
}
h, p, err := net.SplitHostPort(clusterURL.Host)
if err != nil {
return err
}
opts.Cluster.Host = h
_, err = fmt.Sscan(p, &opts.Cluster.Port)
if err != nil {
return err
}
if clusterURL.User != nil {
pass, hasPassword := clusterURL.User.Password()
if !hasPassword {
return errors.New("expected cluster password to be set")
}
opts.Cluster.Password = pass
user := clusterURL.User.Username()
opts.Cluster.Username = user
} else {
// Since we override from flag and there is no user/pwd, make
// sure we clear what we may have gotten from config file.
opts.Cluster.Username = ""
opts.Cluster.Password = ""
}
return nil
}
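// processSignal parses a signal argument of the form "command[=pid]", where pid
// may also be the path to a pid file, sends the signal to the target process and
// exits on success.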
func processSignal(signal string) error {
var (
pid string
commandAndPid = strings.Split(signal, "=")
)
if l := len(commandAndPid); l == 2 {
pid = maybeReadPidFile(commandAndPid[1])
} else if l > 2 {
return fmt.Errorf("invalid signal parameters: %v", commandAndPid[2:])
}
if err := ProcessSignal(Command(commandAndPid[0]), pid); err != nil {
return err
}
os.Exit(0)
return nil
}
// maybeReadPidFile returns a PID or Windows service name obtained via the following method:
// 1. Try to open a file with path "pidStr" (absolute or relative).
// 2. If such a file exists and can be read, return its contents.
// 3. Otherwise, return the original "pidStr" string.
func maybeReadPidFile(pidStr string) string {
if b, err := ioutil.ReadFile(pidStr); err == nil {
return string(b)
}
return pidStr
}
| 1 | 8,740 | Not sure if we need it, but if we set it, be sure to add `,omitempty`. | nats-io-nats-server | go |
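A note on the `,omitempty` remark in the review message above: with Go's `encoding/json`, a struct field without that tag option is always serialized, even when it holds its zero value. A minimal, runnable sketch (the struct and field names are illustrative only, not taken from the patch under review):

```go
package main

import (
	"encoding/json"
	"fmt"
)

// opts is a toy options struct; only the json tag options matter here.
type opts struct {
	Host      string `json:"host"`
	Advertise string `json:"advertise,omitempty"` // omitted from output when empty
}

func main() {
	b, _ := json.Marshal(opts{Host: "0.0.0.0"})
	fmt.Println(string(b)) // prints {"host":"0.0.0.0"} with no empty "advertise" key
}
```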
@@ -17,7 +17,7 @@ var (
errGetLengthNotForVersionNegotiation = errors.New("PublicHeader: GetLength cannot be called for VersionNegotiation packets")
)
-// The PublicHeader of a QUIC packet
+// The PublicHeader of a QUIC packet.
type PublicHeader struct {
Raw []byte
 	ConnectionID         protocol.ConnectionID
| 1 |
package quic
import (
"bytes"
"errors"
"github.com/lucas-clemente/quic-go/protocol"
"github.com/lucas-clemente/quic-go/qerr"
"github.com/lucas-clemente/quic-go/utils"
)
var (
errPacketNumberLenNotSet = errors.New("PublicHeader: PacketNumberLen not set")
errResetAndVersionFlagSet = errors.New("PublicHeader: Reset Flag and Version Flag should not be set at the same time")
errReceivedTruncatedConnectionID = qerr.Error(qerr.InvalidPacketHeader, "receiving packets with truncated ConnectionID is not supported")
errInvalidConnectionID = qerr.Error(qerr.InvalidPacketHeader, "connection ID cannot be 0")
errGetLengthNotForVersionNegotiation = errors.New("PublicHeader: GetLength cannot be called for VersionNegotiation packets")
)
// The PublicHeader of a QUIC packet
type PublicHeader struct {
Raw []byte
ConnectionID protocol.ConnectionID
VersionFlag bool
ResetFlag bool
TruncateConnectionID bool
PacketNumberLen protocol.PacketNumberLen
PacketNumber protocol.PacketNumber
VersionNumber protocol.VersionNumber // VersionNumber sent by the client
SupportedVersions []protocol.VersionNumber // VersionNumbers sent by the server
DiversificationNonce []byte
}
// Write writes a public header
func (h *PublicHeader) Write(b *bytes.Buffer, version protocol.VersionNumber, pers protocol.Perspective) error {
publicFlagByte := uint8(0x00)
if h.VersionFlag && h.ResetFlag {
return errResetAndVersionFlagSet
}
if h.VersionFlag {
publicFlagByte |= 0x01
}
if h.ResetFlag {
publicFlagByte |= 0x02
}
if !h.TruncateConnectionID {
publicFlagByte |= 0x08
}
if len(h.DiversificationNonce) > 0 {
if len(h.DiversificationNonce) != 32 {
return errors.New("invalid diversification nonce length")
}
publicFlagByte |= 0x04
}
// only set PacketNumberLen bits if a packet number will be written
if h.hasPacketNumber(pers) {
switch h.PacketNumberLen {
case protocol.PacketNumberLen1:
publicFlagByte |= 0x00
case protocol.PacketNumberLen2:
publicFlagByte |= 0x10
case protocol.PacketNumberLen4:
publicFlagByte |= 0x20
case protocol.PacketNumberLen6:
publicFlagByte |= 0x30
}
}
b.WriteByte(publicFlagByte)
if !h.TruncateConnectionID {
utils.WriteUint64(b, uint64(h.ConnectionID))
}
if h.VersionFlag && pers == protocol.PerspectiveClient {
utils.WriteUint32(b, protocol.VersionNumberToTag(h.VersionNumber))
}
if len(h.DiversificationNonce) > 0 {
b.Write(h.DiversificationNonce)
}
// if we're a server, and the VersionFlag is set, we must not include anything else in the packet
if !h.hasPacketNumber(pers) {
return nil
}
if h.PacketNumberLen != protocol.PacketNumberLen1 && h.PacketNumberLen != protocol.PacketNumberLen2 && h.PacketNumberLen != protocol.PacketNumberLen4 && h.PacketNumberLen != protocol.PacketNumberLen6 {
return errPacketNumberLenNotSet
}
switch h.PacketNumberLen {
case protocol.PacketNumberLen1:
b.WriteByte(uint8(h.PacketNumber))
case protocol.PacketNumberLen2:
utils.WriteUint16(b, uint16(h.PacketNumber))
case protocol.PacketNumberLen4:
utils.WriteUint32(b, uint32(h.PacketNumber))
case protocol.PacketNumberLen6:
utils.WriteUint48(b, uint64(h.PacketNumber))
default:
return errPacketNumberLenNotSet
}
return nil
}
// ParsePublicHeader parses a QUIC packet's public header
// The packetSentBy argument is the perspective of the peer that sent this PublicHeader, i.e. if we're the server, packetSentBy should be PerspectiveClient.
func ParsePublicHeader(b *bytes.Reader, packetSentBy protocol.Perspective) (*PublicHeader, error) {
header := &PublicHeader{}
// First byte
publicFlagByte, err := b.ReadByte()
if err != nil {
return nil, err
}
header.VersionFlag = publicFlagByte&0x01 > 0
header.ResetFlag = publicFlagByte&0x02 > 0
// TODO: activate this check once Chrome sends the correct value
// see https://github.com/lucas-clemente/quic-go/issues/232
// if publicFlagByte&0x04 > 0 {
// return nil, errors.New("diversification nonces should only be sent by servers")
// }
header.TruncateConnectionID = publicFlagByte&0x08 == 0
if header.TruncateConnectionID && packetSentBy == protocol.PerspectiveClient {
return nil, errReceivedTruncatedConnectionID
}
if header.hasPacketNumber(packetSentBy) {
switch publicFlagByte & 0x30 {
case 0x30:
header.PacketNumberLen = protocol.PacketNumberLen6
case 0x20:
header.PacketNumberLen = protocol.PacketNumberLen4
case 0x10:
header.PacketNumberLen = protocol.PacketNumberLen2
case 0x00:
header.PacketNumberLen = protocol.PacketNumberLen1
}
}
// Connection ID
if !header.TruncateConnectionID {
var connID uint64
connID, err = utils.ReadUint64(b)
if err != nil {
return nil, err
}
header.ConnectionID = protocol.ConnectionID(connID)
if header.ConnectionID == 0 {
return nil, errInvalidConnectionID
}
}
if packetSentBy == protocol.PerspectiveServer && publicFlagByte&0x04 > 0 {
// TODO: remove the if once the Google servers send the correct value
// assume that a packet doesn't contain a diversification nonce if the version flag or the reset flag is set, no matter what the public flag says
// see https://github.com/lucas-clemente/quic-go/issues/232
if !header.VersionFlag && !header.ResetFlag {
header.DiversificationNonce = make([]byte, 32)
// this Read can never return an EOF for a valid packet, since the diversification nonce is followed by the packet number
_, err = b.Read(header.DiversificationNonce)
if err != nil {
return nil, err
}
}
}
// Version (optional)
if !header.ResetFlag {
if header.VersionFlag {
if packetSentBy == protocol.PerspectiveClient {
var versionTag uint32
versionTag, err = utils.ReadUint32(b)
if err != nil {
return nil, err
}
header.VersionNumber = protocol.VersionTagToNumber(versionTag)
			} else { // parse the version negotiation packet
if b.Len()%4 != 0 {
return nil, qerr.InvalidVersionNegotiationPacket
}
header.SupportedVersions = make([]protocol.VersionNumber, 0)
for {
var versionTag uint32
versionTag, err = utils.ReadUint32(b)
if err != nil {
break
}
v := protocol.VersionTagToNumber(versionTag)
if !protocol.IsSupportedVersion(v) {
v = protocol.VersionUnsupported
}
header.SupportedVersions = append(header.SupportedVersions, v)
}
}
}
}
// Packet number
if header.hasPacketNumber(packetSentBy) {
packetNumber, err := utils.ReadUintN(b, uint8(header.PacketNumberLen))
if err != nil {
return nil, err
}
header.PacketNumber = protocol.PacketNumber(packetNumber)
}
return header, nil
}
// GetLength gets the length of the PublicHeader in bytes.
// It can only be called for regular packets.
func (h *PublicHeader) GetLength(pers protocol.Perspective) (protocol.ByteCount, error) {
if h.VersionFlag && h.ResetFlag {
return 0, errResetAndVersionFlagSet
}
if h.VersionFlag && pers == protocol.PerspectiveServer {
return 0, errGetLengthNotForVersionNegotiation
}
length := protocol.ByteCount(1) // 1 byte for public flags
if h.hasPacketNumber(pers) {
if h.PacketNumberLen != protocol.PacketNumberLen1 && h.PacketNumberLen != protocol.PacketNumberLen2 && h.PacketNumberLen != protocol.PacketNumberLen4 && h.PacketNumberLen != protocol.PacketNumberLen6 {
return 0, errPacketNumberLenNotSet
}
length += protocol.ByteCount(h.PacketNumberLen)
}
if !h.TruncateConnectionID {
length += 8 // 8 bytes for the connection ID
}
// Version Number in packets sent by the client
if h.VersionFlag {
length += 4
}
length += protocol.ByteCount(len(h.DiversificationNonce))
return length, nil
}
// hasPacketNumber determines if this PublicHeader will contain a packet number
// this depends on the ResetFlag, the VersionFlag and who sent the packet
func (h *PublicHeader) hasPacketNumber(packetSentBy protocol.Perspective) bool {
if h.ResetFlag {
return false
}
if h.VersionFlag && packetSentBy == protocol.PerspectiveServer {
return false
}
return true
}
| 1 | 5,704 | It's the whole `PublicHeader` that will change soon, and we should state that here, not in the member functions. If Jana's proposal for a new header is accepted, we might also want to rename the fields here. For example, there won't be a dedicated version flag anymore, so a more appropriate name might be `ContainsVersion` instead of `VersionFlag`. | lucas-clemente-quic-go | go |
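To make the renaming idea in the review message concrete, here is a sketch of how the flag field might read if the suggestion were adopted; this is purely illustrative and not actual quic-go code:

```go
package quic

// publicHeaderSketch shows one possible shape of the header struct if the
// review suggestion is adopted. The new field name is hypothetical.
type publicHeaderSketch struct {
	Raw             []byte
	ContainsVersion bool // was: VersionFlag, renamed per the review suggestion
	ResetFlag       bool
}
```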
@@ -1003,8 +1003,8 @@ public final class CharSeq implements CharSequence, IndexedSeq<Character>, Seria
}
@Override
- public int indexOf(Character element, int from) {
- return back.indexOf(element, from);
+ public int indexOf(Object element, int from) {
+ return back.indexOf((Character) element, from);
}
     @Override
| 1 |
/* / \____  _    _  ____   ______  / \ ____  __    _ _____
* / / \/ \ / \/ \ / /\__\/ // \/ \ / / _ \ Javaslang
* _/ / /\ \ \/ / /\ \\__\\ \ // /\ \ /\\/ \__/ / Copyright 2014-now Daniel Dietrich
* /___/\_/ \_/\____/\_/ \_/\__\/__/___\_/ \_// \__/_____/ Licensed under the Apache License, Version 2.0
*/
package javaslang.collection;
import javaslang.Function1;
import javaslang.Tuple;
import javaslang.Tuple2;
import javaslang.Tuple3;
import javaslang.collection.CharSeqModule.Combinations;
import javaslang.control.Match;
import javaslang.control.Option;
import java.io.Serializable;
import java.io.UnsupportedEncodingException;
import java.nio.charset.Charset;
import java.util.*;
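// Note: the explicit import below is required even though java.util.* is already
// imported: a single-type import shadows the same-named type of the surrounding
// package, so the unqualified name HashSet refers to java.util.HashSet rather
// than javaslang.collection.HashSet.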
import java.util.HashSet;
import java.util.function.*;
import java.util.regex.PatternSyntaxException;
import java.util.stream.Collector;
/**
* The CharSeq (read: character sequence) collection essentially is a rich String wrapper having all operations
* we know from the functional Javaslang collections.
*
* @author Ruslan Sennov, Daniel Dietrich
* @since 2.0.0
*/
public final class CharSeq implements CharSequence, IndexedSeq<Character>, Serializable {
private static final long serialVersionUID = 1L;
private static final CharSeq EMPTY = new CharSeq("");
private final String back;
private CharSeq(String javaString) {
this.back = javaString;
}
public static CharSeq empty() {
return EMPTY;
}
/**
* Returns a {@link java.util.stream.Collector} which may be used in conjunction with
* {@link java.util.stream.Stream#collect(java.util.stream.Collector)} to obtain a {@link CharSeq}.
*
* @return A {@code CharSeq} Collector.
*/
public static Collector<Character, ArrayList<Character>, CharSeq> collector() {
final Supplier<ArrayList<Character>> supplier = ArrayList::new;
final BiConsumer<ArrayList<Character>, Character> accumulator = ArrayList::add;
final BinaryOperator<ArrayList<Character>> combiner = (left, right) -> {
left.addAll(right);
return left;
};
final Function<ArrayList<Character>, CharSeq> finisher = CharSeq::ofAll;
return Collector.of(supplier, accumulator, combiner, finisher);
}
/**
     * Creates a CharSeq of a {@code CharSequence}.
*
* @param sequence {@code CharSequence} instance.
     * @return A new {@code CharSeq}
*/
// DEV-NOTE: Needs to be 'of' instead of 'ofAll' because 'ofAll(CharSeq)' is ambiguous.
public static CharSeq of(CharSequence sequence) {
Objects.requireNonNull(sequence, "sequence is null");
if (sequence instanceof CharSeq) {
return (CharSeq) sequence;
} else {
return sequence.length() == 0 ? empty() : new CharSeq(sequence.toString());
}
}
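// parseCipher maps a cipher suite name from the config to its TLS constant.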
/**
* Returns a singleton {@code CharSeq}, i.e. a {@code CharSeq} of one character.
*
* @param character A character.
* @return A new {@code CharSeq} instance containing the given element
*/
public static CharSeq of(char character) {
return new CharSeq(new String(new char[] { character }));
}
/**
* Creates a String of the given characters.
*
* @param characters Zero or more characters.
* @return A string containing the given characters in the same order.
     * @throws NullPointerException if {@code characters} is null
*/
public static CharSeq of(char... characters) {
Objects.requireNonNull(characters, "characters is null");
if (characters.length == 0) {
return empty();
} else {
final char[] chrs = new char[characters.length];
System.arraycopy(characters, 0, chrs, 0, characters.length);
return new CharSeq(new String(chrs));
}
}
/**
* Creates a String of the given elements.
*
* The resulting string has the same iteration order as the given iterable of elements
* if the iteration order of the elements is stable.
*
* @param elements An Iterable of elements.
* @return A string containing the given elements in the same order.
* @throws NullPointerException if {@code elements} is null
*/
public static CharSeq ofAll(Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder();
for (Character character : elements) {
sb.append(character);
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
/**
* Returns a CharSeq containing {@code n} values of a given Function {@code f}
* over a range of integer values from 0 to {@code n - 1}.
*
* @param n The number of elements in the CharSeq
* @param f The Function computing element values
* @return A CharSeq consisting of elements {@code f(0),f(1), ..., f(n - 1)}
* @throws NullPointerException if {@code f} is null
*/
public static CharSeq tabulate(int n, Function<? super Integer, ? extends Character> f) {
Objects.requireNonNull(f, "f is null");
StringBuilder sb = new StringBuilder();
for (int i = 0; i < n; i++) {
sb.append(f.apply(i));
}
return of(sb);
}
/**
* Returns a CharSeq containing {@code n} values supplied by a given Supplier {@code s}.
*
* @param n The number of elements in the CharSeq
* @param s The Supplier computing element values
* @return A CharSeq of size {@code n}, where each element contains the result supplied by {@code s}.
* @throws NullPointerException if {@code s} is null
*/
public static CharSeq fill(int n, Supplier<? extends Character> s) {
return tabulate(n, anything -> s.get());
}
/**
* Creates a CharSeq starting from character {@code from}, extending to character {@code toExclusive - 1}.
* <p>
* Examples:
* <pre>
* <code>
* CharSeq.range('a', 'c') // = "ab"
* CharSeq.range('c', 'a') // = ""
* </code>
* </pre>
*
* @param from the first character
* @param toExclusive the successor of the last character
* @return a range of characters as specified or the empty range if {@code from >= toExclusive}
*/
public static CharSeq range(char from, char toExclusive) {
return new CharSeq(Iterator.range(from, toExclusive).mkString());
}
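    /**
     * Creates a CharSeq starting from character {@code from}, extending to character {@code toExclusive - 1},
     * with {@code step}.
     *
     * @param from        the first character
     * @param toExclusive the successor of the last character
     * @param step        the step
     * @return a range of characters as specified, or the empty range if the range cannot be traversed with the given step
     * @throws IllegalArgumentException if {@code step} is zero
     */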
public static CharSeq rangeBy(char from, char toExclusive, int step) {
return new CharSeq(Iterator.rangeBy(from, toExclusive, step).mkString());
}
/**
* Creates a CharSeq starting from character {@code from}, extending to character {@code toInclusive}.
* <p>
* Examples:
* <pre>
* <code>
* CharSeq.rangeClosed('a', 'c') // = "abc"
* CharSeq.rangeClosed('c', 'a') // = ""
* </code>
* </pre>
*
* @param from the first character
* @param toInclusive the last character
* @return a range of characters as specified or the empty range if {@code from > toInclusive}
*/
public static CharSeq rangeClosed(char from, char toInclusive) {
return new CharSeq(Iterator.rangeClosed(from, toInclusive).mkString());
}
/**
* Creates a CharSeq starting from character {@code from}, extending to character {@code toInclusive},
* with {@code step}.
* <p>
* Examples:
* <pre>
* <code>
     * CharSeq.rangeClosedBy('a', 'c', 1)  // = "abc"
     * CharSeq.rangeClosedBy('a', 'd', 2)  // = "ac"
     * CharSeq.rangeClosedBy('d', 'a', -2) // = "db"
     * CharSeq.rangeClosedBy('d', 'a', 2)  // = ""
* </code>
* </pre>
*
* @param from the first character
* @param toInclusive the last character
* @param step the step
* @return a range of characters as specified or the empty range if {@code step * (from - toInclusive) > 0}.
* @throws IllegalArgumentException if {@code step} is zero
*/
public static CharSeq rangeClosedBy(char from, char toInclusive, int step) {
return new CharSeq(Iterator.rangeClosedBy(from, toInclusive, step).mkString());
}
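    // Helper: splits this CharSeq into the prefix captured in sb (assumed to be
    // a prefix of this CharSeq) and the remaining suffix, reusing this or EMPTY
    // where possible.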
private Tuple2<CharSeq, CharSeq> splitByBuilder(StringBuilder sb) {
if (sb.length() == 0) {
return Tuple.of(EMPTY, this);
} else if (sb.length() == length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(sb.toString()), of(back.substring(sb.length())));
}
}
/**
* Repeats a character {@code times} times.
*
* @param character A character
* @param times Repetition count
* @return A CharSeq representing {@code character * times}
*/
public static CharSeq repeat(char character, int times) {
final int length = Math.max(times, 0);
final char[] characters = new char[length];
Arrays.fill(characters, character);
return new CharSeq(String.valueOf(characters));
}
/**
* Repeats this CharSeq {@code times} times.
* <p>
* Example: {@code CharSeq.of("ja").repeat(13) = "jajajajajajajajajajajajaja"}
*
* @param times Repetition count
* @return A CharSeq representing {@code this * times}
*/
public CharSeq repeat(int times) {
final StringBuilder builder = new StringBuilder();
for (int i = 0; i < times; i++) {
builder.append(back);
}
return new CharSeq(builder.toString());
}
//
//
// IndexedSeq
//
//
@Override
public CharSeq append(Character element) {
return of(back + element);
}
@Override
public CharSeq appendAll(Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder(back);
for (char element : elements) {
sb.append(element);
}
return of(sb.toString());
}
@Override
public CharSeq clear() {
return EMPTY;
}
@Override
public IndexedSeq<CharSeq> combinations() {
return Vector.rangeClosed(0, length()).map(this::combinations).flatMap(Function.identity());
}
@Override
public IndexedSeq<CharSeq> combinations(int k) {
return Combinations.apply(this, Math.max(k, 0));
}
@Override
public IndexedSeq<Tuple2<Character, Character>> crossProduct() {
return crossProduct(this);
}
@Override
public IndexedSeq<CharSeq> crossProduct(int power) {
return Collections.crossProduct(this, power).map(CharSeq::ofAll).toVector();
}
@Override
public <U> IndexedSeq<Tuple2<Character, U>> crossProduct(Iterable<? extends U> that) {
Objects.requireNonNull(that, "that is null");
final IndexedSeq<U> other = Vector.ofAll(that);
return flatMap(a -> other.map(b -> Tuple.of(a, b)));
}
@Override
public CharSeq distinct() {
return distinctBy(Function.identity());
}
@Override
public CharSeq distinctBy(Comparator<? super Character> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
final java.util.Set<Character> seen = new java.util.TreeSet<>(comparator);
return filter(seen::add);
}
@Override
public <U> CharSeq distinctBy(Function<? super Character, ? extends U> keyExtractor) {
Objects.requireNonNull(keyExtractor, "keyExtractor is null");
final java.util.Set<U> seen = new java.util.HashSet<>();
return filter(t -> seen.add(keyExtractor.apply(t)));
}
@Override
public CharSeq drop(long n) {
if (n <= 0) {
return this;
} else if (n >= length()) {
return EMPTY;
} else {
return of(back.substring((int) n));
}
}
@Override
public CharSeq dropRight(long n) {
if (n <= 0) {
return this;
} else if (n >= length()) {
return EMPTY;
} else {
return of(back.substring(0, length() - (int) n));
}
}
@Override
public CharSeq dropUntil(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return dropWhile(predicate.negate());
}
@Override
public CharSeq dropWhile(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
int index = 0;
while (index < length() && predicate.test(charAt(index))) {
index++;
}
return index < length() ? (index == 0 ? this : of(back.substring(index))) : empty();
}
@Override
public CharSeq filter(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < back.length(); i++) {
final char ch = back.charAt(i);
if (predicate.test(ch)) {
sb.append(ch);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public <U> IndexedSeq<U> flatMap(Function<? super Character, ? extends Iterable<? extends U>> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return Vector.empty();
} else {
IndexedSeq<U> result = Vector.empty();
for (int i = 0; i < length(); i++) {
for (U u : mapper.apply(get(i))) {
result = result.append(u);
}
}
return result;
}
}
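    /**
     * Maps each character of this CharSeq to a CharSequence and concatenates the results.
     *
     * @param mapper A mapper from char to CharSequence
     * @return a new CharSeq consisting of the concatenated mapper results
     */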
public CharSeq flatMapChars(CharFunction<? extends CharSequence> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return this;
} else {
final StringBuilder builder = new StringBuilder();
back.chars().forEach(c -> builder.append(mapper.apply((char) c)));
return new CharSeq(builder.toString());
}
}
@Override
public <C> Map<C, CharSeq> groupBy(Function<? super Character, ? extends C> classifier) {
Objects.requireNonNull(classifier, "classifier is null");
return iterator().groupBy(classifier).map((c, it) -> Tuple.of(c, CharSeq.ofAll(it)));
}
@Override
public Iterator<CharSeq> grouped(long size) {
return sliding(size, size);
}
@Override
public boolean hasDefiniteSize() {
return true;
}
@Override
public CharSeq init() {
if (isEmpty()) {
throw new UnsupportedOperationException("init of empty string");
} else {
return of(back.substring(0, length() - 1));
}
}
@Override
public Option<CharSeq> initOption() {
if (isEmpty()) {
return Option.none();
} else {
return Option.some(init());
}
}
@Override
public CharSeq insert(int index, Character element) {
if (index < 0) {
throw new IndexOutOfBoundsException("insert(" + index + ", e)");
}
if (index > length()) {
throw new IndexOutOfBoundsException("insert(" + index + ", e) on String of length " + length());
}
return of(new StringBuilder(back).insert(index, element).toString());
}
@Override
public CharSeq insertAll(int index, Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
if (index < 0) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements)");
}
if (index > length()) {
throw new IndexOutOfBoundsException("insertAll(" + index + ", elements) on String of length " + length());
}
final String javaString = back;
final StringBuilder sb = new StringBuilder(javaString.substring(0, index));
for (Character element : elements) {
sb.append(element);
}
sb.append(javaString.substring(index));
return of(sb.toString());
}
@Override
public Iterator<Character> iterator() {
return new AbstractIterator<Character>() {
private int index = 0;
@Override
public boolean hasNext() {
return index < back.length();
}
@Override
public Character getNext() {
return back.charAt(index++);
}
};
}
@Override
public CharSeq intersperse(Character element) {
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
if (i > 0) {
sb.append(element);
}
sb.append(get(i));
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public <U> IndexedSeq<U> map(Function<? super Character, ? extends U> mapper) {
Objects.requireNonNull(mapper, "mapper is null");
IndexedSeq<U> result = Vector.empty();
for (int i = 0; i < length(); i++) {
result = result.append(mapper.apply(get(i)));
}
return result;
}
@Override
public CharSeq padTo(int length, Character element) {
if (length <= back.length()) {
return this;
}
final StringBuilder sb = new StringBuilder(back);
final int limit = length - back.length();
for (int i = 0; i < limit; i++) {
sb.append(element);
}
return new CharSeq(sb.toString());
}
@Override
public CharSeq patch(int from, Iterable<? extends Character> that, int replaced) {
from = from < 0 ? 0 : from > length() ? length() : from;
replaced = replaced < 0 ? 0 : replaced;
final StringBuilder sb = new StringBuilder(back.substring(0, from));
for (Character character : that) {
sb.append(character);
}
from += replaced;
if (from < length()) {
sb.append(back.substring(from));
}
return sb.length() == 0 ? EMPTY : new CharSeq(sb.toString());
}
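    /**
     * Maps each character of this CharSeq using the given char-to-char operator.
     *
     * @param mapper A unary operator on chars
     * @return a new CharSeq where each character is the result of applying {@code mapper}
     */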
public CharSeq mapChars(CharUnaryOperator mapper) {
Objects.requireNonNull(mapper, "mapper is null");
if (isEmpty()) {
return this;
} else {
final char[] chars = back.toCharArray();
for (int i = 0; i < chars.length; i++) {
chars[i] = mapper.apply(chars[i]);
}
return CharSeq.of(chars);
}
}
@Override
public Match.MatchValue.Of<CharSeq> match() {
return Match.of(this);
}
@Override
public Tuple2<CharSeq, CharSeq> partition(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
final StringBuilder right = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
(predicate.test(t) ? left : right).append(t);
}
if (left.length() == 0) {
return Tuple.of(EMPTY, of(right.toString()));
} else if (right.length() == 0) {
return Tuple.of(of(left.toString()), EMPTY);
} else {
return Tuple.of(of(left.toString()), of(right.toString()));
}
}
@Override
public CharSeq peek(Consumer<? super Character> action) {
Objects.requireNonNull(action, "action is null");
if (!isEmpty()) {
action.accept(back.charAt(0));
}
return this;
}
@Override
public IndexedSeq<CharSeq> permutations() {
if (isEmpty()) {
return Vector.empty();
} else {
if (length() == 1) {
return Vector.of(this);
} else {
IndexedSeq<CharSeq> result = Vector.empty();
for (Character t : distinct()) {
for (CharSeq ts : remove(t).permutations()) {
result = result.append(CharSeq.of(t).appendAll(ts));
}
}
return result;
}
}
}
@Override
public CharSeq prepend(Character element) {
return of(element + back);
}
@Override
public CharSeq prependAll(Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final StringBuilder sb = new StringBuilder();
for (Character element : elements) {
sb.append(element);
}
sb.append(back);
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public CharSeq remove(Character element) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
char c = get(i);
if (!found && c == element) {
found = true;
} else {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
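/**
 * Removes the first character matching {@code predicate}; later matches are kept.
 * Illustrative example (values assumed for this sketch):
 * <blockquote><pre>
 * CharSeq.of("aXbXc").removeFirst(c -> c == 'X')   // "abXc"
 * </pre></blockquote>
 */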
@Override
public CharSeq removeFirst(Predicate<Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < back.length(); i++) {
final char ch = back.charAt(i);
if (predicate.test(ch)) {
if (found) {
sb.append(ch);
}
found = true;
} else {
sb.append(ch);
}
}
return found ? (sb.length() == 0 ? EMPTY : of(sb.toString())) : this;
}
@Override
public CharSeq removeLast(Predicate<Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
for (int i = length() - 1; i >= 0; i--) {
if (predicate.test(back.charAt(i))) {
return removeAt(i);
}
}
return this;
}
@Override
public CharSeq removeAt(int index) {
final String removed = back.substring(0, index) + back.substring(index + 1);
return removed.isEmpty() ? EMPTY : of(removed);
}
@Override
public CharSeq removeAll(Character element) {
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c != element) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public CharSeq removeAll(Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final java.util.Set<Character> distinct = new HashSet<>();
for (Character element : elements) {
distinct.add(element);
}
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (!distinct.contains(c)) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : sb.length() == length() ? this : of(sb.toString());
}
@Override
public CharSeq replace(Character currentElement, Character newElement) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c == currentElement && !found) {
sb.append(newElement);
found = true;
} else {
sb.append(c);
}
}
return found ? of(sb.toString()) : this;
}
@Override
public CharSeq replaceAll(Character currentElement, Character newElement) {
final StringBuilder sb = new StringBuilder();
boolean found = false;
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (c == currentElement) {
sb.append(newElement);
found = true;
} else {
sb.append(c);
}
}
return found ? of(sb.toString()) : this;
}
@Override
public CharSeq retainAll(Iterable<? extends Character> elements) {
Objects.requireNonNull(elements, "elements is null");
final java.util.Set<Character> kept = new HashSet<>();
for (Character element : elements) {
kept.add(element);
}
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (kept.contains(c)) {
sb.append(c);
}
}
return sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public CharSeq reverse() {
return of(new StringBuilder(back).reverse().toString());
}
@Override
public IndexedSeq<Character> scan(Character zero, BiFunction<? super Character, ? super Character, ? extends Character> operation) {
return scanLeft(zero, operation);
}
@Override
public <U> IndexedSeq<U> scanLeft(U zero, BiFunction<? super U, ? super Character, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
return Collections.scanLeft(this, zero, operation, Vector.empty(), Vector::append, Function.identity());
}
@Override
public <U> IndexedSeq<U> scanRight(U zero, BiFunction<? super Character, ? super U, ? extends U> operation) {
Objects.requireNonNull(operation, "operation is null");
return Collections.scanRight(this, zero, operation, Vector.empty(), Vector::prepend, Function.identity());
}
@Override
public CharSeq slice(long beginIndex, long endIndex) {
final long from = beginIndex < 0 ? 0 : beginIndex;
final long to = endIndex > length() ? length() : endIndex;
if (from >= to) {
return EMPTY;
}
if (from <= 0 && to >= length()) {
return this;
}
return CharSeq.of(back.substring((int) from, (int) to));
}
@Override
public Iterator<CharSeq> sliding(long size) {
return sliding(size, 1);
}
@Override
public Iterator<CharSeq> sliding(long size, long step) {
return iterator().sliding(size, step).map(CharSeq::ofAll);
}
@Override
public CharSeq sorted() {
return isEmpty() ? this : toJavaStream().sorted().collect(CharSeq.collector());
}
@Override
public CharSeq sorted(Comparator<? super Character> comparator) {
Objects.requireNonNull(comparator, "comparator is null");
return isEmpty() ? this : toJavaStream().sorted(comparator).collect(CharSeq.collector());
}
@Override
public <U extends Comparable<? super U>> CharSeq sortBy(Function<? super Character, ? extends U> mapper) {
return sortBy(U::compareTo, mapper);
}
@Override
public <U> CharSeq sortBy(Comparator<? super U> comparator, Function<? super Character, ? extends U> mapper) {
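// Memoize the key extractor so that 'mapper' is applied at most once per distinct
// character, even though the sort may compare the same element several times.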
final Function<? super Character, ? extends U> domain = Function1.of(mapper::apply).memoized();
return toJavaStream()
.sorted((e1, e2) -> comparator.compare(domain.apply(e1), domain.apply(e2)))
.collect(collector());
}
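/**
 * Splits this sequence at the first character that does not satisfy {@code predicate},
 * returning the matching prefix and the remainder. Illustrative example (values assumed):
 * <blockquote><pre>
 * CharSeq.of("aaabbb").span(c -> c == 'a')   // (CharSeq("aaa"), CharSeq("bbb"))
 * </pre></blockquote>
 */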
@Override
public Tuple2<CharSeq, CharSeq> span(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
final char c = back.charAt(i);
if (predicate.test(c)) {
sb.append(c);
} else {
break;
}
}
return splitByBuilder(sb);
}
@Override
public Spliterator<Character> spliterator() {
return Spliterators.spliterator(iterator(), length(), Spliterator.ORDERED | Spliterator.IMMUTABLE);
}
@Override
public CharSeq subSequence(int beginIndex) {
if (beginIndex < 0 || beginIndex > length()) {
throw new IndexOutOfBoundsException("begin index " + beginIndex + " out of bounds for length " + length());
}
if (beginIndex == 0) {
return this;
} else if (beginIndex == length()) {
return EMPTY;
} else {
return CharSeq.of(back.substring(beginIndex));
}
}
@Override
public CharSeq tail() {
if (isEmpty()) {
throw new UnsupportedOperationException("tail of empty string");
} else {
return CharSeq.of(back.substring(1));
}
}
@Override
public Option<CharSeq> tailOption() {
if (isEmpty()) {
return Option.none();
} else {
return Option.some(CharSeq.of(back.substring(1)));
}
}
@Override
public CharSeq take(long n) {
if (n <= 0) {
return EMPTY;
} else if (n >= length()) {
return this;
} else {
return CharSeq.of(back.substring(0, (int) n));
}
}
@Override
public CharSeq takeRight(long n) {
if (n <= 0) {
return EMPTY;
} else if (n >= length()) {
return this;
} else {
return CharSeq.of(back.substring(length() - (int) n));
}
}
@Override
public CharSeq takeUntil(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
return takeWhile(predicate.negate());
}
@Override
public CharSeq takeWhile(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
final StringBuilder sb = new StringBuilder();
for (int i = 0; i < length(); i++) {
char c = back.charAt(i);
if (!predicate.test(c)) {
break;
}
sb.append(c);
}
return sb.length() == length() ? this : sb.length() == 0 ? EMPTY : of(sb.toString());
}
@Override
public <U> IndexedSeq<U> unit(Iterable<? extends U> iterable) {
return Vector.ofAll(iterable);
}
@Override
public <T1, T2> Tuple2<IndexedSeq<T1>, IndexedSeq<T2>> unzip(
Function<? super Character, Tuple2<? extends T1, ? extends T2>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
IndexedSeq<T1> xs = Vector.empty();
IndexedSeq<T2> ys = Vector.empty();
for (int i = 0; i < length(); i++) {
final Tuple2<? extends T1, ? extends T2> t = unzipper.apply(back.charAt(i));
xs = xs.append(t._1);
ys = ys.append(t._2);
}
return Tuple.of(xs, ys);
}
@Override
public <T1, T2, T3> Tuple3<IndexedSeq<T1>, IndexedSeq<T2>, IndexedSeq<T3>> unzip3(
Function<? super Character, Tuple3<? extends T1, ? extends T2, ? extends T3>> unzipper) {
Objects.requireNonNull(unzipper, "unzipper is null");
IndexedSeq<T1> xs = Vector.empty();
IndexedSeq<T2> ys = Vector.empty();
IndexedSeq<T3> zs = Vector.empty();
for (int i = 0; i < length(); i++) {
final Tuple3<? extends T1, ? extends T2, ? extends T3> t = unzipper.apply(back.charAt(i));
xs = xs.append(t._1);
ys = ys.append(t._2);
zs = zs.append(t._3);
}
return Tuple.of(xs, ys, zs);
}
@Override
public CharSeq update(int index, Character element) {
if (index < 0) {
throw new IndexOutOfBoundsException("update(" + index + ")");
}
if (index >= length()) {
throw new IndexOutOfBoundsException("update(" + index + ")");
}
return of(back.substring(0, index) + element + back.substring(index + 1));
}
@Override
public <U> IndexedSeq<Tuple2<Character, U>> zip(Iterable<U> that) {
Objects.requireNonNull(that, "that is null");
IndexedSeq<Tuple2<Character, U>> result = Vector.empty();
Iterator<Character> list1 = iterator();
java.util.Iterator<U> list2 = that.iterator();
while (list1.hasNext() && list2.hasNext()) {
result = result.append(Tuple.of(list1.next(), list2.next()));
}
return result;
}
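/**
 * Zips this sequence with {@code that}, padding the shorter side with {@code thisElem}
 * or {@code thatElem}. Illustrative example (values assumed for this sketch):
 * <blockquote><pre>
 * CharSeq.of("ab").zipAll(Arrays.asList(1, 2, 3), 'z', 0)
 * // Vector((a, 1), (b, 2), (z, 3))
 * </pre></blockquote>
 */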
@Override
public <U> IndexedSeq<Tuple2<Character, U>> zipAll(Iterable<U> that, Character thisElem, U thatElem) {
Objects.requireNonNull(that, "that is null");
IndexedSeq<Tuple2<Character, U>> result = Vector.empty();
Iterator<Character> list1 = iterator();
java.util.Iterator<U> list2 = that.iterator();
while (list1.hasNext() || list2.hasNext()) {
final Character elem1 = list1.hasNext() ? list1.next() : thisElem;
final U elem2 = list2.hasNext() ? list2.next() : thatElem;
result = result.append(Tuple.of(elem1, elem2));
}
return result;
}
@Override
public IndexedSeq<Tuple2<Character, Long>> zipWithIndex() {
IndexedSeq<Tuple2<Character, Long>> result = Vector.empty();
for (int i = 0; i < length(); i++) {
result = result.append(Tuple.of(get(i), (long) i));
}
return result;
}
@Override
public Character get(int index) {
return back.charAt(index);
}
@Override
public int indexOf(Character element, int from) {
return back.indexOf(element, from);
}
@Override
public int lastIndexOf(Character element, int end) {
return back.lastIndexOf(element, end);
}
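/**
 * Splits this sequence at position {@code n}, clamping {@code n} to the valid range.
 * Illustrative example (values assumed):
 * <blockquote><pre>
 * CharSeq.of("hello").splitAt(2)   // (CharSeq("he"), CharSeq("llo"))
 * </pre></blockquote>
 */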
@Override
public Tuple2<CharSeq, CharSeq> splitAt(long n) {
if (n <= 0) {
return Tuple.of(EMPTY, this);
} else if (n >= length()) {
return Tuple.of(this, EMPTY);
} else {
return Tuple.of(of(back.substring(0, (int) n)), of(back.substring((int) n)));
}
}
@Override
public Tuple2<CharSeq, CharSeq> splitAt(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
if (!predicate.test(t)) {
left.append(t);
} else {
break;
}
}
return splitByBuilder(left);
}
@Override
public Tuple2<CharSeq, CharSeq> splitAtInclusive(Predicate<? super Character> predicate) {
Objects.requireNonNull(predicate, "predicate is null");
if (isEmpty()) {
return Tuple.of(EMPTY, EMPTY);
}
final StringBuilder left = new StringBuilder();
for (int i = 0; i < length(); i++) {
Character t = get(i);
left.append(t);
if (predicate.test(t)) {
break;
}
}
return splitByBuilder(left);
}
@Override
public boolean startsWith(Iterable<? extends Character> that, int offset) {
return startsWith(CharSeq.ofAll(that), offset);
}
@Override
public Character head() {
if (isEmpty()) {
throw new NoSuchElementException("head of empty string");
} else {
return back.charAt(0);
}
}
@Override
public Option<Character> headOption() {
if (isEmpty()) {
return Option.none();
} else {
return Option.some(back.charAt(0));
}
}
@Override
public boolean isEmpty() {
return back.isEmpty();
}
@Override
public boolean isTraversableAgain() {
return true;
}
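// Standard readResolve idiom: preserves the EMPTY singleton across Java deserialization,
// so that deserialized empty instances remain reference-equal to EMPTY.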
private Object readResolve() {
return isEmpty() ? EMPTY : this;
}
@Override
public boolean equals(Object o) {
if (o == this) {
return true;
} else if (o instanceof CharSeq) {
return ((CharSeq) o).back.equals(back);
} else {
return false;
}
}
@Override
public int hashCode() {
return back.hashCode();
}
//
//
// java.lang.CharSequence
//
//
/**
* Returns the {@code char} value at the
* specified index. An index ranges from {@code 0} to
* {@code length() - 1}. The first {@code char} value of the sequence
* is at index {@code 0}, the next at index {@code 1},
* and so on, as for array indexing.
*
* <p>If the {@code char} value specified by the index is a
* <a href="Character.html#unicode">surrogate</a>, the surrogate
* value is returned.
*
* @param index the index of the {@code char} value.
* @return the {@code char} value at the specified index of this string.
* The first {@code char} value is at index {@code 0}.
* @throws IndexOutOfBoundsException if the {@code index}
* argument is negative or not less than the length of this
* string.
*/
@Override
public char charAt(int index) {
return back.charAt(index);
}
/**
* Returns the length of this string.
* The length is equal to the number of <a href="Character.html#unicode">Unicode
* code units</a> in the string.
*
* @return the length of the sequence of characters represented by this
* object.
*/
@Override
public int length() {
return back.length();
}
//
//
// String
//
//
/**
* Returns the character (Unicode code point) at the specified
* index. The index refers to {@code char} values
* (Unicode code units) and ranges from {@code 0} to
* {@link #length()}{@code - 1}.
*
* <p> If the {@code char} value specified at the given index
* is in the high-surrogate range, the following index is less
* than the length of this {@code CharSeq}, and the
* {@code char} value at the following index is in the
* low-surrogate range, then the supplementary code point
* corresponding to this surrogate pair is returned. Otherwise,
* the {@code char} value at the given index is returned.
*
* @param index the index to the {@code char} values
* @return the code point value of the character at the
* {@code index}
* @throws IndexOutOfBoundsException if the {@code index}
* argument is negative or not less than the length of this
* string.
*/
public int codePointAt(int index) {
return back.codePointAt(index);
}
/**
* Returns the character (Unicode code point) before the specified
* index. The index refers to {@code char} values
* (Unicode code units) and ranges from {@code 1} to {@link
* CharSequence#length() length}.
*
* <p> If the {@code char} value at {@code (index - 1)}
* is in the low-surrogate range, {@code (index - 2)} is not
* negative, and the {@code char} value at {@code (index -
* 2)} is in the high-surrogate range, then the
* supplementary code point value of the surrogate pair is
* returned. If the {@code char} value at {@code index -
* 1} is an unpaired low-surrogate or a high-surrogate, the
* surrogate value is returned.
*
* @param index the index following the code point that should be returned
* @return the Unicode code point value before the given index.
* @throws IndexOutOfBoundsException if the {@code index}
* argument is less than 1 or greater than the length
* of this string.
*/
public int codePointBefore(int index) {
return back.codePointBefore(index);
}
/**
* Returns the number of Unicode code points in the specified text
* range of this {@code CharSeq}. The text range begins at the
* specified {@code beginIndex} and extends to the
* {@code char} at index {@code endIndex - 1}. Thus the
* length (in {@code char}s) of the text range is
* {@code endIndex-beginIndex}. Unpaired surrogates within
* the text range count as one code point each.
*
* @param beginIndex the index to the first {@code char} of
* the text range.
* @param endIndex the index after the last {@code char} of
* the text range.
* @return the number of Unicode code points in the specified text
* range
* @throws IndexOutOfBoundsException if the
* {@code beginIndex} is negative, or {@code endIndex}
* is larger than the length of this {@code CharSeq}, or
* {@code beginIndex} is larger than {@code endIndex}.
*/
public int codePointCount(int beginIndex, int endIndex) {
return back.codePointCount(beginIndex, endIndex);
}
/**
* Returns the index within this {@code CharSeq} that is
* offset from the given {@code index} by
* {@code codePointOffset} code points. Unpaired surrogates
* within the text range given by {@code index} and
* {@code codePointOffset} count as one code point each.
*
* @param index the index to be offset
* @param codePointOffset the offset in code points
* @return the index within this {@code CharSeq}
* @throws IndexOutOfBoundsException if {@code index}
* is negative or larger then the length of this
* {@code CharSeq}, or if {@code codePointOffset} is positive
* and the substring starting with {@code index} has fewer
* than {@code codePointOffset} code points,
* or if {@code codePointOffset} is negative and the substring
* before {@code index} has fewer than the absolute value
* of {@code codePointOffset} code points.
*/
public int offsetByCodePoints(int index, int codePointOffset) {
return back.offsetByCodePoints(index, codePointOffset);
}
/**
* Copies characters from this string into the destination character
* array.
* <p>
* The first character to be copied is at index {@code srcBegin};
* the last character to be copied is at index {@code srcEnd-1}
* (thus the total number of characters to be copied is
* {@code srcEnd-srcBegin}). The characters are copied into the
* subarray of {@code dst} starting at index {@code dstBegin}
* and ending at index:
* <blockquote><pre>
* dstBegin + (srcEnd-srcBegin) - 1
* </pre></blockquote>
*
* @param srcBegin index of the first character in the string
* to copy.
* @param srcEnd index after the last character in the string
* to copy.
* @param dst the destination array.
* @param dstBegin the start offset in the destination array.
* @throws IndexOutOfBoundsException If any of the following
* is true:
* <ul><li>{@code srcBegin} is negative.
* <li>{@code srcBegin} is greater than {@code srcEnd}
* <li>{@code srcEnd} is greater than the length of this
* string
* <li>{@code dstBegin} is negative
* <li>{@code dstBegin+(srcEnd-srcBegin)} is larger than
* {@code dst.length}</ul>
*/
public void getChars(int srcBegin, int srcEnd, char[] dst, int dstBegin) {
back.getChars(srcBegin, srcEnd, dst, dstBegin);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the named
* charset, storing the result into a new byte array.
*
* <p> The behavior of this method when this string cannot be encoded in
* the given charset is unspecified. The {@link
* java.nio.charset.CharsetEncoder} class should be used when more control
* over the encoding process is required.
*
* @param charsetName The name of a supported {@linkplain java.nio.charset.Charset
* charset}
* @return The resultant byte array
* @throws UnsupportedEncodingException If the named charset is not supported
*/
public byte[] getBytes(String charsetName) throws UnsupportedEncodingException {
return back.getBytes(charsetName);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the given
* {@linkplain java.nio.charset.Charset charset}, storing the result into a
* new byte array.
*
* <p> This method always replaces malformed-input and unmappable-character
* sequences with this charset's default replacement byte array. The
* {@link java.nio.charset.CharsetEncoder} class should be used when more
* control over the encoding process is required.
*
* @param charset The {@linkplain java.nio.charset.Charset} to be used to encode
* the {@code CharSeq}
* @return The resultant byte array
*/
public byte[] getBytes(Charset charset) {
return back.getBytes(charset);
}
/**
* Encodes this {@code CharSeq} into a sequence of bytes using the
* platform's default charset, storing the result into a new byte array.
*
* <p> The behavior of this method when this string cannot be encoded in
* the default charset is unspecified. The {@link
* java.nio.charset.CharsetEncoder} class should be used when more control
* over the encoding process is required.
*
* @return The resultant byte array
*/
public byte[] getBytes() {
return back.getBytes();
}
/**
* Compares this string to the specified {@code StringBuffer}. The result
* is {@code true} if and only if this {@code CharSeq} represents the same
* sequence of characters as the specified {@code StringBuffer}. This method
* synchronizes on the {@code StringBuffer}.
*
* @param sb The {@code StringBuffer} to compare this {@code CharSeq} against
* @return {@code true} if this {@code CharSeq} represents the same
* sequence of characters as the specified {@code StringBuffer},
* {@code false} otherwise
*/
public boolean contentEquals(StringBuffer sb) {
return back.contentEquals(sb);
}
/**
* Compares this string to the specified {@code CharSequence}. The
* result is {@code true} if and only if this {@code CharSeq} represents the
* same sequence of char values as the specified sequence. Note that if the
* {@code CharSequence} is a {@code StringBuffer} then the method
* synchronizes on it.
*
* @param cs The sequence to compare this {@code CharSeq} against
* @return {@code true} if this {@code CharSeq} represents the same
* sequence of char values as the specified sequence, {@code
* false} otherwise
*/
public boolean contentEquals(CharSequence cs) {
return back.contentEquals(cs);
}
/**
* Compares this {@code CharSeq} to another {@code CharSeq}, ignoring case
* considerations. Two strings are considered equal ignoring case if they
* are of the same length and corresponding characters in the two strings
* are equal ignoring case.
*
* <p> Two characters {@code c1} and {@code c2} are considered the same
* ignoring case if at least one of the following is true:
* <ul>
* <li> The two characters are the same (as compared by the
* {@code ==} operator)
* <li> Applying the method {@link
* java.lang.Character#toUpperCase(char)} to each character
* produces the same result
* <li> Applying the method {@link
* java.lang.Character#toLowerCase(char)} to each character
* produces the same result
* </ul>
*
* @param anotherString The {@code CharSeq} to compare this {@code CharSeq} against
* @return {@code true} if the argument is not {@code null} and it
* represents an equivalent {@code CharSeq} ignoring case; {@code
* false} otherwise
* @see #equals(Object)
*/
public boolean equalsIgnoreCase(CharSeq anotherString) {
return back.equalsIgnoreCase(anotherString.back);
}
/**
* Compares two strings lexicographically.
* The comparison is based on the Unicode value of each character in
* the strings. The character sequence represented by this
* {@code CharSeq} object is compared lexicographically to the
* character sequence represented by the argument string. The result is
* a negative integer if this {@code CharSeq} object
* lexicographically precedes the argument string. The result is a
* positive integer if this {@code CharSeq} object lexicographically
* follows the argument string. The result is zero if the strings
* are equal; {@code compareTo} returns {@code 0} exactly when
* the {@link #equals(Object)} method would return {@code true}.
* <p>
* This is the definition of lexicographic ordering. If two strings are
* different, then either they have different characters at some index
* that is a valid index for both strings, or their lengths are different,
* or both. If they have different characters at one or more index
* positions, let <i>k</i> be the smallest such index; then the string
* whose character at position <i>k</i> has the smaller value, as
* determined by using the &lt; operator, lexicographically precedes the
* other string. In this case, {@code compareTo} returns the
* difference of the two character values at position {@code k} in
* the two string -- that is, the value:
* <blockquote><pre>
* this.charAt(k)-anotherString.charAt(k)
* </pre></blockquote>
* If there is no index position at which they differ, then the shorter
* string lexicographically precedes the longer string. In this case,
* {@code compareTo} returns the difference of the lengths of the
* strings -- that is, the value:
* <blockquote><pre>
* this.length()-anotherString.length()
* </pre></blockquote>
*
* @param anotherString the {@code CharSeq} to be compared.
* @return the value {@code 0} if the argument string is equal to
* this string; a value less than {@code 0} if this string
* is lexicographically less than the string argument; and a
* value greater than {@code 0} if this string is
* lexicographically greater than the string argument.
*/
public int compareTo(CharSeq anotherString) {
return back.compareTo(anotherString.back);
}
/**
* Compares two strings lexicographically, ignoring case
* differences. This method returns an integer whose sign is that of
* calling {@code compareTo} with normalized versions of the strings
* where case differences have been eliminated by calling
* {@code Character.toLowerCase(Character.toUpperCase(character))} on
* each character.
* <p>
* Note that this method does <em>not</em> take locale into account,
* and will result in an unsatisfactory ordering for certain locales.
* The java.text package provides <em>collators</em> to allow
* locale-sensitive ordering.
*
* @param str the {@code CharSeq} to be compared.
* @return a negative integer, zero, or a positive integer as the
* specified String is greater than, equal to, or less
* than this String, ignoring case considerations.
*/
public int compareToIgnoreCase(CharSeq str) {
return back.compareToIgnoreCase(str.back);
}
/**
* Tests if two string regions are equal.
* <p>
* A substring of this {@code CharSeq} object is compared to a substring
* of the argument other. The result is true if these substrings
* represent identical character sequences. The substring of this
* {@code CharSeq} object to be compared begins at index {@code toffset}
* and has length {@code len}. The substring of other to be compared
* begins at index {@code ooffset} and has length {@code len}. The
* result is {@code false} if and only if at least one of the following
* is true:
* <ul><li>{@code toffset} is negative.
* <li>{@code ooffset} is negative.
* <li>{@code toffset+len} is greater than the length of this
* {@code CharSeq} object.
* <li>{@code ooffset+len} is greater than the length of the other
* argument.
* <li>There is some nonnegative integer <i>k</i> less than {@code len}
* such that:
* {@code this.charAt(toffset + }<i>k</i>{@code ) != other.charAt(ooffset + }
* <i>k</i>{@code )}
* </ul>
*
* @param toffset the starting offset of the subregion in this string.
* @param other the string argument.
* @param ooffset the starting offset of the subregion in the string
* argument.
* @param len the number of characters to compare.
* @return {@code true} if the specified subregion of this string
* exactly matches the specified subregion of the string argument;
* {@code false} otherwise.
*/
public boolean regionMatches(int toffset, CharSeq other, int ooffset, int len) {
return back.regionMatches(toffset, other.back, ooffset, len);
}
/**
* Tests if two string regions are equal.
* <p>
* A substring of this {@code CharSeq} object is compared to a substring
* of the argument {@code other}. The result is {@code true} if these
* substrings represent character sequences that are the same, ignoring
* case if and only if {@code ignoreCase} is true. The substring of
* this {@code CharSeq} object to be compared begins at index
* {@code toffset} and has length {@code len}. The substring of
* {@code other} to be compared begins at index {@code ooffset} and
* has length {@code len}. The result is {@code false} if and only if
* at least one of the following is true:
* <ul><li>{@code toffset} is negative.
* <li>{@code ooffset} is negative.
* <li>{@code toffset+len} is greater than the length of this
* {@code CharSeq} object.
* <li>{@code ooffset+len} is greater than the length of the other
* argument.
* <li>{@code ignoreCase} is {@code false} and there is some nonnegative
* integer <i>k</i> less than {@code len} such that:
* <blockquote><pre>
* this.charAt(toffset+k) != other.charAt(ooffset+k)
* </pre></blockquote>
* <li>{@code ignoreCase} is {@code true} and there is some nonnegative
* integer <i>k</i> less than {@code len} such that:
* <blockquote><pre>
* Character.toLowerCase(this.charAt(toffset+k)) !=
* Character.toLowerCase(other.charAt(ooffset+k))
* </pre></blockquote>
* and:
* <blockquote><pre>
* Character.toUpperCase(this.charAt(toffset+k)) !=
* Character.toUpperCase(other.charAt(ooffset+k))
* </pre></blockquote>
* </ul>
*
* @param ignoreCase if {@code true}, ignore case when comparing
* characters.
* @param toffset the starting offset of the subregion in this
* string.
* @param other the string argument.
* @param ooffset the starting offset of the subregion in the string
* argument.
* @param len the number of characters to compare.
* @return {@code true} if the specified subregion of this string
* matches the specified subregion of the string argument;
* {@code false} otherwise. Whether the matching is exact
* or case insensitive depends on the {@code ignoreCase}
* argument.
*/
public boolean regionMatches(boolean ignoreCase, int toffset, CharSeq other, int ooffset, int len) {
return back.regionMatches(ignoreCase, toffset, other.back, ooffset, len);
}
@Override
public CharSeq subSequence(int beginIndex, int endIndex) {
if (beginIndex < 0) {
throw new IndexOutOfBoundsException("begin index " + beginIndex + " < 0");
}
if (endIndex > length()) {
throw new IndexOutOfBoundsException("endIndex " + endIndex + " > length " + length());
}
int subLen = endIndex - beginIndex;
if (subLen < 0) {
throw new IndexOutOfBoundsException("beginIndex " + beginIndex + " > endIndex " + endIndex);
}
if (beginIndex == 0 && endIndex == length()) {
return this;
} else {
return CharSeq.of(back.subSequence(beginIndex, endIndex));
}
}
/**
* Tests if the substring of this string beginning at the
* specified index starts with the specified prefix.
*
* @param prefix the prefix.
* @param toffset where to begin looking in this string.
* @return {@code true} if the character sequence represented by the
* argument is a prefix of the substring of this object starting
* at index {@code toffset}; {@code false} otherwise.
* The result is {@code false} if {@code toffset} is
* negative or greater than the length of this
* {@code CharSeq} object; otherwise the result is the same
* as the result of the expression
* <pre>
* this.substring(toffset).startsWith(prefix)
* </pre>
*/
public boolean startsWith(CharSeq prefix, int toffset) {
return back.startsWith(prefix.back, toffset);
}
/**
* Tests if this string starts with the specified prefix.
*
* @param prefix the prefix.
* @return {@code true} if the character sequence represented by the
* argument is a prefix of the character sequence represented by
* this string; {@code false} otherwise.
* Note also that {@code true} will be returned if the
* argument is an empty string or is equal to this
* {@code CharSeq} object as determined by the
* {@link #equals(Object)} method.
*/
public boolean startsWith(CharSeq prefix) {
return back.startsWith(prefix.back);
}
/**
* Tests if this string ends with the specified suffix.
*
* @param suffix the suffix.
* @return {@code true} if the character sequence represented by the
* argument is a suffix of the character sequence represented by
* this object; {@code false} otherwise. Note that the
* result will be {@code true} if the argument is the
* empty string or is equal to this {@code CharSeq} object
* as determined by the {@link #equals(Object)} method.
*/
public boolean endsWith(CharSeq suffix) {
return back.endsWith(suffix.back);
}
/**
* Returns the index within this string of the first occurrence of
* the specified character. If a character with value
* {@code ch} occurs in the character sequence represented by
* this {@code CharSeq} object, then the index (in Unicode
* code units) of the first such occurrence is returned. For
* values of {@code ch} in the range from 0 to 0xFFFF
* (inclusive), this is the smallest value <i>k</i> such that:
* <blockquote><pre>
* this.charAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* smallest value <i>k</i> such that:
* <blockquote><pre>
* this.codePointAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string, then {@code -1} is returned.
*
* @param ch a character (Unicode code point).
* @return the index of the first occurrence of the character in the
* character sequence represented by this object, or
* {@code -1} if the character does not occur.
*/
public int indexOf(int ch) {
return back.indexOf(ch);
}
/**
* Returns the index within this string of the first occurrence of the
* specified character, starting the search at the specified index.
* <p>
* If a character with value {@code ch} occurs in the
* character sequence represented by this {@code CharSeq}
* object at an index no smaller than {@code fromIndex}, then
* the index of the first such occurrence is returned. For values
* of {@code ch} in the range from 0 to 0xFFFF (inclusive),
* this is the smallest value <i>k</i> such that:
* <blockquote><pre>
* (this.charAt(<i>k</i>) == ch) {@code &&} (<i>k</i> >= fromIndex)
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* smallest value <i>k</i> such that:
* <blockquote><pre>
* (this.codePointAt(<i>k</i>) == ch) {@code &&} (<i>k</i> >= fromIndex)
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string at or after position {@code fromIndex}, then
* {@code -1} is returned.
*
* <p>
* There is no restriction on the value of {@code fromIndex}. If it
* is negative, it has the same effect as if it were zero: this entire
* string may be searched. If it is greater than the length of this
* string, it has the same effect as if it were equal to the length of
* this string: {@code -1} is returned.
*
* <p>All indices are specified in {@code char} values
* (Unicode code units).
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from.
* @return the index of the first occurrence of the character in the
* character sequence represented by this object that is greater
* than or equal to {@code fromIndex}, or {@code -1}
* if the character does not occur.
*/
public int indexOf(int ch, int fromIndex) {
return back.indexOf(ch, fromIndex);
}
/**
* Returns the index within this string of the last occurrence of
* the specified character. For values of {@code ch} in the
* range from 0 to 0xFFFF (inclusive), the index (in Unicode code
* units) returned is the largest value <i>k</i> such that:
* <blockquote><pre>
* this.charAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* largest value <i>k</i> such that:
* <blockquote><pre>
* this.codePointAt(<i>k</i>) == ch
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string, then {@code -1} is returned. The
* {@code CharSeq} is searched backwards starting at the last
* character.
*
* @param ch a character (Unicode code point).
* @return the index of the last occurrence of the character in the
* character sequence represented by this object, or
* {@code -1} if the character does not occur.
*/
public int lastIndexOf(int ch) {
return back.lastIndexOf(ch);
}
/**
* Returns the index within this string of the last occurrence of
* the specified character, searching backward starting at the
* specified index. For values of {@code ch} in the range
* from 0 to 0xFFFF (inclusive), the index returned is the largest
* value <i>k</i> such that:
* <blockquote><pre>
* (this.charAt(<i>k</i>) == ch) {@code &&} (<i>k</i> <= fromIndex)
* </pre></blockquote>
* is true. For other values of {@code ch}, it is the
* largest value <i>k</i> such that:
* <blockquote><pre>
* (this.codePointAt(<i>k</i>) == ch) {@code &&} (<i>k</i> <= fromIndex)
* </pre></blockquote>
* is true. In either case, if no such character occurs in this
* string at or before position {@code fromIndex}, then
* {@code -1} is returned.
*
* <p>All indices are specified in {@code char} values
* (Unicode code units).
*
* @param ch a character (Unicode code point).
* @param fromIndex the index to start the search from. There is no
* restriction on the value of {@code fromIndex}. If it is
* greater than or equal to the length of this string, it has
* the same effect as if it were equal to one less than the
* length of this string: this entire string may be searched.
* If it is negative, it has the same effect as if it were -1:
* -1 is returned.
* @return the index of the last occurrence of the character in the
* character sequence represented by this object that is less
* than or equal to {@code fromIndex}, or {@code -1}
* if the character does not occur before that point.
*/
public int lastIndexOf(int ch, int fromIndex) {
return back.lastIndexOf(ch, fromIndex);
}
/**
* Returns the index within this string of the first occurrence of the
* specified substring.
*
* <p>The returned index is the smallest value <i>k</i> for which:
* <blockquote><pre>
* this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @return the index of the first occurrence of the specified substring,
* or {@code -1} if there is no such occurrence.
*/
public int indexOf(CharSeq str) {
return back.indexOf(str.back);
}
/**
* Returns the index within this string of the first occurrence of the
* specified substring, starting at the specified index.
*
* <p>The returned index is the smallest value <i>k</i> for which:
* <blockquote><pre>
* <i>k</i> >= fromIndex {@code &&} this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @param fromIndex the index from which to start the search.
* @return the index of the first occurrence of the specified substring,
* starting at the specified index,
* or {@code -1} if there is no such occurrence.
*/
public int indexOf(CharSeq str, int fromIndex) {
return back.indexOf(str.back, fromIndex);
}
/**
* Returns the index within this string of the last occurrence of the
* specified substring. The last occurrence of the empty string ""
* is considered to occur at the index value {@code this.length()}.
*
* <p>The returned index is the largest value <i>k</i> for which:
* <blockquote><pre>
* this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @return the index of the last occurrence of the specified substring,
* or {@code -1} if there is no such occurrence.
*/
public int lastIndexOf(CharSeq str) {
return back.lastIndexOf(str.back);
}
/**
* Returns the index within this string of the last occurrence of the
* specified substring, searching backward starting at the specified index.
*
* <p>The returned index is the largest value <i>k</i> for which:
* <blockquote><pre>
* <i>k</i> {@code <=} fromIndex {@code &&} this.startsWith(str, <i>k</i>)
* </pre></blockquote>
* If no such value of <i>k</i> exists, then {@code -1} is returned.
*
* @param str the substring to search for.
* @param fromIndex the index to start the search from.
* @return the index of the last occurrence of the specified substring,
* searching backward from the specified index,
* or {@code -1} if there is no such occurrence.
*/
public int lastIndexOf(CharSeq str, int fromIndex) {
return back.lastIndexOf(str.back, fromIndex);
}
/**
* Returns a string that is a substring of this string. The
* substring begins with the character at the specified index and
* extends to the end of this string. <p>
* Examples:
* <blockquote><pre>
* "unhappy".substring(2) returns "happy"
* "Harbison".substring(3) returns "bison"
* "emptiness".substring(9) returns "" (an empty string)
* </pre></blockquote>
*
* @param beginIndex the beginning index, inclusive.
* @return the specified substring.
* @throws IndexOutOfBoundsException if
* {@code beginIndex} is negative or larger than the
* length of this {@code CharSeq} object.
*/
public CharSeq substring(int beginIndex) {
return CharSeq.of(back.substring(beginIndex));
}
/**
* Returns a string that is a substring of this string. The
* substring begins at the specified {@code beginIndex} and
* extends to the character at index {@code endIndex - 1}.
* Thus the length of the substring is {@code endIndex-beginIndex}.
* <p>
* Examples:
* <blockquote><pre>
* "hamburger".substring(4, 8) returns "urge"
* "smiles".substring(1, 5) returns "mile"
* </pre></blockquote>
*
* @param beginIndex the beginning index, inclusive.
* @param endIndex the ending index, exclusive.
* @return the specified substring.
* @throws IndexOutOfBoundsException if the
* {@code beginIndex} is negative, or
* {@code endIndex} is larger than the length of
* this {@code CharSeq} object, or
* {@code beginIndex} is larger than
* {@code endIndex}.
*/
public CharSeq substring(int beginIndex, int endIndex) {
return CharSeq.of(back.substring(beginIndex, endIndex));
}
@Override
public String stringPrefix() {
return "CharSeq";
}
/**
* Returns a string containing the characters in this sequence in the same
* order as this sequence. The length of the string will be the length of
* this sequence.
*
* @return a string consisting of exactly this sequence of characters
*/
@Override
public String toString() {
return back;
}
/**
* Concatenates the specified string to the end of this string.
* <p>
* If the length of the argument string is {@code 0}, then this
* {@code CharSeq} object is returned. Otherwise, a
* {@code CharSeq} object is returned that represents a character
* sequence that is the concatenation of the character sequence
* represented by this {@code CharSeq} object and the character
* sequence represented by the argument string.<p>
* Examples:
* <blockquote><pre>
* "cares".concat("s") returns "caress"
* "to".concat("get").concat("her") returns "together"
* </pre></blockquote>
*
* @param str the {@code CharSeq} that is concatenated to the end
* of this {@code CharSeq}.
* @return a string that represents the concatenation of this object's
* characters followed by the string argument's characters.
*/
public CharSeq concat(CharSeq str) {
return CharSeq.of(back.concat(str.back));
}
/**
* Tells whether or not this string matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .matches(}<i>regex</i>{@code )} yields exactly the
* same result as the expression
*
* <blockquote>
* {@link java.util.regex.Pattern}.{@link java.util.regex.Pattern#matches(String, CharSequence)
* matches(<i>regex</i>, <i>str</i>)}
* </blockquote>
*
* @param regex the regular expression to which this string is to be matched
* @return {@code true} if, and only if, this string matches the
* given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public boolean matches(String regex) {
return back.matches(regex);
}
/**
* Returns true if and only if this string contains the specified
* sequence of char values.
*
* @param s the sequence to search for
* @return true if this string contains {@code s}, false otherwise
*/
public boolean contains(CharSequence s) {
return back.contains(s);
}
/**
* Replaces the first substring of this string that matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a> with the
* given replacement.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .replaceFirst(}<i>regex</i>{@code ,} <i>repl</i>{@code )}
* yields exactly the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#matcher(java.lang.CharSequence) matcher}(<i>str</i>).{@link
* java.util.regex.Matcher#replaceFirst replaceFirst}(<i>repl</i>)
* </code>
* </blockquote>
*
* <p>
* Note that backslashes ({@code \}) and dollar signs ({@code $}) in the
* replacement string may cause the results to be different than if it were
* being treated as a literal replacement string; see
* {@link java.util.regex.Matcher#replaceFirst}.
* Use {@link java.util.regex.Matcher#quoteReplacement} to suppress the special
* meaning of these characters, if desired.
*
* @param regex the regular expression to which this string is to be matched
* @param replacement the string to be substituted for the first match
* @return The resulting {@code CharSeq}
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq replaceFirst(String regex, String replacement) {
return CharSeq.of(back.replaceFirst(regex, replacement));
}
/**
* Replaces each substring of this string that matches the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a> with the
* given replacement.
*
* <p> An invocation of this method of the form
* <i>str</i>{@code .replaceAll(}<i>regex</i>{@code ,} <i>repl</i>{@code )}
* yields exactly the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#matcher(java.lang.CharSequence) matcher}(<i>str</i>).{@link
* java.util.regex.Matcher#replaceAll replaceAll}(<i>repl</i>)
* </code>
* </blockquote>
*
* <p>
* Note that backslashes ({@code \}) and dollar signs ({@code $}) in the
* replacement string may cause the results to be different than if it were
* being treated as a literal replacement string; see
* {@link java.util.regex.Matcher#replaceAll Matcher.replaceAll}.
* Use {@link java.util.regex.Matcher#quoteReplacement} to suppress the special
* meaning of these characters, if desired.
*
* @param regex the regular expression to which this string is to be matched
* @param replacement the string to be substituted for each match
* @return The resulting {@code CharSeq}
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq replaceAll(String regex, String replacement) {
return CharSeq.of(back.replaceAll(regex, replacement));
}
/**
* Replaces each substring of this string that matches the literal target
* sequence with the specified literal replacement sequence. The
* replacement proceeds from the beginning of the string to the end, for
* example, replacing "aa" with "b" in the string "aaa" will result in
* "ba" rather than "ab".
*
* @param target The sequence of char values to be replaced
* @param replacement The replacement sequence of char values
* @return The resulting string
*/
public CharSeq replace(CharSequence target, CharSequence replacement) {
return CharSeq.of(back.replace(target, replacement));
}
/**
* Splits this string around matches of the given
* <a href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> The array returned by this method contains each substring of this
* string that is terminated by another substring that matches the given
* expression or is terminated by the end of the string. The substrings in
* the array are in the order in which they occur in this string. If the
* expression does not match any part of the input then the resulting array
* has just one element, namely this string.
*
* <p> When there is a positive-width match at the beginning of this
* string then an empty leading substring is included at the beginning
* of the resulting array. A zero-width match at the beginning however
* never produces such empty leading substring.
*
* <p> The {@code limit} parameter controls the number of times the
* pattern is applied and therefore affects the length of the resulting
* array. If the limit <i>n</i> is greater than zero then the pattern
* will be applied at most <i>n</i> - 1 times, the array's
* length will be no greater than <i>n</i>, and the array's last entry
* will contain all input beyond the last matched delimiter. If <i>n</i>
* is non-positive then the pattern will be applied as many times as
* possible and the array can have any length. If <i>n</i> is zero then
* the pattern will be applied as many times as possible, the array can
* have any length, and trailing empty strings will be discarded.
*
* <p> The string {@code "boo:and:foo"}, for example, yields the
* following results with these parameters:
*
* <blockquote><table cellpadding=1 cellspacing=0 summary="Split example showing regex, limit, and result">
* <tr>
* <th>Regex</th>
* <th>Limit</th>
* <th>Result</th>
* </tr>
* <tr><td align=center>:</td>
* <td align=center>2</td>
* <td>{@code { "boo", "and:foo" }}</td></tr>
* <tr><td align=center>:</td>
* <td align=center>5</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>:</td>
* <td align=center>-2</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>5</td>
* <td>{@code { "b", "", ":and:f", "", "" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>-2</td>
* <td>{@code { "b", "", ":and:f", "", "" }}</td></tr>
* <tr><td align=center>o</td>
* <td align=center>0</td>
* <td>{@code { "b", "", ":and:f" }}</td></tr>
* </table></blockquote>
*
* <p> An invocation of this method of the form
* <i>str.</i>{@code split(}<i>regex</i>{@code ,} <i>n</i>{@code )}
* yields the same result as the expression
*
* <blockquote>
* <code>
* {@link java.util.regex.Pattern}.{@link
* java.util.regex.Pattern#compile compile}(<i>regex</i>).{@link
* java.util.regex.Pattern#split(java.lang.CharSequence, int) split}(<i>str</i>, <i>n</i>)
* </code>
* </blockquote>
*
* @param regex the delimiting regular expression
* @param limit the result threshold, as described above
* @return the array of strings computed by splitting this string
* around matches of the given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq[] split(String regex, int limit) {
final String[] javaStrings = back.split(regex, limit);
final CharSeq[] strings = new CharSeq[javaStrings.length];
for (int i = 0; i < strings.length; i++) {
strings[i] = of(javaStrings[i]);
}
return strings;
}
/**
* Splits this string around matches of the given <a
* href="../util/regex/Pattern.html#sum">regular expression</a>.
*
* <p> This method works as if by invoking the two-argument {@link
* #split(String, int) split} method with the given expression and a limit
* argument of zero. Trailing empty strings are therefore not included in
* the resulting array.
*
* <p> The string {@code "boo:and:foo"}, for example, yields the following
* results with these expressions:
*
* <blockquote><table cellpadding=1 cellspacing=0 summary="Split examples showing regex and result">
* <tr>
* <th>Regex</th>
* <th>Result</th>
* </tr>
* <tr><td align=center>:</td>
* <td>{@code { "boo", "and", "foo" }}</td></tr>
* <tr><td align=center>o</td>
* <td>{@code { "b", "", ":and:f" }}</td></tr>
* </table></blockquote>
*
* @param regex the delimiting regular expression
* @return the array of strings computed by splitting this string
* around matches of the given regular expression
* @throws PatternSyntaxException if the regular expression's syntax is invalid
* @see java.util.regex.Pattern
*/
public CharSeq[] split(String regex) {
return split(regex, 0);
}
/**
* Converts all of the characters in this {@code CharSeq} to lower
* case using the rules of the given {@code Locale}. Case mapping is based
* on the Unicode Standard version specified by the {@link java.lang.Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of lowercase mappings are in the following table:
* <table border="1" summary="Lowercase mapping examples showing language code of locale, upper case, lower case, and description">
* <tr>
* <th>Language Code of Locale</th>
* <th>Upper Case</th>
* <th>Lower Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0130</td>
* <td>\u0069</td>
* <td>capital letter I with dot above -> small letter i</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0049</td>
* <td>\u0131</td>
* <td>capital letter I -> small letter dotless i </td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>French Fries</td>
* <td>french fries</td>
* <td>lowercased all chars in String</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td><img src="doc-files/capiota.gif" alt="capiota"><img src="doc-files/capchi.gif" alt="capchi">
* <img src="doc-files/captheta.gif" alt="captheta"><img src="doc-files/capupsil.gif" alt="capupsil">
* <img src="doc-files/capsigma.gif" alt="capsigma"></td>
* <td><img src="doc-files/iota.gif" alt="iota"><img src="doc-files/chi.gif" alt="chi">
* <img src="doc-files/theta.gif" alt="theta"><img src="doc-files/upsilon.gif" alt="upsilon">
* <img src="doc-files/sigma1.gif" alt="sigma"></td>
* <td>lowercased all chars in String</td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, converted to lowercase.
* @see String#toLowerCase()
* @see String#toUpperCase()
* @see String#toUpperCase(Locale)
*/
public CharSeq toLowerCase(Locale locale) {
return CharSeq.of(back.toLowerCase(locale));
}
/**
* Converts all of the characters in this {@code CharSeq} to lower
* case using the rules of the default locale. This is equivalent to calling
* {@code toLowerCase(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "TITLE".toLowerCase()} in a Turkish locale
* returns {@code "t\u005Cu0131tle"}, where '\u005Cu0131' is the
* LATIN SMALL LETTER DOTLESS I character.
* To obtain correct results for locale insensitive strings, use
* {@code toLowerCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, converted to lowercase.
* @see String#toLowerCase(Locale)
*/
public CharSeq toLowerCase() {
return CharSeq.of(back.toLowerCase(Locale.getDefault()));
}
/**
* Converts all of the characters in this {@code CharSeq} to upper
* case using the rules of the given {@code Locale}. Case mapping is based
* on the Unicode Standard version specified by the {@link java.lang.Character Character}
* class. Since case mappings are not always 1:1 char mappings, the resulting
* {@code CharSeq} may be a different length than the original {@code CharSeq}.
* <p>
* Examples of locale-sensitive and 1:M case mappings are in the following table.
*
* <table border="1" summary="Examples of locale-sensitive and 1:M case mappings. Shows Language code of locale, lower case, upper case, and description.">
* <tr>
* <th>Language Code of Locale</th>
* <th>Lower Case</th>
* <th>Upper Case</th>
* <th>Description</th>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0069</td>
* <td>\u0130</td>
* <td>small letter i -> capital letter I with dot above</td>
* </tr>
* <tr>
* <td>tr (Turkish)</td>
* <td>\u0131</td>
* <td>\u0049</td>
* <td>small letter dotless i -> capital letter I</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>\u00df</td>
* <td>\u0053 \u0053</td>
* <td>small letter sharp s -> two letters: SS</td>
* </tr>
* <tr>
* <td>(all)</td>
* <td>Fahrvergnügen</td>
* <td>FAHRVERGNÜGEN</td>
* <td></td>
* </tr>
* </table>
*
* @param locale use the case transformation rules for this locale
* @return the {@code CharSeq}, converted to uppercase.
* @see String#toUpperCase()
* @see String#toLowerCase()
* @see String#toLowerCase(Locale)
*/
public CharSeq toUpperCase(Locale locale) {
return CharSeq.of(back.toUpperCase(locale));
}
/**
* Converts all of the characters in this {@code CharSeq} to upper
* case using the rules of the default locale. This method is equivalent to
* {@code toUpperCase(Locale.getDefault())}.
* <p>
* <b>Note:</b> This method is locale sensitive, and may produce unexpected
* results if used for strings that are intended to be interpreted locale
* independently.
* Examples are programming language identifiers, protocol keys, and HTML
* tags.
* For instance, {@code "title".toUpperCase()} in a Turkish locale
* returns {@code "T\u005Cu0130TLE"}, where '\u005Cu0130' is the
* LATIN CAPITAL LETTER I WITH DOT ABOVE character.
* To obtain correct results for locale insensitive strings, use
* {@code toUpperCase(Locale.ROOT)}.
* <p>
*
* @return the {@code CharSeq}, converted to uppercase.
* @see String#toUpperCase(Locale)
*/
public CharSeq toUpperCase() {
return CharSeq.of(back.toUpperCase(Locale.getDefault()));
}
/**
* Returns a string whose value is this string, with any leading and trailing
* whitespace removed.
* <p>
* If this {@code CharSeq} object represents an empty character
* sequence, or the first and last characters of character sequence
* represented by this {@code CharSeq} object both have codes
* greater than {@code '\u005Cu0020'} (the space character), then a
* reference to this {@code CharSeq} object is returned.
* <p>
* Otherwise, if there is no character with a code greater than
* {@code '\u005Cu0020'} in the string, then a
* {@code CharSeq} object representing an empty string is
* returned.
* <p>
* Otherwise, let <i>k</i> be the index of the first character in the
* string whose code is greater than {@code '\u005Cu0020'}, and let
* <i>m</i> be the index of the last character in the string whose code
* is greater than {@code '\u005Cu0020'}. A {@code CharSeq}
* object is returned, representing the substring of this string that
* begins with the character at index <i>k</i> and ends with the
* character at index <i>m</i>-that is, the result of
* {@code this.substring(k, m + 1)}.
* <p>
* This method may be used to trim whitespace (as defined above) from
* the beginning and end of a string.
*
* @return A string whose value is this string, with any leading and trailing white
* space removed, or this string if it has no leading or
* trailing white space.
*/
public CharSeq trim() {
return of(back.trim());
}
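// Illustrative example: trim() strips characters with codes <= '\u0020'
// from both ends, so CharSeq.of(" \t hi \n").trim() yields "hi".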
/**
* Converts this string to a new character array.
*
* @return a newly allocated character array whose length is the length
* of this string and whose contents are initialized to contain
* the character sequence represented by this string.
*/
public char[] toCharArray() {
return back.toCharArray();
}
@FunctionalInterface
interface CharUnaryOperator {
char apply(char c);
}
@FunctionalInterface
interface CharFunction<R> {
R apply(char c);
}
}
interface CharSeqModule {
interface Combinations {
static IndexedSeq<CharSeq> apply(CharSeq elements, int k) {
if (k == 0) {
return Vector.of(CharSeq.empty());
} else {
return elements.zipWithIndex().flatMap(
t -> apply(elements.drop(t._2 + 1), (k - 1)).map((CharSeq c) -> c.prepend(t._1))
);
}
}
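// Illustrative trace: apply(CharSeq.of("abc"), 2) yields Vector("ab", "ac", "bc");
// each element t._1 is prepended to every (k - 1)-combination drawn from the
// elements that follow it, i.e. elements.drop(t._2 + 1).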
}
}
| 1 | 7,281 | I think `element` can stay of type `Character` because `Character` is a final class. Then we do not need the cast in the line below. | vavr-io-vavr | java
@@ -260,6 +260,15 @@ public interface ASTAnyTypeDeclaration
}
+ /**
+ * Returns true if this is a regular interface declaration (not an annotation).
+ * Note that {@link #isInterface()} counts annotations in.
+ */
+ default boolean isRegularInterface() {
+ return false;
+ }
+
+
/** Returns true if this is an {@linkplain ASTAnnotationTypeDeclaration annotation type declaration}. */
default boolean isAnnotation() {
return this instanceof ASTAnnotationTypeDeclaration; | 1 | /**
* BSD-style license; for more info see http://pmd.sourceforge.net/license.html
*/
package net.sourceforge.pmd.lang.java.ast;
import static net.sourceforge.pmd.lang.java.ast.JModifier.ABSTRACT;
import java.util.List;
import org.checkerframework.checker.nullness.qual.NonNull;
import org.checkerframework.checker.nullness.qual.Nullable;
import net.sourceforge.pmd.lang.ast.NodeStream;
import net.sourceforge.pmd.lang.java.symbols.JClassSymbol;
import net.sourceforge.pmd.lang.java.types.JClassType;
import net.sourceforge.pmd.lang.rule.xpath.DeprecatedAttribute;
/**
* Groups class, enum, record, annotation and interface declarations under a common
* supertype.
*
* <pre class="grammar">
*
* AnyTypeDeclaration ::= {@link ASTClassOrInterfaceDeclaration ClassOrInterfaceDeclaration}
* | {@link ASTAnonymousClassDeclaration AnonymousClassDeclaration}
* | {@link ASTEnumDeclaration EnumDeclaration}
* | {@link ASTAnnotationTypeDeclaration AnnotationTypeDeclaration}
* | {@link ASTRecordDeclaration RecordDeclaration}
*
* </pre>
*/
public interface ASTAnyTypeDeclaration
extends TypeNode,
AccessNode,
TypeParamOwnerNode,
ASTBodyDeclaration,
ASTTopLevelDeclaration,
FinalizableNode,
JavadocCommentOwner {
@Override
@NonNull
JClassSymbol getSymbol();
/**
* Returns the {@linkplain JClassType#getGenericTypeDeclaration() generic type declaration}
* of the declared type. Note that for {@linkplain ASTAnonymousClassDeclaration anonymous classes},
* this returns a class type whose symbol is the actual anonymous
* class. Eg {@code new Runnable() { void foo() { } void run() { } }}
* would present both methods, ie not just be a {@code Runnable}.
* The {@link ASTConstructorCall} would have type {@code Runnable}
* though, not the anonymous class.
*/
@Override
@NonNull JClassType getTypeMirror();
/**
* Returns the simple name of this type declaration. Returns the
* empty string if this is an anonymous class declaration.
*/
@NonNull
default String getSimpleName() {
return getImage();
}
/**
* @deprecated Use {@link #getSimpleName()}
*/
@Deprecated
@DeprecatedAttribute(replaceWith = "@SimpleName")
@Override
String getImage();
/**
* Returns the name of the package in which this class is declared.
*/
default String getPackageName() {
return getRoot().getPackageName();
}
/**
* Returns the binary name of this type declaration. This
* is like {@link Class#getName()}.
*/
@NonNull
String getBinaryName();
/**
* Returns the canonical name of this class, if it exists.
* Otherwise returns null. This is like {@link Class#getCanonicalName()}.
*
* <p>A canonical name exists if all enclosing types have a
* canonical name, and this is neither a local class nor an
* anonymous class. For example:
*
* <pre>{@code
* package p;
*
* public class A { // p.A
* class M { // p.A.M
* {
* class Local { // null, local class
* class M2 {} // null, member of a local class
* }
*
* new Local() { // null, anonymous class
* class M2 {} // null, member of an anonymous class
* };
* }
* }
*
* }
* }</pre>
*
*
* So non-local/anonymous classes declared
* somewhere in a local/anonymous class also have no canonical name.
*/
@Nullable
String getCanonicalName();
/**
* Returns true if this is an abstract type. Interfaces and annotations
* types are implicitly abstract.
*/
@Override
default boolean isAbstract() {
return hasModifiers(ABSTRACT);
}
/**
* Returns the enum constants declared by this enum. If this is not
* an enum declaration, returns an empty stream.
*/
default NodeStream<ASTEnumConstant> getEnumConstants() {
return getFirstChildOfType(ASTEnumBody.class).children(ASTEnumConstant.class);
}
/**
* Returns the record components declared by this class. If this is not
* a record declaration, returns null.
*/
default @Nullable ASTRecordComponentList getRecordComponents() {
return null;
}
/**
* Retrieves the member declarations (fields, methods, classes, etc.)
* from the body of this type declaration.
*/
default NodeStream<ASTBodyDeclaration> getDeclarations() {
return getBody().toStream();
}
/**
* Returns the declarations of a particular type.
*
* @param klass Type of the declarations
* @param <T> Type of the declarations
*/
default <T extends ASTBodyDeclaration> NodeStream<T> getDeclarations(Class<? extends T> klass) {
return getDeclarations().filterIs(klass);
}
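// Illustrative usage (assuming the existing ASTFieldDeclaration node class):
//   NodeStream<ASTFieldDeclaration> fields = node.getDeclarations(ASTFieldDeclaration.class);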
/**
* Returns the operations declared in this class (methods and constructors).
*/
default NodeStream<ASTMethodOrConstructorDeclaration> getOperations() {
return getDeclarations().filterIs(ASTMethodOrConstructorDeclaration.class);
}
/**
* Returns the body of this type declaration.
*/
default ASTTypeBody getBody() {
return (ASTTypeBody) getLastChild();
}
/**
* Returns true if this type declaration is nested inside an interface,
* class or annotation.
*/
default boolean isNested() {
return getParent() instanceof ASTTypeBody;
}
/**
* Returns true if the class is declared inside a block other
* than the body of another class, or the top level. Anonymous
* classes are not considered local. Only class declarations
* can be local. Local classes cannot be static.
*/
default boolean isLocal() {
return getParent() instanceof ASTLocalClassStatement;
}
/**
* Returns true if this type is declared at the top-level of a file.
*/
default boolean isTopLevel() {
return getParent() instanceof ASTCompilationUnit;
}
/**
* Returns true if this is an {@linkplain ASTAnonymousClassDeclaration anonymous class declaration}.
*/
default boolean isAnonymous() {
return this instanceof ASTAnonymousClassDeclaration;
}
/**
* Returns true if this is an {@linkplain ASTEnumDeclaration enum class declaration}.
*/
default boolean isEnum() {
return this instanceof ASTEnumDeclaration;
}
/**
* Returns true if this is an {@linkplain ASTRecordDeclaration record class declaration}.
*/
default boolean isRecord() {
return this instanceof ASTRecordDeclaration;
}
/**
* Returns true if this is an interface type declaration (including
* annotation types). This is consistent with {@link Class#isInterface()}.
*/
default boolean isInterface() {
return false;
}
/**
* Returns true if this is a regular class declaration (not an enum,
* not a record, not an interface or annotation). Note that eg
* {@link JClassSymbol#isClass()} counts records and enums in, just
* like {@link #isInterface()} counts annotations in.
*/
default boolean isRegularClass() {
return false;
}
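// Illustrative contrast (assumption, based on the note above): for an enum
// declaration, isRegularClass() is false here, while the symbol-level
// JClassSymbol#isClass would still count it as a class.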
/** Returns true if this is an {@linkplain ASTAnnotationTypeDeclaration annotation type declaration}. */
default boolean isAnnotation() {
return this instanceof ASTAnnotationTypeDeclaration;
}
/**
* Returns the list of interfaces implemented by this class, or
* extended by this interface. Returns null if no such list is declared.
*/
@NonNull
default List<ASTClassOrInterfaceType> getSuperInterfaceTypeNodes() {
return ASTList.orEmpty(isInterface() ? getFirstChildOfType(ASTExtendsList.class)
: getFirstChildOfType(ASTImplementsList.class));
}
}
| 1 | 18,486 | Not sure if we should do it the other way round. If you asked me in the middle of the night whether an interface should return true for "isInterface" and what an annotation would return, I'd tell you annotations should return false.... What I want to say is: at first glance, it's easier to have "isInterface" and "isAnnotation" rather than "isInterface", "isAnnotation" and "isRegularInterface"... But I see we also have "isRegularClass".... We still have the possibility to change the semantics of those APIs in PMD 7. *Update:* Or in other words: what is less surprising? That isInterface returns true for annotations, or that it returns false? | pmd-pmd | java
@@ -67,7 +67,6 @@ def lambda_handler(request):
raise ValueError("'packages' action searching indexes that don't end in '_packages'")
_source = user_source
size = user_size
- terminate_after = None
elif action == 'search':
query = request.args.get('query', '')
my_fields = user_fields or [ | 1 | """
Sends the request to ElasticSearch.
TODO: Implement a higher-level search API.
"""
import os
from copy import deepcopy
from itertools import filterfalse, tee
from aws_requests_auth.boto_utils import BotoAWSRequestsAuth
from elasticsearch import Elasticsearch, RequestsHttpConnection
from t4_lambda_shared.decorator import api
from t4_lambda_shared.utils import get_default_origins, make_json_response
DEFAULT_SIZE = 1_000
MAX_QUERY_DURATION = 27 # Just shy of 29s API Gateway limit
NUM_PREVIEW_IMAGES = 100
NUM_PREVIEW_FILES = 20
COMPRESSION_EXTS = ['.gz']
IMG_EXTS = r'.*\.(bmp|gif|jpg|jpeg|png|tif|tiff|webp)'
SAMPLE_EXTS = r'.*\.(csv|ipynb|json|md|parquet|pdf|rmd|tsv|txt|vcf|xls|xlsx)(.gz)?'
README_KEYS = ['README.md', 'README.txt', 'README.ipynb']
SUMMARIZE_KEY = 'quilt_summarize.json'
@api(cors_origins=get_default_origins())
def lambda_handler(request):
"""
Proxy the request to Elasticsearch.
"""
action = request.args.get('action')
user_body = request.args.get('body', {})
user_fields = request.args.get('fields', [])
user_indexes = request.args.get('index', "")
user_size = request.args.get('size', DEFAULT_SIZE)
user_source = request.args.get('_source', [])
# 0-indexed starting position (for pagination)
user_from = int(request.args.get('from', 0))
user_retry = int(request.args.get('retry', 0))
terminate_after = int(os.environ.get('MAX_DOCUMENTS_PER_SHARD', 10_000))
if not user_indexes or not isinstance(user_indexes, str):
raise ValueError("Request must include index=<comma-separated string of indices>")
if user_from < 0:
raise ValueError("'from' must be a non-negative integer")
if action == 'packages':
query = request.args.get('query', '')
body = user_body or {
"query": {
"query_string": {
"analyze_wildcard": True,
"lenient": True,
"query": query,
# see enterprise/**/bucket.py for mappings
"fields": user_fields or [
# package
'comment', 'handle', 'handle_text^2', 'metadata', 'tags'
]
}
}
}
if not all(i.endswith('_packages') for i in user_indexes.split(',')):
raise ValueError("'packages' action searching indexes that don't end in '_packages'")
_source = user_source
size = user_size
terminate_after = None
elif action == 'search':
query = request.args.get('query', '')
my_fields = user_fields or [
# object
'content', 'comment', 'ext', 'key', 'key_text', 'meta_text',
# package, and boost the fields
'handle^2', 'handle_text^2', 'metadata^2', 'tags^2'
]
if user_retry <= 1:
body = {
"query": {
"query_string": {
"analyze_wildcard": True,
"lenient": user_retry > 0,
"query": query,
# see enterprise/**/bucket.py for mappings
"fields": my_fields
}
}
}
else:
body = {
"query": {
"simple_query_string": {
"query": query,
"analyze_wildcard": user_retry < 3,
"fields": my_fields,
"lenient": True,
}
}
}
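# Illustrative mapping of the retry ladder above (inferred from the branches):
#   retry 0 -> strict query_string, retry 1 -> lenient query_string,
#   retry 2 -> simple_query_string with wildcards, retry 3+ -> without.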
_source = user_source or [
'key', 'version_id', 'updated', 'last_modified', 'size', 'user_meta',
'comment', 'handle', 'hash', 'tags', 'metadata', 'pointer_file'
]
size = DEFAULT_SIZE
if not user_retry:
terminate_after = None
elif action == 'stats':
body = {
"query": {"match_all": {}},
"aggs": {
"totalBytes": {"sum": {"field": 'size'}},
"exts": {
"terms": {"field": 'ext'},
"aggs": {"size": {"sum": {"field": 'size'}}},
},
"totalPackageHandles": {"value_count": {"field": "handle"}},
}
}
size = 0 # We still get all aggregates, just don't need the results
_source = False
# Consider all documents when computing counts, etc.
terminate_after = None
elif action == 'images':
body = {
'query': {'regexp': {'ext': IMG_EXTS}},
'collapse': {
'field': 'key',
'inner_hits': {
'name': 'latest',
'size': 1,
'sort': [{'last_modified': 'desc'}],
'_source': ['key', 'version_id'],
},
},
}
size = NUM_PREVIEW_IMAGES
_source = False
elif action == 'sample':
body = {
'query': {
'bool': {
'must': [{'regexp': {'ext': SAMPLE_EXTS}}],
'must_not': [
{'terms': {'key': README_KEYS + [SUMMARIZE_KEY]}},
{'wildcard': {'key': '*/' + SUMMARIZE_KEY}},
],
},
},
'collapse': {
'field': 'key',
'inner_hits': {
'name': 'latest',
'size': 1,
'sort': [{'last_modified': 'desc'}],
'_source': ['key', 'version_id'],
},
},
}
size = NUM_PREVIEW_FILES
_source = False
else:
return make_json_response(400, {"title": "Invalid action"})
es_host = os.environ['ES_HOST']
region = os.environ['AWS_REGION']
index_overrides = os.getenv('INDEX_OVERRIDES', '')
auth = BotoAWSRequestsAuth(
aws_host=es_host,
aws_region=region,
aws_service='es'
)
es_client = Elasticsearch(
hosts=[{'host': es_host, 'port': 443}],
http_auth=auth,
use_ssl=True,
verify_certs=True,
connection_class=RequestsHttpConnection,
timeout=MAX_QUERY_DURATION,
)
to_search = f"{user_indexes},{index_overrides}" if index_overrides else user_indexes
result = es_client.search(
index=to_search,
body=body,
_source=_source,
size=size,
from_=user_from,
# try turning this off to consider all documents
terminate_after=terminate_after,
)
return make_json_response(200, post_process(result, action))
def post_process(result: dict, action: str) -> dict:
"""post process result from elastic conditional on action
"""
if action == "stats":
# don't modify the original to avoid side-effects
result = deepcopy(result)
counts = result["aggregations"]["exts"]["buckets"]
non_gz, gz = partition(
lambda c: any(c.get("key", "").lower().endswith(ext) for ext in COMPRESSION_EXTS),
counts
)
ext_counts = {}
# ES reports double extensions e.g. file.foo.ext, get down to just .ext
# for any .ext that is not .gz
# populate ext_counts
for record in non_gz:
_, ext = os.path.splitext(f"fakename{record['key']}")
if ext not in ext_counts:
ext_counts[ext] = {'doc_count': 0, 'size': 0}
ext_counts[ext]['doc_count'] += record.get('doc_count', 0)
ext_counts[ext]['size'] += record.get('size', {}).get('value', 0)
corrected = [
{
'key': ext,
'doc_count': val['doc_count'],
'size': {'value': val['size']}
}
for ext, val in ext_counts.items()
]
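# Illustrative example: buckets ".foo.csv" and ".csv" fold into a single
# ".csv" entry (doc_count and size summed); ".csv.gz" stays in the gz
# partition and is appended unchanged below.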
# rewrite aggregation buckets so gz aggregates use two-level extensions
# and all other extensions are single-level
corrected.extend(gz)
result["aggregations"]["exts"]["buckets"] = corrected
return result
def partition(pred, iterable):
"""Use a predicate to partition entries into false entries and true entries
partition(is_odd, range(10)) --> 0 2 4 6 8 and 1 3 5 7 9
from https://docs.python.org/dev/library/itertools.html#itertools-recipes
"""
t1, t2 = tee(iterable)
return filterfalse(pred, t1), filter(pred, t2)
| 1 | 19,699 | This will potentially skip package results. Do we really want that? | quiltdata-quilt | py |
@@ -787,7 +787,7 @@ const std::map<llvm::StringRef, hipCounter> CUDA_RUNTIME_TYPE_NAME_MAP {
{"cudaErrorInvalidPc", {"hipErrorInvalidPc", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 718
// CUDA_ERROR_LAUNCH_FAILED
{"cudaErrorLaunchFailure", {"hipErrorLaunchFailure", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 719
- // no analogue
+ // CUDA_ERROR_COOPERATIVE_LAUNCH_TOO_LARGE
{"cudaErrorCooperativeLaunchTooLarge", {"hipErrorCooperativeLaunchTooLarge", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 720
// CUDA_ERROR_NOT_PERMITTED
{"cudaErrorNotPermitted", {"hipErrorNotPermitted", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 800 | 1 | /*
Copyright (c) 2015 - present Advanced Micro Devices, Inc. All rights reserved.
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
*/
#include "CUDA2HIP.h"
// Maps the names of CUDA RUNTIME API types to the corresponding HIP types
const std::map<llvm::StringRef, hipCounter> CUDA_RUNTIME_TYPE_NAME_MAP {
// 1. Structs
// no analogue
{"cudaChannelFormatDesc", {"hipChannelFormatDesc", "", CONV_TYPE, API_RUNTIME}},
// no analogue
{"cudaDeviceProp", {"hipDeviceProp_t", "", CONV_TYPE, API_RUNTIME}},
// no analogue
{"cudaEglFrame", {"hipEglFrame", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
{"cudaEglFrame_st", {"hipEglFrame", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// no analogue
{"cudaEglPlaneDesc", {"hipEglPlaneDesc", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
{"cudaEglPlaneDesc_st", {"hipEglPlaneDesc", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// no analogue
{"cudaExtent", {"hipExtent", "", CONV_TYPE, API_RUNTIME}},
// CUDA_EXTERNAL_MEMORY_BUFFER_DESC
{"cudaExternalMemoryBufferDesc", {"HIP_EXTERNAL_MEMORY_BUFFER_DESC", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUDA_EXTERNAL_MEMORY_HANDLE_DESC
{"cudaExternalMemoryHandleDesc", {"HIP_EXTERNAL_MEMORY_HANDLE_DESC", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUDA_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC
{"cudaExternalMemoryMipmappedArrayDesc", {"HIP_EXTERNAL_MEMORY_MIPMAPPED_ARRAY_DESC", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUDA_EXTERNAL_SEMAPHORE_HANDLE_DESC
{"cudaExternalSemaphoreHandleDesc", {"HIP_EXTERNAL_SEMAPHORE_HANDLE_DESC", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUDA_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS
{"cudaExternalSemaphoreSignalParams", {"HIP_EXTERNAL_SEMAPHORE_SIGNAL_PARAMS", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUDA_EXTERNAL_SEMAPHORE_WAIT_PARAMS
{"cudaExternalSemaphoreWaitParams", {"HIP_EXTERNAL_SEMAPHORE_WAIT_PARAMS", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// no analogue
{"cudaFuncAttributes", {"hipFuncAttributes", "", CONV_TYPE, API_RUNTIME}},
// CUDA_HOST_NODE_PARAMS
{"cudaHostNodeParams", {"HIP_HOST_NODE_PARAMS", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUipcEventHandle
{"cudaIpcEventHandle_t", {"ihipIpcEventHandle_t", "", CONV_TYPE, API_RUNTIME}},
// CUipcEventHandle_st
{"cudaIpcEventHandle_st", {"ihipIpcEventHandle_t", "", CONV_TYPE, API_RUNTIME}},
// CUipcMemHandle
{"cudaIpcMemHandle_t", {"hipIpcMemHandle_t", "", CONV_TYPE, API_RUNTIME}},
// CUipcMemHandle_st
{"cudaIpcMemHandle_st", {"hipIpcMemHandle_st", "", CONV_TYPE, API_RUNTIME}},
// CUDA_KERNEL_NODE_PARAMS
{"cudaKernelNodeParams", {"hipKernelNodeParams", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// no analogue
// CUDA_LAUNCH_PARAMS struct differs
{"cudaLaunchParams", {"hipLaunchParams", "", CONV_TYPE, API_RUNTIME}},
// no analogue
// NOTE: HIP struct is bigger and contains cudaMemcpy3DParms only in the beginning
{"cudaMemcpy3DParms", {"hipMemcpy3DParms", "", CONV_TYPE, API_RUNTIME}},
// no analogue
{"cudaMemcpy3DPeerParms", {"hipMemcpy3DPeerParms", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUDA_MEMSET_NODE_PARAMS
{"cudaMemsetParams", {"hipMemsetParams", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// no analogue
{"cudaPitchedPtr", {"hipPitchedPtr", "", CONV_TYPE, API_RUNTIME}},
// no analogue
{"cudaPointerAttributes", {"hipPointerAttribute_t", "", CONV_TYPE, API_RUNTIME}},
// no analogue
{"cudaPos", {"hipPos", "", CONV_TYPE, API_RUNTIME}},
// no analogue
// NOTE: CUDA_RESOURCE_DESC struct differs
{"cudaResourceDesc", {"hipResourceDesc", "", CONV_TYPE, API_RUNTIME}},
// NOTE: CUDA_RESOURCE_VIEW_DESC has reserved bytes in the end
{"cudaResourceViewDesc", {"hipResourceViewDesc", "", CONV_TYPE, API_RUNTIME}},
// no analogue
// NOTE: CUDA_TEXTURE_DESC differs
{"cudaTextureDesc", {"hipTextureDesc", "", CONV_TYPE, API_RUNTIME}},
// NOTE: the same struct and its name
{"CUuuid_st", {"hipUUID", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// NOTE: possibly CUsurfref is analogue
{"surfaceReference", {"hipSurfaceReference", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// the same - CUevent_st
{"CUevent_st", {"ihipEvent_t", "", CONV_TYPE, API_RUNTIME}},
// CUevent
{"cudaEvent_t", {"hipEvent_t", "", CONV_TYPE, API_RUNTIME}},
// CUextMemory_st
{"CUexternalMemory_st", {"hipExtMemory_st", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUexternalMemory
{"cudaExternalMemory_t", {"hipExternalMemory", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUextSemaphore_st
{"CUexternalSemaphore_st", {"hipExtSemaphore_st", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUexternalSemaphore
{"cudaExternalSemaphore_t", {"hipExternalSemaphore", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// the same - CUgraph_st
{"CUgraph_st", {"hipGraph_st", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUgraph
{"cudaGraph_t", {"hipGraph", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// the same - CUgraphExec_st
{"CUgraphExec_st", {"hipGraphExec_st", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUgraphExec
{"cudaGraphExec_t", {"hipGraphExec", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUgraphicsResource_st
{"cudaGraphicsResource", {"hipGraphicsResource_st", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUgraphicsResource
{"cudaGraphicsResource_t", {"hipGraphicsResource_t", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// the same - CUgraphNode_st
{"CUgraphNode_st", {"hipGraphNode_st", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUgraphNode
{"cudaGraphNode_t", {"hipGraphNode", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUeglStreamConnection_st
{"CUeglStreamConnection_st", {"hipEglStreamConnection", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUeglStreamConnection
{"cudaEglStreamConnection", {"hipEglStreamConnection", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUarray_st
{"cudaArray", {"hipArray", "", CONV_TYPE, API_RUNTIME}},
// CUarray
{"cudaArray_t", {"hipArray_t", "", CONV_TYPE, API_RUNTIME}},
// no analogue
{"cudaArray_const_t", {"hipArray_const_t", "", CONV_TYPE, API_RUNTIME}},
// CUmipmappedArray_st
{"cudaMipmappedArray", {"hipMipmappedArray", "", CONV_TYPE, API_RUNTIME}},
// CUmipmappedArray
{"cudaMipmappedArray_t", {"hipMipmappedArray_t", "", CONV_TYPE, API_RUNTIME}},
// no analogue
{"cudaMipmappedArray_const_t", {"hipMipmappedArray_const_t", "", CONV_TYPE, API_RUNTIME}},
// the same - CUstream_st
{"CUstream_st", {"ihipStream_t", "", CONV_TYPE, API_RUNTIME}},
// CUstream
{"cudaStream_t", {"hipStream_t", "", CONV_TYPE, API_RUNTIME}},
// 3. Enums
// no analogue
{"cudaCGScope", {"hipCGScope", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaCGScope enum values
{"cudaCGScopeInvalid", {"hipCGScopeInvalid", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
{"cudaCGScopeGrid", {"hipCGScopeGrid", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
{"cudaCGScopeMultiGrid", {"hipCGScopeMultiGrid", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// no analogue
{"cudaChannelFormatKind", {"hipChannelFormatKind", "", CONV_TYPE, API_RUNTIME}},
// cudaChannelFormatKind enum values
{"cudaChannelFormatKindSigned", {"hipChannelFormatKindSigned", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
{"cudaChannelFormatKindUnsigned", {"hipChannelFormatKindUnsigned", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
{"cudaChannelFormatKindFloat", {"hipChannelFormatKindFloat", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
{"cudaChannelFormatKindNone", {"hipChannelFormatKindNone", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 3
// CUcomputemode
{"cudaComputeMode", {"hipComputeMode", "", CONV_TYPE, API_RUNTIME}},
// cudaComputeMode enum values
// CU_COMPUTEMODE_DEFAULT
{"cudaComputeModeDefault", {"hipComputeModeDefault", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
// CU_COMPUTEMODE_EXCLUSIVE
{"cudaComputeModeExclusive", {"hipComputeModeExclusive", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
// CU_COMPUTEMODE_PROHIBITED
{"cudaComputeModeProhibited", {"hipComputeModeProhibited", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
// CU_COMPUTEMODE_EXCLUSIVE_PROCESS
{"cudaComputeModeExclusiveProcess", {"hipComputeModeExclusiveProcess", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 3
// CUdevice_attribute
{"cudaDeviceAttr", {"hipDeviceAttribute_t", "", CONV_TYPE, API_RUNTIME}},
// cudaDeviceAttr enum values
// CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_BLOCK
{"cudaDevAttrMaxThreadsPerBlock", {"hipDeviceAttributeMaxThreadsPerBlock", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
// CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_X
{"cudaDevAttrMaxBlockDimX", {"hipDeviceAttributeMaxBlockDimX", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
// CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Y
{"cudaDevAttrMaxBlockDimY", {"hipDeviceAttributeMaxBlockDimY", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 3
// CU_DEVICE_ATTRIBUTE_MAX_BLOCK_DIM_Z
{"cudaDevAttrMaxBlockDimZ", {"hipDeviceAttributeMaxBlockDimZ", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 4
// CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_X
{"cudaDevAttrMaxGridDimX", {"hipDeviceAttributeMaxGridDimX", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 5
// CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Y
{"cudaDevAttrMaxGridDimY", {"hipDeviceAttributeMaxGridDimY", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 6
// CU_DEVICE_ATTRIBUTE_MAX_GRID_DIM_Z
{"cudaDevAttrMaxGridDimZ", {"hipDeviceAttributeMaxGridDimZ", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 7
// CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK
{"cudaDevAttrMaxSharedMemoryPerBlock", {"hipDeviceAttributeMaxSharedMemoryPerBlock", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 8
// CU_DEVICE_ATTRIBUTE_TOTAL_CONSTANT_MEMORY
{"cudaDevAttrTotalConstantMemory", {"hipDeviceAttributeTotalConstantMemory", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 9
// CU_DEVICE_ATTRIBUTE_WARP_SIZE
{"cudaDevAttrWarpSize", {"hipDeviceAttributeWarpSize", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 10
// CU_DEVICE_ATTRIBUTE_MAX_PITCH
{"cudaDevAttrMaxPitch", {"hipDeviceAttributeMaxPitch", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 11
// CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_BLOCK
{"cudaDevAttrMaxRegistersPerBlock", {"hipDeviceAttributeMaxRegistersPerBlock", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 12
// CU_DEVICE_ATTRIBUTE_CLOCK_RATE
{"cudaDevAttrClockRate", {"hipDeviceAttributeClockRate", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 13
// CU_DEVICE_ATTRIBUTE_TEXTURE_ALIGNMENT
{"cudaDevAttrTextureAlignment", {"hipDeviceAttributeTextureAlignment", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 14
// CU_DEVICE_ATTRIBUTE_GPU_OVERLAP
// NOTE: Is not deprecated as CUDA Driver's API analogue CU_DEVICE_ATTRIBUTE_GPU_OVERLAP
{"cudaDevAttrGpuOverlap", {"hipDeviceAttributeGpuOverlap", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 15
// CU_DEVICE_ATTRIBUTE_MULTIPROCESSOR_COUNT
{"cudaDevAttrMultiProcessorCount", {"hipDeviceAttributeMultiprocessorCount", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 16
// CU_DEVICE_ATTRIBUTE_KERNEL_EXEC_TIMEOUT
{"cudaDevAttrKernelExecTimeout", {"hipDeviceAttributeKernelExecTimeout", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 17
// CU_DEVICE_ATTRIBUTE_INTEGRATED
{"cudaDevAttrIntegrated", {"hipDeviceAttributeIntegrated", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 18
// CU_DEVICE_ATTRIBUTE_CAN_MAP_HOST_MEMORY
{"cudaDevAttrCanMapHostMemory", {"hipDeviceAttributeCanMapHostMemory", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 19
// CU_DEVICE_ATTRIBUTE_COMPUTE_MODE
{"cudaDevAttrComputeMode", {"hipDeviceAttributeComputeMode", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 20
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_WIDTH
{"cudaDevAttrMaxTexture1DWidth", {"hipDeviceAttributeMaxTexture1DWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 21
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_WIDTH
{"cudaDevAttrMaxTexture2DWidth", {"hipDeviceAttributeMaxTexture2DWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 22
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_HEIGHT
{"cudaDevAttrMaxTexture2DHeight", {"hipDeviceAttributeMaxTexture2DHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 23
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH
{"cudaDevAttrMaxTexture3DWidth", {"hipDeviceAttributeMaxTexture3DWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 24
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT
{"cudaDevAttrMaxTexture3DHeight", {"hipDeviceAttributeMaxTexture3DHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 25
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH
{"cudaDevAttrMaxTexture3DDepth", {"hipDeviceAttributeMaxTexture3DDepth", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 26
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_WIDTH
{"cudaDevAttrMaxTexture2DLayeredWidth", {"hipDeviceAttributeMaxTexture2DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 27
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_HEIGHT
{"cudaDevAttrMaxTexture2DLayeredHeight", {"hipDeviceAttributeMaxTexture2DLayeredHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 28
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LAYERED_LAYERS
{"cudaDevAttrMaxTexture2DLayeredLayers", {"hipDeviceAttributeMaxTexture2DLayeredLayers", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 29
// CU_DEVICE_ATTRIBUTE_SURFACE_ALIGNMENT
{"cudaDevAttrSurfaceAlignment", {"hipDeviceAttributeSurfaceAlignment", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 30
// CU_DEVICE_ATTRIBUTE_CONCURRENT_KERNELS
{"cudaDevAttrConcurrentKernels", {"hipDeviceAttributeConcurrentKernels", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 31
// CU_DEVICE_ATTRIBUTE_ECC_ENABLED
{"cudaDevAttrEccEnabled", {"hipDeviceAttributeEccEnabled", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 32
// CU_DEVICE_ATTRIBUTE_PCI_BUS_ID
{"cudaDevAttrPciBusId", {"hipDeviceAttributePciBusId", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 33
// CU_DEVICE_ATTRIBUTE_PCI_DEVICE_ID
{"cudaDevAttrPciDeviceId", {"hipDeviceAttributePciDeviceId", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 34
// CU_DEVICE_ATTRIBUTE_TCC_DRIVER
{"cudaDevAttrTccDriver", {"hipDeviceAttributeTccDriver", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 35
// CU_DEVICE_ATTRIBUTE_MEMORY_CLOCK_RATE
{"cudaDevAttrMemoryClockRate", {"hipDeviceAttributeMemoryClockRate", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 36
// CU_DEVICE_ATTRIBUTE_GLOBAL_MEMORY_BUS_WIDTH
{"cudaDevAttrGlobalMemoryBusWidth", {"hipDeviceAttributeMemoryBusWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 37
// CU_DEVICE_ATTRIBUTE_L2_CACHE_SIZE
{"cudaDevAttrL2CacheSize", {"hipDeviceAttributeL2CacheSize", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 38
// CU_DEVICE_ATTRIBUTE_MAX_THREADS_PER_MULTIPROCESSOR
{"cudaDevAttrMaxThreadsPerMultiProcessor", {"hipDeviceAttributeMaxThreadsPerMultiProcessor", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 39
// CU_DEVICE_ATTRIBUTE_ASYNC_ENGINE_COUNT
{"cudaDevAttrAsyncEngineCount", {"hipDeviceAttributeAsyncEngineCount", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 40
// CU_DEVICE_ATTRIBUTE_UNIFIED_ADDRESSING
{"cudaDevAttrUnifiedAddressing", {"hipDeviceAttributeUnifiedAddressing", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 41
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_WIDTH
{"cudaDevAttrMaxTexture1DLayeredWidth", {"hipDeviceAttributeMaxTexture1DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 42
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LAYERED_LAYERS
{"cudaDevAttrMaxTexture1DLayeredLayers", {"hipDeviceAttributeMaxTexture1DLayeredLayers", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 43
// 44 - no
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_WIDTH
{"cudaDevAttrMaxTexture2DGatherWidth", {"hipDeviceAttributeMaxTexture2DGatherWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 45
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_GATHER_HEIGHT
{"cudaDevAttrMaxTexture2DGatherHeight", {"hipDeviceAttributeMaxTexture2DGatherHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 46
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_WIDTH_ALTERNATE
{"cudaDevAttrMaxTexture3DWidthAlt", {"hipDeviceAttributeMaxTexture3DWidthAlternate", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 47
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_HEIGHT_ALTERNATE
{"cudaDevAttrMaxTexture3DHeightAlt", {"hipDeviceAttributeMaxTexture3DHeightAlternate", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 48
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE3D_DEPTH_ALTERNATE
{"cudaDevAttrMaxTexture3DDepthAlt", {"hipDeviceAttributeMaxTexture3DDepthAlternate", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 49
// CU_DEVICE_ATTRIBUTE_PCI_DOMAIN_ID
{"cudaDevAttrPciDomainId", {"hipDeviceAttributePciDomainId", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 50
// CU_DEVICE_ATTRIBUTE_TEXTURE_PITCH_ALIGNMENT
{"cudaDevAttrTexturePitchAlignment", {"hipDeviceAttributeTexturePitchAlignment", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 51
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_WIDTH
{"cudaDevAttrMaxTextureCubemapWidth", {"hipDeviceAttributeMaxTextureCubemapWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 52
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_WIDTH
{"cudaDevAttrMaxTextureCubemapLayeredWidth", {"hipDeviceAttributeMaxTextureCubemapLayeredWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 53
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURECUBEMAP_LAYERED_LAYERS
{"cudaDevAttrMaxTextureCubemapLayeredLayers", {"hipDeviceAttributeMaxTextureCubemapLayeredLayers", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 54
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_WIDTH
{"cudaDevAttrMaxSurface1DWidth", {"hipDeviceAttributeMaxSurface1DWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 55
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_WIDTH
{"cudaDevAttrMaxSurface2DWidth", {"hipDeviceAttributeMaxSurface2DWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 56
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_HEIGHT
{"cudaDevAttrMaxSurface2DHeight", {"hipDeviceAttributeMaxSurface2DHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 57
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_WIDTH
{"cudaDevAttrMaxSurface3DWidth", {"hipDeviceAttributeMaxSurface3DWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 58
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_HEIGHT
{"cudaDevAttrMaxSurface3DHeight", {"hipDeviceAttributeMaxSurface3DHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 59
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE3D_DEPTH
{"cudaDevAttrMaxSurface3DDepth", {"hipDeviceAttributeMaxSurface3DDepth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 60
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_WIDTH
{"cudaDevAttrMaxSurface1DLayeredWidth", {"hipDeviceAttributeMaxSurface1DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 61
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE1D_LAYERED_LAYERS
{"cudaDevAttrMaxSurface1DLayeredLayers", {"hipDeviceAttributeMaxSurface1DLayeredLayers", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 62
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_WIDTH
{"cudaDevAttrMaxSurface2DLayeredWidth", {"hipDeviceAttributeMaxSurface2DLayeredWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 63
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_HEIGHT
{"cudaDevAttrMaxSurface2DLayeredHeight", {"hipDeviceAttributeMaxSurface2DLayeredHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 64
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACE2D_LAYERED_LAYERS
{"cudaDevAttrMaxSurface2DLayeredLayers", {"hipDeviceAttributeMaxSurface2DLayeredLayers", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 65
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_WIDTH
{"cudaDevAttrMaxSurfaceCubemapWidth", {"hipDeviceAttributeMaxSurfaceCubemapWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 66
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_WIDTH
{"cudaDevAttrMaxSurfaceCubemapLayeredWidth", {"hipDeviceAttributeMaxSurfaceCubemapLayeredWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 67
// CU_DEVICE_ATTRIBUTE_MAXIMUM_SURFACECUBEMAP_LAYERED_LAYERS
{"cudaDevAttrMaxSurfaceCubemapLayeredLayers", {"hipDeviceAttributeMaxSurfaceCubemapLayeredLayers", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 68
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_LINEAR_WIDTH
{"cudaDevAttrMaxTexture1DLinearWidth", {"hipDeviceAttributeMaxTexture1DLinearWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 69
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_WIDTH
{"cudaDevAttrMaxTexture2DLinearWidth", {"hipDeviceAttributeMaxTexture2DLinearWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 70
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_HEIGHT
{"cudaDevAttrMaxTexture2DLinearHeight", {"hipDeviceAttributeMaxTexture2DLinearHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 71
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_LINEAR_PITCH
{"cudaDevAttrMaxTexture2DLinearPitch", {"hipDeviceAttributeMaxTexture2DLinearPitch", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 72
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_WIDTH
{"cudaDevAttrMaxTexture2DMipmappedWidth", {"hipDeviceAttributeMaxTexture2DMipmappedWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 73
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE2D_MIPMAPPED_HEIGHT
{"cudaDevAttrMaxTexture2DMipmappedHeight", {"hipDeviceAttributeMaxTexture2DMipmappedHeight", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 74
// CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MAJOR
{"cudaDevAttrComputeCapabilityMajor", {"hipDeviceAttributeComputeCapabilityMajor", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 75
// CU_DEVICE_ATTRIBUTE_COMPUTE_CAPABILITY_MINOR
{"cudaDevAttrComputeCapabilityMinor", {"hipDeviceAttributeComputeCapabilityMinor", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 76
// CU_DEVICE_ATTRIBUTE_MAXIMUM_TEXTURE1D_MIPMAPPED_WIDTH
{"cudaDevAttrMaxTexture1DMipmappedWidth", {"hipDeviceAttributeMaxTexture1DMipmappedWidth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 77
// CU_DEVICE_ATTRIBUTE_STREAM_PRIORITIES_SUPPORTED
{"cudaDevAttrStreamPrioritiesSupported", {"hipDeviceAttributeStreamPrioritiesSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 78
// CU_DEVICE_ATTRIBUTE_GLOBAL_L1_CACHE_SUPPORTED
{"cudaDevAttrGlobalL1CacheSupported", {"hipDeviceAttributeGlobalL1CacheSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 79
// CU_DEVICE_ATTRIBUTE_LOCAL_L1_CACHE_SUPPORTED
{"cudaDevAttrLocalL1CacheSupported", {"hipDeviceAttributeLocalL1CacheSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 80
// CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_MULTIPROCESSOR
{"cudaDevAttrMaxSharedMemoryPerMultiprocessor", {"hipDeviceAttributeMaxSharedMemoryPerMultiprocessor", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 81
// CU_DEVICE_ATTRIBUTE_MAX_REGISTERS_PER_MULTIPROCESSOR
{"cudaDevAttrMaxRegistersPerMultiprocessor", {"hipDeviceAttributeMaxRegistersPerMultiprocessor", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 82
// CU_DEVICE_ATTRIBUTE_MANAGED_MEMORY
{"cudaDevAttrManagedMemory", {"hipDeviceAttributeManagedMemory", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 83
// CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD
{"cudaDevAttrIsMultiGpuBoard", {"hipDeviceAttributeIsMultiGpuBoard", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 84
// CU_DEVICE_ATTRIBUTE_MULTI_GPU_BOARD_GROUP_ID
{"cudaDevAttrMultiGpuBoardGroupID", {"hipDeviceAttributeMultiGpuBoardGroupID", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 85
// CU_DEVICE_ATTRIBUTE_HOST_NATIVE_ATOMIC_SUPPORTED
{"cudaDevAttrHostNativeAtomicSupported", {"hipDeviceAttributeHostNativeAtomicSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 86
// CU_DEVICE_ATTRIBUTE_SINGLE_TO_DOUBLE_PRECISION_PERF_RATIO
{"cudaDevAttrSingleToDoublePrecisionPerfRatio", {"hipDeviceAttributeSingleToDoublePrecisionPerfRatio", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 87
// CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS
{"cudaDevAttrPageableMemoryAccess", {"hipDeviceAttributePageableMemoryAccess", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 88
// CU_DEVICE_ATTRIBUTE_CONCURRENT_MANAGED_ACCESS
{"cudaDevAttrConcurrentManagedAccess", {"hipDeviceAttributeConcurrentManagedAccess", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 89
// CU_DEVICE_ATTRIBUTE_COMPUTE_PREEMPTION_SUPPORTED
{"cudaDevAttrComputePreemptionSupported", {"hipDeviceAttributeComputePreemptionSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 90
// CU_DEVICE_ATTRIBUTE_CAN_USE_HOST_POINTER_FOR_REGISTERED_MEM
{"cudaDevAttrCanUseHostPointerForRegisteredMem", {"hipDeviceAttributeCanUseHostPointerForRegisteredMem", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 91
// CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_MEM_OPS
{"cudaDevAttrReserved92", {"hipDeviceAttributeCanUseStreamMemOps", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 92
// CU_DEVICE_ATTRIBUTE_CAN_USE_64_BIT_STREAM_MEM_OPS
{"cudaDevAttrReserved93", {"hipDeviceAttributeCanUse64BitStreamMemOps", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 93
// CU_DEVICE_ATTRIBUTE_CAN_USE_STREAM_WAIT_VALUE_NOR
{"cudaDevAttrReserved94", {"hipDeviceAttributeCanUseStreamWaitValueNor", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 94
// CU_DEVICE_ATTRIBUTE_COOPERATIVE_LAUNCH
{"cudaDevAttrCooperativeLaunch", {"hipDeviceAttributeCooperativeLaunch", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 95
// CU_DEVICE_ATTRIBUTE_COOPERATIVE_MULTI_DEVICE_LAUNCH
{"cudaDevAttrCooperativeMultiDeviceLaunch", {"hipDeviceAttributeCooperativeMultiDeviceLaunch", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 96
// CU_DEVICE_ATTRIBUTE_MAX_SHARED_MEMORY_PER_BLOCK_OPTIN
{"cudaDevAttrMaxSharedMemoryPerBlockOptin", {"hipDeviceAttributeMaxSharedMemoryPerBlockOptin", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 97
// CU_DEVICE_ATTRIBUTE_CAN_FLUSH_REMOTE_WRITES
{"cudaDevAttrCanFlushRemoteWrites", {"hipDeviceAttributeCanFlushRemoteWrites", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 98
// CU_DEVICE_ATTRIBUTE_HOST_REGISTER_SUPPORTED
{"cudaDevAttrHostRegisterSupported", {"hipDeviceAttributeHostRegisterSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 99
// CU_DEVICE_ATTRIBUTE_PAGEABLE_MEMORY_ACCESS_USES_HOST_PAGE_TABLES
{"cudaDevAttrPageableMemoryAccessUsesHostPageTables", {"hipDeviceAttributePageableMemoryAccessUsesHostPageTables", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 100
// CU_DEVICE_ATTRIBUTE_DIRECT_MANAGED_MEM_ACCESS_FROM_HOST
{"cudaDevAttrDirectManagedMemAccessFromHost", {"hipDeviceAttributeDirectManagedMemAccessFromHost", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 101
// CUdevice_P2PAttribute
{"cudaDeviceP2PAttr", {"hipDeviceP2PAttribute", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaDeviceP2PAttr enum values
// CU_DEVICE_P2P_ATTRIBUTE_PERFORMANCE_RANK = 0x01
{"cudaDevP2PAttrPerformanceRank", {"hipDeviceP2PAttributePerformanceRank", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_DEVICE_P2P_ATTRIBUTE_ACCESS_SUPPORTED = 0x02
{"cudaDevP2PAttrAccessSupported", {"hipDeviceP2PAttributeAccessSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_DEVICE_P2P_ATTRIBUTE_NATIVE_ATOMIC_SUPPORTED = 0x03
{"cudaDevP2PAttrNativeAtomicSupported", {"hipDeviceP2PAttributeNativeAtomicSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CU_DEVICE_P2P_ATTRIBUTE_CUDA_ARRAY_ACCESS_SUPPORTED = 0x04
{"cudaDevP2PAttrCudaArrayAccessSupported", {"hipDevP2PAttributeCudaArrayAccessSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 4
// cudaEGL.h - presented only on Linux in nvidia-cuda-dev package
// CUeglColorFormat
{"cudaEglColorFormat", {"hipEglColorFormat", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaEglColorFormat enum values
// CU_EGL_COLOR_FORMAT_YUV420_PLANAR = 0x00
{"cudaEglColorFormatYUV420Planar", {"hipEglColorFormatYUV420Planar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR= 0x01
{"cudaEglColorFormatYUV420SemiPlanar ", {"hipEglColorFormatYUV420SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_EGL_COLOR_FORMAT_YUV422_PLANAR = 0x02
{"cudaEglColorFormatYUV422Planar", {"hipEglColorFormatYUV422Planar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR = 0x03
{"cudaEglColorFormatYUV422SemiPlanar", {"hipEglColorFormatYUV422SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CU_EGL_COLOR_FORMAT_RGB = 0x04
{"cudaEglColorFormatRGB", {"hipEglColorFormatRGB", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 4
// CU_EGL_COLOR_FORMAT_BGR = 0x05
{"cudaEglColorFormatBGR", {"hipEglColorFormatBGR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 5
// CU_EGL_COLOR_FORMAT_ARGB = 0x06
{"cudaEglColorFormatARGB", {"hipEglColorFormatARGB", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 6
// CU_EGL_COLOR_FORMAT_RGBA = 0x07
{"cudaEglColorFormatRGBA", {"hipEglColorFormatRGBA", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 7
// CU_EGL_COLOR_FORMAT_L = 0x08
{"cudaEglColorFormatL", {"hipEglColorFormatL", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 8
// CU_EGL_COLOR_FORMAT_R = 0x09
{"cudaEglColorFormatR", {"hipEglColorFormatR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 9
// CU_EGL_COLOR_FORMAT_YUV444_PLANAR = 0x0A
{"cudaEglColorFormatYUV444Planar", {"hipEglColorFormatYUV444Planar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 10
// CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR = 0x0B
{"cudaEglColorFormatYUV444SemiPlanar", {"hipEglColorFormatYUV444SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 11
// CU_EGL_COLOR_FORMAT_YUYV_422 = 0x0C
{"cudaEglColorFormatYUYV422", {"hipEglColorFormatYUYV422", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 12
// CU_EGL_COLOR_FORMAT_UYVY_422 = 0x0D
{"cudaEglColorFormatUYVY422", {"hipEglColorFormatUYVY422", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 13
// CU_EGL_COLOR_FORMAT_ABGR = 0x0E
{"cudaEglColorFormatABGR", {"hipEglColorFormatABGR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 14
// CU_EGL_COLOR_FORMAT_BGRA = 0x0F
{"cudaEglColorFormatBGRA", {"hipEglColorFormatBGRA", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 15
// CU_EGL_COLOR_FORMAT_A = 0x10
{"cudaEglColorFormatA", {"hipEglColorFormatA", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 16
// CU_EGL_COLOR_FORMAT_RG = 0x11
{"cudaEglColorFormatRG", {"hipEglColorFormatRG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 17
// CU_EGL_COLOR_FORMAT_AYUV = 0x12
{"cudaEglColorFormatAYUV", {"hipEglColorFormatAYUV", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 18
// CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR = 0x13
{"cudaEglColorFormatYVU444SemiPlanar", {"hipEglColorFormatYVU444SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 19
// CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR = 0x14
{"cudaEglColorFormatYVU422SemiPlanar", {"hipEglColorFormatYVU422SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 20
// CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR = 0x15
{"cudaEglColorFormatYVU420SemiPlanar", {"hipEglColorFormatYVU420SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 21
// CU_EGL_COLOR_FORMAT_Y10V10U10_444_SEMIPLANAR = 0x16
{"cudaEglColorFormatY10V10U10_444SemiPlanar", {"hipEglColorFormatY10V10U10_444SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 22
// CU_EGL_COLOR_FORMAT_Y10V10U10_420_SEMIPLANAR = 0x17
{"cudaEglColorFormatY10V10U10_420SemiPlanar", {"hipEglColorFormatY10V10U10_420SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 23
// CU_EGL_COLOR_FORMAT_Y12V12U12_444_SEMIPLANAR = 0x18
{"cudaEglColorFormatY12V12U12_444SemiPlanar", {"hipEglColorFormatY12V12U12_444SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 24
// CU_EGL_COLOR_FORMAT_Y12V12U12_420_SEMIPLANAR = 0x19
{"cudaEglColorFormatY12V12U12_420SemiPlanar", {"hipEglColorFormatY12V12U12_420SemiPlanar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 25
// CU_EGL_COLOR_FORMAT_VYUY_ER = 0x1A
{"cudaEglColorFormatVYUY_ER", {"hipEglColorFormatVYUY_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 26
// CU_EGL_COLOR_FORMAT_UYVY_ER = 0x1B
{"cudaEglColorFormatUYVY_ER", {"hipEglColorFormatUYVY_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 27
// CU_EGL_COLOR_FORMAT_YUYV_ER = 0x1C
{"cudaEglColorFormatYUYV_ER", {"hipEglColorFormatYUYV_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 28
// CU_EGL_COLOR_FORMAT_YVYU_ER = 0x1D
{"cudaEglColorFormatYVYU_ER", {"hipEglColorFormatYVYU_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 29
// CU_EGL_COLOR_FORMAT_YUV_ER = 0x1E
{"cudaEglColorFormatYUV_ER", {"hipEglColorFormatYUV_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 30
// CU_EGL_COLOR_FORMAT_YUVA_ER = 0x1F
{"cudaEglColorFormatYUVA_ER", {"hipEglColorFormatYUVA_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 31
// CU_EGL_COLOR_FORMAT_AYUV_ER = 0x20
{"cudaEglColorFormatAYUV_ER", {"hipEglColorFormatAYUV_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 32
// CU_EGL_COLOR_FORMAT_YUV444_PLANAR_ER = 0x21
{"cudaEglColorFormatYUV444Planar_ER", {"hipEglColorFormatYUV444Planar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 33
// CU_EGL_COLOR_FORMAT_YUV422_PLANAR_ER = 0x22
{"cudaEglColorFormatYUV422Planar_ER", {"hipEglColorFormatYUV422Planar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 34
// CU_EGL_COLOR_FORMAT_YUV420_PLANAR_ER = 0x23
{"cudaEglColorFormatYUV420Planar_ER", {"hipEglColorFormatYUV420Planar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 35
// CU_EGL_COLOR_FORMAT_YUV444_SEMIPLANAR_ER = 0x24
{"cudaEglColorFormatYUV444SemiPlanar_ER", {"hipEglColorFormatYUV444SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 36
// CU_EGL_COLOR_FORMAT_YUV422_SEMIPLANAR_ER = 0x25
{"cudaEglColorFormatYUV422SemiPlanar_ER", {"hipEglColorFormatYUV422SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 37
// CU_EGL_COLOR_FORMAT_YUV420_SEMIPLANAR_ER = 0x26
{"cudaEglColorFormatYUV420SemiPlanar_ER", {"hipEglColorFormatYUV420SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 38
// CU_EGL_COLOR_FORMAT_YVU444_PLANAR_ER = 0x27
{"cudaEglColorFormatYVU444Planar_ER", {"hipEglColorFormatYVU444Planar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 39
// CU_EGL_COLOR_FORMAT_YVU422_PLANAR_ER = 0x28
{"cudaEglColorFormatYVU422Planar_ER", {"hipEglColorFormatYVU422Planar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 40
// CU_EGL_COLOR_FORMAT_YVU420_PLANAR_ER = 0x29
{"cudaEglColorFormatYVU420Planar_ER", {"hipEglColorFormatYVU420Planar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 41
// CU_EGL_COLOR_FORMAT_YVU444_SEMIPLANAR_ER = 0x2A
{"cudaEglColorFormatYVU444SemiPlanar_ER", {"hipEglColorFormatYVU444SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 42
// CU_EGL_COLOR_FORMAT_YVU422_SEMIPLANAR_ER = 0x2B
{"cudaEglColorFormatYVU422SemiPlanar_ER", {"hipEglColorFormatYVU422SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 43
// CU_EGL_COLOR_FORMAT_YVU420_SEMIPLANAR_ER = 0x2C
{"cudaEglColorFormatYVU420SemiPlanar_ER", {"hipEglColorFormatYVU420SemiPlanar_ER", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 44
// CU_EGL_COLOR_FORMAT_BAYER_RGGB = 0x2D
{"cudaEglColorFormatBayerRGGB", {"hipEglColorFormatBayerRGGB", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 45
// CU_EGL_COLOR_FORMAT_BAYER_BGGR = 0x2E
{"cudaEglColorFormatBayerBGGR", {"hipEglColorFormatBayerBGGR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 46
// CU_EGL_COLOR_FORMAT_BAYER_GRBG = 0x2F
{"cudaEglColorFormatBayerGRBG", {"hipEglColorFormatBayerGRBG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 47
// CU_EGL_COLOR_FORMAT_BAYER_GBRG = 0x30
{"cudaEglColorFormatBayerGBRG", {"hipEglColorFormatBayerGBRG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 48
// CU_EGL_COLOR_FORMAT_BAYER10_RGGB = 0x31
{"cudaEglColorFormatBayer10RGGB", {"hipEglColorFormatBayer10RGGB", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 49
// CU_EGL_COLOR_FORMAT_BAYER10_BGGR = 0x32
{"cudaEglColorFormatBayer10BGGR", {"hipEglColorFormatBayer10BGGR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 50
// CU_EGL_COLOR_FORMAT_BAYER10_GRBG = 0x33
{"cudaEglColorFormatBayer10GRBG", {"hipEglColorFormatBayer10GRBG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 51
// CU_EGL_COLOR_FORMAT_BAYER10_GBRG = 0x34
{"cudaEglColorFormatBayer10GBRG", {"hipEglColorFormatBayer10GBRG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 52
// CU_EGL_COLOR_FORMAT_BAYER12_RGGB = 0x35
{"cudaEglColorFormatBayer12RGGB", {"hipEglColorFormatBayer12RGGB", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 53
// CU_EGL_COLOR_FORMAT_BAYER12_BGGR = 0x36
{"cudaEglColorFormatBayer12BGGR", {"hipEglColorFormatBayer12BGGR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 54
// CU_EGL_COLOR_FORMAT_BAYER12_GRBG = 0x37
{"cudaEglColorFormatBayer12GRBG", {"hipEglColorFormatBayer12GRBG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 55
// CU_EGL_COLOR_FORMAT_BAYER12_GBRG = 0x38
{"cudaEglColorFormatBayer12GBRG", {"hipEglColorFormatBayer12GBRG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 56
// CU_EGL_COLOR_FORMAT_BAYER14_RGGB = 0x39
{"cudaEglColorFormatBayer14RGGB", {"hipEglColorFormatBayer14RGGB", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 57
// CU_EGL_COLOR_FORMAT_BAYER14_BGGR = 0x3A
{"cudaEglColorFormatBayer14BGGR", {"hipEglColorFormatBayer14BGGR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 58
// CU_EGL_COLOR_FORMAT_BAYER14_GRBG = 0x3B
{"cudaEglColorFormatBayer14GRBG", {"hipEglColorFormatBayer14GRBG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 59
// CU_EGL_COLOR_FORMAT_BAYER14_GBRG = 0x3C
{"cudaEglColorFormatBayer14GBRG", {"hipEglColorFormatBayer14GBRG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 60
// CU_EGL_COLOR_FORMAT_BAYER20_RGGB = 0x3D
{"cudaEglColorFormatBayer20RGGB", {"hipEglColorFormatBayer20RGGB", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 61
// CU_EGL_COLOR_FORMAT_BAYER20_BGGR = 0x3E
{"cudaEglColorFormatBayer20BGGR", {"hipEglColorFormatBayer20BGGR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 62
// CU_EGL_COLOR_FORMAT_BAYER20_GRBG = 0x3F
{"cudaEglColorFormatBayer20GRBG", {"hipEglColorFormatBayer20GRBG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 63
// CU_EGL_COLOR_FORMAT_BAYER20_GBRG = 0x40
{"cudaEglColorFormatBayer20GBRG", {"hipEglColorFormatBayer20GBRG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 64
// CU_EGL_COLOR_FORMAT_YVU444_PLANAR = 0x41
{"cudaEglColorFormatYVU444Planar", {"hipEglColorFormatYVU444Planar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 65
// CU_EGL_COLOR_FORMAT_YVU422_PLANAR = 0x42
{"cudaEglColorFormatYVU422Planar", {"hipEglColorFormatYVU422Planar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 66
// CU_EGL_COLOR_FORMAT_YVU420_PLANAR = 0x43
{"cudaEglColorFormatYVU420Planar", {"hipEglColorFormatYVU420Planar", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 67
// CU_EGL_COLOR_FORMAT_BAYER_ISP_RGGB = 0x44
{"cudaEglColorFormatBayerIspRGGB", {"hipEglColorFormatBayerIspRGGB", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 68
// CU_EGL_COLOR_FORMAT_BAYER_ISP_BGGR = 0x45
{"cudaEglColorFormatBayerIspBGGR", {"hipEglColorFormatBayerIspBGGR", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 69
// CU_EGL_COLOR_FORMAT_BAYER_ISP_GRBG = 0x46
{"cudaEglColorFormatBayerIspGRBG", {"hipEglColorFormatBayerIspGRBG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 70
// CU_EGL_COLOR_FORMAT_BAYER_ISP_GBRG = 0x47
{"cudaEglColorFormatBayerIspGBRG", {"hipEglColorFormatBayerIspGBRG", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 71
// CUeglFrameType
{"cudaEglFrameType", {"hipEglFrameType", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaEglFrameType enum values
// CU_EGL_FRAME_TYPE_ARRAY
{"cudaEglFrameTypeArray", {"hipEglFrameTypeArray", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_EGL_FRAME_TYPE_PITCH
{"cudaEglFrameTypePitch", {"hipEglFrameTypePitch", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CUeglResourceLocationFlags
{"cudaEglResourceLocationFlags", {"hipEglResourceLocationFlags", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
    // cudaEglResourceLocationFlags enum values
// CU_EGL_RESOURCE_LOCATION_SYSMEM
{"cudaEglResourceLocationSysmem", {"hipEglResourceLocationSysmem", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x00
// CU_EGL_RESOURCE_LOCATION_VIDMEM
{"cudaEglResourceLocationVidmem", {"hipEglResourceLocationVidmem", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x01
// CUresult
{"cudaError", {"hipError_t", "", CONV_TYPE, API_RUNTIME}},
{"cudaError_t", {"hipError_t", "", CONV_TYPE, API_RUNTIME}},
// cudaError enum values
// CUDA_SUCCESS
{"cudaSuccess", {"hipSuccess", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
// CUDA_ERROR_INVALID_VALUE
{"cudaErrorInvalidValue", {"hipErrorInvalidValue", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
// CUDA_ERROR_OUT_OF_MEMORY
{"cudaErrorMemoryAllocation", {"hipErrorOutOfMemory", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
// CUDA_ERROR_NOT_INITIALIZED
{"cudaErrorInitializationError", {"hipErrorNotInitialized", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 3
// CUDA_ERROR_DEINITIALIZED
{"cudaErrorCudartUnloading", {"hipErrorDeinitialized", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 4
// CUDA_ERROR_PROFILER_DISABLED
{"cudaErrorProfilerDisabled", {"hipErrorProfilerDisabled", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 5
// Deprecated since CUDA 5.0
// CUDA_ERROR_PROFILER_NOT_INITIALIZED
{"cudaErrorProfilerNotInitialized", {"hipErrorProfilerNotInitialized", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 6
// Deprecated since CUDA 5.0
// CUDA_ERROR_PROFILER_ALREADY_STARTED
{"cudaErrorProfilerAlreadyStarted", {"hipErrorProfilerAlreadyStarted", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 7
// Deprecated since CUDA 5.0
// CUDA_ERROR_PROFILER_ALREADY_STOPPED
{"cudaErrorProfilerAlreadyStopped", {"hipErrorProfilerAlreadyStopped", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 8
// no analogue
{"cudaErrorInvalidConfiguration", {"hipErrorInvalidConfiguration", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 9
// no analogue
{"cudaErrorInvalidPitchValue", {"hipErrorInvalidPitchValue", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 12
// no analogue
{"cudaErrorInvalidSymbol", {"hipErrorInvalidSymbol", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 13
// Deprecated since CUDA 10.1
// no analogue
{"cudaErrorInvalidHostPointer", {"hipErrorInvalidHostPointer", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 16
// Deprecated since CUDA 10.1
// no analogue
{"cudaErrorInvalidDevicePointer", {"hipErrorInvalidDevicePointer", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 17
// no analogue
{"cudaErrorInvalidTexture", {"hipErrorInvalidTexture", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 18
// no analogue
{"cudaErrorInvalidTextureBinding", {"hipErrorInvalidTextureBinding", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 19
// no analogue
{"cudaErrorInvalidChannelDescriptor", {"hipErrorInvalidChannelDescriptor", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 20
// no analogue
{"cudaErrorInvalidMemcpyDirection", {"hipErrorInvalidMemcpyDirection", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 21
// no analogue
{"cudaErrorAddressOfConstant", {"hipErrorAddressOfConstant", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 22
// no analogue
{"cudaErrorTextureFetchFailed", {"hipErrorTextureFetchFailed", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 23
// no analogue
{"cudaErrorTextureNotBound", {"hipErrorTextureNotBound", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 24
// no analogue
{"cudaErrorSynchronizationError", {"hipErrorSynchronizationError", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 25
// no analogue
{"cudaErrorInvalidFilterSetting", {"hipErrorInvalidFilterSetting", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 26
// no analogue
{"cudaErrorInvalidNormSetting", {"hipErrorInvalidNormSetting", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 27
// no analogue
{"cudaErrorMixedDeviceExecution", {"hipErrorMixedDeviceExecution", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 28
// Deprecated since CUDA 4.1
// no analogue
{"cudaErrorNotYetImplemented", {"hipErrorNotYetImplemented", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 31
// Deprecated since CUDA 3.1
// no analogue
{"cudaErrorMemoryValueTooLarge", {"hipErrorMemoryValueTooLarge", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 32
// no analogue
{"cudaErrorInsufficientDriver", {"hipErrorInsufficientDriver", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 35
// no analogue
{"cudaErrorInvalidSurface", {"hipErrorInvalidSurface", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 37
// no analogue
{"cudaErrorDuplicateVariableName", {"hipErrorDuplicateVariableName", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 43
// no analogue
{"cudaErrorDuplicateTextureName", {"hipErrorDuplicateTextureName", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 44
// no analogue
{"cudaErrorDuplicateSurfaceName", {"hipErrorDuplicateSurfaceName", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 45
// no analogue
{"cudaErrorDevicesUnavailable", {"hipErrorDevicesUnavailable", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 46
// no analogue
{"cudaErrorIncompatibleDriverContext", {"hipErrorIncompatibleDriverContext", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 49
// no analogue
{"cudaErrorMissingConfiguration", {"hipErrorMissingConfiguration", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 52
// no analogue
{"cudaErrorPriorLaunchFailure", {"hipErrorPriorLaunchFailure", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 53
// no analogue
{"cudaErrorLaunchMaxDepthExceeded", {"hipErrorLaunchMaxDepthExceeded", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 65
// no analogue
{"cudaErrorLaunchFileScopedTex", {"hipErrorLaunchFileScopedTex", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 66
// no analogue
{"cudaErrorLaunchFileScopedSurf", {"hipErrorLaunchFileScopedSurf", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 67
// no analogue
{"cudaErrorSyncDepthExceeded", {"hipErrorSyncDepthExceeded", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 68
// no analogue
{"cudaErrorLaunchPendingCountExceeded", {"hipErrorLaunchPendingCountExceeded", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 69
// no analogue
{"cudaErrorInvalidDeviceFunction", {"hipErrorInvalidDeviceFunction", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 98
// CUDA_ERROR_NO_DEVICE
{"cudaErrorNoDevice", {"hipErrorNoDevice", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 100
// CUDA_ERROR_INVALID_DEVICE
{"cudaErrorInvalidDevice", {"hipErrorInvalidDevice", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 101
// no analogue
{"cudaErrorStartupFailure", {"hipErrorStartupFailure", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 127
// CUDA_ERROR_INVALID_IMAGE
{"cudaErrorInvalidKernelImage", {"hipErrorInvalidImage", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 200
// CUDA_ERROR_INVALID_CONTEXT
{"cudaErrorDeviceUninitialized", {"hipErrorInvalidContext", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 201
    // Typo'd spelling below, used before the fix in CUDA 10.2
// CUDA_ERROR_INVALID_CONTEXT
{"cudaErrorDeviceUninitilialized", {"hipErrorInvalidContext", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 201
// CUDA_ERROR_MAP_FAILED
{"cudaErrorMapBufferObjectFailed", {"hipErrorMapFailed", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 205
// CUDA_ERROR_UNMAP_FAILED
{"cudaErrorUnmapBufferObjectFailed", {"hipErrorUnmapFailed", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 206
// CUDA_ERROR_ARRAY_IS_MAPPED
{"cudaErrorArrayIsMapped", {"hipErrorArrayIsMapped", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 207
// CUDA_ERROR_ALREADY_MAPPED
{"cudaErrorAlreadyMapped", {"hipErrorAlreadyMapped", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 208
// CUDA_ERROR_NO_BINARY_FOR_GPU
{"cudaErrorNoKernelImageForDevice", {"hipErrorNoBinaryForGpu", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 209
// CUDA_ERROR_ALREADY_ACQUIRED
{"cudaErrorAlreadyAcquired", {"hipErrorAlreadyAcquired", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 210
// CUDA_ERROR_NOT_MAPPED
{"cudaErrorNotMapped", {"hipErrorNotMapped", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 211
// CUDA_ERROR_NOT_MAPPED_AS_ARRAY
{"cudaErrorNotMappedAsArray", {"hipErrorNotMappedAsArray", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 212
// CUDA_ERROR_NOT_MAPPED_AS_POINTER
{"cudaErrorNotMappedAsPointer", {"hipErrorNotMappedAsPointer", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 213
// CUDA_ERROR_ECC_UNCORRECTABLE
{"cudaErrorECCUncorrectable", {"hipErrorECCNotCorrectable", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 214
// CUDA_ERROR_UNSUPPORTED_LIMIT
{"cudaErrorUnsupportedLimit", {"hipErrorUnsupportedLimit", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 215
// CUDA_ERROR_CONTEXT_ALREADY_IN_USE
{"cudaErrorDeviceAlreadyInUse", {"hipErrorContextAlreadyInUse", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 216
// CUDA_ERROR_PEER_ACCESS_UNSUPPORTED
{"cudaErrorPeerAccessUnsupported", {"hipErrorPeerAccessUnsupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 217
// CUDA_ERROR_INVALID_PTX
{"cudaErrorInvalidPtx", {"hipErrorInvalidKernelFile", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 218
// CUDA_ERROR_INVALID_GRAPHICS_CONTEXT
{"cudaErrorInvalidGraphicsContext", {"hipErrorInvalidGraphicsContext", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 219
// CUDA_ERROR_NVLINK_UNCORRECTABLE
{"cudaErrorNvlinkUncorrectable", {"hipErrorNvlinkUncorrectable", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 220
// CUDA_ERROR_JIT_COMPILER_NOT_FOUND
{"cudaErrorJitCompilerNotFound", {"hipErrorJitCompilerNotFound", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 221
// CUDA_ERROR_INVALID_SOURCE
{"cudaErrorInvalidSource", {"hipErrorInvalidSource", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 300
// CUDA_ERROR_FILE_NOT_FOUND
{"cudaErrorFileNotFound", {"hipErrorFileNotFound", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 301
// CUDA_ERROR_SHARED_OBJECT_SYMBOL_NOT_FOUND
{"cudaErrorSharedObjectSymbolNotFound", {"hipErrorSharedObjectSymbolNotFound", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 302
// CUDA_ERROR_SHARED_OBJECT_INIT_FAILED
{"cudaErrorSharedObjectInitFailed", {"hipErrorSharedObjectInitFailed", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 303
// CUDA_ERROR_OPERATING_SYSTEM
{"cudaErrorOperatingSystem", {"hipErrorOperatingSystem", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 304
// CUDA_ERROR_INVALID_HANDLE
{"cudaErrorInvalidResourceHandle", {"hipErrorInvalidHandle", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 400
// CUDA_ERROR_ILLEGAL_STATE
{"cudaErrorIllegalState", {"hipErrorIllegalState", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 401
// CUDA_ERROR_NOT_FOUND
{"cudaErrorSymbolNotFound", {"hipErrorNotFound", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 500
// CUDA_ERROR_NOT_READY
{"cudaErrorNotReady", {"hipErrorNotReady", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 600
// CUDA_ERROR_ILLEGAL_ADDRESS
{"cudaErrorIllegalAddress", {"hipErrorIllegalAddress", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 700
// CUDA_ERROR_LAUNCH_OUT_OF_RESOURCES
{"cudaErrorLaunchOutOfResources", {"hipErrorLaunchOutOfResources", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 701
// CUDA_ERROR_LAUNCH_TIMEOUT
{"cudaErrorLaunchTimeout", {"hipErrorLaunchTimeOut", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 702
// CUDA_ERROR_LAUNCH_INCOMPATIBLE_TEXTURING
{"cudaErrorLaunchIncompatibleTexturing", {"hipErrorLaunchIncompatibleTexturing", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 703
// CUDA_ERROR_PEER_ACCESS_ALREADY_ENABLED
{"cudaErrorPeerAccessAlreadyEnabled", {"hipErrorPeerAccessAlreadyEnabled", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 704
// CUDA_ERROR_PEER_ACCESS_NOT_ENABLED
{"cudaErrorPeerAccessNotEnabled", {"hipErrorPeerAccessNotEnabled", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 705
// CUDA_ERROR_PRIMARY_CONTEXT_ACTIVE
{"cudaErrorSetOnActiveProcess", {"hipErrorSetOnActiveProcess", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 708
// CUDA_ERROR_CONTEXT_IS_DESTROYED
{"cudaErrorContextIsDestroyed", {"hipErrorContextIsDestroyed", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 709
// CUDA_ERROR_ASSERT
{"cudaErrorAssert", {"hipErrorAssert", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 710
// CUDA_ERROR_TOO_MANY_PEERS
{"cudaErrorTooManyPeers", {"hipErrorTooManyPeers", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 711
// CUDA_ERROR_HOST_MEMORY_ALREADY_REGISTERED
{"cudaErrorHostMemoryAlreadyRegistered", {"hipErrorHostMemoryAlreadyRegistered", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 712
// CUDA_ERROR_HOST_MEMORY_NOT_REGISTERED
{"cudaErrorHostMemoryNotRegistered", {"hipErrorHostMemoryNotRegistered", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 713
// CUDA_ERROR_HARDWARE_STACK_ERROR
{"cudaErrorHardwareStackError", {"hipErrorHardwareStackError", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 714
// CUDA_ERROR_ILLEGAL_INSTRUCTION
{"cudaErrorIllegalInstruction", {"hipErrorIllegalInstruction", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 715
// CUDA_ERROR_MISALIGNED_ADDRESS
{"cudaErrorMisalignedAddress", {"hipErrorMisalignedAddress", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 716
// CUDA_ERROR_INVALID_ADDRESS_SPACE
{"cudaErrorInvalidAddressSpace", {"hipErrorInvalidAddressSpace", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 717
// CUDA_ERROR_INVALID_PC
{"cudaErrorInvalidPc", {"hipErrorInvalidPc", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 718
// CUDA_ERROR_LAUNCH_FAILED
{"cudaErrorLaunchFailure", {"hipErrorLaunchFailure", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 719
// no analogue
{"cudaErrorCooperativeLaunchTooLarge", {"hipErrorCooperativeLaunchTooLarge", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 720
// CUDA_ERROR_NOT_PERMITTED
{"cudaErrorNotPermitted", {"hipErrorNotPermitted", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 800
// CUDA_ERROR_NOT_SUPPORTED
{"cudaErrorNotSupported", {"hipErrorNotSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 801
// CUDA_ERROR_SYSTEM_NOT_READY
{"cudaErrorSystemNotReady", {"hipErrorSystemNotReady", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 802
// CUDA_ERROR_SYSTEM_DRIVER_MISMATCH
{"cudaErrorSystemDriverMismatch", {"hipErrorSystemDriverMismatch", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 803
// CUDA_ERROR_COMPAT_NOT_SUPPORTED_ON_DEVICE
{"cudaErrorCompatNotSupportedOnDevice", {"hipErrorCompatNotSupportedOnDevice", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 804
// CUDA_ERROR_STREAM_CAPTURE_UNSUPPORTED
{"cudaErrorStreamCaptureUnsupported", {"hipErrorStreamCaptureUnsupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 900
// CUDA_ERROR_STREAM_CAPTURE_INVALIDATED
{"cudaErrorStreamCaptureInvalidated", {"hipErrorStreamCaptureInvalidated", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 901
// CUDA_ERROR_STREAM_CAPTURE_MERGE
{"cudaErrorStreamCaptureMerge", {"hipErrorStreamCaptureMerge", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 902
// CUDA_ERROR_STREAM_CAPTURE_UNMATCHED
{"cudaErrorStreamCaptureUnmatched", {"hipErrorStreamCaptureUnmatched", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 903
// CUDA_ERROR_STREAM_CAPTURE_UNJOINED
{"cudaErrorStreamCaptureUnjoined", {"hipErrorStreamCaptureUnjoined", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 904
// CUDA_ERROR_STREAM_CAPTURE_ISOLATION
{"cudaErrorStreamCaptureIsolation", {"hipErrorStreamCaptureIsolation", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 905
// CUDA_ERROR_STREAM_CAPTURE_IMPLICIT
{"cudaErrorStreamCaptureImplicit", {"hipErrorStreamCaptureImplicit", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 906
// CUDA_ERROR_CAPTURED_EVENT
{"cudaErrorCapturedEvent", {"hipErrorCapturedEvent", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 907
// CUDA_ERROR_STREAM_CAPTURE_WRONG_THREAD
{"cudaErrorStreamCaptureWrongThread", {"hipErrorStreamCaptureWrongThread", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 908
// CUDA_ERROR_TIMEOUT
{"cudaErrorTimeout", {"hipErrorTimeout", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 909
// CUDA_ERROR_GRAPH_EXEC_UPDATE_FAILURE
{"cudaErrorGraphExecUpdateFailure", {"hipErrorGraphExecUpdateFailure", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 910
// CUDA_ERROR_UNKNOWN
{"cudaErrorUnknown", {"hipErrorUnknown", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 999
// Deprecated since CUDA 4.1
{"cudaErrorApiFailureBase", {"hipErrorApiFailureBase", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 10000
// CUexternalMemoryHandleType
{"cudaExternalMemoryHandleType", {"hipExternalMemoryHandleType", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaExternalMemoryHandleType enum values
// CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_FD
{"cudaExternalMemoryHandleTypeOpaqueFd", {"hipExternalMemoryHandleTypeOpaqueFD", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32
{"cudaExternalMemoryHandleTypeOpaqueWin32", {"hipExternalMemoryHandleTypeOpaqueWin32", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_EXTERNAL_MEMORY_HANDLE_TYPE_OPAQUE_WIN32_KMT
{"cudaExternalMemoryHandleTypeOpaqueWin32Kmt", {"hipExternalMemoryHandleTypeOpaqueWin32KMT", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_HEAP
{"cudaExternalMemoryHandleTypeD3D12Heap", {"hipExternalMemoryHandleTypeD3D12Heap", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 4
// CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D12_RESOURCE
{"cudaExternalMemoryHandleTypeD3D12Resource", {"hipExternalMemoryHandleTypeD3D12Resource", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 5
// CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE
{"cudaExternalMemoryHandleTypeD3D11Resource", {"hipExternalMemoryHandleTypeD3D11Resource", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 6
// CU_EXTERNAL_MEMORY_HANDLE_TYPE_D3D11_RESOURCE_KMT
{"cudaExternalMemoryHandleTypeD3D11ResourceKmt", {"hipExternalMemoryHandleTypeD3D11ResourceKmt", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 7
// CU_EXTERNAL_MEMORY_HANDLE_TYPE_NVSCIBUF
{"cudaExternalMemoryHandleTypeNvSciBuf", {"hipExternalMemoryHandleTypeNvSciBuf", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 8
// CUexternalSemaphoreHandleType
{"cudaExternalSemaphoreHandleType", {"hipExternalSemaphoreHandleType", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaExternalSemaphoreHandleType enum values
// CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_FD
{"cudaExternalSemaphoreHandleTypeOpaqueFd", {"hipExternalSemaphoreHandleTypeOpaqueFD", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32
{"cudaExternalSemaphoreHandleTypeOpaqueWin32", {"hipExternalSemaphoreHandleTypeOpaqueWin32", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_OPAQUE_WIN32_KMT
{"cudaExternalSemaphoreHandleTypeOpaqueWin32Kmt", {"hipExternalSemaphoreHandleTypeOpaqueWin32KMT", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D12_FENCE
{"cudaExternalSemaphoreHandleTypeD3D12Fence", {"hipExternalSemaphoreHandleTypeD3D12Fence", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 4
// CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_FENCE
{"cudaExternalSemaphoreHandleTypeD3D11Fence", {"hipExternalSemaphoreHandleTypeD3D11Fence", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 5
// CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_NVSCISYNC
{"cudaExternalSemaphoreHandleTypeNvSciSync", {"hipExternalSemaphoreHandleTypeNvSciSync", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 6
// CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX
{"cudaExternalSemaphoreHandleTypeKeyedMutex", {"hipExternalSemaphoreHandleTypeKeyedMutex", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 7
// CU_EXTERNAL_SEMAPHORE_HANDLE_TYPE_D3D11_KEYED_MUTEX_KMT
{"cudaExternalSemaphoreHandleTypeKeyedMutexKmt", {"hipExternalSemaphoreHandleTypeKeyedMutexKmt", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 8
// CUfunction_attribute
    // NOTE: only the last values, starting from 8, are present; they are equal to the Driver API ones
{"cudaFuncAttribute", {"hipFuncAttribute", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaFuncAttribute enum values
// CU_FUNC_ATTRIBUTE_MAX_DYNAMIC_SHARED_SIZE_BYTES
{"cudaFuncAttributeMaxDynamicSharedMemorySize", {"hipFuncAttributeMaxDynamicSharedMemorySize", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 8
// CU_FUNC_ATTRIBUTE_PREFERRED_SHARED_MEMORY_CARVEOUT
{"cudaFuncAttributePreferredSharedMemoryCarveout", {"hipFuncAttributePreferredSharedMemoryCarveout", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 9
// CU_FUNC_ATTRIBUTE_MAX
{"cudaFuncAttributeMax", {"hipFuncAttributeMax", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 10
// CUfunc_cache
{"cudaFuncCache", {"hipFuncCache_t", "", CONV_TYPE, API_RUNTIME}},
// cudaFuncCache enum values
// CU_FUNC_CACHE_PREFER_NONE = 0x00
{"cudaFuncCachePreferNone", {"hipFuncCachePreferNone", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
// CU_FUNC_CACHE_PREFER_SHARED = 0x01
{"cudaFuncCachePreferShared", {"hipFuncCachePreferShared", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
// CU_FUNC_CACHE_PREFER_L1 = 0x02
{"cudaFuncCachePreferL1", {"hipFuncCachePreferL1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
// CU_FUNC_CACHE_PREFER_EQUAL = 0x03
{"cudaFuncCachePreferEqual", {"hipFuncCachePreferEqual", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 3
// CUarray_cubemap_face
{"cudaGraphicsCubeFace", {"hipGraphicsCubeFace", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaGraphicsCubeFace enum values
// CU_CUBEMAP_FACE_POSITIVE_X
{"cudaGraphicsCubeFacePositiveX", {"hipGraphicsCubeFacePositiveX", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x00
// CU_CUBEMAP_FACE_NEGATIVE_X
{"cudaGraphicsCubeFaceNegativeX", {"hipGraphicsCubeFaceNegativeX", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x01
// CU_CUBEMAP_FACE_POSITIVE_Y
{"cudaGraphicsCubeFacePositiveY", {"hipGraphicsCubeFacePositiveY", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x02
// CU_CUBEMAP_FACE_NEGATIVE_Y
{"cudaGraphicsCubeFaceNegativeY", {"hipGraphicsCubeFaceNegativeY", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x03
// CU_CUBEMAP_FACE_POSITIVE_Z
{"cudaGraphicsCubeFacePositiveZ", {"hipGraphicsCubeFacePositiveZ", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x04
// CU_CUBEMAP_FACE_NEGATIVE_Z
{"cudaGraphicsCubeFaceNegativeZ", {"hipGraphicsCubeFaceNegativeZ", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x05
// CUgraphicsMapResourceFlags
{"cudaGraphicsMapFlags", {"hipGraphicsMapFlags", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaGraphicsMapFlags enum values
// CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = 0x00
{"cudaGraphicsMapFlagsNone", {"hipGraphicsMapFlagsNone", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01
{"cudaGraphicsMapFlagsReadOnly", {"hipGraphicsMapFlagsReadOnly", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_GRAPHICS_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02
{"cudaGraphicsMapFlagsWriteDiscard", {"hipGraphicsMapFlagsWriteDiscard", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CUgraphicsRegisterFlags
{"cudaGraphicsRegisterFlags", {"hipGraphicsRegisterFlags", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaGraphicsRegisterFlags enum values
// CU_GRAPHICS_MAP_RESOURCE_FLAGS_NONE = 0x00
{"cudaGraphicsRegisterFlagsNone", {"hipGraphicsRegisterFlagsNone", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_GRAPHICS_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01
{"cudaGraphicsRegisterFlagsReadOnly", {"hipGraphicsRegisterFlagsReadOnly", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_GRAPHICS_REGISTER_FLAGS_WRITE_DISCARD = 0x02
{"cudaGraphicsRegisterFlagsWriteDiscard", {"hipGraphicsRegisterFlagsWriteDiscard", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_GRAPHICS_REGISTER_FLAGS_SURFACE_LDST = 0x04
{"cudaGraphicsRegisterFlagsSurfaceLoadStore", {"hipGraphicsRegisterFlagsSurfaceLoadStore", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 4
// CU_GRAPHICS_REGISTER_FLAGS_TEXTURE_GATHER = 0x08
{"cudaGraphicsRegisterFlagsTextureGather", {"hipGraphicsRegisterFlagsTextureGather", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 8
// CUgraphNodeType
{"cudaGraphNodeType", {"hipGraphNodeType", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaGraphNodeType enum values
// CU_GRAPH_NODE_TYPE_KERNEL = 0
{"cudaGraphNodeTypeKernel", {"hipGraphNodeTypeKernel", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x00
// CU_GRAPH_NODE_TYPE_MEMCPY = 1
{"cudaGraphNodeTypeMemcpy", {"hipGraphNodeTypeMemcpy", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x01
// CU_GRAPH_NODE_TYPE_MEMSET = 2
{"cudaGraphNodeTypeMemset", {"hipGraphNodeTypeMemset", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x02
// CU_GRAPH_NODE_TYPE_HOST = 3
{"cudaGraphNodeTypeHost", {"hipGraphNodeTypeHost", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x03
// CU_GRAPH_NODE_TYPE_GRAPH = 4
{"cudaGraphNodeTypeGraph", {"hipGraphNodeTypeGraph", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x04
// CU_GRAPH_NODE_TYPE_EMPTY = 5
{"cudaGraphNodeTypeEmpty", {"hipGraphNodeTypeEmpty", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x05
// CU_GRAPH_NODE_TYPE_COUNT
{"cudaGraphNodeTypeCount", {"hipGraphNodeTypeCount", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}},
// CUgraphExecUpdateResult
{"cudaGraphExecUpdateResult", {"hipGraphExecUpdateResult", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaGraphExecUpdateResult enum values
// CU_GRAPH_EXEC_UPDATE_SUCCESS
{"cudaGraphExecUpdateSuccess", {"hipGraphExecUpdateSuccess", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x0
// CU_GRAPH_EXEC_UPDATE_ERROR
{"cudaGraphExecUpdateError", {"hipGraphExecUpdateError", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x1
// CU_GRAPH_EXEC_UPDATE_ERROR_TOPOLOGY_CHANGED
{"cudaGraphExecUpdateErrorTopologyChanged", {"hipGraphExecUpdateErrorTopologyChanged", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x2
// CU_GRAPH_EXEC_UPDATE_ERROR_NODE_TYPE_CHANGED
{"cudaGraphExecUpdateErrorNodeTypeChanged", {"hipGraphExecUpdateErrorNodeTypeChanged", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x3
// CU_GRAPH_EXEC_UPDATE_ERROR_FUNCTION_CHANGED
{"cudaGraphExecUpdateErrorFunctionChanged", {"hipGraphExecUpdateErrorFunctionChanged", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x4
// CU_GRAPH_EXEC_UPDATE_ERROR_PARAMETERS_CHANGED
{"cudaGraphExecUpdateErrorParametersChanged", {"hipGraphExecUpdateErrorParametersChanged", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x5
// CU_GRAPH_EXEC_UPDATE_ERROR_NOT_SUPPORTED
{"cudaGraphExecUpdateErrorNotSupported", {"hipGraphExecUpdateErrorNotSupported", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x6
// CUlimit
{"cudaLimit", {"hipLimit_t", "", CONV_TYPE, API_RUNTIME}},
// cudaLimit enum values
// CU_LIMIT_STACK_SIZE
{"cudaLimitStackSize", {"hipLimitStackSize", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x00
// CU_LIMIT_PRINTF_FIFO_SIZE
{"cudaLimitPrintfFifoSize", {"hipLimitPrintfFifoSize", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x01
// CU_LIMIT_MALLOC_HEAP_SIZE
{"cudaLimitMallocHeapSize", {"hipLimitMallocHeapSize", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x02
// CU_LIMIT_DEV_RUNTIME_SYNC_DEPTH
{"cudaLimitDevRuntimeSyncDepth", {"hipLimitDevRuntimeSyncDepth", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x03
// CU_LIMIT_DEV_RUNTIME_PENDING_LAUNCH_COUNT
{"cudaLimitDevRuntimePendingLaunchCount", {"hipLimitDevRuntimePendingLaunchCount", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x04
// CU_LIMIT_MAX_L2_FETCH_GRANULARITY
{"cudaLimitMaxL2FetchGranularity", {"hipLimitMaxL2FetchGranularity", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x05
// no analogue
{"cudaMemcpyKind", {"hipMemcpyKind", "", CONV_TYPE, API_RUNTIME}},
// cudaMemcpyKind enum values
{"cudaMemcpyHostToHost", {"hipMemcpyHostToHost", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
{"cudaMemcpyHostToDevice", {"hipMemcpyHostToDevice", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
{"cudaMemcpyDeviceToHost", {"hipMemcpyDeviceToHost", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
{"cudaMemcpyDeviceToDevice", {"hipMemcpyDeviceToDevice", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 3
{"cudaMemcpyDefault", {"hipMemcpyDefault", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 4
// CUmem_advise
{"cudaMemoryAdvise", {"hipMemAdvise", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaMemoryAdvise enum values
// CU_MEM_ADVISE_SET_READ_MOSTLY
{"cudaMemAdviseSetReadMostly", {"hipMemAdviseSetReadMostly", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_MEM_ADVISE_UNSET_READ_MOSTLY
{"cudaMemAdviseUnsetReadMostly", {"hipMemAdviseUnsetReadMostly", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_MEM_ADVISE_SET_PREFERRED_LOCATION
{"cudaMemAdviseSetPreferredLocation", {"hipMemAdviseSetPreferredLocation", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CU_MEM_ADVISE_UNSET_PREFERRED_LOCATION
{"cudaMemAdviseUnsetPreferredLocation", {"hipMemAdviseUnsetPreferredLocation", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 4
// CU_MEM_ADVISE_SET_ACCESSED_BY
{"cudaMemAdviseSetAccessedBy", {"hipMemAdviseSetAccessedBy", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 5
// CU_MEM_ADVISE_UNSET_ACCESSED_BY
{"cudaMemAdviseUnsetAccessedBy", {"hipMemAdviseUnsetAccessedBy", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 6
// no analogue
    // NOTE: CUmemorytype is a partial analogue
{"cudaMemoryType", {"hipMemoryType_t", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaMemoryType enum values
{"cudaMemoryTypeUnregistered", {"hipMemoryTypeUnregistered", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
{"cudaMemoryTypeHost", {"hipMemoryTypeHost", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
{"cudaMemoryTypeDevice", {"hipMemoryTypeDevice", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
{"cudaMemoryTypeManaged", {"hipMemoryTypeManaged", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CUmem_range_attribute
{"cudaMemRangeAttribute", {"hipMemRangeAttribute", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaMemRangeAttribute enum values
// CU_MEM_RANGE_ATTRIBUTE_READ_MOSTLY
{"cudaMemRangeAttributeReadMostly", {"hipMemRangeAttributeReadMostly", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_MEM_RANGE_ATTRIBUTE_PREFERRED_LOCATION
{"cudaMemRangeAttributePreferredLocation", {"hipMemRangeAttributePreferredLocation", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_MEM_RANGE_ATTRIBUTE_ACCESSED_BY
{"cudaMemRangeAttributeAccessedBy", {"hipMemRangeAttributeAccessedBy", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CU_MEM_RANGE_ATTRIBUTE_LAST_PREFETCH_LOCATION
{"cudaMemRangeAttributeLastPrefetchLocation", {"hipMemRangeAttributeLastPrefetchLocation", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 4
// no analogue
{"cudaOutputMode", {"hipOutputMode", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
{"cudaOutputMode_t", {"hipOutputMode", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaOutputMode enum values
{"cudaKeyValuePair", {"hipKeyValuePair", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x00
{"cudaCSV", {"hipCSV", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x01
// CUresourcetype
{"cudaResourceType", {"hipResourceType", "", CONV_TYPE, API_RUNTIME}},
// cudaResourceType enum values
// CU_RESOURCE_TYPE_ARRAY
{"cudaResourceTypeArray", {"hipResourceTypeArray", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x00
// CU_RESOURCE_TYPE_MIPMAPPED_ARRAY
{"cudaResourceTypeMipmappedArray", {"hipResourceTypeMipmappedArray", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x01
// CU_RESOURCE_TYPE_LINEAR
{"cudaResourceTypeLinear", {"hipResourceTypeLinear", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x02
// CU_RESOURCE_TYPE_PITCH2D
{"cudaResourceTypePitch2D", {"hipResourceTypePitch2D", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x03
// CUresourceViewFormat
{"cudaResourceViewFormat", {"hipResourceViewFormat", "", CONV_TYPE, API_RUNTIME}},
    // cudaResourceViewFormat enum values
// CU_RES_VIEW_FORMAT_NONE
{"cudaResViewFormatNone", {"hipResViewFormatNone", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x00
// CU_RES_VIEW_FORMAT_UINT_1X8
{"cudaResViewFormatUnsignedChar1", {"hipResViewFormatUnsignedChar1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x01
// CU_RES_VIEW_FORMAT_UINT_2X8
{"cudaResViewFormatUnsignedChar2", {"hipResViewFormatUnsignedChar2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x02
// CU_RES_VIEW_FORMAT_UINT_4X8
{"cudaResViewFormatUnsignedChar4", {"hipResViewFormatUnsignedChar4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x03
// CU_RES_VIEW_FORMAT_SINT_1X8
{"cudaResViewFormatSignedChar1", {"hipResViewFormatSignedChar1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x04
// CU_RES_VIEW_FORMAT_SINT_2X8
{"cudaResViewFormatSignedChar2", {"hipResViewFormatSignedChar2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x05
// CU_RES_VIEW_FORMAT_SINT_4X8
{"cudaResViewFormatSignedChar4", {"hipResViewFormatSignedChar4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x06
// CU_RES_VIEW_FORMAT_UINT_1X16
{"cudaResViewFormatUnsignedShort1", {"hipResViewFormatUnsignedShort1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x07
// CU_RES_VIEW_FORMAT_UINT_2X16
{"cudaResViewFormatUnsignedShort2", {"hipResViewFormatUnsignedShort2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x08
// CU_RES_VIEW_FORMAT_UINT_4X16
{"cudaResViewFormatUnsignedShort4", {"hipResViewFormatUnsignedShort4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x09
// CU_RES_VIEW_FORMAT_SINT_1X16
{"cudaResViewFormatSignedShort1", {"hipResViewFormatSignedShort1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x0a
// CU_RES_VIEW_FORMAT_SINT_2X16
{"cudaResViewFormatSignedShort2", {"hipResViewFormatSignedShort2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x0b
// CU_RES_VIEW_FORMAT_SINT_4X16
{"cudaResViewFormatSignedShort4", {"hipResViewFormatSignedShort4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x0c
// CU_RES_VIEW_FORMAT_UINT_1X32
{"cudaResViewFormatUnsignedInt1", {"hipResViewFormatUnsignedInt1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x0d
// CU_RES_VIEW_FORMAT_UINT_2X32
{"cudaResViewFormatUnsignedInt2", {"hipResViewFormatUnsignedInt2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x0e
// CU_RES_VIEW_FORMAT_UINT_4X32
{"cudaResViewFormatUnsignedInt4", {"hipResViewFormatUnsignedInt4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x0f
// CU_RES_VIEW_FORMAT_SINT_1X32
{"cudaResViewFormatSignedInt1", {"hipResViewFormatSignedInt1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x10
// CU_RES_VIEW_FORMAT_SINT_2X32
{"cudaResViewFormatSignedInt2", {"hipResViewFormatSignedInt2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x11
// CU_RES_VIEW_FORMAT_SINT_4X32
{"cudaResViewFormatSignedInt4", {"hipResViewFormatSignedInt4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x12
// CU_RES_VIEW_FORMAT_FLOAT_1X16
{"cudaResViewFormatHalf1", {"hipResViewFormatHalf1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x13
// CU_RES_VIEW_FORMAT_FLOAT_2X16
{"cudaResViewFormatHalf2", {"hipResViewFormatHalf2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x14
// CU_RES_VIEW_FORMAT_FLOAT_4X16
{"cudaResViewFormatHalf4", {"hipResViewFormatHalf4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x15
// CU_RES_VIEW_FORMAT_FLOAT_1X32
{"cudaResViewFormatFloat1", {"hipResViewFormatFloat1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x16
// CU_RES_VIEW_FORMAT_FLOAT_2X32
{"cudaResViewFormatFloat2", {"hipResViewFormatFloat2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x17
// CU_RES_VIEW_FORMAT_FLOAT_4X32
{"cudaResViewFormatFloat4", {"hipResViewFormatFloat4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x18
// CU_RES_VIEW_FORMAT_UNSIGNED_BC1
{"cudaResViewFormatUnsignedBlockCompressed1", {"hipResViewFormatUnsignedBlockCompressed1", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x19
// CU_RES_VIEW_FORMAT_UNSIGNED_BC2
{"cudaResViewFormatUnsignedBlockCompressed2", {"hipResViewFormatUnsignedBlockCompressed2", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x1a
// CU_RES_VIEW_FORMAT_UNSIGNED_BC3
{"cudaResViewFormatUnsignedBlockCompressed3", {"hipResViewFormatUnsignedBlockCompressed3", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x1b
// CU_RES_VIEW_FORMAT_UNSIGNED_BC4
{"cudaResViewFormatUnsignedBlockCompressed4", {"hipResViewFormatUnsignedBlockCompressed4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x1c
// CU_RES_VIEW_FORMAT_SIGNED_BC4
{"cudaResViewFormatSignedBlockCompressed4", {"hipResViewFormatSignedBlockCompressed4", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x1d
// CU_RES_VIEW_FORMAT_UNSIGNED_BC5
{"cudaResViewFormatUnsignedBlockCompressed5", {"hipResViewFormatUnsignedBlockCompressed5", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x1e
// CU_RES_VIEW_FORMAT_SIGNED_BC5
{"cudaResViewFormatSignedBlockCompressed5", {"hipResViewFormatSignedBlockCompressed5", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x1f
// CU_RES_VIEW_FORMAT_UNSIGNED_BC6H
{"cudaResViewFormatUnsignedBlockCompressed6H", {"hipResViewFormatUnsignedBlockCompressed6H", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x20
// CU_RES_VIEW_FORMAT_SIGNED_BC6H
{"cudaResViewFormatSignedBlockCompressed6H", {"hipResViewFormatSignedBlockCompressed6H", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x21
// CU_RES_VIEW_FORMAT_UNSIGNED_BC7
{"cudaResViewFormatUnsignedBlockCompressed7", {"hipResViewFormatUnsignedBlockCompressed7", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0x22
// CUshared_carveout
{"cudaSharedCarveout", {"hipSharedCarveout", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaSharedCarveout enum values
// CU_SHAREDMEM_CARVEOUT_DEFAULT
{"cudaSharedmemCarveoutDefault", {"hipSharedmemCarveoutDefault", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // -1
// CU_SHAREDMEM_CARVEOUT_MAX_SHARED
{"cudaSharedmemCarveoutMaxShared", {"hipSharedmemCarveoutMaxShared", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 100
// CU_SHAREDMEM_CARVEOUT_MAX_L1
{"cudaSharedmemCarveoutMaxL1", {"hipSharedmemCarveoutMaxL1", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CUsharedconfig
{"cudaSharedMemConfig", {"hipSharedMemConfig", "", CONV_TYPE, API_RUNTIME}},
// cudaSharedMemConfig enum values
// CU_SHARED_MEM_CONFIG_DEFAULT_BANK_SIZE = 0x00
{"cudaSharedMemBankSizeDefault", {"hipSharedMemBankSizeDefault", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
// CU_SHARED_MEM_CONFIG_FOUR_BYTE_BANK_SIZE = 0x01
{"cudaSharedMemBankSizeFourByte", {"hipSharedMemBankSizeFourByte", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
// CU_SHARED_MEM_CONFIG_EIGHT_BYTE_BANK_SIZE = 0x02
{"cudaSharedMemBankSizeEightByte", {"hipSharedMemBankSizeEightByte", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
// CUstreamCaptureStatus
{"cudaStreamCaptureStatus", {"hipStreamCaptureStatus", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaStreamCaptureStatus enum values
// CU_STREAM_CAPTURE_STATUS_NONE
{"cudaStreamCaptureStatusNone", {"hipStreamCaptureStatusNone", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_STREAM_CAPTURE_STATUS_ACTIVE
{"cudaStreamCaptureStatusActive", {"hipStreamCaptureStatusActive", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_STREAM_CAPTURE_STATUS_INVALIDATED
{"cudaStreamCaptureStatusInvalidated", {"hipStreamCaptureStatusInvalidated", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CUstreamCaptureMode
{"cudaStreamCaptureMode", {"hipStreamCaptureMode", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaStreamCaptureMode enum values
// CU_STREAM_CAPTURE_MODE_GLOBAL
{"cudaStreamCaptureModeGlobal", {"hipStreamCaptureModeGlobal", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_STREAM_CAPTURE_MODE_THREAD_LOCAL
{"cudaStreamCaptureModeThreadLocal", {"hipStreamCaptureModeThreadLocal", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_STREAM_CAPTURE_MODE_RELAXED
{"cudaStreamCaptureModeRelaxed", {"hipStreamCaptureModeRelaxed", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// no analogue
{"cudaSurfaceBoundaryMode", {"hipSurfaceBoundaryMode", "", CONV_TYPE, API_RUNTIME}},
// cudaSurfaceBoundaryMode enum values
{"cudaBoundaryModeZero", {"hipBoundaryModeZero", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
{"cudaBoundaryModeClamp", {"hipBoundaryModeClamp", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
{"cudaBoundaryModeTrap", {"hipBoundaryModeTrap", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
// no analogue
{"cudaSurfaceFormatMode", {"hipSurfaceFormatMode", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
    // cudaSurfaceFormatMode enum values
{"cudaFormatModeForced", {"hipFormatModeForced", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
{"cudaFormatModeAuto", {"hipFormatModeAuto", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// no analogue
{"cudaTextureAddressMode", {"hipTextureAddressMode", "", CONV_TYPE, API_RUNTIME}},
// cudaTextureAddressMode enum values
{"cudaAddressModeWrap", {"hipAddressModeWrap", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
{"cudaAddressModeClamp", {"hipAddressModeClamp", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
{"cudaAddressModeMirror", {"hipAddressModeMirror", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 2
{"cudaAddressModeBorder", {"hipAddressModeBorder", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 3
// CUfilter_mode
{"cudaTextureFilterMode", {"hipTextureFilterMode", "", CONV_TYPE, API_RUNTIME}},
// cudaTextureFilterMode enum values
// CU_TR_FILTER_MODE_POINT
{"cudaFilterModePoint", {"hipFilterModePoint", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
// CU_TR_FILTER_MODE_LINEAR
{"cudaFilterModeLinear", {"hipFilterModeLinear", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
// no analogue
{"cudaTextureReadMode", {"hipTextureReadMode", "", CONV_TYPE, API_RUNTIME}},
// cudaTextureReadMode enum values
{"cudaReadModeElementType", {"hipReadModeElementType", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 0
{"cudaReadModeNormalizedFloat", {"hipReadModeNormalizedFloat", "", CONV_NUMERIC_LITERAL, API_RUNTIME}}, // 1
// CUGLDeviceList
{"cudaGLDeviceList", {"hipGLDeviceList", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaGLDeviceList enum values
// CU_GL_DEVICE_LIST_ALL = 0x01
{"cudaGLDeviceListAll", {"hipGLDeviceListAll", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_GL_DEVICE_LIST_CURRENT_FRAME = 0x02
{"cudaGLDeviceListCurrentFrame", {"hipGLDeviceListCurrentFrame", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_GL_DEVICE_LIST_NEXT_FRAME = 0x03
{"cudaGLDeviceListNextFrame", {"hipGLDeviceListNextFrame", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CUGLmap_flags
{"cudaGLMapFlags", {"hipGLMapFlags", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaGLMapFlags enum values
// CU_GL_MAP_RESOURCE_FLAGS_NONE = 0x00
{"cudaGLMapFlagsNone", {"hipGLMapFlagsNone", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_GL_MAP_RESOURCE_FLAGS_READ_ONLY = 0x01
{"cudaGLMapFlagsReadOnly", {"hipGLMapFlagsReadOnly", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_GL_MAP_RESOURCE_FLAGS_WRITE_DISCARD = 0x02
{"cudaGLMapFlagsWriteDiscard", {"hipGLMapFlagsWriteDiscard", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CUd3d9DeviceList
{"cudaD3D9DeviceList", {"hipD3D9DeviceList", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
    // cudaD3D9DeviceList enum values
// CU_D3D9_DEVICE_LIST_ALL = 0x01
{"cudaD3D9DeviceListAll", {"HIP_D3D9_DEVICE_LIST_ALL", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_D3D9_DEVICE_LIST_CURRENT_FRAME = 0x02
{"cudaD3D9DeviceListCurrentFrame", {"HIP_D3D9_DEVICE_LIST_CURRENT_FRAME", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_D3D9_DEVICE_LIST_NEXT_FRAME = 0x03
{"cudaD3D9DeviceListNextFrame", {"HIP_D3D9_DEVICE_LIST_NEXT_FRAME", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CUd3d9map_flags
{"cudaD3D9MapFlags", {"hipD3D9MapFlags", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaD3D9MapFlags enum values
// CU_D3D9_MAPRESOURCE_FLAGS_NONE = 0x00
{"cudaD3D9MapFlagsNone", {"HIP_D3D9_MAPRESOURCE_FLAGS_NONE", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_D3D9_MAPRESOURCE_FLAGS_READONLY = 0x01
{"cudaD3D9MapFlagsReadOnly", {"HIP_D3D9_MAPRESOURCE_FLAGS_READONLY", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD = 0x02
{"cudaD3D9MapFlagsWriteDiscard", {"HIP_D3D9_MAPRESOURCE_FLAGS_WRITEDISCARD", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CUd3d9Register_flags
{"cudaD3D9RegisterFlags", {"hipD3D9RegisterFlags", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaD3D9RegisterFlags enum values
// CU_D3D9_REGISTER_FLAGS_NONE = 0x00
{"cudaD3D9RegisterFlagsNone", {"HIP_D3D9_REGISTER_FLAGS_NONE", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_D3D9_REGISTER_FLAGS_ARRAY = 0x01
{"cudaD3D9RegisterFlagsArray", {"HIP_D3D9_REGISTER_FLAGS_ARRAY", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CUd3d10DeviceList
{"cudaD3D10DeviceList", {"hipd3d10DeviceList", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaD3D10DeviceList enum values
// CU_D3D10_DEVICE_LIST_ALL = 0x01
{"cudaD3D10DeviceListAll", {"HIP_D3D10_DEVICE_LIST_ALL", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_D3D10_DEVICE_LIST_CURRENT_FRAME = 0x02
{"cudaD3D10DeviceListCurrentFrame", {"HIP_D3D10_DEVICE_LIST_CURRENT_FRAME", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_D3D10_DEVICE_LIST_NEXT_FRAME = 0x03
{"cudaD3D10DeviceListNextFrame", {"HIP_D3D10_DEVICE_LIST_NEXT_FRAME", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CUd3d10map_flags
{"cudaD3D10MapFlags", {"hipD3D10MapFlags", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaD3D10MapFlags enum values
// CU_D3D10_MAPRESOURCE_FLAGS_NONE = 0x00
{"cudaD3D10MapFlagsNone", {"HIP_D3D10_MAPRESOURCE_FLAGS_NONE", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_D3D10_MAPRESOURCE_FLAGS_READONLY = 0x01
{"cudaD3D10MapFlagsReadOnly", {"HIP_D3D10_MAPRESOURCE_FLAGS_READONLY", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD = 0x02
{"cudaD3D10MapFlagsWriteDiscard", {"HIP_D3D10_MAPRESOURCE_FLAGS_WRITEDISCARD", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CUd3d10Register_flags
{"cudaD3D10RegisterFlags", {"hipD3D10RegisterFlags", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaD3D10RegisterFlags enum values
// CU_D3D10_REGISTER_FLAGS_NONE = 0x00
{"cudaD3D10RegisterFlagsNone", {"HIP_D3D10_REGISTER_FLAGS_NONE", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 0
// CU_D3D10_REGISTER_FLAGS_ARRAY = 0x01
{"cudaD3D10RegisterFlagsArray", {"HIP_D3D10_REGISTER_FLAGS_ARRAY", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CUd3d11DeviceList
{"cudaD3D11DeviceList", {"hipd3d11DeviceList", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// cudaD3D11DeviceList enum values
// CU_D3D11_DEVICE_LIST_ALL = 0x01
{"cudaD3D11DeviceListAll", {"HIP_D3D11_DEVICE_LIST_ALL", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 1
// CU_D3D11_DEVICE_LIST_CURRENT_FRAME = 0x02
{"cudaD3D11DeviceListCurrentFrame", {"HIP_D3D11_DEVICE_LIST_CURRENT_FRAME", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 2
// CU_D3D11_DEVICE_LIST_NEXT_FRAME = 0x03
{"cudaD3D11DeviceListNextFrame", {"HIP_D3D11_DEVICE_LIST_NEXT_FRAME", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// no analogue
{"libraryPropertyType", {"hipLibraryPropertyType_t", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
{"libraryPropertyType_t", {"hipLibraryPropertyType_t", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// no analogue
{"MAJOR_VERSION", {"hipLibraryMajorVersion", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}},
// no analogue
{"MINOR_VERSION", {"hipLibraryMinorVersion", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}},
// no analogue
{"PATCH_LEVEL", {"hipLibraryPatchVersion", "", CONV_NUMERIC_LITERAL, API_RUNTIME, HIP_UNSUPPORTED}},
// 4. Typedefs
// CUhostFn
{"cudaHostFn_t", {"hipHostFn", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// CUstreamCallback
{"cudaStreamCallback_t", {"hipStreamCallback_t", "", CONV_TYPE, API_RUNTIME}},
// CUsurfObject
{"cudaSurfaceObject_t", {"hipSurfaceObject_t", "", CONV_TYPE, API_RUNTIME}},
// CUtexObject
{"cudaTextureObject_t", {"hipTextureObject_t", "", CONV_TYPE, API_RUNTIME}},
// CUuuid
{"cudaUUID_t", {"hipUUID_t", "", CONV_TYPE, API_RUNTIME, HIP_UNSUPPORTED}},
// 5. Defines
// no analogue
{"CUDA_EGL_MAX_PLANES", {"HIP_EGL_MAX_PLANES", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 3
// CU_IPC_HANDLE_SIZE
{"CUDA_IPC_HANDLE_SIZE", {"HIP_IPC_HANDLE_SIZE", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 64
// no analogue
{"cudaArrayDefault", {"hipArrayDefault", "", CONV_DEFINE, API_RUNTIME}}, // 0x00
// CUDA_ARRAY3D_LAYERED
{"cudaArrayLayered", {"hipArrayLayered", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// CUDA_ARRAY3D_SURFACE_LDST
{"cudaArraySurfaceLoadStore", {"hipArraySurfaceLoadStore", "", CONV_DEFINE, API_RUNTIME}}, // 0x02
// CUDA_ARRAY3D_CUBEMAP
{"cudaArrayCubemap", {"hipArrayCubemap", "", CONV_DEFINE, API_RUNTIME}}, // 0x04
// CUDA_ARRAY3D_TEXTURE_GATHER
{"cudaArrayTextureGather", {"hipArrayTextureGather", "", CONV_DEFINE, API_RUNTIME}}, // 0x08
// CUDA_ARRAY3D_COLOR_ATTACHMENT
{"cudaArrayColorAttachment", {"hipArrayColorAttachment", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x20
// CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_PRE_LAUNCH_SYNC
{"cudaCooperativeLaunchMultiDeviceNoPreSync", {"hipCooperativeLaunchMultiDeviceNoPreSync", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x01
// CUDA_COOPERATIVE_LAUNCH_MULTI_DEVICE_NO_POST_LAUNCH_SYNC
{"cudaCooperativeLaunchMultiDeviceNoPostSync", {"hipCooperativeLaunchMultiDeviceNoPostSync", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x02
// CU_DEVICE_CPU ((CUdevice)-1)
{"cudaCpuDeviceId", {"hipCpuDeviceId", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // ((int)-1)
// CU_DEVICE_INVALID ((CUdevice)-2)
{"cudaInvalidDeviceId", {"hipInvalidDeviceId", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // ((int)-2)
// CU_CTX_BLOCKING_SYNC
// NOTE: Deprecated since CUDA 4.0 and replaced with cudaDeviceScheduleBlockingSync
{"cudaDeviceBlockingSync", {"hipDeviceScheduleBlockingSync", "", CONV_DEFINE, API_RUNTIME}}, // 0x04
// CU_CTX_LMEM_RESIZE_TO_MAX
{"cudaDeviceLmemResizeToMax", {"hipDeviceLmemResizeToMax", "", CONV_DEFINE, API_RUNTIME}}, // 0x10
// CU_CTX_MAP_HOST
{"cudaDeviceMapHost", {"hipDeviceMapHost", "", CONV_DEFINE, API_RUNTIME}}, // 0x08
// CU_CTX_FLAGS_MASK
{"cudaDeviceMask", {"hipDeviceMask", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x1f
// no analogue
{"cudaDevicePropDontCare", {"hipDevicePropDontCare", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}},
// CU_CTX_SCHED_AUTO
{"cudaDeviceScheduleAuto", {"hipDeviceScheduleAuto", "", CONV_DEFINE, API_RUNTIME}}, // 0x00
// CU_CTX_SCHED_SPIN
{"cudaDeviceScheduleSpin", {"hipDeviceScheduleSpin", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// CU_CTX_SCHED_YIELD
{"cudaDeviceScheduleYield", {"hipDeviceScheduleYield", "", CONV_DEFINE, API_RUNTIME}}, // 0x02
// CU_CTX_SCHED_BLOCKING_SYNC
{"cudaDeviceScheduleBlockingSync", {"hipDeviceScheduleBlockingSync", "", CONV_DEFINE, API_RUNTIME}}, // 0x04
// CU_CTX_SCHED_MASK
{"cudaDeviceScheduleMask", {"hipDeviceScheduleMask", "", CONV_DEFINE, API_RUNTIME}}, // 0x07
// CU_EVENT_DEFAULT
{"cudaEventDefault", {"hipEventDefault", "", CONV_DEFINE, API_RUNTIME}}, // 0x00
// CU_EVENT_BLOCKING_SYNC
{"cudaEventBlockingSync", {"hipEventBlockingSync", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// CU_EVENT_DISABLE_TIMING
{"cudaEventDisableTiming", {"hipEventDisableTiming", "", CONV_DEFINE, API_RUNTIME}}, // 0x02
// CU_EVENT_INTERPROCESS
{"cudaEventInterprocess", {"hipEventInterprocess", "", CONV_DEFINE, API_RUNTIME}}, // 0x04
// CUDA_EXTERNAL_MEMORY_DEDICATED
{"cudaExternalMemoryDedicated", {"hipExternalMemoryDedicated", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x1
// CUDA_EXTERNAL_SEMAPHORE_SIGNAL_SKIP_NVSCIBUF_MEMSYNC
{"cudaExternalSemaphoreSignalSkipNvSciBufMemSync", {"hipExternalSemaphoreSignalSkipNvSciBufMemSync", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x01
// CUDA_EXTERNAL_SEMAPHORE_WAIT_SKIP_NVSCIBUF_MEMSYNC
{"cudaExternalSemaphoreWaitSkipNvSciBufMemSync", {"hipExternalSemaphoreWaitSkipNvSciBufMemSync", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x02
// CUDA_NVSCISYNC_ATTR_SIGNAL
{"cudaNvSciSyncAttrSignal", {"hipNvSciSyncAttrSignal", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x1
// CUDA_NVSCISYNC_ATTR_WAIT
{"cudaNvSciSyncAttrWait", {"hipNvSciSyncAttrWait", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x2
// no analogue
{"cudaHostAllocDefault", {"hipHostMallocDefault", "", CONV_DEFINE, API_RUNTIME}}, // 0x00
// CU_MEMHOSTALLOC_PORTABLE
{"cudaHostAllocPortable", {"hipHostMallocPortable", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// CU_MEMHOSTALLOC_DEVICEMAP
{"cudaHostAllocMapped", {"hipHostMallocMapped", "", CONV_DEFINE, API_RUNTIME}}, // 0x02
// CU_MEMHOSTALLOC_WRITECOMBINED
{"cudaHostAllocWriteCombined", {"hipHostMallocWriteCombined", "", CONV_DEFINE, API_RUNTIME}}, // 0x04
// no analogue
{"cudaHostRegisterDefault", {"hipHostRegisterDefault", "", CONV_DEFINE, API_RUNTIME}}, // 0x00
// CU_MEMHOSTREGISTER_PORTABLE
{"cudaHostRegisterPortable", {"hipHostRegisterPortable", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// CU_MEMHOSTREGISTER_DEVICEMAP
{"cudaHostRegisterMapped", {"hipHostRegisterMapped", "", CONV_DEFINE, API_RUNTIME}}, // 0x02
// CU_MEMHOSTREGISTER_IOMEMORY
{"cudaHostRegisterIoMemory", {"hipHostRegisterIoMemory", "", CONV_DEFINE, API_RUNTIME}}, // 0x04
// CU_IPC_MEM_LAZY_ENABLE_PEER_ACCESS
{"cudaIpcMemLazyEnablePeerAccess", {"hipIpcMemLazyEnablePeerAccess", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// CU_MEM_ATTACH_GLOBAL
{"cudaMemAttachGlobal", {"hipMemAttachGlobal", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// CU_MEM_ATTACH_HOST
{"cudaMemAttachHost", {"hipMemAttachHost", "", CONV_DEFINE, API_RUNTIME}}, // 0x02
// CU_MEM_ATTACH_SINGLE
{"cudaMemAttachSingle", {"hipMemAttachSingle", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x04
// no analogue
{"cudaTextureType1D", {"hipTextureType1D", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// no analogue
{"cudaTextureType2D", {"hipTextureType2D", "", CONV_DEFINE, API_RUNTIME}}, // 0x02
// no analogue
{"cudaTextureType3D", {"hipTextureType3D", "", CONV_DEFINE, API_RUNTIME}}, // 0x03
// no analogue
{"cudaTextureTypeCubemap", {"hipTextureTypeCubemap", "", CONV_DEFINE, API_RUNTIME}}, // 0x0C
// no analogue
{"cudaTextureType1DLayered", {"hipTextureType1DLayered", "", CONV_DEFINE, API_RUNTIME}}, // 0xF1
// no analogue
{"cudaTextureType2DLayered", {"hipTextureType2DLayered", "", CONV_DEFINE, API_RUNTIME}}, // 0xF2
// no analogue
{"cudaTextureTypeCubemapLayered", {"hipTextureTypeCubemapLayered", "", CONV_DEFINE, API_RUNTIME}}, // 0xFC
// CU_OCCUPANCY_DEFAULT
{"cudaOccupancyDefault", {"hipOccupancyDefault", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x00
// CU_OCCUPANCY_DISABLE_CACHING_OVERRIDE
{"cudaOccupancyDisableCachingOverride", {"hipOccupancyDisableCachingOverride", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // 0x01
// CU_STREAM_DEFAULT
{"cudaStreamDefault", {"hipStreamDefault", "", CONV_DEFINE, API_RUNTIME}}, // 0x00
// CU_STREAM_NON_BLOCKING
{"cudaStreamNonBlocking", {"hipStreamNonBlocking", "", CONV_DEFINE, API_RUNTIME}}, // 0x01
// CU_STREAM_LEGACY ((CUstream)0x1)
{"cudaStreamLegacy", {"hipStreamLegacy", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // ((cudaStream_t)0x1)
// CU_STREAM_PER_THREAD ((CUstream)0x2)
{"cudaStreamPerThread", {"hipStreamPerThread", "", CONV_DEFINE, API_RUNTIME, HIP_UNSUPPORTED}}, // ((cudaStream_t)0x2)
};
| 1 | 8,902 | Please remove `HIP_UNSUPPORTED` | ROCm-Developer-Tools-HIP | cpp |
@@ -438,7 +438,7 @@ type (
// CreateWorkflowExecutionResponse is the response to CreateWorkflowExecutionRequest
CreateWorkflowExecutionResponse struct {
- HistorySize int64
+ NewMutableStateStats MutableStateStatus
}
// GetWorkflowExecutionRequest is used to retrieve the info of a workflow execution | 1 | // The MIT License
//
// Copyright (c) 2020 Temporal Technologies Inc. All rights reserved.
//
// Copyright (c) 2020 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
//go:generate mockgen -copyright_file ../../LICENSE -package $GOPACKAGE -source $GOFILE -destination dataInterfaces_mock.go
package persistence
import (
"fmt"
"net"
"strings"
"time"
"github.com/pborman/uuid"
commonpb "go.temporal.io/api/common/v1"
enumspb "go.temporal.io/api/enums/v1"
historypb "go.temporal.io/api/history/v1"
enumsspb "go.temporal.io/server/api/enums/v1"
persistencespb "go.temporal.io/server/api/persistence/v1"
"go.temporal.io/server/common/persistence/serialization"
"go.temporal.io/server/common/primitives"
)
// CreateWorkflowMode workflow creation mode
type CreateWorkflowMode int
// QueueType is an enum that represents various queue types in persistence
type QueueType int32
// Queue types used in queue table
// Use positive numbers for queue type
// Negative numbers are reserved for DLQ
const (
NamespaceReplicationQueueType QueueType = iota + 1
)
// Create Workflow Execution Mode
const (
// CreateWorkflowModeBrandNew fails if current record exists
// Only applicable for CreateWorkflowExecution
CreateWorkflowModeBrandNew CreateWorkflowMode = iota
// CreateWorkflowModeWorkflowIDReuse updates current record only if workflow is closed
// Only applicable for CreateWorkflowExecution
CreateWorkflowModeWorkflowIDReuse
// CreateWorkflowModeContinueAsNew updates current record only if workflow is open
// Only applicable for UpdateWorkflowExecution
CreateWorkflowModeContinueAsNew
// CreateWorkflowModeZombie does not update current record since workflow is in zombie state
// Applicable for CreateWorkflowExecution, UpdateWorkflowExecution
CreateWorkflowModeZombie
)
// UpdateWorkflowMode update mode
type UpdateWorkflowMode int
// Update Workflow Execution Mode
const (
// UpdateWorkflowModeUpdateCurrent update workflow, including current record
// NOTE: update on current record is a conditional update
UpdateWorkflowModeUpdateCurrent UpdateWorkflowMode = iota
// UpdateWorkflowModeBypassCurrent update workflow, without current record
// NOTE: current record CANNOT point to the workflow to be updated
UpdateWorkflowModeBypassCurrent
)
// ConflictResolveWorkflowMode conflict resolve mode
type ConflictResolveWorkflowMode int
// Conflict Resolve Workflow Mode
const (
// ConflictResolveWorkflowModeUpdateCurrent conflict resolve workflow, including current record
// NOTE: update on current record is a conditional update
ConflictResolveWorkflowModeUpdateCurrent ConflictResolveWorkflowMode = iota
// ConflictResolveWorkflowModeBypassCurrent conflict resolve workflow, without current record
// NOTE: current record CANNOT point to the workflow to be updated
ConflictResolveWorkflowModeBypassCurrent
)
// UnknownNumRowsAffected is returned when the number of rows that an API affected cannot be determined
const UnknownNumRowsAffected = -1
const (
// InitialFailoverNotificationVersion is the initial failover version for a namespace
InitialFailoverNotificationVersion int64 = 0
// TransferTaskTransferTargetWorkflowID is the dummy workflow ID for transfer tasks of types
// that do not have a target workflow
TransferTaskTransferTargetWorkflowID = "20000000-0000-f000-f000-000000000001"
)
const numItemsInGarbageInfo = 3
type (
// InvalidPersistenceRequestError represents invalid request to persistence
InvalidPersistenceRequestError struct {
Msg string
}
// CurrentWorkflowConditionFailedError represents a failed conditional update for current workflow record
CurrentWorkflowConditionFailedError struct {
Msg string
RequestID string
RunID string
State enumsspb.WorkflowExecutionState
Status enumspb.WorkflowExecutionStatus
LastWriteVersion int64
}
// WorkflowConditionFailedError represents a failed conditional update for workflow record
WorkflowConditionFailedError struct {
Msg string
NextEventID int64
DBRecordVersion int64
}
// ConditionFailedError represents a failed conditional update for execution record
ConditionFailedError struct {
Msg string
}
// ShardAlreadyExistError is returned when conditionally creating a shard fails
ShardAlreadyExistError struct {
Msg string
}
// ShardOwnershipLostError is returned when a conditional update fails due to a RangeID mismatch for the shard
ShardOwnershipLostError struct {
ShardID int32
Msg string
}
// TimeoutError is returned when a write operation fails due to a timeout
TimeoutError struct {
Msg string
}
// TransactionSizeLimitError is returned when the transaction size is too large
TransactionSizeLimitError struct {
Msg string
}
// ShardInfoWithFailover describes a shard
ShardInfoWithFailover struct {
*persistencespb.ShardInfo
TransferFailoverLevels map[string]TransferFailoverLevel // uuid -> TransferFailoverLevel
TimerFailoverLevels map[string]TimerFailoverLevel // uuid -> TimerFailoverLevel
}
// TransferFailoverLevel contains corresponding start / end level
TransferFailoverLevel struct {
StartTime time.Time
MinLevel int64
CurrentLevel int64
MaxLevel int64
NamespaceIDs map[string]struct{}
}
// TimerFailoverLevel contains namespace IDs and corresponding start / end level
TimerFailoverLevel struct {
StartTime time.Time
MinLevel time.Time
CurrentLevel time.Time
MaxLevel time.Time
NamespaceIDs map[string]struct{}
}
// ReplicationTaskInfoWrapper describes a replication task.
ReplicationTaskInfoWrapper struct {
*persistencespb.ReplicationTaskInfo
}
// Task is the generic interface for workflow tasks
Task interface {
GetType() enumsspb.TaskType
GetVersion() int64
SetVersion(version int64)
GetTaskID() int64
SetTaskID(id int64)
GetVisibilityTime() time.Time
SetVisibilityTime(timestamp time.Time)
}
// TaskQueueKey is the struct used to identify TaskQueues
TaskQueueKey struct {
NamespaceID string
Name string
TaskType enumspb.TaskQueueType
}
// ActivityTask identifies a transfer task for activity
ActivityTask struct {
VisibilityTimestamp time.Time
TaskID int64
NamespaceID string
TaskQueue string
ScheduleID int64
Version int64
}
// WorkflowTask identifies a transfer task for workflow task
WorkflowTask struct {
VisibilityTimestamp time.Time
TaskID int64
NamespaceID string
TaskQueue string
ScheduleID int64
Version int64
}
// ResetWorkflowTask identifies a transfer task to reset workflow
ResetWorkflowTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// CloseExecutionTask identifies a transfer task for deletion of execution
CloseExecutionTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// DeleteHistoryEventTask identifies a timer task for deletion of history events of completed execution.
DeleteHistoryEventTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// WorkflowTaskTimeoutTask identifies a timeout task.
WorkflowTaskTimeoutTask struct {
VisibilityTimestamp time.Time
TaskID int64
EventID int64
ScheduleAttempt int32
TimeoutType enumspb.TimeoutType
Version int64
}
// WorkflowTimeoutTask identifies a timeout task.
WorkflowTimeoutTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// CancelExecutionTask identifies a transfer task for cancel of execution
CancelExecutionTask struct {
VisibilityTimestamp time.Time
TaskID int64
TargetNamespaceID string
TargetWorkflowID string
TargetRunID string
TargetChildWorkflowOnly bool
InitiatedID int64
Version int64
}
// SignalExecutionTask identifies a transfer task for signal execution
SignalExecutionTask struct {
VisibilityTimestamp time.Time
TaskID int64
TargetNamespaceID string
TargetWorkflowID string
TargetRunID string
TargetChildWorkflowOnly bool
InitiatedID int64
Version int64
}
// StartChildExecutionTask identifies a transfer task for starting child execution
StartChildExecutionTask struct {
VisibilityTimestamp time.Time
TaskID int64
TargetNamespaceID string
TargetWorkflowID string
InitiatedID int64
Version int64
}
// ActivityTimeoutTask identifies a timeout task.
ActivityTimeoutTask struct {
VisibilityTimestamp time.Time
TaskID int64
TimeoutType enumspb.TimeoutType
EventID int64
Attempt int32
Version int64
}
// UserTimerTask identifies a timeout task.
UserTimerTask struct {
VisibilityTimestamp time.Time
TaskID int64
EventID int64
Version int64
}
// ActivityRetryTimerTask to schedule a retry task for activity
ActivityRetryTimerTask struct {
VisibilityTimestamp time.Time
TaskID int64
EventID int64
Version int64
Attempt int32
}
// WorkflowBackoffTimerTask to schedule first workflow task for retried workflow
WorkflowBackoffTimerTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
WorkflowBackoffType enumsspb.WorkflowBackoffType
}
// HistoryReplicationTask is the replication task created for shipping history replication events to other clusters
HistoryReplicationTask struct {
VisibilityTimestamp time.Time
TaskID int64
FirstEventID int64
NextEventID int64
Version int64
BranchToken []byte
NewRunBranchToken []byte
}
// StartExecutionVisibilityTask identifies a visibility task for start workflow execution.
StartExecutionVisibilityTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// UpsertExecutionVisibilityTask identifies a visibility task for upsert workflow execution search attributes.
UpsertExecutionVisibilityTask struct {
VisibilityTimestamp time.Time
TaskID int64
// this version is not used by task processing for validation,
// instead, the version is used by Elasticsearch
Version int64
}
// CloseExecutionVisibilityTask identifies a visibility task for close workflow execution.
CloseExecutionVisibilityTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// DeleteExecutionVisibilityTask identifies a visibility task for deletion of execution.
DeleteExecutionVisibilityTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
}
// SyncActivityTask is the replication task created for shipping activity info to other clusters
SyncActivityTask struct {
VisibilityTimestamp time.Time
TaskID int64
Version int64
ScheduledID int64
}
// CreateShardRequest is used to create a shard in executions table
CreateShardRequest struct {
ShardInfo *persistencespb.ShardInfo
}
// GetShardRequest is used to get shard information
GetShardRequest struct {
ShardID int32
}
// GetShardResponse is the response to GetShard
GetShardResponse struct {
ShardInfo *persistencespb.ShardInfo
}
// UpdateShardRequest is used to update shard information
UpdateShardRequest struct {
ShardInfo *persistencespb.ShardInfo
PreviousRangeID int64
}
// AddTasksRequest is used to write new tasks
AddTasksRequest struct {
ShardID int32
RangeID int64
NamespaceID string
WorkflowID string
RunID string
TransferTasks []Task
TimerTasks []Task
ReplicationTasks []Task
VisibilityTasks []Task
}
// CreateWorkflowExecutionRequest is used to write a new workflow execution
CreateWorkflowExecutionRequest struct {
ShardID int32
RangeID int64
Mode CreateWorkflowMode
PreviousRunID string
PreviousLastWriteVersion int64
NewWorkflowSnapshot WorkflowSnapshot
NewWorkflowEvents []*WorkflowEvents
}
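// An illustrative sketch (not part of this package) of populating this request
// when reusing a closed workflow ID; executionManager, shardID, rangeID,
// prevRunID, prevVersion, snapshot and events are hypothetical placeholders:
//
//	resp, err := executionManager.CreateWorkflowExecution(&CreateWorkflowExecutionRequest{
//		ShardID:                  shardID,
//		RangeID:                  rangeID,
//		Mode:                     CreateWorkflowModeWorkflowIDReuse,
//		PreviousRunID:            prevRunID,
//		PreviousLastWriteVersion: prevVersion,
//		NewWorkflowSnapshot:      snapshot,
//		NewWorkflowEvents:        events,
//	})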
// CreateWorkflowExecutionResponse is the response to CreateWorkflowExecutionRequest
CreateWorkflowExecutionResponse struct {
HistorySize int64
}
// GetWorkflowExecutionRequest is used to retrieve the info of a workflow execution
GetWorkflowExecutionRequest struct {
ShardID int32
NamespaceID string
Execution commonpb.WorkflowExecution
}
// GetWorkflowExecutionResponse is the response to GetWorkflowExecutionRequest
GetWorkflowExecutionResponse struct {
State *persistencespb.WorkflowMutableState
DBRecordVersion int64
MutableStateStats *MutableStateStats
}
// GetCurrentExecutionRequest is used to retrieve the current RunId for an execution
GetCurrentExecutionRequest struct {
ShardID int32
NamespaceID string
WorkflowID string
}
// ListConcreteExecutionsRequest is request to ListConcreteExecutions
ListConcreteExecutionsRequest struct {
ShardID int32
PageSize int
PageToken []byte
}
// ListConcreteExecutionsResponse is response to ListConcreteExecutions
ListConcreteExecutionsResponse struct {
States []*persistencespb.WorkflowMutableState
PageToken []byte
}
// GetCurrentExecutionResponse is the response to GetCurrentExecution
GetCurrentExecutionResponse struct {
StartRequestID string
RunID string
State enumsspb.WorkflowExecutionState
Status enumspb.WorkflowExecutionStatus
LastWriteVersion int64
}
// UpdateWorkflowExecutionRequest is used to update a workflow execution
UpdateWorkflowExecutionRequest struct {
ShardID int32
RangeID int64
Mode UpdateWorkflowMode
UpdateWorkflowMutation WorkflowMutation
UpdateWorkflowEvents []*WorkflowEvents
NewWorkflowSnapshot *WorkflowSnapshot
NewWorkflowEvents []*WorkflowEvents
}
// ConflictResolveWorkflowExecutionRequest is used to reset workflow execution state for a single run
ConflictResolveWorkflowExecutionRequest struct {
ShardID int32
RangeID int64
Mode ConflictResolveWorkflowMode
// workflow to be reset
ResetWorkflowSnapshot WorkflowSnapshot
ResetWorkflowEvents []*WorkflowEvents
// maybe new workflow
NewWorkflowSnapshot *WorkflowSnapshot
NewWorkflowEvents []*WorkflowEvents
// current workflow
CurrentWorkflowMutation *WorkflowMutation
CurrentWorkflowEvents []*WorkflowEvents
}
// ResetWorkflowExecutionRequest is used to reset workflow execution state for current run and create new run
ResetWorkflowExecutionRequest struct {
RangeID int64
// for base run (we need to make sure the baseRun hasn't been deleted after forking)
BaseRunID string
BaseRunNextEventID int64
// for current workflow record
CurrentRunID string
CurrentRunNextEventID int64
// for current mutable state
CurrentWorkflowMutation *WorkflowMutation
// For new mutable state
NewWorkflowSnapshot WorkflowSnapshot
}
// WorkflowEvents is used as generic workflow history events transaction container
WorkflowEvents struct {
NamespaceID string
WorkflowID string
RunID string
BranchToken []byte
PrevTxnID int64
TxnID int64
Events []*historypb.HistoryEvent
}
// WorkflowMutation is used as generic workflow execution state mutation
WorkflowMutation struct {
ExecutionInfo *persistencespb.WorkflowExecutionInfo
ExecutionState *persistencespb.WorkflowExecutionState
// TODO deprecate NextEventID in favor of DBRecordVersion
NextEventID int64
UpsertActivityInfos map[int64]*persistencespb.ActivityInfo
DeleteActivityInfos map[int64]struct{}
UpsertTimerInfos map[string]*persistencespb.TimerInfo
DeleteTimerInfos map[string]struct{}
UpsertChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo
DeleteChildExecutionInfos map[int64]struct{}
UpsertRequestCancelInfos map[int64]*persistencespb.RequestCancelInfo
DeleteRequestCancelInfos map[int64]struct{}
UpsertSignalInfos map[int64]*persistencespb.SignalInfo
DeleteSignalInfos map[int64]struct{}
UpsertSignalRequestedIDs map[string]struct{}
DeleteSignalRequestedIDs map[string]struct{}
NewBufferedEvents []*historypb.HistoryEvent
ClearBufferedEvents bool
TransferTasks []Task
ReplicationTasks []Task
TimerTasks []Task
VisibilityTasks []Task
// TODO deprecate Condition in favor of DBRecordVersion
Condition int64
DBRecordVersion int64
Checksum *persistencespb.Checksum
}
// WorkflowSnapshot is used as generic workflow execution state snapshot
WorkflowSnapshot struct {
ExecutionInfo *persistencespb.WorkflowExecutionInfo
ExecutionState *persistencespb.WorkflowExecutionState
// TODO deprecate NextEventID in favor of DBRecordVersion
NextEventID int64
ActivityInfos map[int64]*persistencespb.ActivityInfo
TimerInfos map[string]*persistencespb.TimerInfo
ChildExecutionInfos map[int64]*persistencespb.ChildExecutionInfo
RequestCancelInfos map[int64]*persistencespb.RequestCancelInfo
SignalInfos map[int64]*persistencespb.SignalInfo
SignalRequestedIDs map[string]struct{}
TransferTasks []Task
ReplicationTasks []Task
TimerTasks []Task
VisibilityTasks []Task
// TODO deprecate Condition in favor of DBRecordVersion
Condition int64
DBRecordVersion int64
Checksum *persistencespb.Checksum
}
// DeleteWorkflowExecutionRequest is used to delete a workflow execution
DeleteWorkflowExecutionRequest struct {
ShardID int32
NamespaceID string
WorkflowID string
RunID string
}
// DeleteCurrentWorkflowExecutionRequest is used to delete the current workflow execution
DeleteCurrentWorkflowExecutionRequest struct {
ShardID int32
NamespaceID string
WorkflowID string
RunID string
}
// GetTransferTaskRequest is the request for GetTransferTask
GetTransferTaskRequest struct {
ShardID int32
TaskID int64
}
// GetTransferTaskResponse is the response to GetTransferTask
GetTransferTaskResponse struct {
TransferTaskInfo *persistencespb.TransferTaskInfo
}
// GetTransferTasksRequest is used to read tasks from the transfer task queue
GetTransferTasksRequest struct {
ShardID int32
ReadLevel int64
MaxReadLevel int64
BatchSize int
NextPageToken []byte
}
// GetTransferTasksResponse is the response to GetTransferTasksRequest
GetTransferTasksResponse struct {
Tasks []*persistencespb.TransferTaskInfo
NextPageToken []byte
}
// GetVisibilityTaskRequest is the request for GetVisibilityTask
GetVisibilityTaskRequest struct {
ShardID int32
TaskID int64
}
// GetVisibilityTaskResponse is the response to GetVisibilityTask
GetVisibilityTaskResponse struct {
VisibilityTaskInfo *persistencespb.VisibilityTaskInfo
}
// GetVisibilityTasksRequest is used to read tasks from the visibility task queue
GetVisibilityTasksRequest struct {
ShardID int32
ReadLevel int64
MaxReadLevel int64
BatchSize int
NextPageToken []byte
}
// GetVisibilityTasksResponse is the response to GetVisibilityTasksRequest
GetVisibilityTasksResponse struct {
Tasks []*persistencespb.VisibilityTaskInfo
NextPageToken []byte
}
// GetReplicationTaskRequest is the request for GetReplicationTask
GetReplicationTaskRequest struct {
ShardID int32
TaskID int64
}
// GetReplicationTaskResponse is the response to GetReplicationTask
GetReplicationTaskResponse struct {
ReplicationTaskInfo *persistencespb.ReplicationTaskInfo
}
// GetReplicationTasksRequest is used to read tasks from the replication task queue
GetReplicationTasksRequest struct {
ShardID int32
MinTaskID int64
MaxTaskID int64
BatchSize int
NextPageToken []byte
}
// GetReplicationTasksResponse is the response to GetReplicationTask
GetReplicationTasksResponse struct {
Tasks []*persistencespb.ReplicationTaskInfo
NextPageToken []byte
}
// CompleteTransferTaskRequest is used to complete a task in the transfer task queue
CompleteTransferTaskRequest struct {
ShardID int32
TaskID int64
}
// RangeCompleteTransferTaskRequest is used to complete a range of tasks in the transfer task queue
RangeCompleteTransferTaskRequest struct {
ShardID int32
ExclusiveBeginTaskID int64
InclusiveEndTaskID int64
}
// CompleteVisibilityTaskRequest is used to complete a task in the visibility task queue
CompleteVisibilityTaskRequest struct {
ShardID int32
TaskID int64
}
// RangeCompleteVisibilityTaskRequest is used to complete a range of tasks in the visibility task queue
RangeCompleteVisibilityTaskRequest struct {
ShardID int32
ExclusiveBeginTaskID int64
InclusiveEndTaskID int64
}
// CompleteReplicationTaskRequest is used to complete a task in the replication task queue
CompleteReplicationTaskRequest struct {
ShardID int32
TaskID int64
}
// RangeCompleteReplicationTaskRequest is used to complete a range of task in the replication task queue
RangeCompleteReplicationTaskRequest struct {
ShardID int32
InclusiveEndTaskID int64
}
// PutReplicationTaskToDLQRequest is used to put a replication task to dlq
PutReplicationTaskToDLQRequest struct {
ShardID int32
SourceClusterName string
TaskInfo *persistencespb.ReplicationTaskInfo
}
// GetReplicationTasksFromDLQRequest is used to get replication tasks from dlq
GetReplicationTasksFromDLQRequest struct {
ShardID int32
SourceClusterName string
GetReplicationTasksRequest
}
// DeleteReplicationTaskFromDLQRequest is used to delete replication task from DLQ
DeleteReplicationTaskFromDLQRequest struct {
ShardID int32
SourceClusterName string
TaskID int64
}
// RangeDeleteReplicationTaskFromDLQRequest is used to delete replication tasks from DLQ
RangeDeleteReplicationTaskFromDLQRequest struct {
ShardID int32
SourceClusterName string
ExclusiveBeginTaskID int64
InclusiveEndTaskID int64
}
// GetReplicationTasksFromDLQResponse is the response for GetReplicationTasksFromDLQ
GetReplicationTasksFromDLQResponse = GetReplicationTasksResponse
// RangeCompleteTimerTaskRequest is used to complete a range of tasks in the timer task queue
RangeCompleteTimerTaskRequest struct {
ShardID int32
InclusiveBeginTimestamp time.Time
ExclusiveEndTimestamp time.Time
}
// CompleteTimerTaskRequest is used to complete a task in the timer task queue
CompleteTimerTaskRequest struct {
ShardID int32
VisibilityTimestamp time.Time
TaskID int64
}
// LeaseTaskQueueRequest is used to request lease of a task queue
LeaseTaskQueueRequest struct {
NamespaceID string
TaskQueue string
TaskType enumspb.TaskQueueType
TaskQueueKind enumspb.TaskQueueKind
RangeID int64
}
// LeaseTaskQueueResponse is response to LeaseTaskQueueRequest
LeaseTaskQueueResponse struct {
TaskQueueInfo *PersistedTaskQueueInfo
}
// UpdateTaskQueueRequest is used to update task queue implementation information
UpdateTaskQueueRequest struct {
RangeID int64
TaskQueueInfo *persistencespb.TaskQueueInfo
}
// UpdateTaskQueueResponse is the response to UpdateTaskQueue
UpdateTaskQueueResponse struct {
}
// ListTaskQueueRequest contains the request params needed to invoke ListTaskQueue API
ListTaskQueueRequest struct {
PageSize int
PageToken []byte
}
// ListTaskQueueResponse is the response from ListTaskQueue API
ListTaskQueueResponse struct {
Items []*PersistedTaskQueueInfo
NextPageToken []byte
}
// DeleteTaskQueueRequest contains the request params needed to invoke DeleteTaskQueue API
DeleteTaskQueueRequest struct {
TaskQueue *TaskQueueKey
RangeID int64
}
// CreateTasksRequest is used to create a new task for a workflow execution
CreateTasksRequest struct {
TaskQueueInfo *PersistedTaskQueueInfo
Tasks []*persistencespb.AllocatedTaskInfo
}
// CreateTasksResponse is the response to CreateTasksRequest
CreateTasksResponse struct {
}
// PersistedTaskQueueInfo is a task queue info record together with its RangeID
PersistedTaskQueueInfo struct {
Data *persistencespb.TaskQueueInfo
RangeID int64
}
// GetTasksRequest is used to retrieve tasks of a task queue
GetTasksRequest struct {
NamespaceID string
TaskQueue string
TaskType enumspb.TaskQueueType
ReadLevel int64 // range exclusive
MaxReadLevel *int64 // optional: range inclusive when specified
BatchSize int
}
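// An illustrative sketch (not part of this package) of paging through a task
// queue by advancing ReadLevel past the highest task ID seen; taskManager,
// nsID and tq are hypothetical placeholders, and the sketch assumes the
// AllocatedTaskInfo proto exposes a TaskId field:
//
//	readLevel := int64(0)
//	for {
//		resp, err := taskManager.GetTasks(&GetTasksRequest{
//			NamespaceID: nsID,
//			TaskQueue:   tq,
//			TaskType:    enumspb.TASK_QUEUE_TYPE_WORKFLOW,
//			ReadLevel:   readLevel, // exclusive lower bound
//			BatchSize:   100,
//		})
//		if err != nil || len(resp.Tasks) == 0 {
//			break
//		}
//		readLevel = resp.Tasks[len(resp.Tasks)-1].TaskId
//	}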
// GetTasksResponse is the response to GetTasksRequests
GetTasksResponse struct {
Tasks []*persistencespb.AllocatedTaskInfo
}
// CompleteTaskRequest is used to complete a task
CompleteTaskRequest struct {
TaskQueue *TaskQueueKey
TaskID int64
}
// CompleteTasksLessThanRequest contains the request params needed to invoke CompleteTasksLessThan API
CompleteTasksLessThanRequest struct {
NamespaceID string
TaskQueueName string
TaskType enumspb.TaskQueueType
TaskID int64 // Tasks less than or equal to this ID will be completed
Limit int // Limit on the max number of tasks that can be completed. Required param
}
// GetTimerTaskRequest is the request for GetTimerTask
GetTimerTaskRequest struct {
ShardID int32
TaskID int64
VisibilityTimestamp time.Time
}
// GetTimerTaskResponse is the response to GetTimerTask
GetTimerTaskResponse struct {
TimerTaskInfo *persistencespb.TimerTaskInfo
}
// GetTimerIndexTasksRequest is the request for GetTimerIndexTasks
// TODO: replace this with an iterator that can configure min and max index.
GetTimerIndexTasksRequest struct {
ShardID int32
MinTimestamp time.Time
MaxTimestamp time.Time
BatchSize int
NextPageToken []byte
}
// GetTimerIndexTasksResponse is the response for GetTimerIndexTasks
GetTimerIndexTasksResponse struct {
Timers []*persistencespb.TimerTaskInfo
NextPageToken []byte
}
// CreateNamespaceRequest is used to create the namespace
CreateNamespaceRequest struct {
Namespace *persistencespb.NamespaceDetail
IsGlobalNamespace bool
}
// CreateNamespaceResponse is the response for CreateNamespace
CreateNamespaceResponse struct {
ID string
}
// GetNamespaceRequest is used to read namespace
GetNamespaceRequest struct {
ID string
Name string
}
// GetNamespaceResponse is the response for GetNamespace
GetNamespaceResponse struct {
Namespace *persistencespb.NamespaceDetail
IsGlobalNamespace bool
NotificationVersion int64
}
// UpdateNamespaceRequest is used to update namespace
UpdateNamespaceRequest struct {
Namespace *persistencespb.NamespaceDetail
NotificationVersion int64
}
// DeleteNamespaceRequest is used to delete namespace entry from namespaces table
DeleteNamespaceRequest struct {
ID string
}
// DeleteNamespaceByNameRequest is used to delete namespace entry from namespaces_by_name table
DeleteNamespaceByNameRequest struct {
Name string
}
// ListNamespacesRequest is used to list namespaces
ListNamespacesRequest struct {
PageSize int
NextPageToken []byte
}
// ListNamespacesResponse is the response for GetNamespace
ListNamespacesResponse struct {
Namespaces []*GetNamespaceResponse
NextPageToken []byte
}
// GetMetadataResponse is the response for GetMetadata
GetMetadataResponse struct {
NotificationVersion int64
}
// MutableStateStats is the size stats for MutableState
MutableStateStats struct {
// Total size of mutable state
MutableStateSize int
// Breakdown of size into more granular stats
ExecutionInfoSize int
ActivityInfoSize int
TimerInfoSize int
ChildInfoSize int
SignalInfoSize int
BufferedEventsSize int
// Item count for various information captured within mutable state
ActivityInfoCount int
TimerInfoCount int
ChildInfoCount int
SignalInfoCount int
RequestCancelInfoCount int
BufferedEventsCount int
}
// MutableStateUpdateSessionStats is the size stats for a mutable state update session
MutableStateUpdateSessionStats struct {
MutableStateSize int // Total size of mutable state update
// Breakdown of mutable state size update for more granular stats
ExecutionInfoSize int
ActivityInfoSize int
TimerInfoSize int
ChildInfoSize int
SignalInfoSize int
BufferedEventsSize int
// Item counts in this session update
ActivityInfoCount int
TimerInfoCount int
ChildInfoCount int
SignalInfoCount int
RequestCancelInfoCount int
// Deleted item counts in this session update
DeleteActivityInfoCount int
DeleteTimerInfoCount int
DeleteChildInfoCount int
DeleteSignalInfoCount int
DeleteRequestCancelInfoCount int
// History size diff
UpdateWorkflowHistorySizeDiff int64
NewWorkflowHistorySizeDiff int64
}
// UpdateWorkflowExecutionResponse is response for UpdateWorkflowExecutionRequest
UpdateWorkflowExecutionResponse struct {
MutableStateUpdateSessionStats *MutableStateUpdateSessionStats
}
// ConflictResolveWorkflowExecutionResponse is the response to ConflictResolveWorkflowExecutionRequest
ConflictResolveWorkflowExecutionResponse struct {
ResetWorkflowHistorySizeDiff int64
NewWorkflowHistorySizeDiff int64
CurrentWorkflowHistorySizeDiff int64
}
// AppendHistoryNodesRequest is used to append a batch of history nodes
AppendHistoryNodesRequest struct {
// The shard to get history node data
ShardID int32
// true if this is the first append request to the branch
IsNewBranch bool
// the info for cleaning up data in the background
Info string
// The branch to be appended
BranchToken []byte
// The batch of events to be appended. The first eventID will become the nodeID of this batch
Events []*historypb.HistoryEvent
// TransactionID for events before these events. For event chaining
PrevTransactionID int64
// Requested TransactionID for this write operation. For the same eventID, the node with the larger TransactionID always wins
TransactionID int64
}
// AppendHistoryNodesResponse is a response to AppendHistoryNodesRequest
AppendHistoryNodesResponse struct {
// the size of the event data that has been appended
Size int
}
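// An illustrative sketch (not part of this package) of the transaction
// chaining described above: each append passes the TransactionID of the
// previous write as PrevTransactionID; all variables are hypothetical:
//
//	resp, err := executionManager.AppendHistoryNodes(&AppendHistoryNodesRequest{
//		ShardID:           shardID,
//		IsNewBranch:       false,
//		Info:              "chained append",
//		BranchToken:       branchToken,
//		Events:            batch,     // the first event's ID becomes the node ID
//		PrevTransactionID: lastTxnID, // TransactionID of the previous append
//		TransactionID:     newTxnID,  // the larger ID wins for the same node
//	})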
// ReadHistoryBranchRequest is used to read a history branch
ReadHistoryBranchRequest struct {
// The shard to get history branch data
ShardID int32
// The branch to be read
BranchToken []byte
// Get the history nodes from MinEventID. Inclusive.
MinEventID int64
// Get the history nodes up to MaxEventID. Exclusive.
MaxEventID int64
// Maximum number of batches of events per page. Note that the number of events in a batch is >= 1, so this is not the number of events per page.
// However, for a single page it is also possible that the number of returned events is less than PageSize (even zero events) due to stale events.
PageSize int
// Token to continue reading next page of history append transactions. Pass in empty slice for first page
NextPageToken []byte
}
// ReadHistoryBranchResponse is the response to ReadHistoryBranchRequest
ReadHistoryBranchResponse struct {
// History events
HistoryEvents []*historypb.HistoryEvent
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
}
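// An illustrative sketch (not part of this package) of the NextPageToken
// contract described above; executionManager, shardID, branchToken and
// maxEventID are hypothetical placeholders:
//
//	var token []byte
//	for {
//		resp, err := executionManager.ReadHistoryBranch(&ReadHistoryBranchRequest{
//			ShardID:       shardID,
//			BranchToken:   branchToken,
//			MinEventID:    1,
//			MaxEventID:    maxEventID,
//			PageSize:      100,
//			NextPageToken: token,
//		})
//		if err != nil {
//			break
//		}
//		// ... consume resp.HistoryEvents ...
//		if len(resp.NextPageToken) == 0 {
//			break // last page reached
//		}
//		token = resp.NextPageToken
//	}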
// ReadHistoryBranchByBatchResponse is the response to ReadHistoryBranchRequest
ReadHistoryBranchByBatchResponse struct {
// History events by batch
History []*historypb.History
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
}
// ReadRawHistoryBranchResponse is the response to ReadHistoryBranchRequest
ReadRawHistoryBranchResponse struct {
// HistoryEventBlobs history event blobs
HistoryEventBlobs []*commonpb.DataBlob
// Token to read next page if there are more events beyond page size.
// Use this to set NextPageToken on ReadHistoryBranchRequest to read the next page.
// Empty means we have reached the last page, no need to continue
NextPageToken []byte
// Size of history read from store
Size int
}
// ForkHistoryBranchRequest is used to fork a history branch
ForkHistoryBranchRequest struct {
// The shard to get history branch data
ShardID int32
// The base branch to fork from
ForkBranchToken []byte
// The nodeID to fork from; the new branch will start from it (inclusive), the base branch will stop at it (exclusive)
// Application must provide a valid forking nodeID: it must be a valid nodeID in that branch. A valid nodeID is the firstEventID of a valid batch of events.
// And ForkNodeID > 1, because forking from 1 doesn't make any sense.
ForkNodeID int64
// the info for cleaning up data in the background
Info string
}
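// An illustrative sketch (not part of this package) of forking at a reset
// point; forkNodeID must be the firstEventID of a valid batch on the base
// branch, and all variables are hypothetical placeholders:
//
//	resp, err := executionManager.ForkHistoryBranch(&ForkHistoryBranchRequest{
//		ShardID:         shardID,
//		ForkBranchToken: baseBranchToken,
//		ForkNodeID:      forkNodeID, // > 1; inclusive start of the new branch
//		Info:            "reset",
//	})
//	newBranchToken := resp.NewBranchToken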
// ForkHistoryBranchResponse is the response to ForkHistoryBranchRequest
ForkHistoryBranchResponse struct {
// branchToken to represent the new branch
NewBranchToken []byte
}
// CompleteForkBranchRequest is used to complete forking
CompleteForkBranchRequest struct {
// the new branch returned from ForkHistoryBranchRequest
BranchToken []byte
// true means the fork succeeded and the flag will be updated; otherwise the new branch will be deleted
Success bool
// The shard to update history branch data
ShardID *int
}
// DeleteHistoryBranchRequest is used to remove a history branch
DeleteHistoryBranchRequest struct {
// The shard to delete history branch data
ShardID int32
// branch to be deleted
BranchToken []byte
}
// TrimHistoryBranchRequest is used to validate & trim a history branch
TrimHistoryBranchRequest struct {
// The shard to delete history branch data
ShardID int32
// branch to be validated & trimmed
BranchToken []byte
// known valid node ID
NodeID int64
// known valid transaction ID
TransactionID int64
}
// TrimHistoryBranchResponse is the response to TrimHistoryBranchRequest
TrimHistoryBranchResponse struct {
}
// GetHistoryTreeRequest is used to retrieve branch info of a history tree
GetHistoryTreeRequest struct {
// A UUID of a tree
TreeID string
// Get data from this shard
ShardID *int32
// optional: can provide treeID via branchToken if treeID is empty
BranchToken []byte
}
// HistoryBranchDetail contains detailed information of a branch
HistoryBranchDetail struct {
TreeID string
BranchID string
ForkTime *time.Time
Info string
}
// GetHistoryTreeResponse is a response to GetHistoryTreeRequest
GetHistoryTreeResponse struct {
// all branches of a tree
Branches []*persistencespb.HistoryBranch
}
// GetAllHistoryTreeBranchesRequest is a request of GetAllHistoryTreeBranches
GetAllHistoryTreeBranchesRequest struct {
// pagination token
NextPageToken []byte
// maximum number of branches returned per page
PageSize int
}
// GetAllHistoryTreeBranchesResponse is a response to GetAllHistoryTreeBranches
GetAllHistoryTreeBranchesResponse struct {
// pagination token
NextPageToken []byte
// all branches of all trees
Branches []HistoryBranchDetail
}
// GetClusterMetadataResponse is the response to GetClusterMetadata
GetClusterMetadataResponse struct {
persistencespb.ClusterMetadata
Version int64
}
// SaveClusterMetadataRequest is the request to SaveClusterMetadata
SaveClusterMetadataRequest struct {
persistencespb.ClusterMetadata
Version int64
}
// GetClusterMembersRequest is the request to GetClusterMembers
GetClusterMembersRequest struct {
LastHeartbeatWithin time.Duration
RPCAddressEquals net.IP
HostIDEquals uuid.UUID
RoleEquals ServiceType
SessionStartedAfter time.Time
NextPageToken []byte
PageSize int
}
// GetClusterMembersResponse is the response to GetClusterMembers
GetClusterMembersResponse struct {
ActiveMembers []*ClusterMember
NextPageToken []byte
}
// ClusterMember is used as a response to GetClusterMembers
ClusterMember struct {
Role ServiceType
HostID uuid.UUID
RPCAddress net.IP
RPCPort uint16
SessionStart time.Time
LastHeartbeat time.Time
RecordExpiry time.Time
}
// UpsertClusterMembershipRequest is the request to UpsertClusterMembership
UpsertClusterMembershipRequest struct {
Role ServiceType
HostID uuid.UUID
RPCAddress net.IP
RPCPort uint16
SessionStart time.Time
RecordExpiry time.Duration
}
// PruneClusterMembershipRequest is the request to PruneClusterMembership
PruneClusterMembershipRequest struct {
MaxRecordsPruned int
}
// Closeable is an interface for any entity that supports a close operation to release resources
Closeable interface {
Close()
}
// ShardManager is used to manage all shards
ShardManager interface {
Closeable
GetName() string
CreateShard(request *CreateShardRequest) error
GetShard(request *GetShardRequest) (*GetShardResponse, error)
UpdateShard(request *UpdateShardRequest) error
}
// ExecutionManager is used to manage workflow executions
ExecutionManager interface {
Closeable
GetName() string
CreateWorkflowExecution(request *CreateWorkflowExecutionRequest) (*CreateWorkflowExecutionResponse, error)
GetWorkflowExecution(request *GetWorkflowExecutionRequest) (*GetWorkflowExecutionResponse, error)
UpdateWorkflowExecution(request *UpdateWorkflowExecutionRequest) (*UpdateWorkflowExecutionResponse, error)
ConflictResolveWorkflowExecution(request *ConflictResolveWorkflowExecutionRequest) (*ConflictResolveWorkflowExecutionResponse, error)
DeleteWorkflowExecution(request *DeleteWorkflowExecutionRequest) error
DeleteCurrentWorkflowExecution(request *DeleteCurrentWorkflowExecutionRequest) error
GetCurrentExecution(request *GetCurrentExecutionRequest) (*GetCurrentExecutionResponse, error)
// Scan operations
ListConcreteExecutions(request *ListConcreteExecutionsRequest) (*ListConcreteExecutionsResponse, error)
// Tasks related APIs
AddTasks(request *AddTasksRequest) error
// transfer tasks
GetTransferTask(request *GetTransferTaskRequest) (*GetTransferTaskResponse, error)
GetTransferTasks(request *GetTransferTasksRequest) (*GetTransferTasksResponse, error)
CompleteTransferTask(request *CompleteTransferTaskRequest) error
RangeCompleteTransferTask(request *RangeCompleteTransferTaskRequest) error
// timer tasks
GetTimerTask(request *GetTimerTaskRequest) (*GetTimerTaskResponse, error)
GetTimerIndexTasks(request *GetTimerIndexTasksRequest) (*GetTimerIndexTasksResponse, error)
CompleteTimerTask(request *CompleteTimerTaskRequest) error
RangeCompleteTimerTask(request *RangeCompleteTimerTaskRequest) error
// replication tasks
GetReplicationTask(request *GetReplicationTaskRequest) (*GetReplicationTaskResponse, error)
GetReplicationTasks(request *GetReplicationTasksRequest) (*GetReplicationTasksResponse, error)
CompleteReplicationTask(request *CompleteReplicationTaskRequest) error
RangeCompleteReplicationTask(request *RangeCompleteReplicationTaskRequest) error
PutReplicationTaskToDLQ(request *PutReplicationTaskToDLQRequest) error
GetReplicationTasksFromDLQ(request *GetReplicationTasksFromDLQRequest) (*GetReplicationTasksFromDLQResponse, error)
DeleteReplicationTaskFromDLQ(request *DeleteReplicationTaskFromDLQRequest) error
RangeDeleteReplicationTaskFromDLQ(request *RangeDeleteReplicationTaskFromDLQRequest) error
// visibility tasks
GetVisibilityTask(request *GetVisibilityTaskRequest) (*GetVisibilityTaskResponse, error)
GetVisibilityTasks(request *GetVisibilityTasksRequest) (*GetVisibilityTasksResponse, error)
CompleteVisibilityTask(request *CompleteVisibilityTaskRequest) error
RangeCompleteVisibilityTask(request *RangeCompleteVisibilityTaskRequest) error
// The below are history V2 APIs
// V2 regards history events growing as a tree, decoupled from workflow concepts
// For Temporal, treeID is the new runID, except for fork (reset), where treeID will be the runID that it forks from.
// AppendHistoryNodes add a node to history node table
AppendHistoryNodes(request *AppendHistoryNodesRequest) (*AppendHistoryNodesResponse, error)
// ReadHistoryBranch returns history node data for a branch
ReadHistoryBranch(request *ReadHistoryBranchRequest) (*ReadHistoryBranchResponse, error)
// ReadHistoryBranchByBatch returns history node data for a branch ByBatch
ReadHistoryBranchByBatch(request *ReadHistoryBranchRequest) (*ReadHistoryBranchByBatchResponse, error)
// ReadRawHistoryBranch returns raw history node data for a branch, by batch
// NOTE: this API should only be used by 3+DC
ReadRawHistoryBranch(request *ReadHistoryBranchRequest) (*ReadRawHistoryBranchResponse, error)
// ForkHistoryBranch forks a new branch from an old branch
ForkHistoryBranch(request *ForkHistoryBranchRequest) (*ForkHistoryBranchResponse, error)
// DeleteHistoryBranch removes a branch
// If this is the last branch to delete, it will also remove the root node
DeleteHistoryBranch(request *DeleteHistoryBranchRequest) error
// TrimHistoryBranch validates & trims a history branch
TrimHistoryBranch(request *TrimHistoryBranchRequest) (*TrimHistoryBranchResponse, error)
// GetHistoryTree returns all branch information of a tree
GetHistoryTree(request *GetHistoryTreeRequest) (*GetHistoryTreeResponse, error)
// GetAllHistoryTreeBranches returns all branches of all trees
GetAllHistoryTreeBranches(request *GetAllHistoryTreeBranchesRequest) (*GetAllHistoryTreeBranchesResponse, error)
}
// TaskManager is used to manage tasks
TaskManager interface {
Closeable
GetName() string
LeaseTaskQueue(request *LeaseTaskQueueRequest) (*LeaseTaskQueueResponse, error)
UpdateTaskQueue(request *UpdateTaskQueueRequest) (*UpdateTaskQueueResponse, error)
ListTaskQueue(request *ListTaskQueueRequest) (*ListTaskQueueResponse, error)
DeleteTaskQueue(request *DeleteTaskQueueRequest) error
CreateTasks(request *CreateTasksRequest) (*CreateTasksResponse, error)
GetTasks(request *GetTasksRequest) (*GetTasksResponse, error)
CompleteTask(request *CompleteTaskRequest) error
// CompleteTasksLessThan completes tasks less than or equal to the given task ID.
// This API takes a limit parameter which specifies the maximum number of rows
// that can be deleted. This parameter may be ignored by the underlying storage,
// but it's mandatory to specify it. If the underlying storage doesn't support
// "limit", all rows less than or equal to taskID will be deleted.
// On success, this method returns:
// - the number of rows actually deleted, if the limit is honored
// - UnknownNumRowsAffected, when all rows below the value are deleted
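//
// An illustrative usage sketch (taskManager, nsID, tq, taskID and limit are
// hypothetical placeholders):
//
//	n, err := taskManager.CompleteTasksLessThan(&CompleteTasksLessThanRequest{
//		NamespaceID:   nsID,
//		TaskQueueName: tq,
//		TaskType:      enumspb.TASK_QUEUE_TYPE_ACTIVITY,
//		TaskID:        taskID,
//		Limit:         limit,
//	})
//	if err == nil && n == UnknownNumRowsAffected {
//		// the storage ignored the limit and deleted all rows <= taskID
//	}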
CompleteTasksLessThan(request *CompleteTasksLessThanRequest) (int, error)
}
// MetadataManager is used to manage metadata CRUD for namespace entities
MetadataManager interface {
Closeable
GetName() string
CreateNamespace(request *CreateNamespaceRequest) (*CreateNamespaceResponse, error)
GetNamespace(request *GetNamespaceRequest) (*GetNamespaceResponse, error)
UpdateNamespace(request *UpdateNamespaceRequest) error
DeleteNamespace(request *DeleteNamespaceRequest) error
DeleteNamespaceByName(request *DeleteNamespaceByNameRequest) error
ListNamespaces(request *ListNamespacesRequest) (*ListNamespacesResponse, error)
GetMetadata() (*GetMetadataResponse, error)
InitializeSystemNamespaces(currentClusterName string) error
}
// ClusterMetadataManager is used to manage cluster-wide metadata and configuration
ClusterMetadataManager interface {
Closeable
GetName() string
GetClusterMembers(request *GetClusterMembersRequest) (*GetClusterMembersResponse, error)
UpsertClusterMembership(request *UpsertClusterMembershipRequest) error
PruneClusterMembership(request *PruneClusterMembershipRequest) error
GetClusterMetadata() (*GetClusterMetadataResponse, error)
SaveClusterMetadata(request *SaveClusterMetadataRequest) (bool, error)
}
)
func (e *InvalidPersistenceRequestError) Error() string {
return e.Msg
}
func (e *CurrentWorkflowConditionFailedError) Error() string {
return e.Msg
}
func (e *WorkflowConditionFailedError) Error() string {
return e.Msg
}
func (e *ConditionFailedError) Error() string {
return e.Msg
}
func (e *ShardAlreadyExistError) Error() string {
return e.Msg
}
func (e *ShardOwnershipLostError) Error() string {
return e.Msg
}
func (e *TimeoutError) Error() string {
return e.Msg
}
func (e *TransactionSizeLimitError) Error() string {
return e.Msg
}
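// An illustrative sketch (not part of this package) of distinguishing the
// persistence error types defined above at a call site:
//
//	switch err.(type) {
//	case *CurrentWorkflowConditionFailedError:
//		// the current record points at a different run
//	case *ShardOwnershipLostError:
//		// the shard has been stolen; stop processing it
//	case *TimeoutError, *TransactionSizeLimitError:
//		// retry or reject depending on policy
//	}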
// GetType returns the type of the activity task
func (a *ActivityTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_ACTIVITY_TASK
}
// GetVersion returns the version of the activity task
func (a *ActivityTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the activity task
func (a *ActivityTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the activity task
func (a *ActivityTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the activity task
func (a *ActivityTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (a *ActivityTask) GetVisibilityTime() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (a *ActivityTask) SetVisibilityTime(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the workflow task
func (d *WorkflowTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_WORKFLOW_TASK
}
// GetVersion returns the version of the workflow task
func (d *WorkflowTask) GetVersion() int64 {
return d.Version
}
// SetVersion sets the version of the workflow task
func (d *WorkflowTask) SetVersion(version int64) {
d.Version = version
}
// GetTaskID returns the sequence ID of the workflow task.
func (d *WorkflowTask) GetTaskID() int64 {
return d.TaskID
}
// SetTaskID sets the sequence ID of the workflow task
func (d *WorkflowTask) SetTaskID(id int64) {
d.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp; replication task info carries no visibility time, so a zero time is returned
func (d *ReplicationTaskInfoWrapper) GetVisibilityTime() *time.Time {
return &time.Time{}
}
// GetVisibilityTime gets the visibility timestamp
func (d *WorkflowTask) GetVisibilityTime() time.Time {
return d.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (d *WorkflowTask) SetVisibilityTime(timestamp time.Time) {
d.VisibilityTimestamp = timestamp
}
// GetType returns the type of the ResetWorkflowTask
func (a *ResetWorkflowTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_RESET_WORKFLOW
}
// GetVersion returns the version of the ResetWorkflowTask
func (a *ResetWorkflowTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the ResetWorkflowTask
func (a *ResetWorkflowTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the ResetWorkflowTask
func (a *ResetWorkflowTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the ResetWorkflowTask
func (a *ResetWorkflowTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (a *ResetWorkflowTask) GetVisibilityTime() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (a *ResetWorkflowTask) SetVisibilityTime(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the close execution task
func (a *CloseExecutionTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_CLOSE_EXECUTION
}
// GetVersion returns the version of the close execution task
func (a *CloseExecutionTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the close execution task
func (a *CloseExecutionTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the close execution task
func (a *CloseExecutionTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the close execution task
func (a *CloseExecutionTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (a *CloseExecutionTask) GetVisibilityTime() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (a *CloseExecutionTask) SetVisibilityTime(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the delete execution task
func (a *DeleteHistoryEventTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_DELETE_HISTORY_EVENT
}
// GetVersion returns the version of the delete execution task
func (a *DeleteHistoryEventTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the delete execution task
func (a *DeleteHistoryEventTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the delete execution task
func (a *DeleteHistoryEventTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the delete execution task
func (a *DeleteHistoryEventTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (a *DeleteHistoryEventTask) GetVisibilityTime() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (a *DeleteHistoryEventTask) SetVisibilityTime(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the timer task
func (d *WorkflowTaskTimeoutTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_WORKFLOW_TASK_TIMEOUT
}
// GetVersion returns the version of the timer task
func (d *WorkflowTaskTimeoutTask) GetVersion() int64 {
return d.Version
}
// SetVersion sets the version of the timer task
func (d *WorkflowTaskTimeoutTask) SetVersion(version int64) {
d.Version = version
}
// GetTaskID returns the sequence ID.
func (d *WorkflowTaskTimeoutTask) GetTaskID() int64 {
return d.TaskID
}
// SetTaskID sets the sequence ID.
func (d *WorkflowTaskTimeoutTask) SetTaskID(id int64) {
d.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (d *WorkflowTaskTimeoutTask) GetVisibilityTime() time.Time {
return d.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (d *WorkflowTaskTimeoutTask) SetVisibilityTime(t time.Time) {
d.VisibilityTimestamp = t
}
// GetType returns the type of the timer task
func (a *ActivityTimeoutTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_ACTIVITY_TIMEOUT
}
// GetVersion returns the version of the timer task
func (a *ActivityTimeoutTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the timer task
func (a *ActivityTimeoutTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID.
func (a *ActivityTimeoutTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID.
func (a *ActivityTimeoutTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (a *ActivityTimeoutTask) GetVisibilityTime() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (a *ActivityTimeoutTask) SetVisibilityTime(t time.Time) {
a.VisibilityTimestamp = t
}
// GetType returns the type of the timer task
func (u *UserTimerTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_USER_TIMER
}
// GetVersion returns the version of the timer task
func (u *UserTimerTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the timer task
func (u *UserTimerTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the timer task.
func (u *UserTimerTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the timer task.
func (u *UserTimerTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (u *UserTimerTask) GetVisibilityTime() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (u *UserTimerTask) SetVisibilityTime(t time.Time) {
u.VisibilityTimestamp = t
}
// GetType returns the type of the retry timer task
func (r *ActivityRetryTimerTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_ACTIVITY_RETRY_TIMER
}
// GetVersion returns the version of the retry timer task
func (r *ActivityRetryTimerTask) GetVersion() int64 {
return r.Version
}
// SetVersion sets the version of the retry timer task
func (r *ActivityRetryTimerTask) SetVersion(version int64) {
r.Version = version
}
// GetTaskID returns the sequence ID.
func (r *ActivityRetryTimerTask) GetTaskID() int64 {
return r.TaskID
}
// SetTaskID sets the sequence ID.
func (r *ActivityRetryTimerTask) SetTaskID(id int64) {
r.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (r *ActivityRetryTimerTask) GetVisibilityTime() time.Time {
return r.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (r *ActivityRetryTimerTask) SetVisibilityTime(t time.Time) {
r.VisibilityTimestamp = t
}
// GetType returns the type of the retry timer task
func (r *WorkflowBackoffTimerTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_WORKFLOW_BACKOFF_TIMER
}
// GetVersion returns the version of the retry timer task
func (r *WorkflowBackoffTimerTask) GetVersion() int64 {
return r.Version
}
// SetVersion sets the version of the retry timer task
func (r *WorkflowBackoffTimerTask) SetVersion(version int64) {
r.Version = version
}
// GetTaskID returns the sequence ID.
func (r *WorkflowBackoffTimerTask) GetTaskID() int64 {
return r.TaskID
}
// SetTaskID sets the sequence ID.
func (r *WorkflowBackoffTimerTask) SetTaskID(id int64) {
r.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (r *WorkflowBackoffTimerTask) GetVisibilityTime() time.Time {
return r.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (r *WorkflowBackoffTimerTask) SetVisibilityTime(t time.Time) {
r.VisibilityTimestamp = t
}
// GetType returns the type of the timeout task.
func (u *WorkflowTimeoutTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_WORKFLOW_RUN_TIMEOUT
}
// GetVersion returns the version of the timeout task
func (u *WorkflowTimeoutTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the timeout task
func (u *WorkflowTimeoutTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the timeout task.
func (u *WorkflowTimeoutTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the timeout task.
func (u *WorkflowTimeoutTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (u *WorkflowTimeoutTask) GetVisibilityTime() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (u *WorkflowTimeoutTask) SetVisibilityTime(t time.Time) {
u.VisibilityTimestamp = t
}
// GetType returns the type of the cancel transfer task
func (u *CancelExecutionTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_CANCEL_EXECUTION
}
// GetVersion returns the version of the cancel transfer task
func (u *CancelExecutionTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the cancel transfer task
func (u *CancelExecutionTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the cancel transfer task.
func (u *CancelExecutionTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the cancel transfer task.
func (u *CancelExecutionTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (u *CancelExecutionTask) GetVisibilityTime() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (u *CancelExecutionTask) SetVisibilityTime(timestamp time.Time) {
u.VisibilityTimestamp = timestamp
}
// GetType returns the type of the signal transfer task
func (u *SignalExecutionTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_SIGNAL_EXECUTION
}
// GetVersion returns the version of the signal transfer task
func (u *SignalExecutionTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the signal transfer task
func (u *SignalExecutionTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the signal transfer task.
func (u *SignalExecutionTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the signal transfer task.
func (u *SignalExecutionTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (u *SignalExecutionTask) GetVisibilityTime() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (u *SignalExecutionTask) SetVisibilityTime(timestamp time.Time) {
u.VisibilityTimestamp = timestamp
}
// GetType returns the type of the start child transfer task
func (u *StartChildExecutionTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_TRANSFER_START_CHILD_EXECUTION
}
// GetVersion returns the version of the start child transfer task
func (u *StartChildExecutionTask) GetVersion() int64 {
return u.Version
}
// SetVersion sets the version of the start child transfer task
func (u *StartChildExecutionTask) SetVersion(version int64) {
u.Version = version
}
// GetTaskID returns the sequence ID of the start child transfer task
func (u *StartChildExecutionTask) GetTaskID() int64 {
return u.TaskID
}
// SetTaskID sets the sequence ID of the start child transfer task
func (u *StartChildExecutionTask) SetTaskID(id int64) {
u.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (u *StartChildExecutionTask) GetVisibilityTime() time.Time {
return u.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (u *StartChildExecutionTask) SetVisibilityTime(timestamp time.Time) {
u.VisibilityTimestamp = timestamp
}
// GetType returns the type of the history replication task
func (a *HistoryReplicationTask) GetType() enumsspb.TaskType {
return enumsspb.TASK_TYPE_REPLICATION_HISTORY
}
// GetVersion returns the version of the history replication task
func (a *HistoryReplicationTask) GetVersion() int64 {
return a.Version
}
// SetVersion sets the version of the history replication task
func (a *HistoryReplicationTask) SetVersion(version int64) {
a.Version = version
}
// GetTaskID returns the sequence ID of the history replication task
func (a *HistoryReplicationTask) GetTaskID() int64 {
return a.TaskID
}
// SetTaskID sets the sequence ID of the history replication task
func (a *HistoryReplicationTask) SetTaskID(id int64) {
a.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (a *HistoryReplicationTask) GetVisibilityTime() time.Time {
return a.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (a *HistoryReplicationTask) SetVisibilityTime(timestamp time.Time) {
a.VisibilityTimestamp = timestamp
}
// GetType returns the type of the start execution visibility task
func (t *StartExecutionVisibilityTask) GetType() enumsspb.TaskType {
	return enumsspb.TASK_TYPE_VISIBILITY_START_EXECUTION
}
// GetVersion returns the version of the start execution visibility task
func (t *StartExecutionVisibilityTask) GetVersion() int64 {
	return t.Version
}
// SetVersion sets the version of the start execution visibility task
func (t *StartExecutionVisibilityTask) SetVersion(version int64) {
	t.Version = version
}
// GetTaskID returns the sequence ID of the start execution visibility task.
func (t *StartExecutionVisibilityTask) GetTaskID() int64 {
	return t.TaskID
}
// SetTaskID sets the sequence ID of the start execution visibility task.
func (t *StartExecutionVisibilityTask) SetTaskID(id int64) {
	t.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (t *StartExecutionVisibilityTask) GetVisibilityTime() time.Time {
	return t.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (t *StartExecutionVisibilityTask) SetVisibilityTime(timestamp time.Time) {
	t.VisibilityTimestamp = timestamp
}
// GetType returns the type of the upsert execution visibility task
func (t *UpsertExecutionVisibilityTask) GetType() enumsspb.TaskType {
	return enumsspb.TASK_TYPE_VISIBILITY_UPSERT_EXECUTION
}
// GetVersion returns the version of the upsert execution visibility task
func (t *UpsertExecutionVisibilityTask) GetVersion() int64 {
	return t.Version
}
// SetVersion sets the version of the upsert execution visibility task
func (t *UpsertExecutionVisibilityTask) SetVersion(version int64) {
	t.Version = version
}
// GetTaskID returns the sequence ID of the upsert execution visibility task.
func (t *UpsertExecutionVisibilityTask) GetTaskID() int64 {
	return t.TaskID
}
// SetTaskID sets the sequence ID of the upsert execution visibility task.
func (t *UpsertExecutionVisibilityTask) SetTaskID(id int64) {
	t.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (t *UpsertExecutionVisibilityTask) GetVisibilityTime() time.Time {
	return t.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (t *UpsertExecutionVisibilityTask) SetVisibilityTime(timestamp time.Time) {
	t.VisibilityTimestamp = timestamp
}
// GetType returns the type of the close execution visibility task
func (t *CloseExecutionVisibilityTask) GetType() enumsspb.TaskType {
	return enumsspb.TASK_TYPE_VISIBILITY_CLOSE_EXECUTION
}
// GetVersion returns the version of the close execution visibility task
func (t *CloseExecutionVisibilityTask) GetVersion() int64 {
	return t.Version
}
// SetVersion sets the version of the close execution visibility task
func (t *CloseExecutionVisibilityTask) SetVersion(version int64) {
	t.Version = version
}
// GetTaskID returns the sequence ID of the close execution visibility task.
func (t *CloseExecutionVisibilityTask) GetTaskID() int64 {
	return t.TaskID
}
// SetTaskID sets the sequence ID of the close execution visibility task.
func (t *CloseExecutionVisibilityTask) SetTaskID(id int64) {
	t.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (t *CloseExecutionVisibilityTask) GetVisibilityTime() time.Time {
	return t.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (t *CloseExecutionVisibilityTask) SetVisibilityTime(timestamp time.Time) {
	t.VisibilityTimestamp = timestamp
}
// GetType returns the type of the delete execution visibility task
func (t *DeleteExecutionVisibilityTask) GetType() enumsspb.TaskType {
	return enumsspb.TASK_TYPE_VISIBILITY_DELETE_EXECUTION
}
// GetVersion returns the version of the delete execution visibility task
func (t *DeleteExecutionVisibilityTask) GetVersion() int64 {
	return t.Version
}
// SetVersion sets the version of the delete execution visibility task
func (t *DeleteExecutionVisibilityTask) SetVersion(version int64) {
	t.Version = version
}
// GetTaskID returns the sequence ID of the delete execution visibility task.
func (t *DeleteExecutionVisibilityTask) GetTaskID() int64 {
	return t.TaskID
}
// SetTaskID sets the sequence ID of the delete execution visibility task.
func (t *DeleteExecutionVisibilityTask) SetTaskID(id int64) {
	t.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (t *DeleteExecutionVisibilityTask) GetVisibilityTime() time.Time {
	return t.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (t *DeleteExecutionVisibilityTask) SetVisibilityTime(timestamp time.Time) {
	t.VisibilityTimestamp = timestamp
}
// GetType returns the type of the sync activity task
func (a *SyncActivityTask) GetType() enumsspb.TaskType {
	return enumsspb.TASK_TYPE_REPLICATION_SYNC_ACTIVITY
}
// GetVersion returns the version of the sync activity task
func (a *SyncActivityTask) GetVersion() int64 {
	return a.Version
}
// SetVersion sets the version of the sync activity task
func (a *SyncActivityTask) SetVersion(version int64) {
	a.Version = version
}
// GetTaskID returns the sequence ID of the sync activity task
func (a *SyncActivityTask) GetTaskID() int64 {
	return a.TaskID
}
// SetTaskID sets the sequence ID of the sync activity task
func (a *SyncActivityTask) SetTaskID(id int64) {
	a.TaskID = id
}
// GetVisibilityTime gets the visibility timestamp
func (a *SyncActivityTask) GetVisibilityTime() time.Time {
	return a.VisibilityTimestamp
}
// SetVisibilityTime sets the visibility timestamp
func (a *SyncActivityTask) SetVisibilityTime(timestamp time.Time) {
	a.VisibilityTimestamp = timestamp
}
// UnixMilliseconds returns t as a Unix time, the number of milliseconds elapsed since January 1, 1970 UTC.
// It should be used for all CQL timestamps.
func UnixMilliseconds(t time.Time) int64 {
// Handling zero time separately because UnixNano is undefined for zero times.
if t.IsZero() {
return 0
}
unixNano := t.UnixNano()
if unixNano < 0 {
// Time is before January 1, 1970 UTC
return 0
}
return unixNano / int64(time.Millisecond)
}
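// Usage sketch (illustrative values only): one second past the epoch maps to
// 1000 ms, the zero time maps to 0, and pre-epoch times are clamped to 0.
//
//	UnixMilliseconds(time.Unix(1, 0))   // 1000
//	UnixMilliseconds(time.Time{})       // 0
//	UnixMilliseconds(time.Unix(-10, 0)) // 0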
// NewHistoryBranchToken returns a new branch token
func NewHistoryBranchToken(treeID string) ([]byte, error) {
branchID := primitives.NewUUID().String()
bi := &persistencespb.HistoryBranch{
TreeId: treeID,
BranchId: branchID,
Ancestors: []*persistencespb.HistoryBranchRange{},
}
datablob, err := serialization.HistoryBranchToBlob(bi)
if err != nil {
return nil, err
}
token := datablob.Data
return token, nil
}
// NewHistoryBranchTokenByBranchID returns a new branch token with treeID/branchID
func NewHistoryBranchTokenByBranchID(treeID, branchID string) ([]byte, error) {
bi := &persistencespb.HistoryBranch{
TreeId: treeID,
BranchId: branchID,
Ancestors: []*persistencespb.HistoryBranchRange{},
}
datablob, err := serialization.HistoryBranchToBlob(bi)
if err != nil {
return nil, err
}
token := datablob.Data
return token, nil
}
// BuildHistoryGarbageCleanupInfo combines the workflow identity information into a string
func BuildHistoryGarbageCleanupInfo(namespaceID, workflowID, runID string) string {
return fmt.Sprintf("%v:%v:%v", namespaceID, workflowID, runID)
}
// SplitHistoryGarbageCleanupInfo returns workflow identity information
func SplitHistoryGarbageCleanupInfo(info string) (namespaceID, workflowID, runID string, err error) {
ss := strings.Split(info, ":")
// workflowID can contain ":" so len(ss) can be greater than 3
if len(ss) < numItemsInGarbageInfo {
return "", "", "", fmt.Errorf("not able to split info for %s", info)
}
namespaceID = ss[0]
runID = ss[len(ss)-1]
workflowEnd := len(info) - len(runID) - 1
workflowID = info[len(namespaceID)+1 : workflowEnd]
return
}
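// Round-trip sketch (hypothetical IDs): workflow IDs containing ":" survive
// the encode/decode cycle because only the first and last separators are
// treated as delimiters.
//
//	info := BuildHistoryGarbageCleanupInfo("ns-id", "wf:id", "run-id")
//	// info == "ns-id:wf:id:run-id"
//	ns, wf, run, err := SplitHistoryGarbageCleanupInfo(info)
//	// ns == "ns-id", wf == "wf:id", run == "run-id", err == nil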
// NewGetReplicationTasksFromDLQRequest creates a new GetReplicationTasksFromDLQRequest
func NewGetReplicationTasksFromDLQRequest(
shardID int32,
sourceClusterName string,
readLevel int64,
maxReadLevel int64,
batchSize int,
nextPageToken []byte,
) *GetReplicationTasksFromDLQRequest {
return &GetReplicationTasksFromDLQRequest{
ShardID: shardID,
SourceClusterName: sourceClusterName,
GetReplicationTasksRequest: GetReplicationTasksRequest{
MinTaskID: readLevel,
MaxTaskID: maxReadLevel,
BatchSize: batchSize,
NextPageToken: nextPageToken,
},
}
}
type ServiceType int
const (
All ServiceType = iota
Frontend
History
Matching
Worker
)
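// Note: the iota-based constants above enumerate, in declaration order, as
// All=0, Frontend=1, History=2, Matching=3, Worker=4.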
| 1 | 12,637 | if this is a "Status" type then the variable name "Stats" (which implies "statistics") seems wrong (or at least confusing to me) | temporalio-temporal | go |
@@ -35,6 +35,10 @@ const (
exportKeystorePath = "/var/lib/sonm/worker_keystore"
)
+var (
+ leakedInsecureKey = common.HexToAddress("0x8125721c2413d99a33e351e1f6bb4e56b6b633fd")
+)
+
type options struct {
version string
cfg *Config | 1 | package worker
import (
"context"
"crypto/ecdsa"
"crypto/rand"
"crypto/rsa"
"crypto/x509"
"encoding/pem"
"errors"
"fmt"
"os"
"github.com/ethereum/go-ethereum/accounts/keystore"
"github.com/ethereum/go-ethereum/common"
"github.com/ethereum/go-ethereum/crypto"
"github.com/noxiouz/zapctx/ctxlog"
"github.com/sonm-io/core/blockchain"
"github.com/sonm-io/core/insonmnia/benchmarks"
"github.com/sonm-io/core/insonmnia/matcher"
"github.com/sonm-io/core/insonmnia/state"
"github.com/sonm-io/core/insonmnia/worker/plugin"
"github.com/sonm-io/core/proto"
"github.com/sonm-io/core/util"
"github.com/sonm-io/core/util/multierror"
"github.com/sonm-io/core/util/netutil"
"github.com/sonm-io/core/util/xgrpc"
"golang.org/x/crypto/ssh"
"google.golang.org/grpc/credentials"
)
const (
sshPrivateKeyKey = "ssh_private_key"
ethereumPrivateKeyKey = "ethereum_private_key"
exportKeystorePath = "/var/lib/sonm/worker_keystore"
)
type options struct {
version string
cfg *Config
ctx context.Context
ovs Overseer
ssh SSH
key *ecdsa.PrivateKey
publicIPs []string
benchmarks benchmarks.BenchList
storage *state.Storage
eth blockchain.API
dwh sonm.DWHClient
creds credentials.TransportCredentials
certRotator util.HitlessCertRotator
plugins *plugin.Repository
whitelist Whitelist
matcher matcher.Matcher
}
func (m *options) validate() error {
err := multierror.NewMultiError()
if m.cfg == nil {
err = multierror.Append(err, errors.New("config is mandatory for Worker options"))
}
if m.storage == nil {
err = multierror.Append(err, errors.New("state storage is mandatory"))
}
return err.ErrorOrNil()
}
func (m *options) SetupDefaults() error {
if err := m.validate(); err != nil {
return err
}
if m.ctx == nil {
m.ctx = context.Background()
}
if err := m.setupKey(); err != nil {
return err
}
if err := m.setupBlockchainAPI(); err != nil {
return err
}
if err := m.setupPlugins(); err != nil {
return err
}
if err := m.setupCreds(); err != nil {
return err
}
if err := m.setupDWH(); err != nil {
return err
}
if err := m.setupWhitelist(); err != nil {
return err
}
if err := m.setupMatcher(); err != nil {
return err
}
if err := m.setupBenchmarks(); err != nil {
return err
}
if err := m.setupNetworkOptions(); err != nil {
return err
}
if err := m.setupOverseer(); err != nil {
return err
}
return nil
}
func (m *options) setupKey() error {
if m.key == nil {
var data []byte
loaded, err := m.storage.Load(ethereumPrivateKeyKey, &data)
if err != nil {
return err
}
if !loaded {
key, err := crypto.GenerateKey()
if err != nil {
return err
}
if err := m.storage.Save(ethereumPrivateKeyKey, crypto.FromECDSA(key)); err != nil {
return err
}
m.key = key
} else {
key, err := crypto.ToECDSA(data)
if err != nil {
return err
}
m.key = key
}
}
if err := m.exportKey(); err != nil {
return err
}
return nil
}
func (m *options) exportKey() error {
if err := os.MkdirAll(exportKeystorePath, 0700); err != nil {
return err
}
ks := keystore.NewKeyStore(exportKeystorePath, keystore.LightScryptN, keystore.LightScryptP)
if !ks.HasAddress(crypto.PubkeyToAddress(m.key.PublicKey)) {
_, err := ks.ImportECDSA(m.key, "sonm")
return err
}
return nil
}
func (m *options) setupBlockchainAPI() error {
if m.eth == nil {
eth, err := blockchain.NewAPI(m.ctx, blockchain.WithConfig(m.cfg.Blockchain), blockchain.WithNiceMarket())
if err != nil {
return err
}
m.eth = eth
}
return nil
}
func (m *options) setupPlugins() error {
plugins, err := plugin.NewRepository(m.ctx, m.cfg.Plugins)
if err != nil {
return err
}
m.plugins = plugins
return nil
}
func (m *options) setupCreds() error {
if m.creds == nil {
if m.certRotator != nil {
return errors.New("have certificate rotator in options, but do not have credentials")
}
certRotator, TLSConfig, err := util.NewHitlessCertRotator(m.ctx, m.key)
if err != nil {
return err
}
m.certRotator = certRotator
m.creds = util.NewTLS(TLSConfig)
}
return nil
}
func (m *options) setupDWH() error {
if m.dwh == nil {
cc, err := xgrpc.NewClient(m.ctx, m.cfg.DWH.Endpoint, m.creds)
if err != nil {
return err
}
m.dwh = sonm.NewDWHClient(cc)
}
return nil
}
func (m *options) setupWhitelist() error {
if m.whitelist == nil {
cfg := m.cfg.Whitelist
if len(cfg.PrivilegedAddresses) == 0 {
cfg.PrivilegedAddresses = append(cfg.PrivilegedAddresses, crypto.PubkeyToAddress(m.key.PublicKey).Hex())
cfg.PrivilegedAddresses = append(cfg.PrivilegedAddresses, m.cfg.Master.Hex())
if m.cfg.Admin != nil {
cfg.PrivilegedAddresses = append(cfg.PrivilegedAddresses, m.cfg.Admin.Hex())
}
}
m.whitelist = NewWhitelist(m.ctx, &cfg)
}
return nil
}
func (m *options) setupMatcher() error {
if m.matcher == nil {
if m.cfg.Matcher != nil {
matcher, err := matcher.NewMatcher(&matcher.Config{
Key: m.key,
DWH: m.dwh,
Eth: m.eth,
PollDelay: m.cfg.Matcher.PollDelay,
QueryLimit: m.cfg.Matcher.QueryLimit,
Log: ctxlog.S(m.ctx),
})
if err != nil {
return fmt.Errorf("cannot create matcher: %v", err)
}
m.matcher = matcher
} else {
m.matcher = matcher.NewDisabledMatcher()
}
}
return nil
}
func (m *options) setupBenchmarks() error {
if m.benchmarks == nil {
benchList, err := benchmarks.NewBenchmarksList(m.ctx, m.cfg.Benchmarks)
if err != nil {
return err
}
m.benchmarks = benchList
}
return nil
}
func (m *options) setupNetworkOptions() error {
// Use public IPs from config (if provided).
publicIPs := m.cfg.PublicIPs
if len(publicIPs) > 0 {
m.publicIPs = netutil.SortedIPs(publicIPs)
return nil
}
	// No public IPs configured (e.g. no NAT setup): scan the host interfaces instead.
rawPublicIPs, err := netutil.GetPublicIPs()
if err != nil {
return err
}
for _, ip := range rawPublicIPs {
publicIPs = append(publicIPs, ip.String())
}
m.publicIPs = netutil.SortedIPs(publicIPs)
return nil
}
func encodeRSAPrivateKeyToPEM(privateKey *rsa.PrivateKey) []byte {
bytes := x509.MarshalPKCS1PrivateKey(privateKey)
block := pem.Block{
Type: "RSA PRIVATE KEY",
Headers: nil,
Bytes: bytes,
}
return pem.EncodeToMemory(&block)
}
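// Decoding sketch for reading the key back (pemBytes here stands for the
// output of encodeRSAPrivateKeyToPEM; uses the "encoding/pem" and
// "crypto/x509" imports already present in this file):
//
//	block, _ := pem.Decode(pemBytes)
//	key, err := x509.ParsePKCS1PrivateKey(block.Bytes)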
func (m *options) setupSSH(view OverseerView) error {
if m.cfg.SSH != nil {
signer, err := m.loadOrGenerateSSHSigner()
if err != nil {
return err
}
authorizedKeys := []common.Address{
crypto.PubkeyToAddress(m.key.PublicKey),
m.cfg.Master,
}
if m.cfg.Admin != nil {
authorizedKeys = append(authorizedKeys, *m.cfg.Admin)
}
ssh, err := NewSSHServer(*m.cfg.SSH, signer, m.creds, authorizedKeys, view, ctxlog.S(m.ctx))
if err != nil {
return err
}
m.ssh = ssh
return nil
}
if m.ssh == nil {
m.ssh = nilSSH{}
}
return nil
}
func (m *options) loadOrGenerateSSHSigner() (ssh.Signer, error) {
var privateKeyData []byte
ok, err := m.storage.Load(sshPrivateKeyKey, &privateKeyData)
if err != nil {
return nil, err
}
if ok {
return ssh.ParsePrivateKey(privateKeyData)
}
privateKey, err := rsa.GenerateKey(rand.Reader, 2048)
if err != nil {
return nil, err
}
if err := m.storage.Save(sshPrivateKeyKey, encodeRSAPrivateKeyToPEM(privateKey)); err != nil {
return nil, err
}
return ssh.NewSignerFromSigner(privateKey)
}
func (m *options) setupOverseer() error {
if m.ovs == nil {
ovs, err := NewOverseer(m.ctx, m.plugins)
if err != nil {
return err
}
m.ovs = ovs
}
return nil
}
type Option func(*options)
func WithConfig(cfg *Config) Option {
return func(opts *options) {
opts.cfg = cfg
}
}
func WithContext(ctx context.Context) Option {
return func(opts *options) {
opts.ctx = ctx
}
}
func WithOverseer(ovs Overseer) Option {
return func(opts *options) {
opts.ovs = ovs
}
}
func WithSSH(ssh SSH) Option {
return func(opts *options) {
opts.ssh = ssh
}
}
func WithKey(key *ecdsa.PrivateKey) Option {
return func(opts *options) {
opts.key = key
}
}
func WithBenchmarkList(list benchmarks.BenchList) Option {
return func(opts *options) {
opts.benchmarks = list
}
}
func WithStateStorage(s *state.Storage) Option {
return func(o *options) {
o.storage = s
}
}
func WithETH(e blockchain.API) Option {
return func(o *options) {
o.eth = e
}
}
func WithDWH(d sonm.DWHClient) Option {
return func(o *options) {
o.dwh = d
}
}
func WithCreds(creds credentials.TransportCredentials) Option {
return func(o *options) {
o.creds = creds
}
}
func WithVersion(v string) Option {
return func(o *options) {
o.version = v
}
}
func WithCertRotator(certRotator util.HitlessCertRotator) Option {
return func(o *options) {
o.certRotator = certRotator
}
}
func WithPlugins(plugins *plugin.Repository) Option {
return func(o *options) {
o.plugins = plugins
}
}
func WithWhitelist(whitelist Whitelist) Option {
return func(o *options) {
o.whitelist = whitelist
}
}
func WithMatcher(matcher matcher.Matcher) Option {
return func(o *options) {
o.matcher = matcher
}
}
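// Minimal composition sketch (illustrative only; exampleBuildOptions is not
// part of this package's API, and cfg/storage are assumed to come from the
// caller): functional options are applied in order, then SetupDefaults fills
// in whatever was not provided explicitly.
func exampleBuildOptions(cfg *Config, storage *state.Storage) (*options, error) {
	opts := &options{}
	for _, apply := range []Option{WithConfig(cfg), WithStateStorage(storage), WithVersion("dev")} {
		apply(opts)
	}
	if err := opts.SetupDefaults(); err != nil {
		return nil, err
	}
	return opts, nil
}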
| 1 | 7,858 | maybe will be better to put it into the default worker's config rather than hardcoding the address? | sonm-io-core | go |
@@ -2737,14 +2737,11 @@ describe('Find', function() {
var db = client.db(configuration.db);
var collection = db.collection('shouldNotMutateUserOptions');
var options = { raw: 'TEST' };
- collection.find({}, options, function(error) {
- test.equal(null, error);
- test.equal(undefined, options.skip);
- test.equal(undefined, options.limit);
- test.equal('TEST', options.raw);
- client.close();
- done();
- });
+ expect(() => collection.find({}, options)).to.throw(
+ 'raw should be of type boolean, but is of type string.'
+ );
+ client.close();
+ done();
});
}
}); | 1 | 'use strict';
const test = require('./shared').assert;
const setupDatabase = require('./shared').setupDatabase;
const expect = require('chai').expect;
const Buffer = require('safe-buffer').Buffer;
describe('Find', function() {
before(function() {
return setupDatabase(this.configuration);
});
/**
* Test a simple find
* @ignore
*/
it('shouldCorrectlyPerformSimpleFind', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('test_find_simple', function(err, collection) {
var doc1 = null;
// Insert some test documents
collection.insert([{ a: 2 }, { b: 3 }], configuration.writeConcernMax(), function(
err,
r
) {
doc1 = r.ops[0];
// Ensure correct insertion testing via the cursor and the count function
collection.find().toArray(function(err, documents) {
test.equal(2, documents.length);
collection.count(function(err, count) {
test.equal(2, count);
// Fetch values by selection
collection.find({ a: doc1.a }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(doc1.a, documents[0].a);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
}
});
/**
* Test a simple find chained
* @ignore
*/
it('shouldCorrectlyPerformSimpleChainedFind', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_simple_chained', function(err) {
test.equal(null, err);
db.collection('test_find_simple_chained', function(err, collection) {
var doc1 = null;
// Insert some test documents
collection.insert([{ a: 2 }, { b: 3 }], configuration.writeConcernMax(), function(
err,
r
) {
doc1 = r.ops[0];
// Ensure correct insertion testing via the cursor and the count function
collection.find().toArray(function(err, documents) {
test.equal(2, documents.length);
collection.count(function(err, count) {
test.equal(2, count);
// Fetch values by selection
collection.find({ a: doc1.a }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(doc1.a, documents[0].a);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
});
}
});
/**
* Test advanced find
* @ignore
*/
it('shouldCorrectlyPerformAdvancedFinds', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_find_advanced');
// Insert some test documents
collection.insert([{ a: 1 }, { a: 2 }, { b: 3 }], configuration.writeConcernMax(), function(
err,
r
) {
var doc1 = r.ops[0],
doc2 = r.ops[1];
// Locate by less than
collection.find({ a: { $lt: 10 } }).toArray(function(err, documents) {
test.equal(2, documents.length);
// Check that the correct documents are returned
var results = [];
// Check that we have all the results we want
documents.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Locate by greater than
collection.find({ a: { $gt: 1 } }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(2, documents[0].a);
// Locate by less than or equal to
collection.find({ a: { $lte: 1 } }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(1, documents[0].a);
// Locate by greater than or equal to
collection.find({ a: { $gte: 1 } }).toArray(function(err, documents) {
test.equal(2, documents.length);
// Check that the correct documents are returned
var results = [];
// Check that we have all the results we want
documents.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Locate by between
collection.find({ a: { $gt: 1, $lt: 3 } }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal(2, documents[0].a);
// Locate in clause
collection.find({ a: { $in: [1, 2] } }).toArray(function(err, documents) {
test.equal(2, documents.length);
// Check that the correct documents are returned
var results = [];
// Check that we have all the results we want
documents.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Locate in _id clause
collection
.find({ _id: { $in: [doc1['_id'], doc2['_id']] } })
.toArray(function(err, documents) {
test.equal(2, documents.length);
// Check that the correct documents are returned
var results = [];
// Check that we have all the results we want
documents.forEach(function(doc) {
if (doc.a === 1 || doc.a === 2) results.push(1);
});
test.equal(2, results.length);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
});
});
});
}
});
/**
* Test sorting of results
* @ignore
*/
it('shouldCorrectlyPerformFindWithSort', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_sorting', function(err) {
test.equal(null, err);
db.collection('test_find_sorting', function(err, collection) {
// Insert some test documents
collection.insert(
[{ a: 1, b: 2 }, { a: 2, b: 1 }, { a: 3, b: 2 }, { a: 4, b: 1 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Test sorting (ascending)
collection
.find({ a: { $lt: 10 } }, { sort: [['a', 1]] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(1, documents[0].a);
test.equal(2, documents[1].a);
test.equal(3, documents[2].a);
test.equal(4, documents[3].a);
// Test sorting (descending)
collection
.find({ a: { $lt: 10 } }, { sort: [['a', -1]] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(4, documents[0].a);
test.equal(3, documents[1].a);
test.equal(2, documents[2].a);
test.equal(1, documents[3].a);
// Test sorting (descending), sort is hash
collection
.find({ a: { $lt: 10 } }, { sort: { a: -1 } })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(4, documents[0].a);
test.equal(3, documents[1].a);
test.equal(2, documents[2].a);
test.equal(1, documents[3].a);
// Sorting using array of names, assumes ascending order
collection
.find({ a: { $lt: 10 } }, { sort: ['a'] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(1, documents[0].a);
test.equal(2, documents[1].a);
test.equal(3, documents[2].a);
test.equal(4, documents[3].a);
// Sorting using single name, assumes ascending order
collection
.find({ a: { $lt: 10 } }, { sort: 'a' })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(1, documents[0].a);
test.equal(2, documents[1].a);
test.equal(3, documents[2].a);
test.equal(4, documents[3].a);
// Sorting using single name, assumes ascending order, sort is hash
collection
.find({ a: { $lt: 10 } }, { sort: { a: 1 } })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(1, documents[0].a);
test.equal(2, documents[1].a);
test.equal(3, documents[2].a);
test.equal(4, documents[3].a);
collection
.find({ a: { $lt: 10 } }, { sort: ['b', 'a'] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
test.equal(2, documents[0].a);
test.equal(4, documents[1].a);
test.equal(1, documents[2].a);
test.equal(3, documents[3].a);
                            // Sorting using an empty array: no order guarantee, but it should not blow up
collection
.find({ a: { $lt: 10 } }, { sort: [] })
.toArray(function(err, documents) {
test.equal(4, documents.length);
/* NONACTUAL */
// Sorting using ordered hash
collection
.find({ a: { $lt: 10 } }, { sort: { a: -1 } })
.toArray(function(err, documents) {
                                  // All four documents should still be returned
test.equal(4, documents.length);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
});
});
});
}
);
});
});
});
}
});
/**
* Test the limit function of the db
* @ignore
*/
it('shouldCorrectlyPerformFindWithLimit', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_limits', function(err) {
test.equal(null, err);
db.collection('test_find_limits', function(err, collection) {
// Insert some test documents
collection.insert(
[{ a: 1 }, { b: 2 }, { c: 3 }, { d: 4 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Test limits
collection.find({}, { limit: 1 }).toArray(function(err, documents) {
test.equal(1, documents.length);
collection.find({}, { limit: 2 }).toArray(function(err, documents) {
test.equal(2, documents.length);
collection.find({}, { limit: 3 }).toArray(function(err, documents) {
test.equal(3, documents.length);
collection.find({}, { limit: 4 }).toArray(function(err, documents) {
test.equal(4, documents.length);
collection.find({}, {}).toArray(function(err, documents) {
test.equal(4, documents.length);
collection.find({}, { limit: 99 }).toArray(function(err, documents) {
test.equal(4, documents.length);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
}
);
});
});
});
}
});
/**
* Test find by non-quoted values (issue #128)
* @ignore
*/
it('shouldCorrectlyFindWithNonQuotedValues', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_non_quoted_values', function(err) {
test.equal(null, err);
db.collection('test_find_non_quoted_values', function(err, collection) {
// insert test document
collection.insert(
[{ a: 19, b: 'teststring', c: 59920303 }, { a: '19', b: 'teststring', c: 3984929 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection.find({ a: 19 }).toArray(function(err, documents) {
test.equal(1, documents.length);
client.close();
done();
});
}
);
});
});
});
}
});
/**
* Test for querying embedded document using dot-notation (issue #126)
* @ignore
*/
it('shouldCorrectlyFindEmbeddedDocument', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_embedded_document', function(err) {
test.equal(null, err);
db.collection('test_find_embedded_document', function(err, collection) {
// insert test document
collection.insert(
[
{ a: { id: 10, value: 'foo' }, b: 'bar', c: { id: 20, value: 'foobar' } },
{ a: { id: 11, value: 'foo' }, b: 'bar2', c: { id: 20, value: 'foobar' } }
],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// test using integer value
collection.find({ 'a.id': 10 }).toArray(function(err, documents) {
test.equal(1, documents.length);
test.equal('bar', documents[0].b);
// test using string value
collection.find({ 'a.value': 'foo' }).toArray(function(err, documents) {
// should yield 2 documents
test.equal(2, documents.length);
test.equal('bar', documents[0].b);
test.equal('bar2', documents[1].b);
client.close();
done();
});
});
}
);
});
});
});
}
});
/**
* Find no records
* @ignore
*/
it('shouldCorrectlyFindNoRecords', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_one_no_records', function(err) {
test.equal(null, err);
db.collection('test_find_one_no_records', function(err, collection) {
test.equal(null, err);
collection.find({ a: 1 }, {}).toArray(function(err, documents) {
test.equal(0, documents.length);
// Let's close the db
client.close();
done();
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyPerformFindByWhere', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var Code = configuration.require.Code;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_where', function(err, collection) {
collection.insert(
[{ a: 1 }, { a: 2 }, { a: 3 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection.count(function(err, count) {
test.equal(null, err);
test.equal(3, count);
// Let's test usage of the $where statement
collection.find({ $where: new Code('this.a > 2') }).count(function(err, count) {
test.equal(null, err);
test.equal(1, count);
collection
.find({ $where: new Code('this.a > i', { i: 1 }) })
.count(function(err, count) {
test.equal(null, err);
test.equal(2, count);
// Let's close the db
client.close();
done();
});
});
});
}
);
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyPerformFindsWithHintTurnedOn', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_hint', function(err, collection) {
collection.insert({ a: 1 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
db.createIndex(
collection.collectionName,
'a',
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection.find({ a: 1 }, { hint: 'a' }).toArray(function(err) {
test.ok(err != null);
collection.find({ a: 1 }, { hint: ['a'] }).toArray(function(err, items) {
test.equal(1, items.length);
collection.find({ a: 1 }, { hint: { a: 1 } }).toArray(function(err, items) {
test.equal(1, items.length);
// Modify hints
collection.hint = 'a_1';
test.equal('a_1', collection.hint);
collection.find({ a: 1 }).toArray(function(err, items) {
test.equal(1, items.length);
collection.hint = ['a'];
test.equal(1, collection.hint['a']);
collection.find({ a: 1 }).toArray(function(err, items) {
test.equal(1, items.length);
collection.hint = { a: 1 };
test.equal(1, collection.hint['a']);
collection.find({ a: 1 }).toArray(function(err, items) {
test.equal(1, items.length);
collection.hint = null;
test.ok(collection.hint == null);
collection.find({ a: 1 }).toArray(function(err, items) {
test.equal(1, items.length);
// Let's close the db
client.close();
done();
});
});
});
});
});
});
});
}
);
});
});
});
}
});
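  // Note (behavior exercised above): assigning to collection.hint keeps a
  // plain index-name string as-is, while an array of field names or a key
  // document is normalized to a key document -- hence collection.hint['a']
  // equals 1 after assigning either ['a'] or { a: 1 }.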
/**
* @ignore
*/
it('shouldCorrectlyPerformFindByObjectID', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_by_oid', function(err, collection) {
collection.save({ hello: 'mike' }, configuration.writeConcernMax(), function(err, r) {
var docs = r.ops[0];
test.ok(
docs._id instanceof ObjectID ||
Object.prototype.toString.call(docs._id) === '[object ObjectID]'
);
collection.findOne({ _id: docs._id }, function(err, doc) {
test.equal('mike', doc.hello);
var id = doc._id.toString();
collection.findOne({ _id: new ObjectID(id) }, function(err, doc) {
test.equal('mike', doc.hello);
// Let's close the db
client.close();
done();
});
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyReturnDocumentWithOriginalStructure', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_by_oid_with_subdocs', function(err, collection) {
var c1 = { _id: new ObjectID(), comments: [], title: 'number 1' };
var c2 = { _id: new ObjectID(), comments: [], title: 'number 2' };
var doc = {
numbers: [],
owners: [],
comments: [c1, c2],
_id: new ObjectID()
};
collection.insert(doc, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
collection.findOne({ _id: doc._id }, { w: 1, fields: undefined }, function(err, doc) {
expect(err).to.not.exist;
test.equal(2, doc.comments.length);
test.equal('number 1', doc.comments[0].title);
test.equal('number 2', doc.comments[1].title);
client.close();
done();
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyRetrieveSingleRecord', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_should_correctly_retrieve_one_record', function(err, collection) {
collection.insert({ a: 0 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
db.collection('test_should_correctly_retrieve_one_record', function(
err,
usercollection
) {
usercollection.findOne({ a: 0 }, function(err) {
test.equal(null, err);
p_client.close();
done();
});
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyHandleError', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_one_error_handling', function(err, collection) {
        // Try to fetch an object using a totally invalid and wrong hex string... what we're
        // interested in here is the error handling of the findOne method
try {
collection.findOne(
{ _id: ObjectID.createFromHexString('5e9bd59248305adf18ebc15703a1') },
function() {}
);
} catch (err) {
client.close();
done();
}
});
});
}
});
/**
* Test field select with options
* @ignore
*/
it('shouldCorrectlyPerformFindWithOptions', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_field_select_with_options', function(err) {
test.equal(null, err);
db.collection('test_field_select_with_options', function(err, collection) {
var docCount = 25,
docs = [];
// Insert some test documents
while (docCount--) docs.push({ a: docCount, b: docCount });
collection.insert(docs, configuration.writeConcernMax(), function(err, retDocs) {
docs = retDocs;
collection
.find({}, { limit: 3, sort: [['a', -1]], projection: { a: 1 } })
.toArray(function(err, documents) {
test.equal(3, documents.length);
documents.forEach(function(doc, idx) {
test.equal(undefined, doc.b); // making sure field select works
test.equal(24 - idx, doc.a); // checking limit sort object with field select
});
client.close();
done();
});
});
});
});
});
}
});
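  // Note: the test above uses the `projection` option; several older tests in
  // this file still pass the legacy `fields` spelling, which this driver
  // version accepts as a synonym.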
/**
* Test findAndModify a document
* @ignore
*/
it('shouldCorrectlyFindAndModifyDocument', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_and_modify_a_document_1', function(err, collection) {
// Test return new document on change
collection.insert({ a: 1, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 1 },
[['a', 1]],
{ $set: { b: 3 } },
{ new: true },
function(err, updated_doc) {
test.equal(1, updated_doc.value.a);
test.equal(3, updated_doc.value.b);
// Test return old document on change
collection.insert({ a: 2, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 2 },
[['a', 1]],
{ $set: { b: 3 } },
configuration.writeConcernMax(),
function(err, result) {
test.equal(2, result.value.a);
test.equal(2, result.value.b);
// Test remove object on change
collection.insert({ a: 3, b: 2 }, configuration.writeConcernMax(), function(
err
) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 3 },
[],
{ $set: { b: 3 } },
{ remove: true },
function(err, updated_doc) {
test.equal(3, updated_doc.value.a);
test.equal(2, updated_doc.value.b);
// Let's upsert!
collection.findAndModify(
{ a: 4 },
[],
{ $set: { b: 3 } },
{ new: true, upsert: true },
function(err, updated_doc) {
test.equal(4, updated_doc.value.a);
test.equal(3, updated_doc.value.b);
// Test selecting a subset of fields
collection.insert(
{ a: 100, b: 101 },
configuration.writeConcernMax(),
function(err, r) {
test.equal(null, err);
collection.findAndModify(
{ a: 100 },
[],
{ $set: { b: 5 } },
{ new: true, fields: { b: 1 } },
function(err, updated_doc) {
test.equal(2, Object.keys(updated_doc.value).length);
test.equal(
r.ops[0]['_id'].toHexString(),
updated_doc.value._id.toHexString()
);
test.equal(5, updated_doc.value.b);
test.equal('undefined', typeof updated_doc.value.a);
client.close();
done();
}
);
}
);
}
);
}
);
});
}
);
});
}
);
});
});
});
}
});
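  // Note (result shape exercised above): findAndModify yields a result
  // envelope rather than the bare document; the modified, removed, or
  // upserted document lives on result.value, which is why the assertions
  // read updated_doc.value.a and friends.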
/**
* Test findAndModify a document with fields
* @ignore
*/
it('shouldCorrectlyFindAndModifyDocumentAndReturnSelectedFieldsOnly', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_and_modify_a_document_2', function(err, collection) {
// Test return new document on change
collection.insert({ a: 1, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 1 },
[['a', 1]],
{ $set: { b: 3 } },
{ new: true, fields: { a: 1 } },
function(err, updated_doc) {
test.equal(2, Object.keys(updated_doc.value).length);
test.equal(1, updated_doc.value.a);
client.close();
done();
}
);
});
});
});
}
});
/**
* @ignore
*/
it('ShouldCorrectlyLocatePostAndIncValues', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldCorrectlyExecuteFindOneWithAnInSearchTag', function(
err,
collection
) {
// Test return new document on change
collection.insert(
{
title: 'Tobi',
author: 'Brian',
newTitle: 'Woot',
meta: { visitors: 0 }
},
configuration.writeConcernMax(),
function(err, r) {
// Fetch the id
var id = r.ops[0]._id;
collection.update(
{ _id: id },
{ $inc: { 'meta.visitors': 1 } },
configuration.writeConcernMax(),
function(err, r) {
test.equal(1, r.result.n);
test.equal(null, err);
collection.findOne({ _id: id }, function(err, item) {
test.equal(1, item.meta.visitors);
client.close();
done();
});
}
);
}
);
});
});
}
});
/**
* Test findAndModify a document
* @ignore
*/
it('Should Correctly Handle FindAndModify Duplicate Key Error', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('FindAndModifyDuplicateKeyError', function(err, collection) {
collection.ensureIndex(['name', 1], { unique: true, w: 1 }, function(err) {
test.equal(null, err);
// Test return new document on change
collection.insert(
[{ name: 'test1' }, { name: 'test2' }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ name: 'test1' },
[],
{ $set: { name: 'test2' } },
{},
function(err, updated_doc) {
test.equal(null, updated_doc);
test.ok(err != null);
client.close();
done();
}
);
}
);
});
});
});
}
});
/**
* @ignore
*/
it('Should correctly return null when attempting to modify a non-existing document', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('AttemptToFindAndModifyNonExistingDocument', function(err, collection) {
// Let's modify the document in place
collection.findAndModify({ name: 'test1' }, [], { $set: { name: 'test2' } }, {}, function(
err,
updated_doc
) {
test.equal(null, updated_doc.value);
test.ok(err == null || err.errmsg.match('No matching object found'));
client.close();
done();
});
});
});
}
});
/**
* @ignore
*/
it('Should correctly handle chained skip and limit on find with toArray', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('skipAndLimitOnFindWithToArray', function(err, collection) {
collection.insert(
[{ a: 1 }, { b: 2 }, { c: 3 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection
.find()
.skip(1)
.limit(-1)
.toArray(function(err, items) {
test.equal(null, err);
test.equal(1, items.length);
test.equal(2, items[0].b);
client.close();
done();
});
}
);
});
});
}
});
/**
* @ignore
*/
it('Should correctly handle chained skip and negative limit on find with toArray', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('skipAndNegativeLimitOnFindWithToArray', function(err, collection) {
collection.insert(
[{ a: 1 }, { b: 2 }, { c: 3 }, { d: 4 }, { e: 5 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection
.find()
.skip(1)
.limit(-3)
.toArray(function(err, items) {
test.equal(null, err);
test.equal(3, items.length);
test.equal(2, items[0].b);
test.equal(3, items[1].c);
test.equal(4, items[2].d);
client.close();
done();
});
}
);
});
});
}
});
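  // Note (behavior exercised above): the server treats a negative limit as a
  // single-batch read capped at the absolute value, closing the cursor after
  // that batch -- which is why skip(1).limit(-3) yields exactly three documents.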
/**
* @ignore
*/
it('Should correctly pass timeout options to cursor', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('timeoutFalse', function(err, collection) {
collection.find({}, { timeout: false }, function(err, cursor) {
test.equal(false, cursor.s.cmd.noCursorTimeout);
collection.find({}, { timeout: true }, function(err, cursor) {
test.equal(true, cursor.s.cmd.noCursorTimeout);
collection.find({}, {}, function(err, cursor) {
test.ok(!cursor.s.cmd.noCursorTimeout);
client.close();
done();
});
});
});
});
});
}
});
/**
* Test findAndModify a document with strict mode enabled
* @ignore
*/
it('shouldCorrectlyFindAndModifyDocumentWithDBStrict', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldCorrectlyFindAndModifyDocumentWithDBStrict', function(
err,
collection
) {
// Test return old document on change
collection.insert({ a: 2, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 2 },
[['a', 1]],
{ $set: { b: 3 } },
{ new: true },
function(err, result) {
test.equal(2, result.value.a);
test.equal(3, result.value.b);
p_client.close();
done();
}
);
});
});
});
}
});
/**
* Test findAndModify a document that fails in first step before safe
* @ignore
*/
it('shouldCorrectlyFindAndModifyDocumentThatFailsInFirstStep', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldCorrectlyFindAndModifyDocumentThatFailsInFirstStep', function(
err,
collection
) {
        // Set up an index to force a duplicate key error
collection.ensureIndex([['failIndex', 1]], { unique: true, w: 1 }, function(err) {
test.equal(null, err);
// Setup a new document
collection.insert(
{ a: 2, b: 2, failIndex: 2 },
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Let's attempt to upsert with a duplicate key error
collection.findAndModify(
{ c: 2 },
[['a', 1]],
{ a: 10, b: 10, failIndex: 2 },
{ w: 1, upsert: true },
function(err, result) {
test.equal(null, result);
test.ok(err.errmsg.match('duplicate key'));
client.close();
done();
}
);
}
);
});
});
});
}
});
/**
* @ignore
*/
it('Should correctly return new modified document', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('Should_correctly_return_new_modified_document', function(
err,
collection
) {
var id = new ObjectID();
var doc = { _id: id, a: 1, b: 1, c: { a: 1, b: 1 } };
collection.insert(doc, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Find and modify returning the new object
collection.findAndModify(
{ _id: id },
[],
{ $set: { 'c.c': 100 } },
{ new: true },
function(err, item) {
test.equal(doc._id.toString(), item.value._id.toString());
test.equal(doc.a, item.value.a);
test.equal(doc.b, item.value.b);
test.equal(doc.c.a, item.value.c.a);
test.equal(doc.c.b, item.value.c.b);
test.equal(100, item.value.c.c);
client.close();
done();
}
);
});
});
});
}
});
/**
* Should correctly execute findAndModify that is breaking in prod
* @ignore
*/
it('shouldCorrectlyExecuteFindAndModify', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('shouldCorrectlyExecuteFindAndModify', function(err, collection) {
var self = { _id: new ObjectID() };
var _uuid = 'sddffdss';
collection.findAndModify(
{ _id: self._id, 'plays.uuid': _uuid },
[],
{ $set: { 'plays.$.active': true } },
{ new: true, fields: { plays: 0, results: 0 }, safe: true },
function(err) {
test.equal(null, err);
client.close();
done();
}
);
});
});
}
});
/**
* @ignore
*/
it('Should correctly return record with 64-bit id', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID,
Long = configuration.require.Long;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('should_correctly_return_record_with_64bit_id', function(
err,
collection
) {
var _lowerId = new ObjectID();
var _higherId = new ObjectID();
          var lowerId = Long.fromString('133118461172916224', 10);
          var higherId = Long.fromString('133118461172916225', 10);
var lowerDoc = { _id: _lowerId, id: lowerId };
var higherDoc = { _id: _higherId, id: higherId };
collection.insert([lowerDoc, higherDoc], configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Select record with id of 133118461172916225 using $gt directive
collection.find({ id: { $gt: lowerId } }, {}).toArray(function(err, arr) {
test.ok(err == null);
test.equal(
arr.length,
1,
'Selecting record via $gt directive on 64-bit integer should return a record with higher Id'
);
test.equal(
arr[0].id.toString(),
'133118461172916225',
'Returned Id should be equal to 133118461172916225'
);
client.close();
done();
});
});
});
});
}
});
/**
* @ignore
*/
it('Should Correctly find a Document using findOne excluding _id field', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection(
'Should_Correctly_find_a_Document_using_findOne_excluding__id_field',
function(err, collection) {
var doc = { _id: new ObjectID(), a: 1, c: 2 };
// insert doc
collection.insert(doc, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Get one document, excluding the _id field
collection.findOne({ a: 1 }, { fields: { _id: 0 } }, function(err, item) {
test.equal(undefined, item._id);
test.equal(1, item.a);
test.equal(2, item.c);
collection.find({ a: 1 }, { fields: { _id: 0 } }).toArray(function(err, items) {
var item = items[0];
test.equal(undefined, item._id);
test.equal(1, item.a);
test.equal(2, item.c);
p_client.close();
done();
});
});
});
}
);
});
}
});
/**
* @ignore
*/
it('Should correctly execute find queries with selector set to null', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection(
'Should_correctly_execute_find_and_findOne_queries_in_the_same_way',
function(err, collection) {
var doc = { _id: new ObjectID(), a: 1, c: 2, comments: [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] };
// insert doc
collection.insert(doc, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
collection
.find({ _id: doc._id })
.project({ comments: { $slice: -5 } })
.toArray(function(err, docs) {
test.equal(5, docs[0].comments.length);
client.close();
done();
});
});
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyHandlerErrorForFindAndModifyWhenNoRecordExists', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection(
'shouldCorrectlyHandlerErrorForFindAndModifyWhenNoRecordExists',
function(err, collection) {
collection.findAndModify({ a: 1 }, [], { $set: { b: 3 } }, { new: true }, function(
err,
updated_doc
) {
test.equal(null, err);
test.equal(null, updated_doc.value);
client.close();
done();
});
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyExecuteFindAndModifyShouldGenerateCorrectBSON', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var transaction = {};
transaction.document = {};
transaction.document.type = 'documentType';
transaction.document.id = new ObjectID();
transaction.transactionId = new ObjectID();
transaction.amount = 12.3333;
var transactions = [];
transactions.push(transaction);
// Wrapping object
      var wrappingObject = {
funds: {
remaining: 100.5
},
transactions: transactions
};
db.createCollection('shouldCorrectlyExecuteFindAndModify', function(err, collection) {
test.equal(null, err);
        collection.insert(wrappingObject, configuration.writeConcernMax(), function(err, r) {
test.equal(null, err);
collection.findOne(
{
_id: r.ops[0]._id,
'funds.remaining': { $gte: 3.0 },
'transactions.id': { $ne: transaction.transactionId }
},
function(err, item) {
test.ok(item != null);
collection.findAndModify(
{
_id: r.ops[0]._id,
'funds.remaining': { $gte: 3.0 },
'transactions.id': { $ne: transaction.transactionId }
},
[],
{ $push: { transactions: transaction } },
{ new: true, safe: true },
function(err) {
test.equal(null, err);
client.close();
done();
}
);
}
);
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyExecuteMultipleFindsInParallel', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('tasks', function(err, collection) {
var numberOfOperations = 0;
// Test return old document on change
collection.insert({ a: 2, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
collection
.find(
{
user_id: '4e9fc8d55883d90100000003',
lc_status: { $ne: 'deleted' },
owner_rating: { $exists: false }
},
{ skip: 0, limit: 10, sort: { updated: -1 } }
)
.count(function(err) {
test.equal(null, err);
numberOfOperations = numberOfOperations + 1;
if (numberOfOperations === 2) {
done();
p_client.close();
}
});
collection
.find(
{
user_id: '4e9fc8d55883d90100000003',
lc_status: { $ne: 'deleted' },
owner_rating: { $exists: false }
},
{ skip: 0, limit: 10, sort: { updated: -1 } }
)
.count(function(err) {
test.equal(null, err);
numberOfOperations = numberOfOperations + 1;
if (numberOfOperations === 2) {
done();
p_client.close();
}
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyReturnErrorFromMongodbOnFindAndModifyForcedError', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection(
'shouldCorrectlyReturnErrorFromMongodbOnFindAndModifyForcedError',
function(err, collection) {
var q = { x: 1 };
var set = { y: 2, _id: new ObjectID() };
var opts = { new: true, upsert: true };
// Original doc
var doc = { _id: new ObjectID(), x: 1 };
// Insert original doc
collection.insert(doc, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
collection.findAndModify(q, [], set, opts, function(/* err */) {
client.close();
done();
});
});
}
);
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyExecuteFindAndModifyUnderConcurrentLoad', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var ObjectID = configuration.require.ObjectID;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
var running = true;
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
// Create a collection
db.collection('collection1', function(err, collection) {
// Wait a bit and then execute something that will throw a duplicate error
setTimeout(function() {
var id = new ObjectID();
collection.insert({ _id: id, a: 1 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
collection.insert({ _id: id, a: 1 }, configuration.writeConcernMax(), function(err) {
test.ok(err !== null);
running = false;
done();
p_client.close();
});
});
}, 200);
});
db.collection('collection2', function(err, collection) {
// Keep hammering in inserts
var insert = function() {
process.nextTick(function() {
collection.insert({ a: 1 });
if (running) process.nextTick(insert);
});
        };
        // Start the insert loop; it stops once `running` flips to false
        insert();
      });
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyIterateOverCollection', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
var numberOfSteps = 0;
// Open db connection
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
// Create a collection
var collection = db.collection('shouldCorrectlyIterateOverCollection');
      // Insert 500 documents
var insertF = function(l, callback) {
collection.insert(
{ a: 1, b: 2, c: { d: 3, f: 'sfdsffffffffffffffffffffffffffffff' } },
function() {
l = l - 1;
if (l > 0) return insertF(l, callback);
callback();
}
);
};
insertF(500, function() {
var cursor = collection.find({}, {});
cursor.count(function(err) {
test.equal(null, err);
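          // each() hands the callback a null document once the cursor is
          // exhausted; that null is the termination signal checked below.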
cursor.each(function(err, obj) {
if (obj == null) {
p_client.close();
test.equal(500, numberOfSteps);
done();
} else {
numberOfSteps = numberOfSteps + 1;
}
});
});
});
});
}
});
/**
* @ignore
*/
it('shouldCorrectlyErrorOutFindAndModifyOnDuplicateRecord', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var p_client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1,
auto_reconnect: false
});
p_client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(err, null);
db.createCollection('shouldCorrectlyErrorOutFindAndModifyOnDuplicateRecord', function(
err,
collection
) {
test.equal(err, null);
// Test return old document on change
collection.insert(
[{ login: 'user1' }, { login: 'user2' }],
configuration.writeConcernMax(),
function(err, r) {
test.equal(err, null);
var id = r.ops[1]._id;
// Set an index
collection.ensureIndex('login', { unique: true, w: 1 }, function(err) {
test.equal(null, err);
              // Attempt to modify document
collection.findAndModify(
{ _id: id },
[],
{ $set: { login: 'user1' } },
{},
function(err) {
test.ok(err !== null);
p_client.close();
done();
}
);
});
}
);
});
});
}
});
/**
* An example of using find with a very large in parameter
*
* @ignore
*/
it('shouldPerformSimpleFindInArray', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Create a collection we want to drop later
db.createCollection('simple_find_in_array', function(err, collection) {
test.equal(null, err);
var docs = [];
for (var i = 0; i < 100; i++) docs.push({ a: i });
        // Insert some test documents
collection.insert(docs, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
          // Add the plain values 0..99 so the $in array also contains scalars
for (var i = 0; i < 100; i++) docs.push(i);
          // Find all documents whose a value appears in the array
collection.find({ a: { $in: docs } }).toArray(function(err, items) {
test.equal(null, err);
test.equal(100, items.length);
client.close();
done();
});
});
});
});
}
});
it('shouldReturnInstanceofErrorWithBadFieldSelection', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
var col = db.collection('bad_field_selection');
col.insert(
[{ a: 1, b: 1 }, { a: 2, b: 2 }, { a: 3, b: 3 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
col.find({}, { skip: 1, limit: 1, fields: { _id: 1, b: 0 } }).toArray(function(err) {
test.ok(err instanceof Error);
client.close();
done();
});
}
);
});
}
});
/**
* A simple query using find and fields
*/
it('shouldPerformASimpleLimitSkipFindWithFields', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Create a collection we want to drop later
db.createCollection('simple_find_with_fields', function(err, collection) {
test.equal(null, err);
// Insert a bunch of documents for the testing
collection.insert(
[{ a: 1, b: 1 }, { a: 2, b: 2 }, { a: 3, b: 3 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Perform a simple find and return all the documents
collection
.find({ a: 2 })
.project({ b: 1 })
.toArray(function(err, docs) {
test.equal(null, err);
test.equal(1, docs.length);
test.equal(undefined, docs[0].a);
test.equal(2, docs[0].b);
// Perform a simple find and return all the documents
collection
.find({ a: 2 })
.project({ b: 1 })
.toArray(function(err, docs) {
test.equal(null, err);
test.equal(1, docs.length);
test.equal(undefined, docs[0].a);
test.equal(2, docs[0].b);
client.close();
done();
});
});
}
);
});
});
}
});
/**
* A simple query using find and fields
*/
it('shouldPerformASimpleLimitSkipFindWithFields2', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Create a collection we want to drop later
db.createCollection('simple_find_with_fields_2', function(err, collection) {
test.equal(null, err);
// Insert a bunch of documents for the testing
collection.insert(
[{ a: 1, b: 1 }, { a: 2, b: 2 }, { a: 3, b: 3 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Perform a simple find and return all the documents
collection
.find({ a: 2 })
.project({ b: 1 })
.toArray(function(err, docs) {
test.equal(null, err);
test.equal(1, docs.length);
test.equal(undefined, docs[0].a);
test.equal(2, docs[0].b);
client.close();
done();
});
}
);
});
});
}
});
/**
* A simple query with a different batchSize
*/
it('shouldPerformQueryWithBatchSizeDifferentToStandard', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Create a collection we want to drop later
db.createCollection('shouldPerformQueryWithBatchSizeDifferentToStandard', function(
err,
collection
) {
test.equal(null, err);
var docs = [];
for (var i = 0; i < 1000; i++) {
docs.push({ a: i });
}
// Insert a bunch of documents for the testing
collection.insert(docs, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Perform a simple find and return all the documents
collection.find({}, { batchSize: 1000 }).toArray(function(err, docs) {
test.equal(null, err);
test.equal(1000, docs.length);
client.close();
done();
});
});
});
});
}
});
/**
* A simple query with negative limit
*/
it('shouldCorrectlyPerformNegativeLimit', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Create a collection we want to drop later
db.collection('shouldCorrectlyPerformNegativeLimit', function(err, collection) {
var docs = [];
for (var i = 0; i < 1000; i++) {
docs.push({
a: 1,
b:
'helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld'
});
}
// Insert a bunch of documents
collection.insert(docs, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Perform a simple find and return all the documents
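          // A negative limit requests a single batch of at most |n| documents,
          // after which the server closes the cursor.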
collection
.find({})
.limit(-10)
.toArray(function(err, docs) {
test.equal(null, err);
test.equal(10, docs.length);
client.close();
done();
});
});
});
});
}
});
/**
* Should perform an exhaust find query
*/
it('shouldCorrectlyExecuteExhaustQuery', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var Binary = configuration.require.Binary;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Create a collection we want to drop later
db.collection('shouldCorrectlyExecuteExhaustQuery', function(err, collection) {
test.equal(null, err);
var docs1 = [];
for (var i = 0; i < 1000; i++) {
docs1.push({
a: 1,
b:
'helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld',
c: new Binary(Buffer.alloc(1024))
});
}
// Insert a bunch of documents
collection.insert(docs1, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
          var docs2 = [];
          for (var i = 0; i < 1000; i++) {
            docs2.push({
              a: 1,
              b:
                'helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld',
              c: new Binary(Buffer.alloc(1024))
            });
          }
collection.insert(docs2, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Perform a simple find and return all the documents
collection.find({}, { exhaust: true }).toArray(function(err, docs3) {
test.equal(null, err);
test.equal(docs1.length + docs2.length, docs3.length);
client.close();
done();
});
});
});
});
});
}
});
it('Readpreferences should work fine when using a single server instance', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
var docs = [];
for (var i = 0; i < 1; i++) {
docs.push({
a: 1,
b:
'helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld helloworld'
});
}
// Create a collection we want to drop later
db.collection('Readpreferencesshouldworkfine', function(err, collection) {
// Insert a bunch of documents
collection.insert(docs, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Perform a simple find and return all the documents
collection.find({}, { exhaust: true }).toArray(function(err, docs2) {
test.equal(null, err);
test.equal(docs.length, docs2.length);
client.close();
done();
});
});
});
});
}
});
it('Each should not hang on iterating over no results', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
test.equal(null, err);
// Create a collection we want to drop later
db.collection('noresultAvailableForEachToIterate', function(err, collection) {
// Perform a simple find and return all the documents
collection.find({}).each(function(err, item) {
test.equal(null, item);
client.close();
done();
});
});
});
}
});
it('shouldCorrectlyFindDocumentsByRegExp', {
metadata: { requires: { topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Serialized regexes contain extra trailing chars. Sometimes these trailing chars contain / which makes
// the original regex invalid, and leads to segmentation fault.
db.createCollection('test_regex_serialization', function(err, collection) {
collection.insert(
{ keywords: ['test', 'segmentation', 'fault', 'regex', 'serialization', 'native'] },
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
var count = 20,
run = function(i) {
// search by regex
collection.findOne(
{ keywords: { $all: [/ser/, /test/, /seg/, /fault/, /nat/] } },
function(err, item) {
test.equal(6, item.keywords.length);
if (i === 0) {
client.close();
done();
}
}
);
};
// loop a few times to catch the / in trailing chars case
while (count--) {
run(count);
}
}
);
});
});
}
});
it('shouldCorrectlyDoFindMinMax', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
      // Query by _id with $in against mixed-type values and project only the
      // fields we need, both via project() and via the fields option.
db.createCollection('shouldCorrectlyDoFindMinMax', function(err, collection) {
collection.insert(
{ _id: 123, name: 'some name', min: 1, max: 10 },
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
collection
.find({ _id: { $in: ['some', 'value', 123] } })
.project({ _id: 1, max: 1 })
.toArray(function(err, docs) {
test.equal(null, err);
test.equal(10, docs[0].max);
collection
.find({ _id: { $in: ['some', 'value', 123] } }, { fields: { _id: 1, max: 1 } })
.toArray(function(err, docs) {
test.equal(null, err);
test.equal(10, docs[0].max);
client.close();
done();
});
});
}
);
});
});
}
});
it('Should correctly execute parallelCollectionScan with multiple cursors using each', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { mongodb: '>2.5.5 <=4.1.0', topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var docs = [];
// Insert some documents
for (var i = 0; i < 1000; i++) {
docs.push({ a: i });
}
// Get the collection
var collection = db.collection('parallelCollectionScan_2');
      // Insert 1000 documents in a batch
collection.insert(docs, function(err) {
test.equal(null, err);
var results = [];
var numCursors = 3;
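        // The server may return fewer cursors than requested, hence the
        // <= assertion below.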
// Execute parallelCollectionScan command
collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
test.equal(null, err);
test.ok(cursors != null);
test.ok(cursors.length > 0);
test.ok(cursors.length <= numCursors);
var left = cursors.length;
for (var i = 0; i < cursors.length; i++) {
cursors[i].each(function(err, item) {
test.equal(err, null);
// Add item to list
if (item) results.push(item);
// Finished each
if (item == null) {
left = left - 1;
// No more cursors let's ensure we got all results
if (left === 0) {
test.equal(docs.length, results.length);
// Ensure all cursors are closed
for (var j = 0; j < cursors.length; j++) {
test.equal(true, cursors[j].isClosed());
}
client.close();
return done();
}
}
});
}
});
});
});
}
});
it('Should correctly execute parallelCollectionScan with multiple cursors using next', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { mongodb: '>2.5.5 <=4.1.0', topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var docs = [];
// Insert some documents
for (var i = 0; i < 1000; i++) {
docs.push({ a: i });
}
// Get the collection
var collection = db.collection('parallelCollectionScan_3');
// Insert 1000 documents in a batch
collection.insert(docs, function(err) {
test.equal(null, err);
var results = [];
var numCursors = 3;
// Execute parallelCollectionScan command
collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
test.equal(null, err);
test.ok(cursors != null);
test.ok(cursors.length > 0);
test.ok(cursors.length <= numCursors);
var left = cursors.length;
var iterate = _cursor => {
_cursor.toArray().then(_docs => {
results = results.concat(_docs);
left--;
// No more cursors let's ensure we got all results
test.equal(true, _cursor.isClosed());
if (left === 0) {
test.equal(docs.length, results.length);
client.close();
return done();
}
});
};
for (var i = 0; i < cursors.length; i++) {
iterate(cursors[i]);
}
});
});
});
}
});
it('Should correctly execute parallelCollectionScan with single cursor and close', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { mongodb: '>2.5.5 <=4.1.0', topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var docs = [];
// Insert some documents
for (var i = 0; i < 1000; i++) {
docs.push({ a: i });
}
// Get the collection
var collection = db.collection('parallelCollectionScan_4');
// Insert 1000 documents in a batch
collection.insert(docs, function(err) {
test.equal(null, err);
var numCursors = 1;
// Execute parallelCollectionScan command
collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
test.equal(null, err);
test.ok(cursors != null);
test.ok(cursors.length > 0);
cursors[0].close(function(err, result) {
test.equal(null, err);
test.ok(result != null);
test.equal(true, cursors[0].isClosed());
client.close();
done();
});
});
});
});
}
});
it('Should correctly execute parallelCollectionScan with single cursor streaming', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { mongodb: '>2.5.5 <=4.1.0', topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var docs = [];
// Insert some documents
for (var i = 0; i < 1000; i++) {
docs.push({ a: i });
}
// Get the collection
var collection = db.collection('parallelCollectionScan_5');
// Insert 1000 documents in a batch
collection.insert(docs, function(err) {
test.equal(null, err);
var results = [];
var numCursors = 1;
// Execute parallelCollectionScan command
collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
test.equal(null, err);
test.ok(cursors != null);
test.ok(cursors.length > 0);
cursors[0].on('data', function(data) {
results.push(data);
});
cursors[0].on('end', function() {
test.equal(docs.length, results.length);
test.equal(true, cursors[0].isClosed());
client.close();
done();
});
});
});
});
}
});
it('Should not use a session when using parallelCollectionScan', {
metadata: {
requires: {
mongodb: '>=3.6.0 <= 4.1.0',
topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger']
}
},
test: function(done) {
const configuration = this.configuration;
const client = configuration.newClient();
client.connect(function(err, client) {
var db = client.db(configuration.db);
var docs = [];
// Insert some documents
for (var i = 0; i < 1000; i++) {
docs.push({ a: i });
}
// Get the collection
var collection = db.collection('parallelCollectionScan_4');
// Insert 1000 documents in a batch
collection.insert(docs, function(err) {
expect(err).to.be.null;
var numCursors = 1;
// Execute parallelCollectionScan command
collection.parallelCollectionScan({ numCursors: numCursors }, function(err, cursors) {
expect(err).to.be.null;
expect(cursors)
.to.be.an('array')
.with.lengthOf(1);
const cursor = cursors[0];
expect(cursor).to.not.have.nested.property('s.session');
expect(cursor)
.to.have.nested.property('s.explicitlyIgnoreSession')
.that.equals(true);
cursor.toArray(err => {
expect(err).to.be.null;
expect(cursor).to.not.have.nested.property('s.session');
cursor.close().then(() => client.close().then(() => done()));
});
});
});
});
}
});
it('Should correctly sort using text search on 2.6 or higher in find', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: {
requires: {
mongodb: '>2.5.5',
topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger']
}
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
// Get the collection
var collection = db.collection('textSearchWithSort');
collection.ensureIndex({ s: 'text' }, function(err) {
test.equal(null, err);
collection.insert(
[{ s: 'spam' }, { s: 'spam eggs and spam' }, { s: 'sausage and eggs' }],
function(err) {
test.equal(null, err);
collection
.find(
{ $text: { $search: 'spam' } },
{ fields: { _id: false, s: true, score: { $meta: 'textScore' } } }
)
.sort({ score: { $meta: 'textScore' } })
.toArray(function(err, items) {
test.equal(null, err);
test.equal('spam eggs and spam', items[0].s);
client.close();
done();
});
}
);
});
});
}
});
it('shouldNotMutateUserOptions', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('shouldNotMutateUserOptions');
var options = { raw: 'TEST' };
collection.find({}, options, function(error) {
test.equal(null, error);
test.equal(undefined, options.skip);
test.equal(undefined, options.limit);
test.equal('TEST', options.raw);
client.close();
done();
});
});
}
});
it(
'Should correctly execute parallelCollectionScan with single cursor emitting raw buffers and close',
{
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { mongodb: '>2.5.5 <=4.1.0', topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var docs = [];
// Insert some documents
for (var i = 0; i < 1000; i++) {
docs.push({ a: i });
}
// Get the collection
var collection = db.collection('parallelCollectionScan_4');
// Insert 1000 documents in a batch
collection.insert(docs, function(err) {
test.equal(null, err);
var numCursors = 1;
// Execute parallelCollectionScan command
collection.parallelCollectionScan({ numCursors: numCursors, raw: true }, function(
err,
cursors
) {
test.equal(null, err);
test.ok(cursors != null);
test.ok(cursors.length > 0);
cursors[0].next(function(err) {
test.equal(null, err);
// We need to close the cursor since it is not exhausted,
// and we need to end the implicit session
cursors[0].close();
client.close();
done();
});
});
});
});
}
}
);
it('Should simulate closed cursor', {
// Add a tag that our runner can trigger on
// in this case we are setting that node needs to be higher than 0.10.X to run
metadata: { requires: { mongodb: '>2.5.5', topology: ['single', 'replicaset'] } },
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var docs = [];
// Insert some documents
for (var i = 0; i < 1000; i++) {
docs.push({ a: i });
}
// Get the collection
var collection = db.collection('parallelCollectionScan_4');
// Insert 1000 documents in a batch
collection.insert(docs, function(err) {
test.equal(null, err);
// Get the cursor
var cursor = collection.find({}).batchSize(2);
// Get next document
cursor.next(function(err, doc) {
test.equal(null, err);
test.ok(doc != null);
// Mess with state forcing a call to isDead on the cursor
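          // (2 corresponds to the cursor's internal CLOSED state in this
          // driver version — an implementation detail that may change.)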
cursor.s.state = 2;
cursor.next(function(err) {
test.ok(err !== null);
// We need to close the cursor since it is not exhausted,
// and we need to end the implicit session
cursor.close();
client.close();
done();
});
});
});
});
}
});
/**
* Find and modify should allow for a write Concern without failing
* @ignore
*/
it('should correctly execute a findAndModifyWithAWriteConcern', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.createCollection('test_find_and_modify_a_document_3', function(err, collection) {
// Test return new document on change
collection.insert({ a: 1, b: 2 }, configuration.writeConcernMax(), function(err) {
test.equal(null, err);
// Let's modify the document in place
collection.findAndModify(
{ a: 1 },
[['a', 1]],
{ $set: { b: 3 } },
{ new: true },
function(err, updated_doc) {
test.equal(1, updated_doc.value.a);
test.equal(3, updated_doc.value.b);
client.close();
done();
}
);
});
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('should execute query using batchSize of 0', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('test_find_simple_batchsize_0', function(err, collection) {
// Insert some test documents
collection.insert(
[{ a: 2 }, { b: 3 }, { b: 4 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Ensure correct insertion testing via the cursor and the count function
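            // A negative batchSize behaves like a negative limit here: one
            // batch of at most |n| documents, then the cursor is closed.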
collection
.find()
.batchSize(-5)
.toArray(function(err, documents) {
test.equal(null, err);
test.equal(3, documents.length);
// Let's close the db
client.close();
done();
});
}
);
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('should execute query using limit of 0', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('test_find_simple_limit_0', function(err, collection) {
test.equal(null, err);
// Insert some test documents
collection.insert(
[{ a: 2 }, { b: 3 }, { b: 4 }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Ensure correct insertion testing via the cursor and the count function
collection
.find()
.limit(-5)
.toArray(function(err, documents) {
test.equal(null, err);
test.equal(3, documents.length);
// Let's close the db
client.close();
done();
});
}
);
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('should execute query using $elemMatch', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('elem_match_test', function(err, collection) {
test.equal(null, err);
// Insert some test documents
collection.insert(
[{ _id: 1, results: [82, 85, 88] }, { _id: 2, results: [75, 88, 89] }],
configuration.writeConcernMax(),
function(err) {
test.equal(null, err);
// Ensure correct insertion testing via the cursor and the count function
collection
.find({ results: { $elemMatch: { $gte: 80, $lt: 85 } } })
.toArray(function(err, documents) {
test.equal(null, err);
test.deepEqual([{ _id: 1, results: [82, 85, 88] }], documents);
// Let's close the db
client.close();
done();
});
}
);
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('should execute query using limit of 101', {
metadata: {
requires: { topology: ['single', 'replicaset', 'sharded', 'ssl', 'heap', 'wiredtiger'] }
},
// The actual test we wish to run
test: function(done) {
var configuration = this.configuration;
var client = configuration.newClient(configuration.writeConcernMax(), { poolSize: 1 });
client.connect(function(err, client) {
var db = client.db(configuration.db);
db.collection('test_find_simple_limit_101', function(err, collection) {
test.equal(null, err);
function clone(obj) {
var o = {};
for (var name in obj) o[name] = obj[name];
return o;
}
var template = {
linkid: '12633170',
advertisercid: '4612127',
websitename: 'Car Rental 8',
destinationurl: 'https://www.carrental8.com/en/',
who: '8027061-12633170-1467924618000',
href: 'http://www.tkqlhce.com',
src: 'http://www.awltovhc.com',
r1: 3,
r2: 44,
r3: 24,
r4: 58
};
var docs = [];
for (var i = 0; i < 1000; i++) {
docs.push(clone(template));
}
// Insert some test documents
collection.insertMany(docs, configuration.writeConcernMax(), function(err, r) {
test.equal(null, err);
test.ok(r);
// Ensure correct insertion testing via the cursor and the count function
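          // limit(200) pushes past the server's default first batch of 101
          // documents, so this also exercises a getMore round trip.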
collection
.find()
.limit(200)
.toArray(function(err, documents) {
test.equal(null, err);
test.equal(200, documents.length);
// Let's close the db
client.close();
done();
});
});
});
});
}
});
/**
* Test a simple find
* @ignore
*/
it('Should correctly apply db level options to find cursor', {
metadata: { requires: { topology: ['single'] } },
// The actual test we wish to run
test: function(done) {
var listener = require('../..').instrument(function(err) {
test.equal(null, err);
});
var configuration = this.configuration;
const client = configuration.newClient({}, { ignoreUndefined: true });
client.connect(function(err, client) {
var db = client.db(configuration.db);
var collection = db.collection('test_find_simple_cursor_inheritance');
// Insert some test documents
collection.insert([{ a: 2 }, { b: 3, c: undefined }], function(err) {
test.equal(null, err);
// Ensure correct insertion testing via the cursor and the count function
var cursor = collection.find({ c: undefined });
test.equal(true, cursor.s.options.ignoreUndefined);
cursor.toArray(function(err, documents) {
test.equal(2, documents.length);
listener.uninstrument();
// Let's close the db
client.close();
done();
});
});
});
}
});
});
| 1 | 14,965 | It seems like this is not meant to test the option failure, but rather that `raw` can be set. This may be a superfluous test now, and we might want to delete it. | mongodb-node-mongodb-native | js |
@@ -715,6 +715,16 @@ struct roots_seat_view *roots_seat_view_from_view(
return seat_view;
}
+static void release_fullscreen(struct roots_output *output) {
+ if (output->fullscreen_view) {
+ if (output->fullscreen_view->set_fullscreen) {
+ output->fullscreen_view->set_fullscreen(
+ output->fullscreen_view, false);
+ }
+ view_set_fullscreen(output->fullscreen_view, false, output->wlr_output);
+ }
+}
+
void roots_seat_set_focus(struct roots_seat *seat, struct roots_view *view) {
// Make sure the view will be rendered on top of others, even if it's
// already focused in this seat | 1 | #include <assert.h>
#include <stdlib.h>
#include <string.h>
#include <wayland-server.h>
#include <wlr/config.h>
#include <wlr/types/wlr_idle.h>
#include <wlr/types/wlr_xcursor_manager.h>
#include <wlr/util/log.h>
#include "rootston/cursor.h"
#include "rootston/input.h"
#include "rootston/keyboard.h"
#include "rootston/seat.h"
#include "rootston/xcursor.h"
static void handle_keyboard_key(struct wl_listener *listener, void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, keyboard_key);
struct roots_desktop *desktop = keyboard->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, keyboard->seat->seat);
struct wlr_event_keyboard_key *event = data;
roots_keyboard_handle_key(keyboard, event);
}
static void handle_keyboard_modifiers(struct wl_listener *listener,
void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, keyboard_modifiers);
struct roots_desktop *desktop = keyboard->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, keyboard->seat->seat);
roots_keyboard_handle_modifiers(keyboard);
}
static void handle_cursor_motion(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, motion);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_motion *event = data;
roots_cursor_handle_motion(cursor, event);
}
static void handle_cursor_motion_absolute(struct wl_listener *listener,
void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, motion_absolute);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_motion_absolute *event = data;
roots_cursor_handle_motion_absolute(cursor, event);
}
static void handle_cursor_button(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, button);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_button *event = data;
roots_cursor_handle_button(cursor, event);
}
static void handle_cursor_axis(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, axis);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_pointer_axis *event = data;
roots_cursor_handle_axis(cursor, event);
}
static void handle_touch_down(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_down);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_down *event = data;
roots_cursor_handle_touch_down(cursor, event);
}
static void handle_touch_up(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_up);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_up *event = data;
roots_cursor_handle_touch_up(cursor, event);
}
static void handle_touch_motion(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, touch_motion);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_touch_motion *event = data;
roots_cursor_handle_touch_motion(cursor, event);
}
static void handle_tool_axis(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, tool_axis);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_tablet_tool_axis *event = data;
roots_cursor_handle_tool_axis(cursor, event);
}
static void handle_tool_tip(struct wl_listener *listener, void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, tool_tip);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_event_tablet_tool_tip *event = data;
roots_cursor_handle_tool_tip(cursor, event);
}
static void handle_request_set_cursor(struct wl_listener *listener,
void *data) {
struct roots_cursor *cursor =
wl_container_of(listener, cursor, request_set_cursor);
struct roots_desktop *desktop = cursor->seat->input->server->desktop;
wlr_idle_notify_activity(desktop->idle, cursor->seat->seat);
struct wlr_seat_pointer_request_set_cursor_event *event = data;
roots_cursor_handle_request_set_cursor(cursor, event);
}
static void seat_reset_device_mappings(struct roots_seat *seat,
struct wlr_input_device *device) {
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_config *config = seat->input->config;
wlr_cursor_map_input_to_output(cursor, device, NULL);
struct roots_device_config *dconfig;
if ((dconfig = roots_config_get_device(config, device))) {
wlr_cursor_map_input_to_region(cursor, device, dconfig->mapped_box);
}
}
static void seat_set_device_output_mappings(struct roots_seat *seat,
struct wlr_input_device *device, struct wlr_output *output) {
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_config *config = seat->input->config;
struct roots_device_config *dconfig;
dconfig = roots_config_get_device(config, device);
if (dconfig && dconfig->mapped_output &&
strcmp(dconfig->mapped_output, output->name) == 0) {
wlr_cursor_map_input_to_output(cursor, device, output);
}
}
void roots_seat_configure_cursor(struct roots_seat *seat) {
struct roots_config *config = seat->input->config;
struct roots_desktop *desktop = seat->input->server->desktop;
struct wlr_cursor *cursor = seat->cursor->cursor;
struct roots_pointer *pointer;
struct roots_touch *touch;
struct roots_tablet_tool *tablet_tool;
struct roots_output *output;
// reset mappings
wlr_cursor_map_to_output(cursor, NULL);
wl_list_for_each(pointer, &seat->pointers, link) {
seat_reset_device_mappings(seat, pointer->device);
}
wl_list_for_each(touch, &seat->touch, link) {
seat_reset_device_mappings(seat, touch->device);
}
wl_list_for_each(tablet_tool, &seat->tablet_tools, link) {
seat_reset_device_mappings(seat, tablet_tool->device);
}
// configure device to output mappings
const char *mapped_output = NULL;
struct roots_cursor_config *cc =
roots_config_get_cursor(config, seat->seat->name);
if (cc != NULL) {
mapped_output = cc->mapped_output;
}
wl_list_for_each(output, &desktop->outputs, link) {
if (mapped_output &&
strcmp(mapped_output, output->wlr_output->name) == 0) {
wlr_cursor_map_to_output(cursor, output->wlr_output);
}
wl_list_for_each(pointer, &seat->pointers, link) {
seat_set_device_output_mappings(seat, pointer->device,
output->wlr_output);
}
wl_list_for_each(tablet_tool, &seat->tablet_tools, link) {
seat_set_device_output_mappings(seat, tablet_tool->device,
output->wlr_output);
}
wl_list_for_each(touch, &seat->touch, link) {
seat_set_device_output_mappings(seat, touch->device,
output->wlr_output);
}
}
}
static void roots_seat_init_cursor(struct roots_seat *seat) {
seat->cursor = roots_cursor_create(seat);
if (!seat->cursor) {
return;
}
seat->cursor->seat = seat;
struct wlr_cursor *wlr_cursor = seat->cursor->cursor;
struct roots_desktop *desktop = seat->input->server->desktop;
wlr_cursor_attach_output_layout(wlr_cursor, desktop->layout);
roots_seat_configure_cursor(seat);
roots_seat_configure_xcursor(seat);
// add input signals
wl_signal_add(&wlr_cursor->events.motion, &seat->cursor->motion);
seat->cursor->motion.notify = handle_cursor_motion;
wl_signal_add(&wlr_cursor->events.motion_absolute,
&seat->cursor->motion_absolute);
seat->cursor->motion_absolute.notify = handle_cursor_motion_absolute;
wl_signal_add(&wlr_cursor->events.button, &seat->cursor->button);
seat->cursor->button.notify = handle_cursor_button;
wl_signal_add(&wlr_cursor->events.axis, &seat->cursor->axis);
seat->cursor->axis.notify = handle_cursor_axis;
wl_signal_add(&wlr_cursor->events.touch_down, &seat->cursor->touch_down);
seat->cursor->touch_down.notify = handle_touch_down;
wl_signal_add(&wlr_cursor->events.touch_up, &seat->cursor->touch_up);
seat->cursor->touch_up.notify = handle_touch_up;
wl_signal_add(&wlr_cursor->events.touch_motion,
&seat->cursor->touch_motion);
seat->cursor->touch_motion.notify = handle_touch_motion;
wl_signal_add(&wlr_cursor->events.tablet_tool_axis,
&seat->cursor->tool_axis);
seat->cursor->tool_axis.notify = handle_tool_axis;
wl_signal_add(&wlr_cursor->events.tablet_tool_tip, &seat->cursor->tool_tip);
seat->cursor->tool_tip.notify = handle_tool_tip;
wl_signal_add(&seat->seat->events.request_set_cursor,
&seat->cursor->request_set_cursor);
seat->cursor->request_set_cursor.notify = handle_request_set_cursor;
}
static void roots_drag_icon_handle_surface_commit(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, surface_commit);
roots_drag_icon_damage_whole(icon);
}
static void roots_drag_icon_handle_map(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, map);
roots_drag_icon_damage_whole(icon);
}
static void roots_drag_icon_handle_destroy(struct wl_listener *listener,
void *data) {
struct roots_drag_icon *icon =
wl_container_of(listener, icon, destroy);
roots_drag_icon_damage_whole(icon);
wl_list_remove(&icon->link);
wl_list_remove(&icon->surface_commit.link);
wl_list_remove(&icon->map.link);
wl_list_remove(&icon->destroy.link);
free(icon);
}
static void roots_seat_handle_new_drag_icon(struct wl_listener *listener,
void *data) {
struct roots_seat *seat = wl_container_of(listener, seat, new_drag_icon);
struct wlr_drag_icon *wlr_drag_icon = data;
struct roots_drag_icon *icon = calloc(1, sizeof(struct roots_drag_icon));
if (icon == NULL) {
return;
}
icon->seat = seat;
icon->wlr_drag_icon = wlr_drag_icon;
icon->surface_commit.notify = roots_drag_icon_handle_surface_commit;
wl_signal_add(&wlr_drag_icon->surface->events.commit, &icon->surface_commit);
icon->map.notify = roots_drag_icon_handle_map;
wl_signal_add(&wlr_drag_icon->events.map, &icon->map);
icon->destroy.notify = roots_drag_icon_handle_destroy;
wl_signal_add(&wlr_drag_icon->events.destroy, &icon->destroy);
wl_list_insert(&seat->drag_icons, &icon->link);
}
void roots_drag_icon_update_position(struct roots_drag_icon *icon) {
roots_drag_icon_damage_whole(icon);
struct wlr_drag_icon *wlr_icon = icon->wlr_drag_icon;
struct roots_seat *seat = icon->seat;
struct wlr_cursor *cursor = seat->cursor->cursor;
if (wlr_icon->is_pointer) {
icon->x = cursor->x + wlr_icon->sx;
icon->y = cursor->y + wlr_icon->sy;
} else {
struct wlr_touch_point *point =
wlr_seat_touch_get_point(seat->seat, wlr_icon->touch_id);
if (point == NULL) {
return;
}
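		// The lookup above only confirms the touch point is still active;
		// the icon position comes from the seat's cached touch coordinates.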
icon->x = seat->touch_x + wlr_icon->sx;
icon->y = seat->touch_y + wlr_icon->sy;
}
roots_drag_icon_damage_whole(icon);
}
void roots_drag_icon_damage_whole(struct roots_drag_icon *icon) {
struct roots_output *output;
wl_list_for_each(output, &icon->seat->input->server->desktop->outputs,
link) {
output_damage_whole_drag_icon(output, icon);
}
}
static void seat_view_destroy(struct roots_seat_view *seat_view);
static void roots_seat_handle_destroy(struct wl_listener *listener,
void *data) {
struct roots_seat *seat = wl_container_of(listener, seat, destroy);
// TODO: probably more to be freed here
wl_list_remove(&seat->destroy.link);
struct roots_seat_view *view, *nview;
wl_list_for_each_safe(view, nview, &seat->views, link) {
seat_view_destroy(view);
}
}
void roots_seat_destroy(struct roots_seat *seat) {
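	// Run the destroy handler up front; it removes its own listener link,
	// so the destroy signal emitted by wlr_seat_destroy() below will not
	// invoke it a second time.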
roots_seat_handle_destroy(&seat->destroy, seat->seat);
wlr_seat_destroy(seat->seat);
}
struct roots_seat *roots_seat_create(struct roots_input *input, char *name) {
struct roots_seat *seat = calloc(1, sizeof(struct roots_seat));
if (!seat) {
return NULL;
}
wl_list_init(&seat->keyboards);
wl_list_init(&seat->pointers);
wl_list_init(&seat->touch);
wl_list_init(&seat->tablet_tools);
wl_list_init(&seat->views);
wl_list_init(&seat->drag_icons);
seat->input = input;
seat->seat = wlr_seat_create(input->server->wl_display, name);
if (!seat->seat) {
free(seat);
return NULL;
}
roots_seat_init_cursor(seat);
if (!seat->cursor) {
wlr_seat_destroy(seat->seat);
free(seat);
return NULL;
}
wl_list_insert(&input->seats, &seat->link);
seat->new_drag_icon.notify = roots_seat_handle_new_drag_icon;
wl_signal_add(&seat->seat->events.new_drag_icon, &seat->new_drag_icon);
seat->destroy.notify = roots_seat_handle_destroy;
wl_signal_add(&seat->seat->events.destroy, &seat->destroy);
return seat;
}
static void seat_update_capabilities(struct roots_seat *seat) {
uint32_t caps = 0;
if (!wl_list_empty(&seat->keyboards)) {
caps |= WL_SEAT_CAPABILITY_KEYBOARD;
}
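	// wl_seat has no tablet capability, so tablet tools are surfaced
	// through the pointer capability.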
if (!wl_list_empty(&seat->pointers) || !wl_list_empty(&seat->tablet_tools)) {
caps |= WL_SEAT_CAPABILITY_POINTER;
}
if (!wl_list_empty(&seat->touch)) {
caps |= WL_SEAT_CAPABILITY_TOUCH;
}
wlr_seat_set_capabilities(seat->seat, caps);
// Hide cursor if seat doesn't have pointer capability
if ((caps & WL_SEAT_CAPABILITY_POINTER) == 0) {
wlr_cursor_set_image(seat->cursor->cursor, NULL, 0, 0, 0, 0, 0, 0);
} else {
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
seat->cursor->default_xcursor, seat->cursor->cursor);
}
}
static void handle_keyboard_destroy(struct wl_listener *listener, void *data) {
struct roots_keyboard *keyboard =
wl_container_of(listener, keyboard, device_destroy);
struct roots_seat *seat = keyboard->seat;
wl_list_remove(&keyboard->device_destroy.link);
wl_list_remove(&keyboard->keyboard_key.link);
wl_list_remove(&keyboard->keyboard_modifiers.link);
roots_keyboard_destroy(keyboard);
seat_update_capabilities(seat);
}
static void seat_add_keyboard(struct roots_seat *seat,
struct wlr_input_device *device) {
assert(device->type == WLR_INPUT_DEVICE_KEYBOARD);
struct roots_keyboard *keyboard =
roots_keyboard_create(device, seat->input);
if (keyboard == NULL) {
wlr_log(L_ERROR, "could not allocate keyboard for seat");
return;
}
keyboard->seat = seat;
wl_list_insert(&seat->keyboards, &keyboard->link);
keyboard->device_destroy.notify = handle_keyboard_destroy;
wl_signal_add(&keyboard->device->events.destroy, &keyboard->device_destroy);
keyboard->keyboard_key.notify = handle_keyboard_key;
wl_signal_add(&keyboard->device->keyboard->events.key,
&keyboard->keyboard_key);
keyboard->keyboard_modifiers.notify = handle_keyboard_modifiers;
wl_signal_add(&keyboard->device->keyboard->events.modifiers,
&keyboard->keyboard_modifiers);
wlr_seat_set_keyboard(seat->seat, device);
}
static void handle_pointer_destroy(struct wl_listener *listener, void *data) {
struct roots_pointer *pointer =
wl_container_of(listener, pointer, device_destroy);
struct roots_seat *seat = pointer->seat;
wl_list_remove(&pointer->link);
wlr_cursor_detach_input_device(seat->cursor->cursor, pointer->device);
wl_list_remove(&pointer->device_destroy.link);
free(pointer);
seat_update_capabilities(seat);
}
static void seat_add_pointer(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_pointer *pointer = calloc(1, sizeof(struct roots_pointer));
if (!pointer) {
wlr_log(L_ERROR, "could not allocate pointer for seat");
return;
}
device->data = pointer;
pointer->device = device;
pointer->seat = seat;
wl_list_insert(&seat->pointers, &pointer->link);
pointer->device_destroy.notify = handle_pointer_destroy;
wl_signal_add(&pointer->device->events.destroy, &pointer->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
static void handle_touch_destroy(struct wl_listener *listener, void *data) {
struct roots_touch *touch =
wl_container_of(listener, touch, device_destroy);
struct roots_seat *seat = touch->seat;
wl_list_remove(&touch->link);
wlr_cursor_detach_input_device(seat->cursor->cursor, touch->device);
wl_list_remove(&touch->device_destroy.link);
free(touch);
seat_update_capabilities(seat);
}
static void seat_add_touch(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_touch *touch = calloc(1, sizeof(struct roots_touch));
if (!touch) {
wlr_log(L_ERROR, "could not allocate touch for seat");
return;
}
device->data = touch;
touch->device = device;
touch->seat = seat;
wl_list_insert(&seat->touch, &touch->link);
touch->device_destroy.notify = handle_touch_destroy;
wl_signal_add(&touch->device->events.destroy, &touch->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
static void seat_add_tablet_pad(struct roots_seat *seat,
struct wlr_input_device *device) {
// TODO
}
static void handle_tablet_tool_destroy(struct wl_listener *listener,
void *data) {
struct roots_tablet_tool *tablet_tool =
wl_container_of(listener, tablet_tool, device_destroy);
struct roots_seat *seat = tablet_tool->seat;
wlr_cursor_detach_input_device(seat->cursor->cursor, tablet_tool->device);
wl_list_remove(&tablet_tool->device_destroy.link);
wl_list_remove(&tablet_tool->link);
free(tablet_tool);
seat_update_capabilities(seat);
}
static void seat_add_tablet_tool(struct roots_seat *seat,
struct wlr_input_device *device) {
struct roots_tablet_tool *tablet_tool =
calloc(1, sizeof(struct roots_tablet_tool));
if (!tablet_tool) {
wlr_log(L_ERROR, "could not allocate tablet_tool for seat");
return;
}
device->data = tablet_tool;
tablet_tool->device = device;
tablet_tool->seat = seat;
wl_list_insert(&seat->tablet_tools, &tablet_tool->link);
tablet_tool->device_destroy.notify = handle_tablet_tool_destroy;
wl_signal_add(&tablet_tool->device->events.destroy,
&tablet_tool->device_destroy);
wlr_cursor_attach_input_device(seat->cursor->cursor, device);
roots_seat_configure_cursor(seat);
}
void roots_seat_add_device(struct roots_seat *seat,
struct wlr_input_device *device) {
switch (device->type) {
case WLR_INPUT_DEVICE_KEYBOARD:
seat_add_keyboard(seat, device);
break;
case WLR_INPUT_DEVICE_POINTER:
seat_add_pointer(seat, device);
break;
case WLR_INPUT_DEVICE_TOUCH:
seat_add_touch(seat, device);
break;
case WLR_INPUT_DEVICE_TABLET_PAD:
seat_add_tablet_pad(seat, device);
break;
case WLR_INPUT_DEVICE_TABLET_TOOL:
seat_add_tablet_tool(seat, device);
break;
}
seat_update_capabilities(seat);
}
void roots_seat_configure_xcursor(struct roots_seat *seat) {
const char *cursor_theme = NULL;
struct roots_cursor_config *cc =
roots_config_get_cursor(seat->input->config, seat->seat->name);
if (cc != NULL) {
cursor_theme = cc->theme;
if (cc->default_image != NULL) {
seat->cursor->default_xcursor = cc->default_image;
}
}
if (!seat->cursor->xcursor_manager) {
seat->cursor->xcursor_manager =
wlr_xcursor_manager_create(cursor_theme, ROOTS_XCURSOR_SIZE);
if (seat->cursor->xcursor_manager == NULL) {
wlr_log(L_ERROR, "Cannot create XCursor manager for theme %s",
cursor_theme);
return;
}
}
struct roots_output *output;
wl_list_for_each(output, &seat->input->server->desktop->outputs, link) {
float scale = output->wlr_output->scale;
if (wlr_xcursor_manager_load(seat->cursor->xcursor_manager, scale)) {
wlr_log(L_ERROR, "Cannot load xcursor theme for output '%s' "
"with scale %f", output->wlr_output->name, scale);
}
}
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
seat->cursor->default_xcursor, seat->cursor->cursor);
wlr_cursor_warp(seat->cursor->cursor, NULL, seat->cursor->cursor->x,
seat->cursor->cursor->y);
}
bool roots_seat_has_meta_pressed(struct roots_seat *seat) {
struct roots_keyboard *keyboard;
wl_list_for_each(keyboard, &seat->keyboards, link) {
if (!keyboard->config->meta_key) {
continue;
}
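// modifiers ^ meta_key == 0 holds only when exactly the configured meta
// key, and no other modifier, is currently pressed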
uint32_t modifiers =
wlr_keyboard_get_modifiers(keyboard->device->keyboard);
if ((modifiers ^ keyboard->config->meta_key) == 0) {
return true;
}
}
return false;
}
struct roots_view *roots_seat_get_focus(struct roots_seat *seat) {
if (!seat->has_focus || wl_list_empty(&seat->views)) {
return NULL;
}
struct roots_seat_view *seat_view =
wl_container_of(seat->views.next, seat_view, link);
return seat_view->view;
}
static void seat_view_destroy(struct roots_seat_view *seat_view) {
struct roots_seat *seat = seat_view->seat;
if (seat_view->view == roots_seat_get_focus(seat)) {
seat->has_focus = false;
seat->cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
}
if (seat_view == seat->cursor->pointer_view) {
seat->cursor->pointer_view = NULL;
}
wl_list_remove(&seat_view->view_unmap.link);
wl_list_remove(&seat_view->view_destroy.link);
wl_list_remove(&seat_view->link);
free(seat_view);
// Focus first view
if (!wl_list_empty(&seat->views)) {
struct roots_seat_view *first_seat_view = wl_container_of(
seat->views.next, first_seat_view, link);
roots_seat_set_focus(seat, first_seat_view->view);
}
}
static void seat_view_handle_unmap(struct wl_listener *listener, void *data) {
struct roots_seat_view *seat_view =
wl_container_of(listener, seat_view, view_unmap);
seat_view_destroy(seat_view);
}
static void seat_view_handle_destroy(struct wl_listener *listener, void *data) {
struct roots_seat_view *seat_view =
wl_container_of(listener, seat_view, view_destroy);
seat_view_destroy(seat_view);
}
static struct roots_seat_view *seat_add_view(struct roots_seat *seat,
struct roots_view *view) {
struct roots_seat_view *seat_view =
calloc(1, sizeof(struct roots_seat_view));
if (seat_view == NULL) {
return NULL;
}
seat_view->seat = seat;
seat_view->view = view;
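// Insert at the tail of seat->views; the head of the list always holds
// the focused view (see roots_seat_get_focus)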
wl_list_insert(seat->views.prev, &seat_view->link);
seat_view->view_unmap.notify = seat_view_handle_unmap;
wl_signal_add(&view->events.unmap, &seat_view->view_unmap);
seat_view->view_destroy.notify = seat_view_handle_destroy;
wl_signal_add(&view->events.destroy, &seat_view->view_destroy);
return seat_view;
}
struct roots_seat_view *roots_seat_view_from_view(
struct roots_seat *seat, struct roots_view *view) {
if (view == NULL) {
return NULL;
}
bool found = false;
struct roots_seat_view *seat_view = NULL;
wl_list_for_each(seat_view, &seat->views, link) {
if (seat_view->view == view) {
found = true;
break;
}
}
if (!found) {
seat_view = seat_add_view(seat, view);
if (seat_view == NULL) {
wlr_log(L_ERROR, "Allocation failed");
return NULL;
}
}
return seat_view;
}
void roots_seat_set_focus(struct roots_seat *seat, struct roots_view *view) {
// Make sure the view will be rendered on top of others, even if it's
// already focused in this seat
if (view != NULL) {
wl_list_remove(&view->link);
wl_list_insert(&seat->input->server->desktop->views, &view->link);
}
struct roots_view *prev_focus = roots_seat_get_focus(seat);
if (view == prev_focus) {
return;
}
#ifdef WLR_HAS_XWAYLAND
if (view && view->type == ROOTS_XWAYLAND_VIEW &&
wlr_xwayland_surface_is_unmanaged(view->xwayland_surface)) {
return;
}
#endif
struct roots_seat_view *seat_view = NULL;
if (view != NULL) {
seat_view = roots_seat_view_from_view(seat, view);
if (seat_view == NULL) {
return;
}
}
seat->has_focus = false;
// Deactivate the old view if it is not focused by some other seat
if (prev_focus != NULL && !input_view_has_focus(seat->input, prev_focus)) {
view_activate(prev_focus, false);
}
if (view == NULL) {
seat->cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
return;
}
view_activate(view, true);
seat->has_focus = true;
wl_list_remove(&seat_view->link);
wl_list_insert(&seat->views, &seat_view->link);
view_damage_whole(view);
struct wlr_keyboard *keyboard = wlr_seat_get_keyboard(seat->seat);
if (keyboard != NULL) {
wlr_seat_keyboard_notify_enter(seat->seat, view->wlr_surface,
keyboard->keycodes, keyboard->num_keycodes,
&keyboard->modifiers);
} else {
wlr_seat_keyboard_notify_enter(seat->seat, view->wlr_surface,
NULL, 0, NULL);
}
}
void roots_seat_cycle_focus(struct roots_seat *seat) {
if (wl_list_empty(&seat->views)) {
return;
}
struct roots_seat_view *first_seat_view = wl_container_of(
seat->views.next, first_seat_view, link);
if (!seat->has_focus) {
roots_seat_set_focus(seat, first_seat_view->view);
return;
}
if (wl_list_length(&seat->views) < 2) {
return;
}
// Focus the next view
struct roots_seat_view *next_seat_view = wl_container_of(
first_seat_view->link.next, next_seat_view, link);
roots_seat_set_focus(seat, next_seat_view->view);
// Move the first view to the end of the list
wl_list_remove(&first_seat_view->link);
wl_list_insert(seat->views.prev, &first_seat_view->link);
}
void roots_seat_begin_move(struct roots_seat *seat, struct roots_view *view) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_MOVE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
if (view->maximized) {
cursor->view_x = view->saved.x;
cursor->view_y = view->saved.y;
} else {
cursor->view_x = view->x;
cursor->view_y = view->y;
}
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
ROOTS_XCURSOR_MOVE, seat->cursor->cursor);
}
void roots_seat_begin_resize(struct roots_seat *seat, struct roots_view *view,
uint32_t edges) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_RESIZE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
if (view->maximized) {
cursor->view_x = view->saved.x;
cursor->view_y = view->saved.y;
cursor->view_width = view->saved.width;
cursor->view_height = view->saved.height;
} else {
cursor->view_x = view->x;
cursor->view_y = view->y;
struct wlr_box box;
view_get_box(view, &box);
cursor->view_width = box.width;
cursor->view_height = box.height;
}
cursor->resize_edges = edges;
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
const char *resize_name = wlr_xcursor_get_resize_name(edges);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
resize_name, seat->cursor->cursor);
}
void roots_seat_begin_rotate(struct roots_seat *seat, struct roots_view *view) {
struct roots_cursor *cursor = seat->cursor;
cursor->mode = ROOTS_CURSOR_ROTATE;
cursor->offs_x = cursor->cursor->x;
cursor->offs_y = cursor->cursor->y;
cursor->view_rotation = view->rotation;
view_maximize(view, false);
wlr_seat_pointer_clear_focus(seat->seat);
wlr_xcursor_manager_set_cursor_image(seat->cursor->xcursor_manager,
ROOTS_XCURSOR_ROTATE, seat->cursor->cursor);
}
void roots_seat_end_compositor_grab(struct roots_seat *seat) {
struct roots_cursor *cursor = seat->cursor;
struct roots_view *view = roots_seat_get_focus(seat);
if (view == NULL) {
return;
}
switch(cursor->mode) {
case ROOTS_CURSOR_MOVE:
view_move(view, cursor->view_x, cursor->view_y);
break;
case ROOTS_CURSOR_RESIZE:
view_move_resize(view, cursor->view_x, cursor->view_y, cursor->view_width, cursor->view_height);
break;
case ROOTS_CURSOR_ROTATE:
view->rotation = cursor->view_rotation;
break;
case ROOTS_CURSOR_PASSTHROUGH:
break;
}
cursor->mode = ROOTS_CURSOR_PASSTHROUGH;
}
| 1 | 10,064 | This is already done by `view_set_fullscreen` | swaywm-wlroots | c |
@@ -101,6 +101,15 @@ func (pr *PeerRing) RemoveAll() []peer.Peer {
return peers
}
+// All returns a snapshot of all the peers from the ring as a list.
+func (pr *PeerRing) All() []peer.Peer {
+ peers := make([]peer.Peer, 0, len(pr.peerToNode))
+ for _, node := range pr.peerToNode {
+ peers = append(peers, getPeerForRingNode(node))
+ }
+ return peers
+}
+
func (pr *PeerRing) popNode(node *ring.Ring) peer.Peer {
p := getPeerForRingNode(node)
| 1 | // Copyright (c) 2016 Uber Technologies, Inc.
//
// Permission is hereby granted, free of charge, to any person obtaining a copy
// of this software and associated documentation files (the "Software"), to deal
// in the Software without restriction, including without limitation the rights
// to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
// copies of the Software, and to permit persons to whom the Software is
// furnished to do so, subject to the following conditions:
//
// The above copyright notice and this permission notice shall be included in
// all copies or substantial portions of the Software.
//
// THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
// IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
// FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
// AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
// LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
// OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
// THE SOFTWARE.
package roundrobin
import (
"container/ring"
"go.uber.org/yarpc/api/peer"
)
// NewPeerRing creates a new PeerRing with an initial capacity
func NewPeerRing(capacity int) *PeerRing {
return &PeerRing{
peerToNode: make(map[string]*ring.Ring, capacity),
}
}
// PeerRing provides a safe way to interact (Add/Remove/Get) with a potentially
// changing list of peer objects.
// PeerRing is NOT thread-safe; make sure to only call PeerRing functions while
// holding a lock
type PeerRing struct {
peerToNode map[string]*ring.Ring
nextNode *ring.Ring
}
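// Editor's note: a minimal, hypothetical locking sketch (not part of this
// package) showing the external synchronization the comment above requires;
// the mutex here is an assumption for illustration only.
//
//	var mu sync.Mutex
//	pr := NewPeerRing(8)
//	mu.Lock()
//	_ = pr.Next() // every PeerRing call must run under the same lock
//	mu.Unlock()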
// GetPeer returns the Peer from the Ring or Nil
func (pr *PeerRing) GetPeer(pid peer.Identifier) peer.Peer {
node, ok := pr.peerToNode[pid.Identifier()]
if !ok {
return nil
}
return getPeerForRingNode(node)
}
// Add appends a peer.Peer to the end of the PeerRing; if the ring is empty,
// it initializes the nextNode marker
func (pr *PeerRing) Add(p peer.Peer) error {
if _, ok := pr.peerToNode[p.Identifier()]; ok {
// Peer Already in ring, ignore the add
return peer.ErrPeerAddAlreadyInList(p.Identifier())
}
newNode := newPeerRingNode(p)
pr.peerToNode[p.Identifier()] = newNode
if pr.nextNode == nil {
// Empty ring, add the first node
pr.nextNode = newNode
} else {
// Push the node to the ring
pushBeforeNode(pr.nextNode, newNode)
}
return nil
}
func newPeerRingNode(p peer.Peer) *ring.Ring {
newNode := ring.New(1)
newNode.Value = p
return newNode
}
// Remove removes a peer.Peer from the PeerRing; if the peer is not
// in the ring, it returns an error
func (pr *PeerRing) Remove(p peer.Peer) error {
node, ok := pr.peerToNode[p.Identifier()]
if !ok {
// Peer doesn't exist in the list
return peer.ErrPeerRemoveNotInList(p.Identifier())
}
pr.popNode(node)
return nil
}
// RemoveAll pops all the peers from the ring and returns them in a list
func (pr *PeerRing) RemoveAll() []peer.Peer {
peers := make([]peer.Peer, 0, len(pr.peerToNode))
for _, node := range pr.peerToNode {
peers = append(peers, pr.popNode(node))
}
return peers
}
func (pr *PeerRing) popNode(node *ring.Ring) peer.Peer {
p := getPeerForRingNode(node)
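// If this was the only node, the ring becomes empty; otherwise make sure
// nextNode is moved off the node being removed before unlinking it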
if isLastRingNode(node) {
pr.nextNode = nil
} else {
if pr.isNextNode(node) {
pr.nextNode = pr.nextNode.Next()
}
popNodeFromRing(node)
}
// Remove the node from our node map
delete(pr.peerToNode, p.Identifier())
return p
}
func (pr *PeerRing) isNextNode(node *ring.Ring) bool {
return pr.nextNode == node
}
// Next returns the next peer in the ring, or nil if the ring is empty.
// After retrieving that peer, it advances the nextNode marker in the ring
func (pr *PeerRing) Next() peer.Peer {
if pr.nextNode == nil {
return nil
}
p := getPeerForRingNode(pr.nextNode)
pr.nextNode = pr.nextNode.Next()
return p
}
func getPeerForRingNode(rNode *ring.Ring) peer.Peer {
return rNode.Value.(peer.Peer)
}
func isLastRingNode(rNode *ring.Ring) bool {
return rNode.Next() == rNode
}
func popNodeFromRing(rNode *ring.Ring) {
rNode.Prev().Unlink(1)
}
func pushBeforeNode(curNode, newNode *ring.Ring) {
curNode.Prev().Link(newNode)
}
| 1 | 11,935 | Hmm, I followed the convention of this collection: it has Add, Remove, and RemoveAll, so All sounds reasonable. Otherwise it would likely be AddPeer, RemovePeer, RemovePeers, and Peers. | yarpc-yarpc-go | go |
@@ -65,6 +65,15 @@ class FlagFacade
return $this->flagRepository->getByIds($flagIds);
}
+ /**
+ * @param string $uuid
+ * @return \Shopsys\FrameworkBundle\Model\Product\Flag\Flag
+ */
+ public function getByUuid(string $uuid): Flag
+ {
+ return $this->flagRepository->getByUuid($uuid);
+ }
+
/**
* @param \Shopsys\FrameworkBundle\Model\Product\Flag\FlagData $flagData
* @return \Shopsys\FrameworkBundle\Model\Product\Flag\Flag | 1 | <?php
declare(strict_types=1);
namespace Shopsys\FrameworkBundle\Model\Product\Flag;
use Doctrine\ORM\EntityManagerInterface;
use Symfony\Component\EventDispatcher\EventDispatcherInterface;
class FlagFacade
{
/**
* @var \Doctrine\ORM\EntityManagerInterface
*/
protected $em;
/**
* @var \Shopsys\FrameworkBundle\Model\Product\Flag\FlagRepository
*/
protected $flagRepository;
/**
* @var \Shopsys\FrameworkBundle\Model\Product\Flag\FlagFactory
*/
protected $flagFactory;
/**
* @var \Symfony\Component\EventDispatcher\EventDispatcherInterface
*/
protected $eventDispatcher;
/**
* @param \Doctrine\ORM\EntityManagerInterface $em
* @param \Shopsys\FrameworkBundle\Model\Product\Flag\FlagRepository $flagRepository
* @param \Shopsys\FrameworkBundle\Model\Product\Flag\FlagFactory $flagFactory
* @param \Symfony\Component\EventDispatcher\EventDispatcherInterface $eventDispatcher
*/
public function __construct(
EntityManagerInterface $em,
FlagRepository $flagRepository,
FlagFactory $flagFactory,
EventDispatcherInterface $eventDispatcher
) {
$this->em = $em;
$this->flagRepository = $flagRepository;
$this->flagFactory = $flagFactory;
$this->eventDispatcher = $eventDispatcher;
}
/**
* @param int $flagId
* @return \Shopsys\FrameworkBundle\Model\Product\Flag\Flag
*/
public function getById($flagId)
{
return $this->flagRepository->getById($flagId);
}
/**
* @param int[] $flagIds
* @return \Shopsys\FrameworkBundle\Model\Product\Flag\Flag[]
*/
public function getByIds(array $flagIds): array
{
return $this->flagRepository->getByIds($flagIds);
}
/**
* @param \Shopsys\FrameworkBundle\Model\Product\Flag\FlagData $flagData
* @return \Shopsys\FrameworkBundle\Model\Product\Flag\Flag
*/
public function create(FlagData $flagData)
{
$flag = $this->flagFactory->create($flagData);
$this->em->persist($flag);
$this->em->flush();
$this->dispatchFlagEvent($flag, FlagEvent::CREATE);
return $flag;
}
/**
* @param int $flagId
* @param \Shopsys\FrameworkBundle\Model\Product\Flag\FlagData $flagData
* @return \Shopsys\FrameworkBundle\Model\Product\Flag\Flag
*/
public function edit($flagId, FlagData $flagData)
{
$flag = $this->flagRepository->getById($flagId);
$flag->edit($flagData);
$this->em->flush();
$this->dispatchFlagEvent($flag, FlagEvent::UPDATE);
return $flag;
}
/**
* @param int $flagId
*/
public function deleteById($flagId)
{
$flag = $this->flagRepository->getById($flagId);
$this->em->remove($flag);
$this->dispatchFlagEvent($flag, FlagEvent::DELETE);
$this->em->flush();
}
/**
* @return \Shopsys\FrameworkBundle\Model\Product\Flag\Flag[]
*/
public function getAll()
{
return $this->flagRepository->getAll();
}
/**
* @param \Shopsys\FrameworkBundle\Model\Product\Flag\Flag $flag
* @param string $eventType
* @see \Shopsys\FrameworkBundle\Model\Product\Flag\FlagEvent class
*/
protected function dispatchFlagEvent(Flag $flag, string $eventType): void
{
$this->eventDispatcher->dispatch(new FlagEvent($flag), $eventType);
}
}
| 1 | 23,792 | _nitpick_ I'm thinking about whether this method should be in the previous commit or not. I know it's not yet used there, but in theory, neither do UUID itself. | shopsys-shopsys | php |
@@ -760,6 +760,13 @@ void RaftPart::replicateLogs(folly::EventBase* eb,
break;
}
+ if (term_ != currTerm) {
+ VLOG(2) << idStr_ << "Term has been updated, previous "
+ << currTerm << ", current " << term_;
+ currTerm = term_;
+ break;
+ }
+
hosts = hosts_;
} while (false);
| 1 | /* Copyright (c) 2018 vesoft inc. All rights reserved.
*
* This source code is licensed under Apache 2.0 License,
* attached with Common Clause Condition 1.0, found in the LICENSES directory.
*/
#include "base/Base.h"
#include "kvstore/raftex/RaftPart.h"
#include <folly/io/async/EventBaseManager.h>
#include <folly/executors/IOThreadPoolExecutor.h>
#include <folly/gen/Base.h>
#include "gen-cpp2/RaftexServiceAsyncClient.h"
#include "base/CollectNSucceeded.h"
#include "thrift/ThriftClientManager.h"
#include "network/NetworkUtils.h"
#include "thread/NamedThread.h"
#include "kvstore/wal/FileBasedWal.h"
#include "kvstore/raftex/LogStrListIterator.h"
#include "kvstore/raftex/Host.h"
#include "time/WallClock.h"
#include "base/SlowOpTracker.h"
DEFINE_uint32(raft_heartbeat_interval_secs, 5,
"Seconds between each heartbeat");
DEFINE_uint64(raft_snapshot_timeout, 60 * 5, "Max seconds between two snapshot requests");
DEFINE_uint32(max_batch_size, 256, "The max number of logs in a batch");
DEFINE_int32(wal_ttl, 14400, "Default wal ttl");
DEFINE_int64(wal_file_size, 16 * 1024 * 1024, "Default wal file size");
DEFINE_int32(wal_buffer_size, 8 * 1024 * 1024, "Default wal buffer size");
DEFINE_int32(wal_buffer_num, 2, "Default wal buffer number");
DEFINE_bool(wal_sync, false, "Whether fsync needs to be called every write");
DEFINE_bool(trace_raft, false, "Enable trace one raft request");
namespace nebula {
namespace raftex {
using nebula::network::NetworkUtils;
using nebula::thrift::ThriftClientManager;
using nebula::wal::FileBasedWal;
using nebula::wal::FileBasedWalPolicy;
using OpProcessor = folly::Function<folly::Optional<std::string>(AtomicOp op)>;
class AppendLogsIterator final : public LogIterator {
public:
AppendLogsIterator(LogID firstLogId,
TermID termId,
RaftPart::LogCache logs,
OpProcessor opCB)
: firstLogId_(firstLogId)
, termId_(termId)
, logId_(firstLogId)
, logs_(std::move(logs))
, opCB_(std::move(opCB)) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
AppendLogsIterator(const AppendLogsIterator&) = delete;
AppendLogsIterator(AppendLogsIterator&&) = default;
AppendLogsIterator& operator=(const AppendLogsIterator&) = delete;
AppendLogsIterator& operator=(AppendLogsIterator&&) = default;
bool leadByAtomicOp() const {
return leadByAtomicOp_;
}
bool hasNonAtomicOpLogs() const {
return hasNonAtomicOpLogs_;
}
LogID firstLogId() const {
return firstLogId_;
}
// Return true if the current log is an AtomicOp, otherwise return false
bool processAtomicOp() {
while (idx_ < logs_.size()) {
auto& tup = logs_.at(idx_);
auto logType = std::get<1>(tup);
if (logType != LogType::ATOMIC_OP) {
// Not an AtomicOp
return false;
}
// Process AtomicOp log
CHECK(!!opCB_);
opResult_ = opCB_(std::move(std::get<3>(tup)));
if (opResult_.hasValue()) {
// AtomicOp Succeeded
return true;
} else {
// AtomicOp failed, move to the next log, but do not increment the logId_
++idx_;
}
}
// Reached the end
return false;
}
LogIterator& operator++() override {
++idx_;
++logId_;
if (idx_ < logs_.size()) {
currLogType_ = logType();
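// Stop before an ATOMIC_OP (it must lead its own batch) and right after
// a COMMAND (it must be the last log of its batch)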
valid_ = currLogType_ != LogType::ATOMIC_OP;
if (valid_) {
hasNonAtomicOpLogs_ = true;
}
valid_ = valid_ && lastLogType_ != LogType::COMMAND;
lastLogType_ = currLogType_;
} else {
valid_ = false;
}
return *this;
}
// The iterator becomes invalid when exhausting the logs
// **OR** running into an AtomicOp log
bool valid() const override {
return valid_;
}
LogID logId() const override {
DCHECK(valid());
return logId_;
}
TermID logTerm() const override {
return termId_;
}
ClusterID logSource() const override {
DCHECK(valid());
return std::get<0>(logs_.at(idx_));
}
folly::StringPiece logMsg() const override {
DCHECK(valid());
if (currLogType_ == LogType::ATOMIC_OP) {
CHECK(opResult_.hasValue());
return opResult_.value();
} else {
return std::get<2>(logs_.at(idx_));
}
}
// Return true when there is no more log left for processing
bool empty() const {
return idx_ >= logs_.size();
}
// Resume the iterator so that we can continue to process the remaining logs
void resume() {
CHECK(!valid_);
if (!empty()) {
leadByAtomicOp_ = processAtomicOp();
valid_ = idx_ < logs_.size();
hasNonAtomicOpLogs_ = !leadByAtomicOp_ && valid_;
if (valid_) {
currLogType_ = lastLogType_ = logType();
}
}
}
LogType logType() const {
return std::get<1>(logs_.at(idx_));
}
private:
size_t idx_{0};
bool leadByAtomicOp_{false};
bool hasNonAtomicOpLogs_{false};
bool valid_{true};
LogType lastLogType_{LogType::NORMAL};
LogType currLogType_{LogType::NORMAL};
folly::Optional<std::string> opResult_;
LogID firstLogId_;
TermID termId_;
LogID logId_;
RaftPart::LogCache logs_;
OpProcessor opCB_;
};
/********************************************************
*
* Implementation of RaftPart
*
*******************************************************/
RaftPart::RaftPart(ClusterID clusterId,
GraphSpaceID spaceId,
PartitionID partId,
HostAddr localAddr,
const folly::StringPiece walRoot,
std::shared_ptr<folly::IOThreadPoolExecutor> pool,
std::shared_ptr<thread::GenericThreadPool> workers,
std::shared_ptr<folly::Executor> executor,
std::shared_ptr<SnapshotManager> snapshotMan)
: idStr_{folly::stringPrintf("[Port: %d, Space: %d, Part: %d] ",
localAddr.second, spaceId, partId)}
, clusterId_{clusterId}
, spaceId_{spaceId}
, partId_{partId}
, addr_{localAddr}
, status_{Status::STARTING}
, role_{Role::FOLLOWER}
, leader_{0, 0}
, ioThreadPool_{pool}
, bgWorkers_{workers}
, executor_(executor)
, snapshot_(snapshotMan)
, weight_(1) {
FileBasedWalPolicy policy;
policy.ttl = FLAGS_wal_ttl;
policy.fileSize = FLAGS_wal_file_size;
policy.bufferSize = FLAGS_wal_buffer_size;
policy.numBuffers = FLAGS_wal_buffer_num;
policy.sync = FLAGS_wal_sync;
wal_ = FileBasedWal::getWal(walRoot,
idStr_,
policy,
[this] (LogID logId,
TermID logTermId,
ClusterID logClusterId,
const std::string& log) {
return this->preProcessLog(logId,
logTermId,
logClusterId,
log);
});
logs_.reserve(FLAGS_max_batch_size);
CHECK(!!executor_) << idStr_ << "Should not be nullptr";
}
RaftPart::~RaftPart() {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition has stopped
CHECK(status_ == Status::STOPPED);
LOG(INFO) << idStr_ << " The part has been destroyed...";
}
const char* RaftPart::roleStr(Role role) const {
switch (role) {
case Role::LEADER:
return "Leader";
case Role::FOLLOWER:
return "Follower";
case Role::CANDIDATE:
return "Candidate";
case Role::LEARNER:
return "Learner";
default:
LOG(FATAL) << idStr_ << "Invalid role";
}
return nullptr;
}
void RaftPart::start(std::vector<HostAddr>&& peers, bool asLearner) {
std::lock_guard<std::mutex> g(raftLock_);
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
term_ = proposedTerm_ = lastLogTerm_;
// Set the quorum number
quorum_ = (peers.size() + 1) / 2;
auto logIdAndTerm = lastCommittedLogId();
committedLogId_ = logIdAndTerm.first;
if (lastLogId_ < committedLogId_) {
LOG(INFO) << idStr_ << "Reset lastLogId " << lastLogId_
<< " to be the committedLogId " << committedLogId_;
lastLogId_ = committedLogId_;
lastLogTerm_ = term_;
wal_->reset();
}
LOG(INFO) << idStr_ << "There are "
<< peers.size()
<< " peer hosts, and total "
<< peers.size() + 1
<< " copies. The quorum is " << quorum_ + 1
<< ", as learner " << asLearner
<< ", lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", committedLogId " << committedLogId_
<< ", term " << term_;
// Start all peer hosts
for (auto& addr : peers) {
LOG(INFO) << idStr_ << "Add peer " << addr;
auto hostPtr = std::make_shared<Host>(addr, shared_from_this());
hosts_.emplace_back(hostPtr);
}
// Change the status
status_ = Status::RUNNING;
if (asLearner) {
role_ = Role::LEARNER;
}
startTimeMs_ = time::WallClock::fastNowInMilliSec();
// Set up a leader election task
size_t delayMS = 100 + folly::Random::rand32(900);
bgWorkers_->addDelayTask(delayMS, [self = shared_from_this(), startTime = startTimeMs_] {
self->statusPolling(startTime);
});
}
void RaftPart::stop() {
VLOG(2) << idStr_ << "Stopping the partition";
decltype(hosts_) hosts;
{
std::unique_lock<std::mutex> lck(raftLock_);
status_ = Status::STOPPED;
leader_ = {0, 0};
role_ = Role::FOLLOWER;
hosts = std::move(hosts_);
}
for (auto& h : hosts) {
h->stop();
}
VLOG(2) << idStr_ << "Invoked stop() on all peer hosts";
for (auto& h : hosts) {
VLOG(2) << idStr_ << "Waiting " << h->idStr() << " to stop";
h->waitForStop();
VLOG(2) << idStr_ << h->idStr() << "has stopped";
}
hosts.clear();
LOG(INFO) << idStr_ << "Partition has been stopped";
}
AppendLogResult RaftPart::canAppendLogs() {
CHECK(!raftLock_.try_lock());
if (status_ == Status::STARTING) {
LOG(ERROR) << idStr_ << "The partition is still starting";
return AppendLogResult::E_NOT_READY;
}
if (status_ == Status::STOPPED) {
LOG(ERROR) << idStr_ << "The partition is stopped";
return AppendLogResult::E_STOPPED;
}
if (role_ != Role::LEADER) {
LOG_EVERY_N(ERROR, 100) << idStr_ << "The partition is not a leader";
return AppendLogResult::E_NOT_A_LEADER;
}
return AppendLogResult::SUCCEEDED;
}
void RaftPart::addLearner(const HostAddr& addr) {
CHECK(!raftLock_.try_lock());
if (addr == addr_) {
LOG(INFO) << idStr_ << "I am learner!";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&addr] (const auto& h) {
return h->address() == addr;
});
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(addr, shared_from_this(), true));
LOG(INFO) << idStr_ << "Add learner " << addr;
} else {
LOG(INFO) << idStr_ << "The host " << addr << " has been existed as "
<< ((*it)->isLearner() ? " learner " : " group member");
}
}
void RaftPart::preProcessTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Pre process transfer leader to " << target;
switch (role_) {
case Role::FOLLOWER: {
if (target != addr_ && target != HostAddr(0, 0)) {
LOG(INFO) << idStr_ << "I am follower, just wait for the new leader.";
} else {
LOG(INFO) << idStr_ << "I will be the new leader, trigger leader election now!";
bgWorkers_->addTask([self = shared_from_this()] {
{
std::unique_lock<std::mutex> lck(self->raftLock_);
self->role_ = Role::CANDIDATE;
self->leader_ = HostAddr(0, 0);
}
self->leaderElection();
});
}
break;
}
default: {
LOG(INFO) << idStr_ << "My role is " << roleStr(role_)
<< ", so do nothing when pre process transfer leader";
break;
}
}
}
void RaftPart::commitTransLeader(const HostAddr& target) {
CHECK(!raftLock_.try_lock());
LOG(INFO) << idStr_ << "Commit transfer leader to " << target;
switch (role_) {
case Role::LEADER: {
if (target != addr_ && !hosts_.empty()) {
auto iter = std::find_if(hosts_.begin(), hosts_.end(), [] (const auto& h) {
return !h->isLearner();
});
if (iter != hosts_.end()) {
lastMsgRecvDur_.reset();
role_ = Role::FOLLOWER;
leader_ = HostAddr(0, 0);
LOG(INFO) << idStr_ << "Give up my leadership!";
}
} else {
LOG(INFO) << idStr_ << "I am already the leader!";
}
break;
}
case Role::FOLLOWER:
case Role::CANDIDATE: {
LOG(INFO) << idStr_ << "I am " << roleStr(role_) << ", just wait for the new leader!";
break;
}
case Role::LEARNER: {
LOG(INFO) << idStr_ << "I am learner, not in the raft group, skip the log";
break;
}
}
}
void RaftPart::updateQuorum() {
CHECK(!raftLock_.try_lock());
int32_t total = 0;
for (auto& h : hosts_) {
if (!h->isLearner()) {
total++;
}
}
quorum_ = (total + 1) / 2;
}
void RaftPart::addPeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
if (role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am learner, promote myself to be follower";
role_ = Role::FOLLOWER;
updateQuorum();
} else {
LOG(INFO) << idStr_ << "I am already in the raft group!";
}
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&peer] (const auto& h) {
return h->address() == peer;
});
if (it == hosts_.end()) {
hosts_.emplace_back(std::make_shared<Host>(peer, shared_from_this()));
updateQuorum();
LOG(INFO) << idStr_ << "Add peer " << peer;
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The host " << peer
<< " has been existed as learner, promote it!";
(*it)->setLearner(false);
updateQuorum();
} else {
LOG(INFO) << idStr_ << "The host " << peer << " has been existed as follower!";
}
}
}
void RaftPart::removePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (peer == addr_) {
// The part will be removed in REMOVE_PART_ON_SRC phase
LOG(INFO) << idStr_ << "Remove myself from the raft group.";
return;
}
auto it = std::find_if(hosts_.begin(), hosts_.end(), [&peer] (const auto& h) {
return h->address() == peer;
});
if (it == hosts_.end()) {
LOG(INFO) << idStr_ << "The peer " << peer << " not exist!";
} else {
if ((*it)->isLearner()) {
LOG(INFO) << idStr_ << "The peer is learner, remove it directly!";
hosts_.erase(it);
return;
}
hosts_.erase(it);
updateQuorum();
LOG(INFO) << idStr_ << "Remove peer " << peer;
}
}
void RaftPart::preProcessRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::LEADER) {
LOG(INFO) << idStr_ << "I am leader, skip remove peer in preProcessLog";
return;
}
removePeer(peer);
}
void RaftPart::commitRemovePeer(const HostAddr& peer) {
CHECK(!raftLock_.try_lock());
if (role_ == Role::FOLLOWER || role_ == Role::LEARNER) {
LOG(INFO) << idStr_ << "I am " << roleStr(role_)
<< ", skip remove peer in commit";
return;
}
CHECK(Role::LEADER == role_);
removePeer(peer);
}
folly::Future<AppendLogResult> RaftPart::appendAsync(ClusterID source,
std::string log) {
if (source < 0) {
source = clusterId_;
}
return appendLogAsync(source, LogType::NORMAL, std::move(log));
}
folly::Future<AppendLogResult> RaftPart::atomicOpAsync(AtomicOp op) {
return appendLogAsync(clusterId_, LogType::ATOMIC_OP, "", std::move(op));
}
folly::Future<AppendLogResult> RaftPart::sendCommandAsync(std::string log) {
return appendLogAsync(clusterId_, LogType::COMMAND, std::move(log));
}
folly::Future<AppendLogResult> RaftPart::appendLogAsync(ClusterID source,
LogType logType,
std::string log,
AtomicOp op) {
if (blocking_) {
// No need to block heartbeats and empty log.
if ((logType == LogType::NORMAL && !log.empty()) || logType == LogType::ATOMIC_OP) {
return AppendLogResult::E_WRITE_BLOCKING;
}
}
LogCache swappedOutLogs;
auto retFuture = folly::Future<AppendLogResult>::makeEmpty();
if (bufferOverFlow_) {
LOG_EVERY_N(WARNING, 100) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
return AppendLogResult::E_BUFFER_OVERFLOW;
}
{
std::lock_guard<std::mutex> lck(logsLock_);
VLOG(2) << idStr_ << "Checking whether buffer overflow";
if (logs_.size() >= FLAGS_max_batch_size) {
// Buffer is full
LOG(WARNING) << idStr_
<< "The appendLog buffer is full."
" Please slow down the log appending rate."
<< "replicatingLogs_ :" << replicatingLogs_;
bufferOverFlow_ = true;
return AppendLogResult::E_BUFFER_OVERFLOW;
}
VLOG(2) << idStr_ << "Appending logs to the buffer";
// Append new logs to the buffer
DCHECK_GE(source, 0);
logs_.emplace_back(source, logType, std::move(log), std::move(op));
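// Choose the future per log type: an atomic op gets its own single future,
// a command rolls the shared future so later logs start a fresh one, and
// normal logs share the current future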
switch (logType) {
case LogType::ATOMIC_OP:
retFuture = cachingPromise_.getSingleFuture();
break;
case LogType::COMMAND:
retFuture = cachingPromise_.getAndRollSharedFuture();
break;
case LogType::NORMAL:
retFuture = cachingPromise_.getSharedFuture();
break;
}
bool expected = false;
if (replicatingLogs_.compare_exchange_strong(expected, true)) {
// We need to send logs to all followers
VLOG(2) << idStr_ << "Preparing to send AppendLog request";
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
std::swap(swappedOutLogs, logs_);
bufferOverFlow_ = false;
} else {
VLOG(2) << idStr_
<< "Another AppendLogs request is ongoing,"
" just return";
return retFuture;
}
}
LogID firstId = 0;
TermID termId = 0;
AppendLogResult res;
{
std::lock_guard<std::mutex> g(raftLock_);
res = canAppendLogs();
if (res == AppendLogResult::SUCCEEDED) {
firstId = lastLogId_ + 1;
termId = term_;
}
}
if (!checkAppendLogResult(res)) {
// Most likely failed because the partition is not the leader
LOG_EVERY_N(ERROR, 100) << idStr_ << "Cannot append logs, clean the buffer";
return res;
}
// Replicate buffered logs to all followers
// Replication will happen on a separate thread and will block
// until majority accept the logs, the leadership changes, or
// the partition stops
VLOG(2) << idStr_ << "Calling appendLogsInternal()";
AppendLogsIterator it(
firstId,
termId,
std::move(swappedOutLogs),
[this] (AtomicOp opCB) -> folly::Optional<std::string> {
CHECK(opCB != nullptr);
auto opRet = opCB();
if (!opRet.hasValue()) {
// Failed
sendingPromise_.setOneSingleValue(AppendLogResult::E_ATOMIC_OP_FAILURE);
}
return opRet;
});
appendLogsInternal(std::move(it), termId);
return retFuture;
}
void RaftPart::appendLogsInternal(AppendLogsIterator iter, TermID termId) {
TermID currTerm = 0;
LogID prevLogId = 0;
TermID prevLogTerm = 0;
LogID committed = 0;
LogID lastId = 0;
if (iter.valid()) {
VLOG(2) << idStr_ << "Ready to append logs from id "
<< iter.logId() << " (Current term is "
<< currTerm << ")";
} else {
LOG(ERROR) << idStr_ << "Only happend when Atomic op failed";
replicatingLogs_ = false;
return;
}
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
// The partition is not running
VLOG(2) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
// Is not a leader any more
VLOG(2) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
if (term_ != termId) {
VLOG(2) << idStr_ << "Term has been updated, origin "
<< termId << ", new " << term_;
res = AppendLogResult::E_TERM_OUT_OF_DATE;
break;
}
currTerm = term_;
prevLogId = lastLogId_;
prevLogTerm = lastLogTerm_;
committed = committedLogId_;
// Step 1: Write WAL
SlowOpTracker tracker;
if (!wal_->appendLogs(iter)) {
LOG(ERROR) << idStr_ << "Failed to write into WAL";
res = AppendLogResult::E_WAL_FAILURE;
break;
}
lastId = wal_->lastLogId();
if (tracker.slow()) {
tracker.output(idStr_, folly::stringPrintf("Write WAL, total %ld",
lastId - prevLogId + 1));
}
VLOG(2) << idStr_ << "Succeeded writing logs ["
<< iter.firstLogId() << ", " << lastId << "] to WAL";
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "Failed append logs";
return;
}
// Step 2: Replicate to followers
auto* eb = ioThreadPool_->getEventBase();
replicateLogs(eb,
std::move(iter),
currTerm,
lastId,
committed,
prevLogTerm,
prevLogId);
return;
}
void RaftPart::replicateLogs(folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId) {
using namespace folly; // NOLINT since the fancy overload of | operator
decltype(hosts_) hosts;
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
// The partition is not running
VLOG(2) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
// Is not a leader any more
VLOG(2) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
hosts = hosts_;
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "Replicate logs failed";
return;
}
VLOG(2) << idStr_ << "About to replicate logs to all peer hosts";
lastMsgSentDur_.reset();
SlowOpTracker tracker;
collectNSucceeded(
gen::from(hosts)
| gen::map([self = shared_from_this(),
eb,
currTerm,
lastLogId,
prevLogId,
prevLogTerm,
committedId] (std::shared_ptr<Host> hostPtr) {
VLOG(2) << self->idStr_
<< "Appending logs to "
<< hostPtr->idStr();
return via(eb, [=] () -> Future<cpp2::AppendLogResponse> {
return hostPtr->appendLogs(eb,
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId);
});
})
| gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts] (size_t index, cpp2::AppendLogResponse& resp) {
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED
&& !hosts[index]->isLearner();
})
.via(executor_.get())
.then([self = shared_from_this(),
eb,
it = std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogId,
prevLogTerm,
pHosts = std::move(hosts),
tracker] (folly::Try<AppendLogResponses>&& result) mutable {
VLOG(2) << self->idStr_ << "Received enough response";
CHECK(!result.hasException());
if (tracker.slow()) {
tracker.output(self->idStr_, folly::stringPrintf("Total send logs: %ld",
lastLogId - prevLogId + 1));
}
self->processAppendLogResponses(*result,
eb,
std::move(it),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId,
std::move(pHosts));
return *result;
});
}
void RaftPart::processAppendLogResponses(
const AppendLogResponses& resps,
folly::EventBase* eb,
AppendLogsIterator iter,
TermID currTerm,
LogID lastLogId,
LogID committedId,
TermID prevLogTerm,
LogID prevLogId,
std::vector<std::shared_ptr<Host>> hosts) {
// Make sure majority have succeeded
size_t numSucceeded = 0;
for (auto& res : resps) {
if (!hosts[res.first]->isLearner()
&& res.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
}
}
if (numSucceeded >= quorum_) {
// Majority have succeeded
VLOG(2) << idStr_ << numSucceeded
<< " hosts have accepted the logs";
LogID firstLogId = 0;
AppendLogResult res = AppendLogResult::SUCCEEDED;
do {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ != Status::RUNNING) {
LOG(INFO) << idStr_ << "The partition is stopped";
res = AppendLogResult::E_STOPPED;
break;
}
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "The leader has changed";
res = AppendLogResult::E_NOT_A_LEADER;
break;
}
if (currTerm != term_) {
LOG(INFO) << idStr_ << "The leader has changed, ABA problem.";
res = AppendLogResult::E_TERM_OUT_OF_DATE;
break;
}
lastLogId_ = lastLogId;
lastLogTerm_ = currTerm;
auto walIt = wal_->iterator(committedId + 1, lastLogId);
SlowOpTracker tracker;
// Step 3: Commit the batch
if (commitLogs(std::move(walIt))) {
committedLogId_ = lastLogId;
firstLogId = lastLogId_ + 1;
} else {
LOG(FATAL) << idStr_ << "Failed to commit logs";
}
if (tracker.slow()) {
tracker.output(idStr_, folly::stringPrintf("Total commit: %ld",
committedLogId_ - committedId));
}
VLOG(2) << idStr_ << "Leader succeeded in committing the logs "
<< committedId + 1 << " to " << lastLogId;
lastMsgAcceptedCostMs_ = lastMsgSentDur_.elapsedInMSec();
lastMsgAcceptedTime_ = time::WallClock::fastNowInMilliSec();
} while (false);
if (!checkAppendLogResult(res)) {
LOG(ERROR) << idStr_ << "processAppendLogResponses failed!";
return;
}
// Step 4: Fulfill the promise
if (iter.hasNonAtomicOpLogs()) {
sendingPromise_.setOneSharedValue(AppendLogResult::SUCCEEDED);
}
if (iter.leadByAtomicOp()) {
sendingPromise_.setOneSingleValue(AppendLogResult::SUCCEEDED);
}
// Step 5: Check whether need to continue
// the log replication
{
std::lock_guard<std::mutex> lck(logsLock_);
CHECK(replicatingLogs_);
// Continue to process the original AppendLogsIterator if necessary
iter.resume();
// If no more valid logs to be replicated in iter, create a new one if we have new log
if (iter.empty()) {
VLOG(2) << idStr_ << "logs size " << logs_.size();
if (logs_.size() > 0) {
// continue to replicate the logs
sendingPromise_ = std::move(cachingPromise_);
cachingPromise_.reset();
iter = AppendLogsIterator(
firstLogId,
currTerm,
std::move(logs_),
[this] (AtomicOp op) -> folly::Optional<std::string> {
auto opRet = op();
if (!opRet.hasValue()) {
// Failed
sendingPromise_.setOneSingleValue(
AppendLogResult::E_ATOMIC_OP_FAILURE);
}
return opRet;
});
logs_.clear();
bufferOverFlow_ = false;
}
// Reset replicatingLogs_ if one of the following is true:
// 1. the old iter is empty && logs_.size() == 0
// 2. the old iter is empty && logs_.size() > 0, but all logs in the new iter
// are atomic ops and all of them failed, which makes the iter empty again
if (iter.empty()) {
replicatingLogs_ = false;
VLOG(2) << idStr_ << "No more log to be replicated";
return;
}
}
}
this->appendLogsInternal(std::move(iter), currTerm);
} else {
// Not enough hosts accepted the log, re-try
LOG_EVERY_N(WARNING, 100) << idStr_ << "Only " << numSucceeded
<< " hosts succeeded, Need to try again";
replicateLogs(eb,
std::move(iter),
currTerm,
lastLogId,
committedId,
prevLogTerm,
prevLogId);
}
}
bool RaftPart::needToSendHeartbeat() {
std::lock_guard<std::mutex> g(raftLock_);
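// Heartbeat once 2/5 of the heartbeat interval has elapsed since the last
// accepted message, well before followers can reach their election timeout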
return status_ == Status::RUNNING &&
role_ == Role::LEADER &&
time::WallClock::fastNowInMilliSec() - lastMsgAcceptedTime_ >=
FLAGS_raft_heartbeat_interval_secs * 1000 * 2 / 5;
}
bool RaftPart::needToStartElection() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING &&
role_ == Role::FOLLOWER &&
(lastMsgRecvDur_.elapsedInMSec() >= weight_ * FLAGS_raft_heartbeat_interval_secs * 1000 ||
term_ == 0)) {
LOG(INFO) << idStr_ << "Start leader election, reason: lastMsgDur "
<< lastMsgRecvDur_.elapsedInMSec()
<< ", term " << term_;
role_ = Role::CANDIDATE;
leader_ = HostAddr(0, 0);
}
return role_ == Role::CANDIDATE;
}
bool RaftPart::prepareElectionRequest(
cpp2::AskForVoteRequest& req,
std::vector<std::shared_ptr<Host>>& hosts) {
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (status_ != Status::RUNNING) {
VLOG(2) << idStr_ << "The partition is not running";
return false;
}
// Make sure the role is still CANDIDATE
if (role_ != Role::CANDIDATE) {
VLOG(2) << idStr_ << "A leader has been elected";
return false;
}
// Before start a new election, reset the votedAddr
votedAddr_ = std::make_pair(0, 0);
req.set_space(spaceId_);
req.set_part(partId_);
req.set_candidate_ip(addr_.first);
req.set_candidate_port(addr_.second);
req.set_term(++proposedTerm_); // Bump up the proposed term
req.set_last_log_id(lastLogId_);
req.set_last_log_term(lastLogTerm_);
hosts = followers();
return true;
}
typename RaftPart::Role RaftPart::processElectionResponses(
const RaftPart::ElectionResponses& results,
std::vector<std::shared_ptr<Host>> hosts,
TermID proposedTerm) {
std::lock_guard<std::mutex> g(raftLock_);
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(INFO) << idStr_
<< "The part has been stopped, skip the request";
return role_;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(INFO) << idStr_ << "The partition is still starting";
return role_;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
LOG(INFO) << idStr_ << "The partition is still waitiong snapshot";
return role_;
}
if (role_ != Role::CANDIDATE) {
LOG(INFO) << idStr_ << "Partition's role has changed to "
<< roleStr(role_)
<< " during the election, so discard the results";
return role_;
}
size_t numSucceeded = 0;
for (auto& r : results) {
if (r.second.get_error_code() == cpp2::ErrorCode::SUCCEEDED) {
++numSucceeded;
} else if (r.second.get_error_code() == cpp2::ErrorCode::E_LOG_STALE) {
LOG(INFO) << idStr_ << "My last log id is less than " << hosts[r.first]->address()
<< ", double my election interval.";
uint64_t curWeight = weight_.load();
weight_.store(curWeight * 2);
} else {
LOG(ERROR) << idStr_ << "Receive response about askForVote from "
<< hosts[r.first]->address()
<< ", error code is " << static_cast<int32_t>(r.second.get_error_code());
}
}
CHECK(role_ == Role::CANDIDATE);
if (numSucceeded >= quorum_) {
LOG(INFO) << idStr_
<< "Partition is elected as the new leader for term "
<< proposedTerm;
term_ = proposedTerm;
role_ = Role::LEADER;
}
return role_;
}
bool RaftPart::leaderElection() {
VLOG(2) << idStr_ << "Start leader election...";
using namespace folly; // NOLINT since the fancy overload of | operator
cpp2::AskForVoteRequest voteReq;
decltype(hosts_) hosts;
if (!prepareElectionRequest(voteReq, hosts)) {
// Suppose we have three replicas A (leader), B and C. After A crashes,
// B and C begin an election. B wins and sends heartbeats; C has a log gap
// with B and needs a snapshot from B. Meanwhile C starts another election
// and becomes a Candidate, but because C is in WAITING_SNAPSHOT,
// prepareElectionRequest returns false and the election keeps restarting.
// And because C is a Candidate, it rejects the snapshot request from B,
// so an infinite loop begins.
// We need to fall back to the follower state to avoid this case.
std::lock_guard<std::mutex> g(raftLock_);
role_ = Role::FOLLOWER;
return false;
}
// Send out the AskForVoteRequest
LOG(INFO) << idStr_ << "Sending out an election request "
<< "(space = " << voteReq.get_space()
<< ", part = " << voteReq.get_part()
<< ", term = " << voteReq.get_term()
<< ", lastLogId = " << voteReq.get_last_log_id()
<< ", lastLogTerm = " << voteReq.get_last_log_term()
<< ", candidateIP = "
<< NetworkUtils::intToIPv4(voteReq.get_candidate_ip())
<< ", candidatePort = " << voteReq.get_candidate_port()
<< ")";
auto proposedTerm = voteReq.get_term();
auto resps = ElectionResponses();
if (hosts.empty()) {
VLOG(2) << idStr_ << "No peer found, I will be the leader";
} else {
auto eb = ioThreadPool_->getEventBase();
auto futures = collectNSucceeded(
gen::from(hosts)
| gen::map([eb, self = shared_from_this(), &voteReq] (auto& host) {
VLOG(2) << self->idStr_
<< "Sending AskForVoteRequest to "
<< host->idStr();
return via(
eb,
[&voteReq, &host, eb] ()
-> Future<cpp2::AskForVoteResponse> {
return host->askForVote(voteReq, eb);
});
})
| gen::as<std::vector>(),
// Number of succeeded required
quorum_,
// Result evaluator
[hosts] (size_t idx, cpp2::AskForVoteResponse& resp) {
return resp.get_error_code() == cpp2::ErrorCode::SUCCEEDED
&& !hosts[idx]->isLearner();
});
VLOG(2) << idStr_
<< "AskForVoteRequest has been sent to all peers"
", waiting for responses";
futures.wait();
CHECK(!futures.hasException())
<< "Got exception -- "
<< futures.result().exception().what().toStdString();
VLOG(2) << idStr_ << "Got AskForVote response back";
resps = std::move(futures).get();
}
// Process the responses
switch (processElectionResponses(resps, std::move(hosts), proposedTerm)) {
case Role::LEADER: {
// Elected
LOG(INFO) << idStr_
<< "The partition is elected as the leader";
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING) {
leader_ = addr_;
for (auto& host : hosts_) {
host->reset();
}
bgWorkers_->addTask([self = shared_from_this(),
term = voteReq.get_term()] {
self->onElected(term);
});
lastMsgAcceptedTime_ = 0;
}
}
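// Reset the election backoff weight now that this election has concluded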
weight_ = 1;
sendHeartbeat();
return true;
}
case Role::FOLLOWER: {
// Someone was elected
LOG(INFO) << idStr_ << "Someone else was elected";
return true;
}
case Role::CANDIDATE: {
// No one has been elected
LOG(INFO) << idStr_
<< "No one is elected, continue the election";
return false;
}
case Role::LEARNER: {
LOG(FATAL) << idStr_ << " Impossible! There must be some bugs!";
return false;
}
}
LOG(FATAL) << "Should not reach here";
return false;
}
void RaftPart::statusPolling(int64_t startTime) {
{
std::lock_guard<std::mutex> g(raftLock_);
// If startTime is not the same as the time when `statusPolling` was added to
// the event loop, it means the part has been restarted (for now this only
// happens in unit tests), so don't add another `statusPolling`.
if (startTime != startTimeMs_) {
return;
}
}
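// By default, poll again after one third of the heartbeat interval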
size_t delay = FLAGS_raft_heartbeat_interval_secs * 1000 / 3;
if (needToStartElection()) {
if (leaderElection()) {
VLOG(2) << idStr_ << "Stop the election";
} else {
// No leader has been elected, need to continue
// (After sleeping a random period between [500ms, 2s])
VLOG(2) << idStr_ << "Wait for a while and continue the leader election";
delay = (folly::Random::rand32(1500) + 500) * weight_;
}
} else if (needToSendHeartbeat()) {
VLOG(2) << idStr_ << "Need to send heartbeat";
sendHeartbeat();
}
if (needToCleanupSnapshot()) {
LOG(INFO) << idStr_ << "Clean up the snapshot";
cleanupSnapshot();
}
{
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::RUNNING || status_ == Status::WAITING_SNAPSHOT) {
VLOG(3) << idStr_ << "Schedule new task";
bgWorkers_->addDelayTask(
delay,
[self = shared_from_this(), startTime] {
self->statusPolling(startTime);
});
}
}
}
bool RaftPart::needToCleanupSnapshot() {
std::lock_guard<std::mutex> g(raftLock_);
return status_ == Status::WAITING_SNAPSHOT &&
role_ != Role::LEADER &&
lastSnapshotRecvDur_.elapsedInSec() >= FLAGS_raft_snapshot_timeout;
}
void RaftPart::cleanupSnapshot() {
LOG(INFO) << idStr_ << "Clean up the snapshot";
std::lock_guard<std::mutex> g(raftLock_);
reset();
status_ = Status::RUNNING;
}
bool RaftPart::needToCleanWal() {
std::lock_guard<std::mutex> g(raftLock_);
if (status_ == Status::STARTING || status_ == Status::WAITING_SNAPSHOT) {
return false;
}
for (auto& host : hosts_) {
if (host->sendingSnapshot_) {
return false;
}
}
return true;
}
void RaftPart::processAskForVoteRequest(
const cpp2::AskForVoteRequest& req,
cpp2::AskForVoteResponse& resp) {
LOG(INFO) << idStr_
<< "Recieved a VOTING request"
<< ": space = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", candidateAddr = "
<< NetworkUtils::intToIPv4(req.get_candidate_ip()) << ":"
<< req.get_candidate_port()
<< ", term = " << req.get_term()
<< ", lastLogId = " << req.get_last_log_id()
<< ", lastLogTerm = " << req.get_last_log_term();
std::lock_guard<std::mutex> g(raftLock_);
// Make sure the partition is running
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(INFO) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(INFO) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
LOG(INFO) << idStr_ << "The partition is still waiting snapshot";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
LOG(INFO) << idStr_ << "The partition currently is a "
<< roleStr(role_) << ", lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", committedLogId " << committedLogId_
<< ", term " << term_;
if (role_ == Role::LEARNER) {
resp.set_error_code(cpp2::ErrorCode::E_BAD_ROLE);
return;
}
auto candidate = HostAddr(req.get_candidate_ip(), req.get_candidate_port());
// Check term id
auto term = term_;
if (req.get_term() <= term) {
LOG(INFO) << idStr_
<< (role_ == Role::CANDIDATE
? "The partition is currently proposing term "
: "The partition currently is on term ")
<< term
<< ". The term proposed by the candidate is"
" no greater, so it will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
// Check the last term to receive a log
if (req.get_last_log_term() < lastLogTerm_) {
LOG(INFO) << idStr_
<< "The partition's last term to receive a log is "
<< lastLogTerm_
<< ", which is newer than the candidate's log "
<< req.get_last_log_term()
<< ". So the candidate will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
if (req.get_last_log_term() == lastLogTerm_) {
// Check last log id
if (req.get_last_log_id() < lastLogId_) {
LOG(INFO) << idStr_
<< "The partition's last log id is " << lastLogId_
<< ". The candidate's last log id " << req.get_last_log_id()
<< " is smaller, so it will be rejected";
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
}
// If we have voted for somebody, we will reject other candidates under the proposedTerm.
if (votedAddr_ != std::make_pair(0, 0) && proposedTerm_ >= req.get_term()) {
LOG(INFO) << idStr_
<< "We have voted " << votedAddr_ << " on term " << proposedTerm_
<< ", so we should reject the candidate " << candidate
<< " request on term " << req.get_term();
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate] (const auto& h){
return h->address() == candidate;
});
if (it == hosts.end()) {
LOG(INFO) << idStr_ << "The candidate " << candidate << " is not my peers";
resp.set_error_code(cpp2::ErrorCode::E_WRONG_LEADER);
return;
}
// Ok, no reason to refuse, we will vote for the candidate
LOG(INFO) << idStr_ << "The partition will vote for the candidate";
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
role_ = Role::FOLLOWER;
votedAddr_ = candidate;
proposedTerm_ = req.get_term();
leader_ = std::make_pair(0, 0);
// Reset the last message time
lastMsgRecvDur_.reset();
weight_ = 1;
return;
}
void RaftPart::processAppendLogRequest(
const cpp2::AppendLogRequest& req,
cpp2::AppendLogResponse& resp) {
if (FLAGS_trace_raft) {
LOG(INFO) << idStr_
<< "Received logAppend "
<< ": GraphSpaceId = " << req.get_space()
<< ", partition = " << req.get_part()
<< ", leaderIp = " << req.get_leader_ip()
<< ", leaderPort = " << req.get_leader_port()
<< ", current_term = " << req.get_current_term()
<< ", lastLogId = " << req.get_last_log_id()
<< ", committedLogId = " << req.get_committed_log_id()
<< ", lastLogIdSent = " << req.get_last_log_id_sent()
<< ", lastLogTermSent = " << req.get_last_log_term_sent()
<< folly::stringPrintf(
", num_logs = %ld, logTerm = %ld",
req.get_log_str_list().size(),
req.get_log_term())
<< ", sendingSnapshot = " << req.get_sending_snapshot()
<< ", local lastLogId = " << lastLogId_
<< ", local lastLogTerm = " << lastLogTerm_
<< ", local committedLogId = " << committedLogId_
<< ", local current term = " << term_;
}
std::lock_guard<std::mutex> g(raftLock_);
resp.set_current_term(term_);
resp.set_leader_ip(leader_.first);
resp.set_leader_port(leader_.second);
resp.set_committed_log_id(committedLogId_);
resp.set_last_log_id(lastLogId_ < committedLogId_ ? committedLogId_ : lastLogId_);
resp.set_last_log_term(lastLogTerm_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
VLOG(2) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
VLOG(2) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
// Check leadership
cpp2::ErrorCode err = verifyLeader(req);
if (err != cpp2::ErrorCode::SUCCEEDED) {
// Wrong leadership
VLOG(2) << idStr_ << "Will not follow the leader";
resp.set_error_code(err);
return;
}
// Reset the timeout timer
lastMsgRecvDur_.reset();
if (req.get_sending_snapshot() && status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to wait for the snapshot"
<< " " << req.get_committed_log_id();
reset();
status_ = Status::WAITING_SNAPSHOT;
resp.set_error_code(cpp2::ErrorCode::E_WAITING_SNAPSHOT);
return;
}
if (UNLIKELY(status_ == Status::WAITING_SNAPSHOT)) {
VLOG(2) << idStr_
<< "The part is receiving snapshot,"
<< "so just accept the new wals, but don't commit them."
<< "last_log_id_sent " << req.get_last_log_id_sent()
<< ", total log number " << req.get_log_str_list().size();
if (lastLogId_ > 0 && req.get_last_log_id_sent() > lastLogId_) {
// There is a gap
LOG(INFO) << idStr_ << "Local is missing logs from id "
<< lastLogId_ << ". Need to catch up";
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
}
    // TODO(heng): Suppose we have 3 nodes: one is the leader, one is waiting for a
    // snapshot (and returns success), and the other is a follower, but the leader
    // fails to replicate the log to the follower. How do we handle a leader crash?
    // In that case, no leader can be elected.
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
VLOG(2) << idStr_ << "Writing log [" << firstId
<< ", " << firstId + numLogs - 1 << "] to WAL";
LogStrListIterator iter(firstId,
req.get_log_term(),
req.get_log_str_list());
if (wal_->appendLogs(iter)) {
      // When the leader is already sending a snapshot, it sometimes sends a request
      // with an empty log list, and lastLogId in the wal may be 0 because of the reset.
if (numLogs != 0) {
CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId());
}
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
} else {
LOG(ERROR) << idStr_ << "Failed to append logs to WAL";
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
}
return;
}
if (req.get_last_log_id_sent() < committedLogId_ && req.get_last_log_term_sent() <= term_) {
LOG(INFO) << idStr_ << "Stale log! The log " << req.get_last_log_id_sent()
<< ", term " << req.get_last_log_term_sent()
<< " i had committed yet. My committedLogId is "
<< committedLogId_ << ", term is " << term_;
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
} else if (req.get_last_log_id_sent() < committedLogId_) {
LOG(INFO) << idStr_ << "What?? How it happens! The log id is "
<< req.get_last_log_id_sent()
<< ", the log term is " << req.get_last_log_term_sent()
<< ", but my committedLogId is " << committedLogId_
<< ", my term is " << term_
<< ", to make the cluster stable i will follow the high term"
<< " candidate and clenaup my data";
reset();
resp.set_committed_log_id(committedLogId_);
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
}
// req.get_last_log_id_sent() >= committedLogId_
if (lastLogTerm_ > 0 && req.get_last_log_term_sent() != lastLogTerm_) {
LOG(INFO) << idStr_ << "The local last log term is " << lastLogTerm_
<< ", which is different from the leader's prevLogTerm "
<< req.get_last_log_term_sent()
<< ", the prevLogId is " << req.get_last_log_id_sent()
<< ". So need to rollback to last committedLogId_ " << committedLogId_;
if (wal_->rollbackToLog(committedLogId_)) {
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
LOG(INFO) << idStr_ << "Rollback succeeded! lastLogId is " << lastLogId_
<< ", logLogTerm is " << lastLogTerm_
<< ", committedLogId is " << committedLogId_
<< ", term is " << term_;
}
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
} else if (req.get_last_log_id_sent() > lastLogId_) {
// There is a gap
LOG(INFO) << idStr_ << "Local is missing logs from id "
<< lastLogId_ << ". Need to catch up";
resp.set_error_code(cpp2::ErrorCode::E_LOG_GAP);
return;
} else if (req.get_last_log_id_sent() < lastLogId_) {
LOG(INFO) << idStr_ << "Stale log! Local lastLogId " << lastLogId_
<< ", lastLogTerm " << lastLogTerm_
<< ", lastLogIdSent " << req.get_last_log_id_sent()
<< ", lastLogTermSent " << req.get_last_log_term_sent();
resp.set_error_code(cpp2::ErrorCode::E_LOG_STALE);
return;
}
// Append new logs
size_t numLogs = req.get_log_str_list().size();
LogID firstId = req.get_last_log_id_sent() + 1;
VLOG(2) << idStr_ << "Writing log [" << firstId
<< ", " << firstId + numLogs - 1 << "] to WAL";
LogStrListIterator iter(firstId,
req.get_log_term(),
req.get_log_str_list());
if (wal_->appendLogs(iter)) {
CHECK_EQ(firstId + numLogs - 1, wal_->lastLogId());
lastLogId_ = wal_->lastLogId();
lastLogTerm_ = wal_->lastLogTerm();
resp.set_last_log_id(lastLogId_);
resp.set_last_log_term(lastLogTerm_);
} else {
LOG(ERROR) << idStr_ << "Failed to append logs to WAL";
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
return;
}
if (req.get_committed_log_id() > committedLogId_) {
// Commit some logs
// We can only commit logs from firstId to min(lastLogId_, leader's commit log id),
// follower can't always commit to leader's commit id because of lack of log
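    // For example, if the local lastLogId_ is 10 while the leader has committed 15,
    // we can only advance committedLogId_ to 10 for now.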
LogID lastLogIdCanCommit = std::min(lastLogId_, req.get_committed_log_id());
CHECK(committedLogId_ + 1 <= lastLogIdCanCommit);
if (commitLogs(wal_->iterator(committedLogId_ + 1, lastLogIdCanCommit))) {
VLOG(1) << idStr_ << "Follower succeeded committing log "
<< committedLogId_ + 1 << " to "
<< lastLogIdCanCommit;
committedLogId_ = lastLogIdCanCommit;
resp.set_committed_log_id(lastLogIdCanCommit);
} else {
LOG(ERROR) << idStr_ << "Failed to commit log "
<< committedLogId_ + 1 << " to "
<< req.get_committed_log_id();
resp.set_error_code(cpp2::ErrorCode::E_WAL_FAIL);
return;
}
}
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
}
cpp2::ErrorCode RaftPart::verifyLeader(
const cpp2::AppendLogRequest& req) {
CHECK(!raftLock_.try_lock());
auto candidate = HostAddr(req.get_leader_ip(), req.get_leader_port());
auto hosts = followers();
auto it = std::find_if(hosts.begin(), hosts.end(), [&candidate] (const auto& h){
return h->address() == candidate;
});
if (it == hosts.end()) {
LOG(INFO) << idStr_ << "The candidate leader " << candidate << " is not my peers";
return cpp2::ErrorCode::E_WRONG_LEADER;
}
VLOG(2) << idStr_ << "The current role is " << roleStr(role_);
// Make sure the remote term is greater than local's
if (req.get_current_term() < term_) {
LOG_EVERY_N(INFO, 100) << idStr_
<< "The current role is " << roleStr(role_)
<< ". The local term is " << term_
<< ". The remote term is not newer";
return cpp2::ErrorCode::E_TERM_OUT_OF_DATE;
} else if (req.get_current_term() > term_) {
    // Leader stickiness: keep the current leader no matter whether the term in the request is larger or not.
// TODO(heng) Maybe we should reconsider the logic
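    // In other words, if we heard from the current leader within one heartbeat
    // interval, assume it is still alive and reject the higher-term candidate to
    // avoid needless leadership churn.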
if (leader_ != std::make_pair(0, 0) && leader_ != candidate
&& lastMsgRecvDur_.elapsedInMSec()
< FLAGS_raft_heartbeat_interval_secs * 1000) {
LOG_EVERY_N(INFO, 100) << idStr_ << "I believe the leader " << leader_ << " exists. "
<< "Refuse to append logs of " << candidate;
return cpp2::ErrorCode::E_WRONG_LEADER;
}
} else {
// req.get_current_term() == term_
do {
if (role_ != Role::LEADER
&& leader_ == std::make_pair(0, 0)) {
LOG_EVERY_N(INFO, 100) << idStr_
<< "I dont know who is leader for current term " << term_
<< ", so accept the candidate " << candidate;
break;
}
// Same leader
if (role_ != Role::LEADER
&& candidate == leader_) {
return cpp2::ErrorCode::SUCCEEDED;
} else {
LOG_EVERY_N(INFO, 100) << idStr_
<< "The local term is same as remote term " << term_
<< ", my role is " << roleStr(role_) << ", reject it!";
return cpp2::ErrorCode::E_TERM_OUT_OF_DATE;
}
} while (false);
}
// Update my state.
Role oldRole = role_;
TermID oldTerm = term_;
// Ok, no reason to refuse, just follow the leader
LOG(INFO) << idStr_ << "The current role is " << roleStr(role_)
<< ". Will follow the new leader "
<< network::NetworkUtils::intToIPv4(req.get_leader_ip())
<< ":" << req.get_leader_port()
<< " [Term: " << req.get_current_term() << "]";
if (role_ != Role::LEARNER) {
role_ = Role::FOLLOWER;
}
leader_ = candidate;
term_ = proposedTerm_ = req.get_current_term();
votedAddr_ = std::make_pair(0, 0);
weight_ = 1;
// Before accept the logs from the new leader, check the logs locally.
if (wal_->lastLogId() > lastLogId_) {
LOG(INFO) << idStr_ << "There is one log " << wal_->lastLogId()
<< " i did not commit when i was leader, rollback to " << lastLogId_;
wal_->rollbackToLog(lastLogId_);
}
if (oldRole == Role::LEADER) {
// Need to invoke onLostLeadership callback
bgWorkers_->addTask([self = shared_from_this(), oldTerm] {
self->onLostLeadership(oldTerm);
});
}
bgWorkers_->addTask([self = shared_from_this()] {
self->onDiscoverNewLeader(self->leader_);
});
return cpp2::ErrorCode::SUCCEEDED;
}
void RaftPart::processSendSnapshotRequest(const cpp2::SendSnapshotRequest& req,
cpp2::SendSnapshotResponse& resp) {
VLOG(1) << idStr_ << "Receive snapshot, total rows " << req.get_rows().size()
<< ", total count received " << req.get_total_count()
<< ", total size received " << req.get_total_size()
<< ", finished " << req.get_done();
std::lock_guard<std::mutex> g(raftLock_);
// Check status
if (UNLIKELY(status_ == Status::STOPPED)) {
LOG(ERROR) << idStr_
<< "The part has been stopped, skip the request";
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(status_ == Status::STARTING)) {
LOG(ERROR) << idStr_ << "The partition is still starting";
resp.set_error_code(cpp2::ErrorCode::E_NOT_READY);
return;
}
if (UNLIKELY(role_ != Role::FOLLOWER && role_ != Role::LEARNER)) {
LOG(ERROR) << idStr_ << "Bad role " << roleStr(role_);
resp.set_error_code(cpp2::ErrorCode::E_BAD_STATE);
return;
}
if (UNLIKELY(leader_ != HostAddr(req.get_leader_ip(), req.get_leader_port())
|| term_ != req.get_term())) {
LOG(ERROR) << idStr_ << "Term out of date, current term " << term_
<< ", received term " << req.get_term();
resp.set_error_code(cpp2::ErrorCode::E_TERM_OUT_OF_DATE);
return;
}
if (status_ != Status::WAITING_SNAPSHOT) {
LOG(INFO) << idStr_ << "Begin to receive the snapshot";
reset();
status_ = Status::WAITING_SNAPSHOT;
}
lastSnapshotRecvDur_.reset();
  // TODO(heng): Maybe we should save them into one sst file first?
auto ret = commitSnapshot(req.get_rows(),
req.get_committed_log_id(),
req.get_committed_log_term(),
req.get_done());
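  // commitSnapshot returns the number of rows and bytes applied from this batch;
  // accumulate them so we can cross-check against the totals reported by the leader.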
lastTotalCount_ += ret.first;
lastTotalSize_ += ret.second;
if (lastTotalCount_ != req.get_total_count()
|| lastTotalSize_ != req.get_total_size()) {
LOG(ERROR) << idStr_ << "Bad snapshot, total rows received " << lastTotalCount_
<< ", total rows sended " << req.get_total_count()
<< ", total size received " << lastTotalSize_
<< ", total size sended " << req.get_total_size();
resp.set_error_code(cpp2::ErrorCode::E_PERSIST_SNAPSHOT_FAILED);
return;
}
if (req.get_done()) {
committedLogId_ = req.get_committed_log_id();
if (lastLogId_ < committedLogId_) {
lastLogId_ = committedLogId_;
lastLogTerm_ = req.get_committed_log_term();
}
if (wal_->lastLogId() <= committedLogId_) {
LOG(INFO) << "Reset invalid wal after snapshot received";
wal_->reset();
}
status_ = Status::RUNNING;
LOG(INFO) << idStr_ << "Receive all snapshot, committedLogId_ " << committedLogId_
<< ", lastLodId " << lastLogId_ << ", lastLogTermId " << lastLogTerm_;
}
resp.set_error_code(cpp2::ErrorCode::SUCCEEDED);
return;
}
folly::Future<AppendLogResult> RaftPart::sendHeartbeat() {
VLOG(2) << idStr_ << "Send heartbeat";
std::string log = "";
return appendLogAsync(clusterId_, LogType::NORMAL, std::move(log));
}
std::vector<std::shared_ptr<Host>> RaftPart::followers() const {
CHECK(!raftLock_.try_lock());
decltype(hosts_) hosts;
for (auto& h : hosts_) {
if (!h->isLearner()) {
hosts.emplace_back(h);
}
}
return hosts;
}
bool RaftPart::checkAppendLogResult(AppendLogResult res) {
if (res != AppendLogResult::SUCCEEDED) {
{
std::lock_guard<std::mutex> lck(logsLock_);
logs_.clear();
cachingPromise_.setValue(res);
cachingPromise_.reset();
bufferOverFlow_ = false;
}
sendingPromise_.setValue(res);
replicatingLogs_ = false;
    return false;
}
return true;
}
void RaftPart::reset() {
CHECK(!raftLock_.try_lock());
wal_->reset();
cleanup();
lastLogId_ = committedLogId_ = 0;
lastLogTerm_ = 0;
lastTotalCount_ = 0;
lastTotalSize_ = 0;
}
AppendLogResult RaftPart::isCatchedUp(const HostAddr& peer) {
std::lock_guard<std::mutex> lck(raftLock_);
LOG(INFO) << idStr_ << "Check whether I catch up";
if (role_ != Role::LEADER) {
LOG(INFO) << idStr_ << "I am not the leader";
return AppendLogResult::E_NOT_A_LEADER;
}
if (peer == addr_) {
LOG(INFO) << idStr_ << "I am the leader";
return AppendLogResult::SUCCEEDED;
}
for (auto& host : hosts_) {
if (host->addr_ == peer) {
if (host->followerCommittedLogId_ == 0
|| host->followerCommittedLogId_ < wal_->firstLogId()) {
LOG(INFO) << idStr_ << "The committed log id of peer is "
<< host->followerCommittedLogId_
<< ", which is invalid or less than my first wal log id";
return AppendLogResult::E_SENDING_SNAPSHOT;
}
return host->sendingSnapshot_ ? AppendLogResult::E_SENDING_SNAPSHOT
: AppendLogResult::SUCCEEDED;
}
}
return AppendLogResult::E_INVALID_PEER;
}
bool RaftPart::linkCurrentWAL(const char* newPath) {
CHECK_NOTNULL(newPath);
std::lock_guard<std::mutex> g(raftLock_);
return wal_->linkCurrentWAL(newPath);
}
void RaftPart::checkAndResetPeers(const std::vector<HostAddr>& peers) {
std::lock_guard<std::mutex> lck(raftLock_);
  // To avoid invalidating the iterator, we copy the hosts into another container.
decltype(hosts_) hosts = hosts_;
for (auto& h : hosts) {
LOG(INFO) << idStr_ << "Check host " << h->addr_;
auto it = std::find(peers.begin(), peers.end(), h->addr_);
if (it == peers.end()) {
LOG(INFO) << idStr_ << "The peer " << h->addr_ << " should not exist in my peers";
removePeer(h->addr_);
}
}
for (auto& p : peers) {
LOG(INFO) << idStr_ << "Add peer " << p << " if not exist!";
addPeer(p);
}
}
bool RaftPart::leaseValid() {
std::lock_guard<std::mutex> g(raftLock_);
if (hosts_.empty()) {
return true;
}
  // When a majority has accepted a log, the leader obtains a lease that lasts for one
  // heartbeat interval. However, we need to subtract the network I/O time. The left
  // side of the inequality is the time elapsed since the leader last sent a log
  // (a log that has also been accepted).
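  // For example, with a 1000 ms heartbeat interval and a 50 ms accepted-log round
  // trip, the lease is considered valid for 950 ms after lastMsgAcceptedTime_.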
return time::WallClock::fastNowInMilliSec() - lastMsgAcceptedTime_
< FLAGS_raft_heartbeat_interval_secs * 1000 - lastMsgAcceptedCostMs_;
}
} // namespace raftex
} // namespace nebula
| 1 | 29,876 | "break" is right? You skip the line 768. | vesoft-inc-nebula | cpp |
@@ -0,0 +1,19 @@
+# RailsAdmin config file. Generated on May 30, 2012 16:34
+# See github.com/sferik/rails_admin for more information
+
+RailsAdmin.config do |config|
+ config.authenticate_with do
+ unless current_user
+ session[:return_to] = request.url
+ redirect_to "/sign_in", :alert => "You must first log in or sign up before accessing this page."
+ end
+ end
+
+ config.authorize_with do
+ redirect_to "/", :alert => "You are not authorized to access that page" unless current_user.admin?
+ end
+
+ config.current_user_method { current_user }
+
+ config.main_app_name = ['Workshops', 'Admin']
+end | 1 | 1 | 6,330 | Can this use the route helper instead? | thoughtbot-upcase | rb |
|
@@ -44,6 +44,7 @@ var Set = wire.NewSet(
// The zero value is a server with the default options.
type Server struct {
reqlog requestlog.Logger
+ Handler http.Handler
healthHandler health.Handler
te trace.Exporter
sampler trace.Sampler | 1 | // Copyright 2018 The Go Cloud Development Kit Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package server provides a preconfigured HTTP server with diagnostic hooks.
package server // import "gocloud.dev/server"
import (
"context"
"net/http"
"path"
"sync"
"time"
"github.com/google/wire"
"gocloud.dev/health"
"gocloud.dev/requestlog"
"gocloud.dev/server/driver"
"go.opencensus.io/trace"
)
// Set is a Wire provider set that produces a *Server given the fields of
// Options. This set might add new inputs over time, but they can always be the
// zero value.
var Set = wire.NewSet(
New,
Options{},
wire.Value(&DefaultDriver{}),
wire.Bind(new(driver.Server), new(DefaultDriver)),
)
// Server is a preconfigured HTTP server with diagnostic hooks.
// The zero value is a server with the default options.
type Server struct {
reqlog requestlog.Logger
healthHandler health.Handler
te trace.Exporter
sampler trace.Sampler
once sync.Once
driver driver.Server
}
// Options is the set of optional parameters.
type Options struct {
// RequestLogger specifies the logger that will be used to log requests.
RequestLogger requestlog.Logger
// HealthChecks specifies the health checks to be run when the
// /healthz/readiness endpoint is requested.
HealthChecks []health.Checker
// TraceExporter exports sampled trace spans.
TraceExporter trace.Exporter
// DefaultSamplingPolicy is a function that takes a
// trace.SamplingParameters struct and returns a true or false decision about
// whether it should be sampled and exported.
DefaultSamplingPolicy trace.Sampler
// Driver serves HTTP requests.
Driver driver.Server
}
// New creates a new server. New(nil) is the same as new(Server).
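//
// A minimal usage sketch (dbCheck and myHandler are hypothetical placeholders):
//
//	srv := New(&Options{HealthChecks: []health.Checker{dbCheck}})
//	log.Fatal(srv.ListenAndServe(":8080", myHandler))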
func New(opts *Options) *Server {
srv := new(Server)
if opts != nil {
srv.reqlog = opts.RequestLogger
srv.te = opts.TraceExporter
for _, c := range opts.HealthChecks {
srv.healthHandler.Add(c)
}
srv.sampler = opts.DefaultSamplingPolicy
srv.driver = opts.Driver
}
return srv
}
func (srv *Server) init() {
srv.once.Do(func() {
if srv.te != nil {
trace.RegisterExporter(srv.te)
}
if srv.sampler != nil {
trace.ApplyConfig(trace.Config{DefaultSampler: srv.sampler})
}
if srv.driver == nil {
srv.driver = NewDefaultDriver()
}
})
}
// ListenAndServe is a wrapper to use wherever http.ListenAndServe is used.
// It wraps the passed-in http.Handler with a handler that handles tracing and
// request logging. If the handler is nil, then http.DefaultServeMux will be used.
// A configured Requestlogger will log all requests except HealthChecks.
func (srv *Server) ListenAndServe(addr string, h http.Handler) error {
srv.init()
// Setup health checks, /healthz route is taken by health checks by default.
// Note: App Engine Flex uses /_ah/health by default, which can be changed
// in app.yaml. We may want to do an auto-detection for flex in future.
hr := "/healthz/"
hcMux := http.NewServeMux()
hcMux.HandleFunc(path.Join(hr, "liveness"), health.HandleLive)
hcMux.Handle(path.Join(hr, "readiness"), &srv.healthHandler)
mux := http.NewServeMux()
mux.Handle(hr, hcMux)
h = http.Handler(handler{h})
if srv.reqlog != nil {
h = requestlog.NewHandler(srv.reqlog, h)
}
mux.Handle("/", h)
return srv.driver.ListenAndServe(addr, mux)
}
// Shutdown gracefully shuts down the server without interrupting any active connections.
func (srv *Server) Shutdown(ctx context.Context) error {
if srv.driver == nil {
return nil
}
return srv.driver.Shutdown(ctx)
}
// handler is a handler wrapper that handles tracing through OpenCensus for users.
// TODO(shantuo): unify handler types from trace, requestlog, health checks, etc together.
type handler struct {
handler http.Handler
}
func (h handler) ServeHTTP(w http.ResponseWriter, r *http.Request) {
ctx, span := trace.StartSpan(r.Context(), r.URL.Host+r.URL.Path)
defer span.End()
r = r.WithContext(ctx)
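	// handler has a value receiver, so the fallback assignment below only mutates
	// this call's local copy; a nil handler falls back to http.DefaultServeMux.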
if h.handler == nil {
h.handler = http.DefaultServeMux
}
h.handler.ServeHTTP(w, r)
}
// DefaultDriver implements the driver.Server interface. The zero value is a valid http.Server.
type DefaultDriver struct {
Server http.Server
}
// NewDefaultDriver creates a driver with an http.Server with default timeouts.
func NewDefaultDriver() *DefaultDriver {
return &DefaultDriver{
Server: http.Server{
ReadTimeout: 30 * time.Second,
WriteTimeout: 30 * time.Second,
IdleTimeout: 120 * time.Second,
},
}
}
// ListenAndServe sets the address and handler on DefaultDriver's http.Server,
// then calls ListenAndServe on it.
func (dd *DefaultDriver) ListenAndServe(addr string, h http.Handler) error {
dd.Server.Addr = addr
dd.Server.Handler = h
return dd.Server.ListenAndServe()
}
// Shutdown gracefully shuts down the server without interrupting any active connections,
// by calling Shutdown on DefaultDriver's http.Server.
func (dd *DefaultDriver) Shutdown(ctx context.Context) error {
return dd.Server.Shutdown(ctx)
}
| 1 | 16,459 | Why is this field exported and others aren't? | google-go-cloud | go |
@@ -273,6 +273,7 @@ var testFiles = [
, '/test/functional/byo_promises_tests.js'
// Functionality tests
+ , '/test/functional/dns_txt_records_tests.js'
, '/test/functional/mongo_client_tests.js'
, '/test/functional/collection_tests.js'
, '/test/functional/db_tests.js' | 1 | "use strict";
var Runner = require('integra').Runner
, Cover = require('integra').Cover
, RCover = require('integra').RCover
, f = require('util').format
, path = require('path')
, NodeVersionFilter = require('./filters/node_version_filter')
, MongoDBVersionFilter = require('./filters/mongodb_version_filter')
, MongoDBTopologyFilter = require('./filters/mongodb_topology_filter')
, ES6PromisesSupportedFilter = require('./filters/es6_promises_supported_filter')
, ES6GeneratorsSupportedFilter = require('./filters/es6_generators_supported_filter')
, OSFilter = require('./filters/os_filter')
, TravisFilter = require('./filters/travis_filter')
, FileFilter = require('integra').FileFilter
, TestNameFilter = require('integra').TestNameFilter
, semver = require('semver');
var detector = require('gleak')();
var smokePlugin = require('./smoke_plugin.js');
var argv = require('optimist')
.usage('Usage: $0 -t [target] -e [environment] -n [name] -f [filename] -r [smoke report file]')
.demand(['t'])
.argv;
// MongoDB Topology Manager
var ServerManager = require('mongodb-topology-manager').Server,
ReplSetManager = require('mongodb-topology-manager').ReplSet,
ShardingManager = require('./topology_test_definitions.js').Sharded;
// Skipping parameters
var startupOptions = {
skipStartup: false
, skipRestart: false
, skipShutdown: false
, skip: false
}
// Skipping parameters
if(argv.s) {
var startupOptions = {
skipStartup: true
, skipRestart: true
, skipShutdown: true
, skip: false
}
}
/**
* Standalone MongoDB Configuration
*/
var f = require('util').format;
var mongo = require('..');
var Logger = mongo.Logger;
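// Shallow clone helper: nested values are shared with the source object, which is
// sufficient here because the option objects being copied are flat.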
var clone = function(obj) {
var copy = {};
for(var name in obj) copy[name] = obj[name];
return copy;
}
var Configuration = function(options) {
options = options || {};
var host = options.host || 'localhost';
var port = options.port || 27017;
var db = options.db || 'integration_tests';
var url = options.url || "mongodb://%slocalhost:27017/" + db;
var manager = options.manager;
var skipStart = typeof options.skipStart == 'boolean' ? options.skipStart : false;
var skipTermination = typeof options.skipTermination == 'boolean' ? options.skipTermination : false;
var setName = options.setName || 'rs';
var replicasetName = options.replicasetName || 'rs';
// Write concerns
var writeConcern = options.writeConcern || {w:1};
var writeConcernMax = options.writeConcernMax || {w:1};
// Default function
var defaultFunction = function(host, port, options) {
return new mongo.Server(host, port, options || {});
};
// Create a topology function
var topology = options.topology || defaultFunction;
return function(context) {
return {
start: function(callback) {
var self = this;
if(skipStart) return callback();
manager.discover().then(function(result) {
// Create string representation
var currentVersion = result.version.join('.');
console.log("==== Running against MongodDB " + currentVersion);
// If we have a ReplSetManager and the version is >= 3.4.0
if(semver.satisfies(currentVersion, ">=3.4.0")) {
if(manager instanceof ReplSetManager) {
manager.managers = manager.managers.map(function(manager) {
manager.options.enableMajorityReadConcern = null;
return manager;
});
}
}
// Purge the database
manager.purge().then(function() {
console.log("[purge the directories]");
var Logger = require('mongodb-topology-manager').Logger;
return manager.start().then(function() {
console.log("[started the topology]");
var Logger = require('mongodb-topology-manager').Logger;
// Logger.setLevel('info');
// Create an instance
new mongo.Db(self.db, topology(host, port)).open(function(err, db) {
if(err) return callback(err);
db.dropDatabase(function(err) {
db.close();
callback();
});
});
}).catch(function(err) {
console.log(err.stack);
});
}).catch(function(err) {
console.log(err.stack);
});
}).catch(function(err) {
console.log(err.stack);
});
},
stop: function(callback) {
if(skipTermination) return callback();
// Stop the servers
manager.stop(9).then(function() {
callback();
});
},
restart: function(options, callback) {
if(typeof options == 'function') callback = options, options = {purge:true, kill:true};
if(skipTermination) return callback();
// Stop the servers
manager.restart().then(function() {
callback();
});
},
setup: function(callback) {
callback();
},
teardown: function(callback) {
callback();
},
newDbInstance: function(dbOptions, serverOptions) {
serverOptions = serverOptions || {};
// Override implementation
if(options.newDbInstance) {
return options.newDbInstance(dbOptions, serverOptions);
}
// Set up the options
var keys = Object.keys(options);
if(keys.indexOf('sslOnNormalPorts') != -1) serverOptions.ssl = true;
// Fall back
var port = serverOptions && serverOptions.port || options.port || 27017;
var host = serverOptions && serverOptions.host || 'localhost';
// Default topology
var topology = mongo.Server;
// If we have a specific topology
if(options.topology) {
topology = options.topology;
}
// Return a new db instance
return new mongo.Db(db, new topology(host, port, serverOptions), dbOptions);
},
newDbInstanceWithDomainSocket: function(dbOptions, serverOptions) {
// Override implementation
if(options.newDbInstanceWithDomainSocket) return options.newDbInstanceWithDomainSocket(dbOptions, serverOptions);
// Default topology
var topology = mongo.Server;
// If we have a specific topology
if(options.topology) {
topology = options.topology;
}
// Fall back
var host = serverOptions && serverOptions.host || "/tmp/mongodb-27017.sock";
// Set up the options
var keys = Object.keys(options);
if(keys.indexOf('sslOnNormalPorts') != -1) serverOptions.ssl = true;
// If we explicitly testing undefined port behavior
if(serverOptions && serverOptions.port == 'undefined') {
return new mongo.Db(db, topology(host, undefined, serverOptions), dbOptions);
}
// Normal socket connection
return new mongo.Db(db, topology(host, serverOptions), dbOptions);
},
url: function(username, password) {
// Fall back
var auth = "";
if(username && password) {
auth = f("%s:%s@", username, password);
}
return f(url, auth);
},
// Additional parameters needed
database: db || options.db,
require: mongo,
port: port,
host: host,
setName: setName,
db: db,
manager: manager,
replicasetName: replicasetName,
writeConcern: function() { return clone(writeConcern) },
writeConcernMax: function() { return clone(writeConcernMax) }
}
}
}
// Set up the runner
var runner = new Runner({
logLevel:'error'
// logLevel:'info'
, runners: 1
, failFast: true
});
var testFiles = [
// Logging tests
'/test/functional/logger_tests.js'
// APM tests
, '/test/functional/apm_tests.js'
// Connection spec tests
, '/test/functional/connection_string_spec_tests.js'
// Replicaset read concern (make sure no illegal state due to teardown tests)
, '/test/functional/readconcern_tests.js'
// Promise tests
, '/test/functional/promises_db_tests.js'
, '/test/functional/promises_collection_tests.js'
, '/test/functional/promises_cursor_tests.js'
, '/test/functional/operation_promises_example_tests.js'
, '/test/functional/byo_promises_tests.js'
// Functionality tests
, '/test/functional/mongo_client_tests.js'
, '/test/functional/collection_tests.js'
, '/test/functional/db_tests.js'
, '/test/functional/cursor_tests.js'
, '/test/functional/insert_tests.js'
, '/test/functional/aggregation_tests.js'
, '/test/functional/connection_tests.js'
, '/test/functional/cursorstream_tests.js'
, '/test/functional/custom_pk_tests.js'
, '/test/functional/error_tests.js'
, '/test/functional/find_tests.js'
, '/test/functional/index_tests.js'
, '/test/functional/mapreduce_tests.js'
, '/test/functional/maxtimems_tests.js'
, '/test/functional/multiple_db_tests.js'
, '/test/functional/object_id_tests.js'
, '/test/functional/raw_tests.js'
, '/test/functional/readpreference_tests.js'
, '/test/functional/remove_tests.js'
, '/test/functional/unicode_tests.js'
, '/test/functional/uri_tests.js'
, '/test/functional/url_parser_tests.js'
, '/test/functional/gridfs_tests.js'
, '/test/functional/bulk_tests.js'
, '/test/functional/operation_example_tests.js'
, '/test/functional/crud_api_tests.js'
, '/test/functional/document_validation_tests.js'
, '/test/functional/ignore_undefined_tests.js'
, '/test/functional/mongo_client_options_tests.js'
, '/test/functional/decimal128_tests.js'
, '/test/functional/find_and_modify_tests.js'
  , '/test/functional/hang_tests.js'
  , '/test/functional/disconnect_handler_tests.js'
  , '/test/functional/promote_values_tests.js'
  , '/test/functional/promote_buffers_tests.js'
// Replicaset tests
, '/test/functional/replset_read_preference_tests.js'
, '/test/functional/replset_operations_tests.js'
, '/test/functional/replset_failover_tests.js'
, '/test/functional/replset_connection_tests.js'
, '/test/functional/sdam_tests.js'
// Sharding tests
, '/test/functional/sharding_failover_tests.js'
, '/test/functional/sharding_connection_tests.js'
, '/test/functional/sharding_read_preference_tests.js'
// SSL tests
, '/test/functional/ssl_mongoclient_tests.js'
, '/test/functional/ssl_validation_tests.js'
, '/test/functional/ssl_x509_connect_tests.js'
, '/test/functional/sni_tests.js'
// SCRAM tests
, '/test/functional/scram_tests.js'
// LDAP Tests
, '/test/functional/ldap_tests.js'
// Kerberos Tests
, '/test/functional/kerberos_tests.js'
// Authentication Tests
, '/test/functional/authentication_tests.js'
// GridFS
, '/test/functional/gridfs_stream_tests.js'
// Reconnection tests
  , '/test/functional/reconnect_tests.js'
  // Bug tests
  , '/test/functional/jira_bug_tests.js'
// Domain tests
, '/test/functional/domain_tests.js'
]
// Check if we support es6 generators
try {
eval("(function *(){})");
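  // If the eval above did not throw a SyntaxError, this runtime can parse ES6
  // generators, so the generator-based suites below are safe to register.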
// Generator tests
testFiles.push('/test/functional/operation_generators_example_tests.js');
// Mock tests
testFiles.push('/test/functional/command_write_concern_tests.js');
testFiles.push('/test/functional/collations_tests.js');
testFiles.push('/test/functional/max_staleness_tests.js');
// testFiles.push('/test/functional/buffering_proxy_tests.js');
testFiles.push('/test/functional/view_tests.js');
testFiles.push('/test/functional/crud_spec_tests.js');
testFiles.push('/test/functional/generator_based_tests.js');
testFiles.push('/test/functional/replicaset_mock_tests.js');
// Test examples
testFiles.push('/test/functional/examples_tests.js');
} catch(err) {}
// Add all the tests to run
testFiles.forEach(function(t) {
if(t != "") runner.add(t);
});
// // Add the Coverage plugin
// runner.plugin(new Cover({
// logLevel: "info"
// , filters: [
// /_tests.js/
// , "js-bson"
// , "/tests/"
// , "/tools/"
// ]
// }));
// // Add the RCoverage plugin
// runner.plugin(new RCover({
// logLevel: "info"
// , filters: [
// /_tests.js/
// , "js-bson"
// , "/tests/"
// , "/tools/"
// ]
// }));
// Filter by OS
runner.plugin(new OSFilter());
// Add a Node version plugin
runner.plugin(new NodeVersionFilter(startupOptions));
// Add a MongoDB version plugin
runner.plugin(new MongoDBVersionFilter(startupOptions));
// Add a Topology filter plugin
runner.plugin(new MongoDBTopologyFilter(startupOptions));
// Add a Filter allowing us to specify that a function requires Promises
runner.plugin(new ES6PromisesSupportedFilter());
// Add a Filter allowing us to validate if generators are available
runner.plugin(new ES6GeneratorsSupportedFilter());
// Exit when done
runner.on('exit', function(errors, results) {
process.exit(0)
});
// Set Logger level for driver
// Logger.setLevel('info');
Logger.setLevel('error');
// Logger.setLevel('debug');
// Logger.filter('class', ['ReplSet', 'Server', 'Connection']);
// Logger.filter('class', ['ReplSet', 'Server', 'Pool', 'Connection']);
// Logger.filter('class', ['ReplSet', 'Server', 'Cursor']);
//Logger.filter('class', ['Mongos', 'Server']);
//Logger.filter('class', ['Mongos', 'Server']);
// Logger.filter('class', ['Mongos']);
// Logger.filter('class', ['ReplSet']);
// We want to export a smoke.py style json file
if(argv.r) {
console.log("Writing smoke output to " + argv.r);
smokePlugin.attachToRunner(runner, argv.r);
}
// Are we running a functional test
if(argv.t == 'functional') {
// Contain the config
var config = null;
//
// Execute the final code
var executeTestSuite = function(_config) {
// If we have a test we are filtering by
if(argv.f) {
runner.plugin(new FileFilter(argv.f));
}
if(argv.n) {
runner.plugin(new TestNameFilter(argv.n));
}
// Add travis filter
runner.plugin(new TravisFilter());
// Skip startup
if(startupOptions.skipStartup) {
return runner.run(Configuration(_config));
}
// Skip the version download and use local mongod in PATH
if(argv.l) {
return runner.run(Configuration(_config));
}
// Get the mongodb-version-manager
var m = require('mongodb-version-manager');
// Kill any running MongoDB processes and
// `install $MONGODB_VERSION` || `use existing installation` || `install stable`
m(function(err){
if(err) return console.error(err) && process.exit(1);
m.current(function(err, version){
if(err) return console.error(err) && process.exit(1);
console.log('Running tests against MongoDB version `%s`', version);
// Run the configuration
runner.run(Configuration(_config));
});
});
}
//
// Replicaset configuration
if(argv.e == 'replicaset') {
// Establish the server version
new ServerManager('mongod').discover().then(function(r) {
// The individual nodes
var nodes = [{
tags: {loc: 'ny'},
// mongod process options
options: {
bind_ip: 'localhost',
port: 31000,
dbpath: f('%s/../db/31000', __dirname),
setParameter: ['enableTestCommands=1']
}
}, {
tags: {loc: 'sf'},
options: {
bind_ip: 'localhost',
port: 31001,
dbpath: f('%s/../db/31001', __dirname),
setParameter: ['enableTestCommands=1']
}
}, {
tags: {loc: 'sf'},
options: {
bind_ip: 'localhost',
port: 31002,
dbpath: f('%s/../db/31002', __dirname),
setParameter: ['enableTestCommands=1']
}
}, {
tags: {loc: 'sf'},
priority: 0,
options: {
bind_ip: 'localhost',
port: 31003,
dbpath: f('%s/../db/31003', __dirname),
setParameter: ['enableTestCommands=1']
}
}, {
arbiter: true,
options: {
bind_ip: 'localhost',
port: 31004,
dbpath: f('%s/../db/31004', __dirname),
setParameter: ['enableTestCommands=1']
}
}];
// Do we have 3.2
if(r.version[0] == 3 && r.version[1] == 2) {
nodes = nodes.map(function(x) {
x.options.enableMajorityReadConcern = null;
return x;
});
}
// Test suite Configuration
config = {
host: 'localhost', port: 31000, setName: 'rs'
, url: "mongodb://%slocalhost:31000/integration_tests?rs_name=rs"
, writeConcernMax: {w: 'majority', wtimeout: 30000}
, replicasetName: 'rs'
, skipStart: startupOptions.skipStartup
, skipTermination: startupOptions.skipShutdown
, topology: function(host, port, serverOptions) {
host = host || 'localhost'; port = port || 31000;
serverOptions = clone(serverOptions);
serverOptions.rs_name = 'rs';
serverOptions.poolSize = 1;
serverOptions.autoReconnect = false;
return new mongo.ReplSet([
new mongo.Server(host, port, serverOptions)
], serverOptions);
}
, manager: new ReplSetManager('mongod', nodes, {
replSet: 'rs'
})
}
// Execute test suite
executeTestSuite(config);
});
}
//
// Sharded configuration
if(argv.e == 'sharded') {
//
// Sharded
config = {
host: 'localhost'
, port: 51000
, url: "mongodb://%slocalhost:51000/integration_tests"
, writeConcernMax: {w: 'majority', wtimeout: 30000}
, skipStart: startupOptions.skipStartup
, skipTermination: startupOptions.skipShutdown
, topology: function(host, port, options) {
var options = options || {};
options.autoReconnect = false;
return new mongo.Mongos([
new mongo.Server(host, port, options)
], options);
}, manager: new ShardingManager({})
}
executeTestSuite(config);
}
//
// SSL configuration
if(argv.e == 'ssl') {
// Create ssl server
config = {
sslOnNormalPorts: null
, fork:null
, sslPEMKeyFile: __dirname + "/functional/ssl/server.pem"
, url: "mongodb://%slocalhost:27017/integration_tests?ssl=true&sslValidate=false"
, topology: function(host, port, serverOptions) {
host = host || 'localhost';
port = port || 27017;
serverOptions = clone(serverOptions);
serverOptions.poolSize = 1;
serverOptions.ssl = true;
serverOptions.sslValidate = false;
return new mongo.Server(host, port, serverOptions);
}, manager: new ServerManager('mongod', {
dbpath: path.join(path.resolve('db'), f("data-%d", 27017)),
sslOnNormalPorts: null,
sslPEMKeyFile: __dirname + "/functional/ssl/server.pem",
setParameter: ['enableTestCommands=1']
})
}
executeTestSuite(config);
}
//
// SSL configuration
if(argv.e == 'scram') {
// Create ssl server
config = {
url: "mongodb://%slocalhost:27017/integration_tests"
, topology: function(host, port, serverOptions) {
host = host || 'localhost';
port = port || 27017;
serverOptions = clone(serverOptions);
serverOptions.poolSize = 1;
return new mongo.Server(host, port, serverOptions);
}, manager: new ServerManager('mongod', {
dbpath: path.join(path.resolve('db'), f("data-%d", 27017)),
auth:null
})
}
executeTestSuite(config);
}
//
// Authentication Configuration
if(argv.e == 'auth') {
// Create ssl server
config = {
url: "mongodb://%slocalhost:27017/integration_tests"
, topology: function(host, port, serverOptions) {
host = host || 'localhost';
port = port || 27017;
serverOptions = clone(serverOptions);
serverOptions.poolSize = 1;
return new mongo.Server(host, port, serverOptions);
}, manager: new ServerManager('mongod', {
dbpath: path.join(path.resolve('db'), f("data-%d", 27017)),
auth:null
})
}
executeTestSuite(config);
}
//
// Single server
if(!argv.e || (argv.e == 'kerberos' || argv.e == 'ldap' || argv.e == 'sni')) {
config = {
host: 'localhost'
, port: 27017
, skipStart: startupOptions.skipStartup
, skipTermination: startupOptions.skipShutdown
, manager: new ServerManager('mongod', {
dbpath: path.join(path.resolve('db'), f("data-%d", 27017)),
setParameter: ['enableTestCommands=1']
})
}
executeTestSuite(config);
}
}
process.on('unhandledRejection', function(reason, p) {
console.log('Unhandled Rejection at: Promise', p, 'reason:', reason);
// application specific logging, throwing an error, or other logic here
}); | 1 | 14,145 | should we rename this like `mongodb_srv_tests.js`? | mongodb-node-mongodb-native | js |
@@ -46,7 +46,7 @@ class SearchConsoleDashboardWidgetSiteStats extends Component {
setOptions() {
const { selectedStats, series, vAxes } = this.props;
- const pageTitle = '' === googlesitekit.pageTitle ? '' : sprintf( __( 'Search Traffic Summary for %s', 'google-site-kit' ), decodeHtmlEntity( googlesitekit.pageTitle ) );
+ const pageTitle = googlesitekit.pageTitle && googlesitekit.pageTitle.length ? sprintf( __( 'Search Traffic Summary for %s', 'google-site-kit' ), decodeHtmlEntity( googlesitekit.pageTitle ) ) : '';
const options = {
chart: { | 1 | /**
* SearchConsoleDashboardWidgetSiteStats component.
*
* Site Kit by Google, Copyright 2019 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* External dependencies
*/
import withData from 'GoogleComponents/higherorder/withdata';
import { TYPE_MODULES } from 'GoogleComponents/data';
import GoogleChart from 'GoogleComponents/google-chart.js';
import PreviewBlock from 'GoogleComponents/preview-block';
import { decodeHtmlEntity, getTimeInSeconds } from 'GoogleUtil';
/**
* Internal dependencies
*/
import { extractSearchConsoleDashboardData } from './util';
/**
* WordPress dependencies
*/
import { __, sprintf } from '@wordpress/i18n';
import { Component } from '@wordpress/element';
class SearchConsoleDashboardWidgetSiteStats extends Component {
constructor( props ) {
super( props );
this.setOptions = this.setOptions.bind( this );
}
setOptions() {
const { selectedStats, series, vAxes } = this.props;
const pageTitle = '' === googlesitekit.pageTitle ? '' : sprintf( __( 'Search Traffic Summary for %s', 'google-site-kit' ), decodeHtmlEntity( googlesitekit.pageTitle ) );
const options = {
chart: {
title: pageTitle,
},
curveType: 'line',
height: 270,
width: '100%',
chartArea: {
height: '80%',
width: '87%',
},
legend: {
position: 'top',
textStyle: {
color: '#616161',
fontSize: 12,
},
},
hAxis: {
format: 'M/d/yy',
gridlines: {
color: '#fff',
},
textStyle: {
color: '#616161',
fontSize: 12,
},
},
vAxis: {
gridlines: {
color: '#eee',
},
minorGridlines: {
color: '#eee',
},
textStyle: {
color: '#616161',
fontSize: 12,
},
titleTextStyle: {
color: '#616161',
fontSize: 12,
italic: false,
},
},
};
options.series = series;
options.vAxes = vAxes;
// Clean up chart if more than three stats are selected.
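		// Presumably to avoid axis clutter: with three or more selected metrics the
		// per-metric vAxes are hidden and the chart area expands instead.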
if ( 3 <= selectedStats.length ) {
options.vAxis.textPosition = 'none';
options.vAxis.gridlines.color = '#fff';
options.vAxis.minorGridlines.color = '#fff';
options.vAxes = {};
options.chartArea.width = '98%';
}
return options;
}
render() {
const { data, selectedStats } = this.props;
if ( ! data || ! data.length ) {
return null;
}
const options = this.setOptions();
const processedData = extractSearchConsoleDashboardData( data );
return (
<section className="mdc-layout-grid">
<div className="mdc-layout-grid__inner">
<div className="mdc-layout-grid__cell mdc-layout-grid__cell--span-12">
<GoogleChart
selectedStats={ selectedStats }
data={ processedData.dataMap }
options={ options }
singleStat={ false }
/>
</div>
</div>
</section>
);
}
}
export default withData(
SearchConsoleDashboardWidgetSiteStats,
[
{
type: TYPE_MODULES,
identifier: 'search-console',
datapoint: 'searchanalytics',
data: {
dimensions: 'date',
compareDateRanges: true,
},
priority: 1,
maxAge: getTimeInSeconds( 'day' ),
context: 'Single',
},
],
<PreviewBlock width="100%" height="270px" padding />,
{ createGrid: true }
);
| 1 | 25,462 | This should still have a value when there is no page title, e.g. just `__( 'Search Traffic Summary', 'google-site-kit' )`. | google-site-kit-wp | js |
@@ -60,7 +60,10 @@ import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
import static org.apache.solr.common.params.CommonAdminParams.NUM_SUB_SHARDS;
-
+/**
+ * Index split request processed by Overseer. Requests from here go to the host of the parent shard,
+ * and are processed by @link.
+ */
public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final int MIN_NUM_SUB_SHARDS = 2; | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one or more
* contributor license agreements. See the NOTICE file distributed with
* this work for additional information regarding copyright ownership.
* The ASF licenses this file to You under the Apache License, Version 2.0
* (the "License"); you may not use this file except in compliance with
* the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package org.apache.solr.cloud.api.collections;
import org.apache.solr.client.solrj.cloud.DistribStateManager;
import org.apache.solr.client.solrj.cloud.NodeStateProvider;
import org.apache.solr.client.solrj.cloud.SolrCloudManager;
import org.apache.solr.client.solrj.cloud.VersionedData;
import org.apache.solr.client.solrj.request.CoreAdminRequest;
import org.apache.solr.cloud.Overseer;
import org.apache.solr.cloud.api.collections.OverseerCollectionMessageHandler.ShardRequestTracker;
import org.apache.solr.cloud.overseer.OverseerAction;
import org.apache.solr.common.SolrException;
import org.apache.solr.common.SolrException.ErrorCode;
import org.apache.solr.common.cloud.*;
import org.apache.solr.common.cloud.rule.ImplicitSnitch;
import org.apache.solr.common.params.CollectionAdminParams;
import org.apache.solr.common.params.CommonAdminParams;
import org.apache.solr.common.params.CommonParams;
import org.apache.solr.common.params.CoreAdminParams;
import org.apache.solr.common.params.ModifiableSolrParams;
import org.apache.solr.common.util.NamedList;
import org.apache.solr.common.util.SimpleOrderedMap;
import org.apache.solr.common.util.Utils;
import org.apache.solr.handler.component.ShardHandler;
import org.apache.solr.update.SolrIndexSplitter;
import org.apache.solr.util.RTimerTree;
import org.apache.solr.util.TestInjection;
import org.apache.zookeeper.CreateMode;
import org.apache.zookeeper.KeeperException;
import org.apache.zookeeper.data.Stat;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
import java.lang.invoke.MethodHandles;
import java.util.*;
import java.util.concurrent.atomic.AtomicInteger;
import java.util.concurrent.atomic.AtomicReference;
import static org.apache.solr.client.solrj.impl.SolrClientNodeStateProvider.Variable.CORE_IDX;
import static org.apache.solr.common.cloud.ZkStateReader.*;
import static org.apache.solr.common.params.CollectionAdminParams.FOLLOW_ALIASES;
import static org.apache.solr.common.params.CollectionParams.CollectionAction.*;
import static org.apache.solr.common.params.CommonAdminParams.ASYNC;
import static org.apache.solr.common.params.CommonAdminParams.NUM_SUB_SHARDS;
public class SplitShardCmd implements OverseerCollectionMessageHandler.Cmd {
private static final Logger log = LoggerFactory.getLogger(MethodHandles.lookup().lookupClass());
private static final int MIN_NUM_SUB_SHARDS = 2;
// This is an arbitrary number that seems reasonable at this time.
private static final int MAX_NUM_SUB_SHARDS = 8;
private static final int DEFAULT_NUM_SUB_SHARDS = 2;
private final OverseerCollectionMessageHandler ocmh;
public SplitShardCmd(OverseerCollectionMessageHandler ocmh) {
this.ocmh = ocmh;
}
@SuppressWarnings("unchecked")
@Override
public void call(ClusterState state, ZkNodeProps message, @SuppressWarnings({"rawtypes"})NamedList results) throws Exception {
    split(state, message, (NamedList<Object>) results);
}
@SuppressWarnings({"rawtypes"})
public boolean split(ClusterState clusterState, ZkNodeProps message, NamedList<Object> results) throws Exception {
final String asyncId = message.getStr(ASYNC);
boolean waitForFinalState = message.getBool(CommonAdminParams.WAIT_FOR_FINAL_STATE, false);
String methodStr = message.getStr(CommonAdminParams.SPLIT_METHOD, SolrIndexSplitter.SplitMethod.REWRITE.toLower());
SolrIndexSplitter.SplitMethod splitMethod = SolrIndexSplitter.SplitMethod.get(methodStr);
if (splitMethod == null) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unknown value '" + CommonAdminParams.SPLIT_METHOD +
": " + methodStr);
}
boolean withTiming = message.getBool(CommonParams.TIMING, false);
String extCollectionName = message.getStr(CoreAdminParams.COLLECTION);
boolean followAliases = message.getBool(FOLLOW_ALIASES, false);
String collectionName;
if (followAliases) {
collectionName = ocmh.cloudManager.getClusterStateProvider().resolveSimpleAlias(extCollectionName);
} else {
collectionName = extCollectionName;
}
log.debug("Split shard invoked: {}", message);
ZkStateReader zkStateReader = ocmh.zkStateReader;
zkStateReader.forceUpdateCollection(collectionName);
AtomicReference<String> slice = new AtomicReference<>();
slice.set(message.getStr(ZkStateReader.SHARD_ID_PROP));
Set<String> offlineSlices = new HashSet<>();
RTimerTree timings = new RTimerTree();
String splitKey = message.getStr("split.key");
DocCollection collection = clusterState.getCollection(collectionName);
Slice parentSlice = getParentSlice(clusterState, collectionName, slice, splitKey);
if (parentSlice.getState() != Slice.State.ACTIVE) {
throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Parent slice is not active: " +
collectionName + "/ " + parentSlice.getName() + ", state=" + parentSlice.getState());
}
// find the leader for the shard
Replica parentShardLeader;
try {
parentShardLeader = zkStateReader.getLeaderRetry(collectionName, slice.get(), 10000);
} catch (InterruptedException e) {
Thread.currentThread().interrupt();
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Interrupted.");
}
RTimerTree t;
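    // Presumably the free-disk pre-flight check depends on node metrics, so it is
    // skipped when metrics collection is disabled.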
if (ocmh.overseer.getCoreContainer().getNodeConfig().getMetricsConfig().isEnabled()) {
t = timings.sub("checkDiskSpace");
checkDiskSpace(collectionName, slice.get(), parentShardLeader, splitMethod, ocmh.cloudManager);
t.stop();
}
// let's record the ephemeralOwner of the parent leader node
Stat leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
if (leaderZnodeStat == null) {
// we just got to know the leader but its live node is gone already!
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
}
List<DocRouter.Range> subRanges = new ArrayList<>();
List<String> subSlices = new ArrayList<>();
List<String> subShardNames = new ArrayList<>();
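    // These three parallel lists are populated by fillRanges below: the hash range,
    // the slice name, and the core name of each new sub-shard.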
// reproduce the currently existing number of replicas per type
AtomicInteger numNrt = new AtomicInteger();
AtomicInteger numTlog = new AtomicInteger();
AtomicInteger numPull = new AtomicInteger();
parentSlice.getReplicas().forEach(r -> {
switch (r.getType()) {
case NRT:
numNrt.incrementAndGet();
break;
case TLOG:
numTlog.incrementAndGet();
break;
case PULL:
numPull.incrementAndGet();
}
});
int repFactor = numNrt.get() + numTlog.get() + numPull.get();
boolean success = false;
try {
// type of the first subreplica will be the same as leader
boolean firstNrtReplica = parentShardLeader.getType() == Replica.Type.NRT;
// verify that we indeed have the right number of correct replica types
if ((firstNrtReplica && numNrt.get() < 1) || (!firstNrtReplica && numTlog.get() < 1)) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "aborting split - inconsistent replica types in collection " + collectionName +
": nrt=" + numNrt.get() + ", tlog=" + numTlog.get() + ", pull=" + numPull.get() + ", shard leader type is " +
parentShardLeader.getType());
}
// check for the lock
if (!lockForSplit(ocmh.cloudManager, collectionName, parentSlice.getName())) {
// mark as success to avoid clearing the lock in the "finally" block
success = true;
throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Can't lock parent slice for splitting (another split operation running?): " +
collectionName + "/" + parentSlice.getName());
}
List<Map<String, Object>> replicas = new ArrayList<>((repFactor - 1) * 2);
@SuppressWarnings("deprecation")
ShardHandler shardHandler = ocmh.shardHandlerFactory.getShardHandler();
if (message.getBool(CommonAdminParams.SPLIT_BY_PREFIX, false)) {
t = timings.sub("getRanges");
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
params.set(CoreAdminParams.GET_RANGES, "true");
params.set(CommonAdminParams.SPLIT_METHOD, splitMethod.toLower());
params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
// Only 2 is currently supported
// int numSubShards = message.getInt(NUM_SUB_SHARDS, DEFAULT_NUM_SUB_SHARDS);
// params.set(NUM_SUB_SHARDS, Integer.toString(numSubShards));
{
final ShardRequestTracker shardRequestTracker = ocmh.syncRequestTracker();
shardRequestTracker.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler);
SimpleOrderedMap<Object> getRangesResults = new SimpleOrderedMap<>();
String msgOnError = "SPLITSHARD failed to invoke SPLIT.getRanges core admin command";
shardRequestTracker.processResponses(getRangesResults, shardHandler, true, msgOnError);
// Extract the recommended splits from the shard response (if it exists)
// example response: getRangesResults={success={127.0.0.1:62086_solr={responseHeader={status=0,QTime=1},ranges=10-20,3a-3f}}}
NamedList successes = (NamedList)getRangesResults.get("success");
if (successes != null && successes.size() > 0) {
NamedList shardRsp = (NamedList)successes.getVal(0);
String splits = (String)shardRsp.get(CoreAdminParams.RANGES);
if (splits != null) {
log.info("Resulting split ranges to be used: {} slice={} leader={}", splits, slice, parentShardLeader);
// change the message to use the recommended split ranges
message = message.plus(CoreAdminParams.RANGES, splits);
}
}
}
t.stop();
}
t = timings.sub("fillRanges");
String rangesStr = fillRanges(ocmh.cloudManager, message, collection, parentSlice, subRanges, subSlices, subShardNames, firstNrtReplica);
t.stop();
boolean oldShardsDeleted = false;
for (String subSlice : subSlices) {
Slice oSlice = collection.getSlice(subSlice);
if (oSlice != null) {
final Slice.State state = oSlice.getState();
if (state == Slice.State.ACTIVE) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Sub-shard: " + subSlice + " exists in active state. Aborting split shard.");
} else {
// delete the shards
log.info("Sub-shard: {} already exists therefore requesting its deletion", subSlice);
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, "deleteshard");
propMap.put(COLLECTION_PROP, collectionName);
propMap.put(SHARD_ID_PROP, subSlice);
ZkNodeProps m = new ZkNodeProps(propMap);
try {
ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList());
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "Unable to delete already existing sub shard: " + subSlice,
e);
}
oldShardsDeleted = true;
}
}
}
if (oldShardsDeleted) {
// refresh the locally cached cluster state
// we know we have the latest because otherwise deleteshard would have failed
clusterState = zkStateReader.getClusterState();
collection = clusterState.getCollection(collectionName);
}
String nodeName = parentShardLeader.getNodeName();
t = timings.sub("createSubSlicesAndLeadersInState");
for (int i = 0; i < subRanges.size(); i++) {
String subSlice = subSlices.get(i);
String subShardName = subShardNames.get(i);
DocRouter.Range subRange = subRanges.get(i);
log.debug("Creating slice {} of collection {} on {}", subSlice, collectionName, nodeName);
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, CREATESHARD.toLower());
propMap.put(ZkStateReader.SHARD_ID_PROP, subSlice);
propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
propMap.put(ZkStateReader.SHARD_RANGE_PROP, subRange.toString());
propMap.put(ZkStateReader.SHARD_STATE_PROP, Slice.State.CONSTRUCTION.toString());
propMap.put(ZkStateReader.SHARD_PARENT_PROP, parentSlice.getName());
propMap.put("shard_parent_node", nodeName);
propMap.put("shard_parent_zk_session", leaderZnodeStat.getEphemeralOwner());
ocmh.overseer.offerStateUpdate(Utils.toJSON(new ZkNodeProps(propMap)));
// wait until we are able to see the new shard in cluster state and refresh the local view of the cluster state
clusterState = ocmh.waitForNewShard(collectionName, subSlice);
log.debug("Adding first replica {} as part of slice {} of collection {} on {}"
, subShardName, subSlice, collectionName, nodeName);
propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
propMap.put(COLLECTION_PROP, collectionName);
propMap.put(SHARD_ID_PROP, subSlice);
propMap.put(REPLICA_TYPE, firstNrtReplica ? Replica.Type.NRT.toString() : Replica.Type.TLOG.toString());
propMap.put("node", nodeName);
propMap.put(CoreAdminParams.NAME, subShardName);
propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
// copy over property params:
for (String key : message.keySet()) {
if (key.startsWith(CollectionAdminParams.PROPERTY_PREFIX)) {
propMap.put(key, message.getStr(key));
}
}
// add async param
if (asyncId != null) {
propMap.put(ASYNC, asyncId);
}
ocmh.addReplica(clusterState, new ZkNodeProps(propMap), results, null);
}
{
final ShardRequestTracker syncRequestTracker = ocmh.syncRequestTracker();
String msgOnError = "SPLITSHARD failed to create subshard leaders";
syncRequestTracker.processResponses(results, shardHandler, true, msgOnError);
handleFailureOnAsyncRequest(results, msgOnError);
}
t.stop();
t = timings.sub("waitForSubSliceLeadersAlive");
{
final ShardRequestTracker shardRequestTracker = ocmh.asyncRequestTracker(asyncId);
for (String subShardName : subShardNames) {
// wait for parent leader to acknowledge the sub-shard core
log.debug("Asking parent leader to wait for: {} to be alive on: {}", subShardName, nodeName);
String coreNodeName = ocmh.waitForCoreNodeName(collectionName, nodeName, subShardName);
CoreAdminRequest.WaitForState cmd = new CoreAdminRequest.WaitForState();
cmd.setCoreName(subShardName);
cmd.setNodeName(nodeName);
cmd.setCoreNodeName(coreNodeName);
cmd.setState(Replica.State.ACTIVE);
cmd.setCheckLive(true);
cmd.setOnlyIfLeader(true);
ModifiableSolrParams p = new ModifiableSolrParams(cmd.getParams());
shardRequestTracker.sendShardRequest(nodeName, p, shardHandler);
}
String msgOnError = "SPLITSHARD timed out waiting for subshard leaders to come up";
shardRequestTracker.processResponses(results, shardHandler, true, msgOnError);
handleFailureOnAsyncRequest(results, msgOnError);
}
t.stop();
log.debug("Successfully created all sub-shards for collection {} parent shard: {} on: {}"
, collectionName, slice, parentShardLeader);
if (log.isInfoEnabled()) {
log.info("Splitting shard {} as part of slice {} of collection {} on {}"
, parentShardLeader.getName(), slice, collectionName, parentShardLeader);
}
ModifiableSolrParams params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.SPLIT.toString());
params.set(CommonAdminParams.SPLIT_METHOD, splitMethod.toLower());
params.set(CoreAdminParams.CORE, parentShardLeader.getStr("core"));
for (int i = 0; i < subShardNames.size(); i++) {
String subShardName = subShardNames.get(i);
params.add(CoreAdminParams.TARGET_CORE, subShardName);
}
params.set(CoreAdminParams.RANGES, rangesStr);
t = timings.sub("splitParentCore");
{
final ShardRequestTracker shardRequestTracker = ocmh.asyncRequestTracker(asyncId);
shardRequestTracker.sendShardRequest(parentShardLeader.getNodeName(), params, shardHandler);
String msgOnError = "SPLITSHARD failed to invoke SPLIT core admin command";
shardRequestTracker.processResponses(results, shardHandler, true, msgOnError);
handleFailureOnAsyncRequest(results, msgOnError);
}
t.stop();
if (log.isDebugEnabled()) {
log.debug("Index on shard: {} split into {} successfully", nodeName, subShardNames.size());
}
t = timings.sub("applyBufferedUpdates");
// apply buffered updates on sub-shards
{
final ShardRequestTracker shardRequestTracker = ocmh.asyncRequestTracker(asyncId);
for (int i = 0; i < subShardNames.size(); i++) {
String subShardName = subShardNames.get(i);
log.debug("Applying buffered updates on : {}", subShardName);
params = new ModifiableSolrParams();
params.set(CoreAdminParams.ACTION, CoreAdminParams.CoreAdminAction.REQUESTAPPLYUPDATES.toString());
params.set(CoreAdminParams.NAME, subShardName);
shardRequestTracker.sendShardRequest(nodeName, params, shardHandler);
}
String msgOnError = "SPLITSHARD failed while asking sub shard leaders to apply buffered updates";
shardRequestTracker.processResponses(results, shardHandler, true, msgOnError);
handleFailureOnAsyncRequest(results, msgOnError);
}
t.stop();
log.debug("Successfully applied buffered updates on : {}", subShardNames);
// Replica creation for the new Slices
Set<String> nodes = clusterState.getLiveNodes();
List<String> nodeList = new ArrayList<>(nodes.size());
nodeList.addAll(nodes);
// Remove the node that hosts the parent shard for replica creation.
nodeList.remove(nodeName);
// TODO: change this to handle sharding a slice into > 2 sub-shards.
// we have already created one subReplica for each subShard on the parent node.
// identify locations for the remaining replicas
if (firstNrtReplica) {
numNrt.decrementAndGet();
} else {
numTlog.decrementAndGet();
}
t = timings.sub("identifyNodesForReplicas");
Assign.AssignRequest assignRequest = new Assign.AssignRequestBuilder()
.forCollection(collectionName)
.forShard(subSlices)
.assignNrtReplicas(numNrt.get())
.assignTlogReplicas(numTlog.get())
.assignPullReplicas(numPull.get())
.onNodes(new ArrayList<>(clusterState.getLiveNodes()))
.build();
Assign.AssignStrategy assignStrategy = Assign.createAssignStrategy(
ocmh.overseer.getCoreContainer(),
clusterState, collection);
List<ReplicaPosition> replicaPositions = assignStrategy.assign(ocmh.cloudManager, assignRequest);
t.stop();
t = timings.sub("createReplicaPlaceholders");
for (ReplicaPosition replicaPosition : replicaPositions) {
String sliceName = replicaPosition.shard;
String subShardNodeName = replicaPosition.node;
String solrCoreName = Assign.buildSolrCoreName(collectionName, sliceName, replicaPosition.type, replicaPosition.index);
log.debug("Creating replica shard {} as part of slice {} of collection {} on {}"
, solrCoreName, sliceName, collectionName, subShardNodeName);
// we first create all replicas in DOWN state without actually creating their cores in order to
// avoid a race condition where Overseer may prematurely activate the new sub-slices (and deactivate
// the parent slice) before all new replicas are added. This situation may lead to a loss of performance
// because the new shards will be activated with possibly many fewer replicas.
ZkNodeProps props = new ZkNodeProps(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower(),
ZkStateReader.COLLECTION_PROP, collectionName,
ZkStateReader.SHARD_ID_PROP, sliceName,
ZkStateReader.CORE_NAME_PROP, solrCoreName,
ZkStateReader.REPLICA_TYPE, replicaPosition.type.name(),
ZkStateReader.STATE_PROP, Replica.State.DOWN.toString(),
ZkStateReader.NODE_NAME_PROP, subShardNodeName,
CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
ocmh.overseer.offerStateUpdate(Utils.toJSON(props));
HashMap<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, ADDREPLICA.toLower());
propMap.put(COLLECTION_PROP, collectionName);
propMap.put(SHARD_ID_PROP, sliceName);
propMap.put(REPLICA_TYPE, replicaPosition.type.name());
propMap.put("node", subShardNodeName);
propMap.put(CoreAdminParams.NAME, solrCoreName);
// copy over property params:
for (String key : message.keySet()) {
if (key.startsWith(CollectionAdminParams.PROPERTY_PREFIX)) {
propMap.put(key, message.getStr(key));
}
}
// add async param
if (asyncId != null) {
propMap.put(ASYNC, asyncId);
}
// special flag param to instruct addReplica not to create the replica in cluster state again
propMap.put(OverseerCollectionMessageHandler.SKIP_CREATE_REPLICA_IN_CLUSTER_STATE, "true");
propMap.put(CommonAdminParams.WAIT_FOR_FINAL_STATE, Boolean.toString(waitForFinalState));
replicas.add(propMap);
}
t.stop();
assert TestInjection.injectSplitFailureBeforeReplicaCreation();
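// TestInjection calls are wrapped in asserts, so they only take effect when
// assertions are enabled (as in tests); in normal runs the expression is skipped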
long ephemeralOwner = leaderZnodeStat.getEphemeralOwner();
// compare against the ephemeralOwner of the parent leader node
leaderZnodeStat = zkStateReader.getZkClient().exists(ZkStateReader.LIVE_NODES_ZKNODE + "/" + parentShardLeader.getNodeName(), null, true);
if (leaderZnodeStat == null || ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
// put sub-shards in recovery_failed state
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
for (String subSlice : subSlices) {
propMap.put(subSlice, Slice.State.RECOVERY_FAILED.toString());
}
propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
ZkNodeProps m = new ZkNodeProps(propMap);
ocmh.overseer.offerStateUpdate(Utils.toJSON(m));
if (leaderZnodeStat == null) {
// the leader is not live anymore, fail the split!
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "The shard leader node: " + parentShardLeader.getNodeName() + " is not live anymore!");
} else if (ephemeralOwner != leaderZnodeStat.getEphemeralOwner()) {
// there's a new leader, fail the split!
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR,
"The zk session id for the shard leader node: " + parentShardLeader.getNodeName() + " has changed from "
+ ephemeralOwner + " to " + leaderZnodeStat.getEphemeralOwner() + ". This can cause data loss so we must abort the split");
}
}
// we must set the slice state into recovery before actually creating the replica cores
// this ensures that the logic inside ReplicaMutator to update sub-shard state to 'active'
// always gets a chance to execute. See SOLR-7673
if (repFactor == 1) {
// A commit is needed so that documents are visible when the sub-shard replicas come up
// (Note: This commit used to be after the state switch, but was brought here before the state switch
// as per SOLR-13945 so that sub shards don't come up empty, momentarily, after being marked active)
t = timings.sub("finalCommit");
ocmh.commit(results, slice.get(), parentShardLeader);
t.stop();
// switch sub shard states to 'active'
log.info("Replication factor is 1 so switching shard states");
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
propMap.put(slice.get(), Slice.State.INACTIVE.toString());
for (String subSlice : subSlices) {
propMap.put(subSlice, Slice.State.ACTIVE.toString());
}
propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
ZkNodeProps m = new ZkNodeProps(propMap);
ocmh.overseer.offerStateUpdate(Utils.toJSON(m));
} else {
log.info("Requesting shard state be set to 'recovery'");
Map<String, Object> propMap = new HashMap<>();
propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
for (String subSlice : subSlices) {
propMap.put(subSlice, Slice.State.RECOVERY.toString());
}
propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
ZkNodeProps m = new ZkNodeProps(propMap);
ocmh.overseer.offerStateUpdate(Utils.toJSON(m));
}
t = timings.sub("createCoresForReplicas");
// now actually create replica cores on sub shard nodes
for (Map<String, Object> replica : replicas) {
ocmh.addReplica(clusterState, new ZkNodeProps(replica), results, null);
}
assert TestInjection.injectSplitFailureAfterReplicaCreation();
{
final ShardRequestTracker syncRequestTracker = ocmh.syncRequestTracker();
String msgOnError = "SPLITSHARD failed to create subshard replicas";
syncRequestTracker.processResponses(results, shardHandler, true, msgOnError);
handleFailureOnAsyncRequest(results, msgOnError);
}
t.stop();
log.info("Successfully created all replica shards for all sub-slices {}", subSlices);
// The final commit was added in SOLR-4997 so that documents are visible
// when the sub-shard replicas come up
if (repFactor > 1) {
t = timings.sub("finalCommit");
ocmh.commit(results, slice.get(), parentShardLeader);
t.stop();
}
if (withTiming) {
results.add(CommonParams.TIMING, timings.asNamedList());
}
success = true;
// don't unlock the shard yet - only do this if the final switch-over in
// ReplicaMutator succeeds (or fails)
return true;
} catch (SolrException e) {
throw e;
} catch (Exception e) {
log.error("Error executing split operation for collection: {} parent shard: {}", collectionName, slice, e);
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, null, e);
} finally {
if (!success) {
cleanupAfterFailure(zkStateReader, collectionName, parentSlice.getName(), subSlices, offlineSlices);
unlockForSplit(ocmh.cloudManager, collectionName, parentSlice.getName());
}
}
}
/**
* In case of async requests, the ShardRequestTracker's processResponses() does not
* abort on failure (as it should). Handle the failure here as a temporary workaround.
*/
private void handleFailureOnAsyncRequest(@SuppressWarnings({"rawtypes"})NamedList results, String msgOnError) {
Object splitResultFailure = results.get("failure");
if (splitResultFailure != null) {
throw new SolrException(ErrorCode.SERVER_ERROR, msgOnError);
}
}
// public and static to facilitate reuse in the simulation framework and in tests
public static void checkDiskSpace(String collection, String shard, Replica parentShardLeader, SolrIndexSplitter.SplitMethod method, SolrCloudManager cloudManager) throws SolrException {
// check that enough disk space is available on the parent leader node
// otherwise the actual index splitting will always fail
NodeStateProvider nodeStateProvider = cloudManager.getNodeStateProvider();
Map<String, Object> nodeValues = nodeStateProvider.getNodeValues(parentShardLeader.getNodeName(),
Collections.singletonList(ImplicitSnitch.DISK));
Map<String, Map<String, List<Replica>>> infos = nodeStateProvider.getReplicaInfo(parentShardLeader.getNodeName(),
Collections.singletonList(CORE_IDX.metricsAttribute));
if (infos.get(collection) == null || infos.get(collection).get(shard) == null) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing replica information for parent shard leader");
}
// find the leader
List<Replica> lst = infos.get(collection).get(shard);
Double indexSize = null;
for (Replica info : lst) {
if (info.getCoreName().equals(parentShardLeader.getCoreName())) {
Number size = (Number) info.get(CORE_IDX.metricsAttribute);
if (size == null) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing index size information for parent shard leader");
}
indexSize = (Double) CORE_IDX.convertVal(size);
break;
}
}
if (indexSize == null) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing replica information for parent shard leader");
}
Number freeSize = (Number)nodeValues.get(ImplicitSnitch.DISK);
if (freeSize == null) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "missing node disk space information for parent shard leader");
}
// 100% more for REWRITE, 5% more for LINK
double neededSpace = method == SolrIndexSplitter.SplitMethod.REWRITE ? 2.0 * indexSize : 1.05 * indexSize;
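// e.g. splitting a 10 GB index needs 20 GB of free space with REWRITE but only 10.5 GB with LINK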
if (freeSize.doubleValue() < neededSpace) {
throw new SolrException(SolrException.ErrorCode.SERVER_ERROR, "not enough free disk space to perform index split on node " +
parentShardLeader.getNodeName() + ", required: " + neededSpace + ", available: " + freeSize);
}
}
private void cleanupAfterFailure(ZkStateReader zkStateReader, String collectionName, String parentShard,
List<String> subSlices, Set<String> offlineSlices) {
log.info("Cleaning up after a failed split of {}/{}", collectionName, parentShard);
// get the latest state
try {
zkStateReader.forceUpdateCollection(collectionName);
} catch (KeeperException | InterruptedException e) {
log.warn("Cleanup failed after failed split of {}/{} : (force update collection)", collectionName, parentShard, e);
return;
}
ClusterState clusterState = zkStateReader.getClusterState();
DocCollection coll = clusterState.getCollectionOrNull(collectionName);
if (coll == null) { // may have been deleted
return;
}
// If parent is inactive and all sub shards are active, then rolling back
// to make the parent active again will cause data loss.
if (coll.getSlice(parentShard).getState() == Slice.State.INACTIVE) {
boolean allSubSlicesActive = true;
for (String sub: subSlices) {
if (coll.getSlice(sub).getState() != Slice.State.ACTIVE) {
allSubSlicesActive = false;
break;
}
}
if (allSubSlicesActive) {
return;
}
}
// set already created sub shards states to CONSTRUCTION - this prevents them
// from entering into RECOVERY or ACTIVE (SOLR-9455)
final Map<String, Object> propMap = new HashMap<>();
boolean sendUpdateState = false;
propMap.put(Overseer.QUEUE_OPERATION, OverseerAction.UPDATESHARDSTATE.toLower());
propMap.put(ZkStateReader.COLLECTION_PROP, collectionName);
for (Slice s : coll.getSlices()) {
if (!subSlices.contains(s.getName())) {
continue;
}
propMap.put(s.getName(), Slice.State.CONSTRUCTION.toString());
sendUpdateState = true;
}
// if parent is inactive activate it again
Slice parentSlice = coll.getSlice(parentShard);
if (parentSlice.getState() == Slice.State.INACTIVE) {
sendUpdateState = true;
propMap.put(parentShard, Slice.State.ACTIVE.toString());
}
// plus any other previously deactivated slices
for (String sliceName : offlineSlices) {
propMap.put(sliceName, Slice.State.ACTIVE.toString());
sendUpdateState = true;
}
if (sendUpdateState) {
try {
ZkNodeProps m = new ZkNodeProps(propMap);
ocmh.overseer.offerStateUpdate(Utils.toJSON(m));
} catch (Exception e) {
// don't give up yet - just log the error, we may still be able to clean up
log.warn("Cleanup failed after failed split of {}/{}: (slice state changes)", collectionName, parentShard, e);
}
}
// delete existing subShards
for (String subSlice : subSlices) {
Slice s = coll.getSlice(subSlice);
if (s == null) {
continue;
}
log.debug("- sub-shard: {} exists therefore requesting its deletion", subSlice);
HashMap<String, Object> props = new HashMap<>();
props.put(Overseer.QUEUE_OPERATION, "deleteshard");
props.put(COLLECTION_PROP, collectionName);
props.put(SHARD_ID_PROP, subSlice);
ZkNodeProps m = new ZkNodeProps(props);
try {
ocmh.commandMap.get(DELETESHARD).call(clusterState, m, new NamedList<Object>());
} catch (Exception e) {
log.warn("Cleanup failed after failed split of {}/{} : (deleting existing sub shard{})", collectionName, parentShard, subSlice, e);
}
}
}
public static Slice getParentSlice(ClusterState clusterState, String collectionName, AtomicReference<String> slice, String splitKey) {
DocCollection collection = clusterState.getCollection(collectionName);
DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
Slice parentSlice;
if (slice.get() == null) {
if (router instanceof CompositeIdRouter) {
Collection<Slice> searchSlices = router.getSearchSlicesSingle(splitKey, new ModifiableSolrParams(), collection);
if (searchSlices.isEmpty()) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Unable to find an active shard for split.key: " + splitKey);
}
if (searchSlices.size() > 1) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Splitting a split.key: " + splitKey + " which spans multiple shards is not supported");
}
parentSlice = searchSlices.iterator().next();
slice.set(parentSlice.getName());
log.info("Split by route.key: {}, parent shard is: {} ", splitKey, slice);
} else {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Split by route key can only be used with CompositeIdRouter or subclass. Found router: "
+ router.getClass().getName());
}
} else {
parentSlice = collection.getSlice(slice.get());
}
if (parentSlice == null) {
// no chance of the collection being null because ClusterState#getCollection(String) would have thrown
// an exception already
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "No shard with the specified name exists: " + slice);
}
return parentSlice;
}
public static String fillRanges(SolrCloudManager cloudManager, ZkNodeProps message, DocCollection collection, Slice parentSlice,
List<DocRouter.Range> subRanges, List<String> subSlices, List<String> subShardNames,
boolean firstReplicaNrt) {
String splitKey = message.getStr("split.key");
String rangesStr = message.getStr(CoreAdminParams.RANGES);
String fuzzStr = message.getStr(CommonAdminParams.SPLIT_FUZZ, "0");
float fuzz = 0.0f;
try {
fuzz = Float.parseFloat(fuzzStr);
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Invalid numeric value of 'fuzz': " + fuzzStr);
}
DocRouter.Range range = parentSlice.getRange();
if (range == null) {
range = new PlainIdRouter().fullRange();
}
DocRouter router = collection.getRouter() != null ? collection.getRouter() : DocRouter.DEFAULT;
if (rangesStr != null) {
String[] ranges = rangesStr.split(",");
if (ranges.length == 0 || ranges.length == 1) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "There must be at least two ranges specified to split a shard");
} else {
for (int i = 0; i < ranges.length; i++) {
String r = ranges[i];
try {
subRanges.add(DocRouter.DEFAULT.fromString(r));
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Exception in parsing hexadecimal hash range: " + r, e);
}
if (!subRanges.get(i).isSubsetOf(range)) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Specified hash range: " + r + " is not a subset of parent shard's range: " + range.toString());
}
}
List<DocRouter.Range> temp = new ArrayList<>(subRanges); // copy to preserve original order
Collections.sort(temp);
if (!range.equals(new DocRouter.Range(temp.get(0).min, temp.get(temp.size() - 1).max))) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"Specified hash ranges: " + rangesStr + " do not cover the entire range of parent shard: " + range);
}
for (int i = 1; i < temp.size(); i++) {
if (temp.get(i - 1).max + 1 != temp.get(i).min) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "Specified hash ranges: " + rangesStr
+ " either overlap with each other or " + "do not cover the entire range of parent shard: " + range);
}
}
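// e.g. for a parent range 0-7fffffff, "0-3fffffff,40000000-7fffffff" passes all three
// checks (subset, full coverage, contiguity), while "0-3fffffff,50000000-7fffffff"
// leaves a gap between the two ranges and is rejected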
}
} else if (splitKey != null) {
if (router instanceof CompositeIdRouter) {
CompositeIdRouter compositeIdRouter = (CompositeIdRouter) router;
List<DocRouter.Range> tmpSubRanges = compositeIdRouter.partitionRangeByKey(splitKey, range);
if (tmpSubRanges.size() == 1) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey
+ " has a hash range that is exactly equal to hash range of shard: " + parentSlice.getName());
}
for (DocRouter.Range subRange : tmpSubRanges) {
if (subRange.min == subRange.max) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST, "The split.key: " + splitKey + " must be a compositeId");
}
}
subRanges.addAll(tmpSubRanges);
if (log.isInfoEnabled()) {
log.info("Partitioning parent shard {} range: {} yields: {}", parentSlice.getName(), parentSlice.getRange(), subRanges);
}
rangesStr = "";
for (int i = 0; i < subRanges.size(); i++) {
DocRouter.Range subRange = subRanges.get(i);
rangesStr += subRange.toString();
if (i < subRanges.size() - 1) rangesStr += ',';
}
}
} else {
int numSubShards = message.getInt(NUM_SUB_SHARDS, DEFAULT_NUM_SUB_SHARDS);
log.info("{} set at: {}", NUM_SUB_SHARDS, numSubShards);
if (numSubShards < MIN_NUM_SUB_SHARDS || numSubShards > MAX_NUM_SUB_SHARDS) {
throw new SolrException(SolrException.ErrorCode.BAD_REQUEST,
"A shard can only be split into " + MIN_NUM_SUB_SHARDS + " to " + MAX_NUM_SUB_SHARDS
+ " subshards in one split request. Provided " + NUM_SUB_SHARDS + "=" + numSubShards);
}
subRanges.addAll(router.partitionRange(numSubShards, range, fuzz));
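// e.g. for a parent covering the full 32-bit hash range and numSubShards=2 this
// typically yields two contiguous hex ranges such as 80000000-ffffffff and 0-7fffffff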
}
for (int i = 0; i < subRanges.size(); i++) {
String subSlice = parentSlice.getName() + "_" + i;
subSlices.add(subSlice);
String subShardName = Assign.buildSolrCoreName(cloudManager.getDistribStateManager(), collection, subSlice,
firstReplicaNrt ? Replica.Type.NRT : Replica.Type.TLOG);
subShardNames.add(subShardName);
}
return rangesStr;
}
public static boolean lockForSplit(SolrCloudManager cloudManager, String collection, String shard) throws Exception {
String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
final DistribStateManager stateManager = cloudManager.getDistribStateManager();
synchronized (stateManager) {
if (stateManager.hasData(path)) {
// the lock znode already exists - another split of this shard is in progress
return false;
}
Map<String, Object> map = new HashMap<>();
map.put(ZkStateReader.STATE_TIMESTAMP_PROP, String.valueOf(cloudManager.getTimeSource().getEpochTimeNs()));
byte[] data = Utils.toJSON(map);
try {
cloudManager.getDistribStateManager().makePath(path, data, CreateMode.EPHEMERAL, true);
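// the lock znode is ephemeral, so ZooKeeper removes it automatically
// if the session that created it goes away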
} catch (Exception e) {
throw new SolrException(SolrException.ErrorCode.INVALID_STATE, "Can't lock parent slice for splitting (another split operation running?): " +
collection + "/" + shard, e);
}
return true;
}
}
public static void unlockForSplit(SolrCloudManager cloudManager, String collection, String shard) throws Exception {
if (shard != null) {
String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection + "/" + shard + "-splitting";
cloudManager.getDistribStateManager().removeRecursively(path, true, true);
} else {
String path = ZkStateReader.COLLECTIONS_ZKNODE + "/" + collection;
try {
List<String> names = cloudManager.getDistribStateManager().listData(path);
for (String name : names) {
if (name.endsWith("-splitting")) {
try {
cloudManager.getDistribStateManager().removeData(path + "/" + name, -1);
} catch (NoSuchElementException nse) {
// ignore
}
}
}
} catch (NoSuchElementException nse) {
// ignore
}
}
}
}
| 1 | 39,865 | Oops.. meant to link to SplitOp here. I'll clean up in my next commit | apache-lucene-solr | java |
@@ -14,6 +14,10 @@ import android.widget.Toast;
import com.afollestad.materialdialogs.MaterialDialog;
import com.fasterxml.jackson.databind.JsonNode;
import com.firebase.jobdispatcher.JobParameters;
+
+import io.reactivex.SingleObserver;
+import io.reactivex.android.schedulers.AndroidSchedulers;
+import io.reactivex.disposables.Disposable;
import io.reactivex.schedulers.Schedulers;
import okhttp3.MediaType;
import okhttp3.OkHttpClient; | 1 | package openfoodfacts.github.scrachx.openfood.network;
import android.app.Activity;
import android.app.Dialog;
import android.app.ProgressDialog;
import android.content.Context;
import android.content.Intent;
import android.content.SharedPreferences;
import android.os.AsyncTask;
import android.os.Bundle;
import android.support.annotation.NonNull;
import android.util.Log;
import android.widget.Toast;
import com.afollestad.materialdialogs.MaterialDialog;
import com.fasterxml.jackson.databind.JsonNode;
import com.firebase.jobdispatcher.JobParameters;
import io.reactivex.schedulers.Schedulers;
import okhttp3.MediaType;
import okhttp3.OkHttpClient;
import okhttp3.RequestBody;
import okhttp3.ResponseBody;
import openfoodfacts.github.scrachx.openfood.BuildConfig;
import openfoodfacts.github.scrachx.openfood.R;
import openfoodfacts.github.scrachx.openfood.jobs.SavedProductUploadJob;
import openfoodfacts.github.scrachx.openfood.models.*;
import openfoodfacts.github.scrachx.openfood.utils.ImageUploadListener;
import openfoodfacts.github.scrachx.openfood.utils.Utils;
import openfoodfacts.github.scrachx.openfood.views.AddProductActivity;
import openfoodfacts.github.scrachx.openfood.views.product.ProductActivity;
import org.apache.commons.lang3.StringUtils;
import retrofit2.Call;
import retrofit2.Callback;
import retrofit2.Response;
import retrofit2.Retrofit;
import retrofit2.adapter.rxjava2.RxJava2CallAdapterFactory;
import retrofit2.converter.jackson.JacksonConverterFactory;
import retrofit2.converter.scalars.ScalarsConverterFactory;
import java.io.File;
import java.lang.ref.WeakReference;
import java.util.*;
import static openfoodfacts.github.scrachx.openfood.models.ProductImageField.*;
import static openfoodfacts.github.scrachx.openfood.network.OpenFoodAPIService.PRODUCT_API_COMMENT;
public class OpenFoodAPIClient {
private AllergenDao mAllergenDao;
private HistoryProductDao mHistoryProductDao;
private ToUploadProductDao mToUploadProductDao;
private OfflineUploadingTask task = new OfflineUploadingTask();
private static final JacksonConverterFactory jacksonConverterFactory = JacksonConverterFactory.create();
private DaoSession daoSession;
private static OkHttpClient httpClient = Utils.HttpClientBuilder();
private final OpenFoodAPIService apiService;
private Context mActivity;
public OpenFoodAPIClient(Activity activity) {
this(BuildConfig.HOST);
mAllergenDao = Utils.getAppDaoSession(activity).getAllergenDao();
mHistoryProductDao = Utils.getAppDaoSession(activity).getHistoryProductDao();
mToUploadProductDao = Utils.getAppDaoSession(activity).getToUploadProductDao();
mActivity = activity;
}
//used to upload in background
public OpenFoodAPIClient(Context context) {
this(BuildConfig.HOST);
daoSession = Utils.getDaoSession(context);
mToUploadProductDao = daoSession.getToUploadProductDao();
}
public OpenFoodAPIClient(Activity activity, String url) {
this(url);
mAllergenDao = Utils.getAppDaoSession(activity).getAllergenDao();
mHistoryProductDao = Utils.getAppDaoSession(activity).getHistoryProductDao();
mToUploadProductDao = Utils.getAppDaoSession(activity).getToUploadProductDao();
}
private OpenFoodAPIClient(String apiUrl) {
apiService = new Retrofit.Builder()
.baseUrl(apiUrl)
.client(httpClient)
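// converter order matters: Scalars is registered before Jackson so that endpoints
// declared to return plain String are handled as-is instead of going through the JSON mapper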
.addConverterFactory(ScalarsConverterFactory.create())
.addConverterFactory(jacksonConverterFactory)
.addCallAdapterFactory(RxJava2CallAdapterFactory.createWithScheduler(Schedulers.io()))
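// RxJava types produced by this adapter subscribe on the io() scheduler by default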
.build()
.create(OpenFoodAPIService.class);
}
/**
* Open the product activity if the barcode exists,
* and add the product to the history if it exists.
*
* @param barcode product barcode
* @param activity activity used to show dialogs and launch the product view
*/
public void getProduct(final String barcode, final Activity activity) {
apiService.getFullProductByBarcode(barcode, Utils.getUserAgent(Utils.HEADER_USER_AGENT_SEARCH)).enqueue(new Callback<State>() {
@Override
public void onResponse(@NonNull Call<State> call, @NonNull Response<State> response) {
if (activity == null || activity.isFinishing()) {
return;
}
final State s = response.body();
if (s.getStatus() == 0) {
new MaterialDialog.Builder(activity)
.title(R.string.txtDialogsTitle)
.content(R.string.txtDialogsContent)
.positiveText(R.string.txtYes)
.negativeText(R.string.txtNo)
.onPositive((dialog, which) -> {
if (!activity.isFinishing()) {
Intent intent = new Intent(activity, AddProductActivity.class);
State st = new State();
Product pd = new Product();
pd.setCode(barcode);
st.setProduct(pd);
intent.putExtra("state", st);
activity.startActivity(intent);
activity.finish();
}
})
.onNegative((dialog, which) -> activity.onBackPressed())
.show();
} else {
// execute() runs doInBackground on a worker thread; calling doInBackground directly would run it on the UI thread
new HistoryTask().execute(s.getProduct());
Intent intent = new Intent(activity, ProductActivity.class);
Bundle bundle = new Bundle();
bundle.putSerializable("state", s);
intent.putExtras(bundle);
activity.startActivity(intent);
}
}
@Override
public void onFailure(@NonNull Call<State> call, @NonNull Throwable t) {
if (activity == null || activity.isFinishing()) {
return;
}
new MaterialDialog.Builder(activity)
.title(R.string.txtDialogsTitle)
.content(R.string.txtDialogsContent)
.positiveText(R.string.txtYes)
.negativeText(R.string.txtNo)
.onPositive((dialog, which) -> {
if (!activity.isFinishing()) {
Intent intent = new Intent(activity, AddProductActivity.class);
State st = new State();
Product pd = new Product();
pd.setCode(barcode);
st.setProduct(pd);
intent.putExtra("state", st);
activity.startActivity(intent);
activity.finish();
}
})
.show();
}
});
}
public void getIngredients(String barcode, final OnIngredientListCallback ingredientListCallback) {
final ArrayList<ProductIngredient> productIngredients = new ArrayList<>();
apiService.getIngredientsByBarcode(barcode).enqueue(new Callback<JsonNode>() {
@Override
public void onResponse(@NonNull Call<JsonNode> call, @NonNull Response<JsonNode> response) {
final JsonNode node = response.body();
// guard against an empty body or a response without an "ingredients" field
final JsonNode ingredientsJsonNode = node == null ? null : node.findValue("ingredients");
if (ingredientsJsonNode == null) {
ingredientListCallback.onIngredientListResponse(false, null);
return;
}
for (int i = 0; i < ingredientsJsonNode.size(); i++) {
ProductIngredient productIngredient = new ProductIngredient();
productIngredient.setId(ingredientsJsonNode.get(i).findValue("id").toString());
productIngredient.setText(ingredientsJsonNode.get(i).findValue("text").toString());
productIngredient.setRank(Long.valueOf(ingredientsJsonNode.get(i).findValue("rank").toString()));
productIngredients.add(productIngredient);
}
ingredientListCallback.onIngredientListResponse(true, productIngredients);
}
@Override
public void onFailure(@NonNull Call<JsonNode> call, @NonNull Throwable t) {
ingredientListCallback.onIngredientListResponse(false, null);
}
});
}
public void searchProduct(final String name, final int page, final Activity activity, final OnProductsCallback productsCallback) {
apiService.searchProductByName(name, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (!response.isSuccessful()) {
productsCallback.onProductsResponse(false, null, -1);
return;
}
Search s = response.body();
if (Integer.parseInt(s.getCount()) == 0) {
productsCallback.onProductsResponse(false, null, -2);
} else {
productsCallback.onProductsResponse(true, s, Integer.parseInt(s.getCount()));
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
Toast.makeText(activity, activity.getString(R.string.errorWeb), Toast.LENGTH_LONG).show();
productsCallback.onProductsResponse(false, null, -1);
}
});
}
public void onResponseCallForPostFunction(Call<State> call, Response<State> response, Context activity, final OnProductSentCallback productSentCallback, SendProduct product) {
if (!response.isSuccessful() || response.body().getStatus() == 0) {
//lt.error();
productSentCallback.onProductSentResponse(false);
return;
}
String imguploadFront = product.getImgupload_front();
if (StringUtils.isNotEmpty(imguploadFront)) {
ProductImage image = new ProductImage(product.getBarcode(), FRONT, new File(imguploadFront));
postImg(activity, image, null);
}
String imguploadIngredients = product.getImgupload_ingredients();
if (StringUtils.isNotEmpty(imguploadIngredients)) {
postImg(activity, new ProductImage(product.getBarcode(), INGREDIENTS, new File(imguploadIngredients)), null);
}
String imguploadNutrition = product.getImgupload_nutrition();
if (StringUtils.isNotBlank(imguploadNutrition)) {
postImg(activity, new ProductImage(product.getBarcode(), NUTRITION, new File(imguploadNutrition)), null);
}
//lt.success();
productSentCallback.onProductSentResponse(true);
}
/**
* @return the underlying API service, e.g. to fetch products of a given brand directly
*/
public OpenFoodAPIService getAPIService() {
return apiService;
}
public void getBrand(final String brand, final int page, final OnBrandCallback onBrandCallback) {
apiService.getProductByBrands(brand, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(Call<Search> call, Response<Search> response) {
onBrandCallback.onBrandResponse(true, response.body());
}
@Override
public void onFailure(Call<Search> call, Throwable t) {
onBrandCallback.onBrandResponse(false, null);
}
});
}
public interface OnImagesCallback {
void onImageResponse(boolean value, String response);
}
public void getImages(String barcode, OnImagesCallback onImagesCallback) {
apiService.getProductImages(barcode).enqueue(new Callback<String>() {
@Override
public void onResponse(Call<String> call, Response<String> response) {
onImagesCallback.onImageResponse(true, response.body());
}
@Override
public void onFailure(Call<String> call, Throwable t) {
onImagesCallback.onImageResponse(false, null);
}
});
}
/**
* Uploads a product to the server.
* The conditional branches below send only the fields the user actually filled in,
* so that a product already present on the server with more complete information
* is not overwritten with empty values.
*/
public void post(final Context activity, final SendProduct product, final OnProductSentCallback productSentCallback) {
// final LoadToast lt = new LoadToast(activity);
ProgressDialog dialog = new ProgressDialog(activity, ProgressDialog.STYLE_SPINNER);
dialog.setIndeterminate(true);
dialog.setMessage(activity.getString(R.string.toastSending));
// lt.show();
if (product.getName().equals("") && product.getBrands().equals("") && product.getQuantity() == null) {
apiService.saveProductWithoutNameBrandsAndQuantity(product.getBarcode(), product.getLang(), product.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForPostFunction(call, response, activity, productSentCallback, product);
dialog.dismiss();
}
@Override
public void onFailure(Call<State> call, Throwable t) {
// lt.error();
productSentCallback.onProductSentResponse(false);
dialog.dismiss();
}
});
} else if (product.getName().equals("") && product.getBrands().equals("")) {
apiService.saveProductWithoutNameAndBrands(product.getBarcode(), product.getLang(), product.getQuantity(), product.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForPostFunction(call, response, activity, productSentCallback, product);
dialog.dismiss();
}
@Override
public void onFailure(Call<State> call, Throwable t) {
// lt.error();
productSentCallback.onProductSentResponse(false);
dialog.dismiss();
}
});
} else if (product.getName().equals("") && product.getQuantity() == null) {
apiService.saveProductWithoutNameAndQuantity(product.getBarcode(), product.getLang(), product.getBrands(), product.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForPostFunction(call, response, activity, productSentCallback, product);
dialog.dismiss();
}
@Override
public void onFailure(Call<State> call, Throwable t) {
//lt.error();
productSentCallback.onProductSentResponse(false);
dialog.dismiss();
}
});
} else if (product.getBrands().equals("") && product.getQuantity() == null) {
apiService.saveProductWithoutBrandsAndQuantity(product.getBarcode(), product.getLang(), product.getName(), product.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForPostFunction(call, response, activity, productSentCallback, product);
dialog.dismiss();
}
@Override
public void onFailure(Call<State> call, Throwable t) {
// lt.error();
productSentCallback.onProductSentResponse(false);
dialog.dismiss();
}
});
} else {
apiService.saveProduct(product.getBarcode(), product.getLang(), product.getName(), product.getBrands(), product.getQuantity(), product
.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForPostFunction(call, response, activity, productSentCallback, product);
dialog.dismiss();
}
@Override
public void onFailure(Call<State> call, Throwable t) {
// lt.error();
productSentCallback.onProductSentResponse(false);
dialog.dismiss();
}
});
}
}
public void postImg(final Context context, final ProductImage image, ImageUploadListener imageUploadListener) {
/** final LoadToast lt = new LoadToast(context);
lt.show();**/
apiService.saveImage(getUploadableMap(image, context))
.enqueue(new Callback<JsonNode>() {
@Override
public void onResponse(@NonNull Call<JsonNode> call, @NonNull Response<JsonNode> response) {
Log.d("onResponse", response.toString());
if (!response.isSuccessful()) {
ToUploadProduct product = new ToUploadProduct(image.getBarcode(), image.getFilePath(), image.getImageField().toString());
mToUploadProductDao.insertOrReplace(product);
Toast.makeText(context, response.toString(), Toast.LENGTH_LONG).show();
if(imageUploadListener != null){
imageUploadListener.onFailure(response.toString());
}
//lt.error();
return;
}
JsonNode body = response.body();
Log.d("onResponse", body.toString());
if(imageUploadListener != null){
imageUploadListener.onSuccess();
}
if (!body.isObject()) {
//lt.error();
} else if (body.get("status").asText().contains("status not ok")) {
Toast.makeText(context, body.get("error").asText(), Toast.LENGTH_LONG).show();
// lt.error();
} else {
//lt.success();
}
}
@Override
public void onFailure(@NonNull Call<JsonNode> call, @NonNull Throwable t) {
Log.d("onResponse", t.toString());
if(imageUploadListener != null){
imageUploadListener.onFailure(context.getString(R.string.uploadLater));
}
ToUploadProduct product = new ToUploadProduct(image.getBarcode(), image.getFilePath(), image.getImageField().toString());
mToUploadProductDao.insertOrReplace(product);
Toast.makeText(context, context.getString(R.string.uploadLater), Toast.LENGTH_LONG).show();
// lt.error();
}
});
}
private Map<String, RequestBody> getUploadableMap(ProductImage image, Context context) {
String lang = Locale.getDefault().getLanguage();
Map<String, RequestBody> imgMap = new HashMap<>();
imgMap.put("code", image.getCode());
imgMap.put("imagefield", image.getField());
if (image.getImguploadFront() != null)
imgMap.put("imgupload_front\"; filename=\"front_" + lang + ".png\"", image.getImguploadFront());
if (image.getImguploadIngredients() != null)
imgMap.put("imgupload_ingredients\"; filename=\"ingredients_" + lang + ".png\"", image.getImguploadIngredients());
if (image.getImguploadNutrition() != null)
imgMap.put("imgupload_nutrition\"; filename=\"nutrition_" + lang + ".png\"", image.getImguploadNutrition());
if (image.getImguploadOther() != null)
imgMap.put("imgupload_other\"; filename=\"other_" + lang + ".png\"", image.getImguploadOther());
// Attribute the upload to the connected user
final SharedPreferences settings = context.getSharedPreferences("login", 0);
final String login = settings.getString("user", "");
final String password = settings.getString("pass", "");
if (!login.isEmpty() && !password.isEmpty()) {
imgMap.put("user_id", RequestBody.create(MediaType.parse("text/plain"), login));
imgMap.put("password", RequestBody.create(MediaType.parse("text/plain"), password));
}
return imgMap;
}
public interface OnProductsCallback {
void onProductsResponse(boolean isOk, Search searchResponse, int countProducts);
}
public interface OnAllergensCallback {
void onAllergensResponse(boolean value, Search allergen);
}
public interface OnBrandCallback {
void onBrandResponse(boolean value, Search brand);
}
public interface OnStoreCallback {
void onStoreResponse(boolean value, Search store);
}
public interface OnPackagingCallback {
void onPackagingResponse(boolean value, Search packaging);
}
public interface OnAdditiveCallback {
void onAdditiveResponse(boolean value, Search brand);
}
public interface OnProductSentCallback {
void onProductSentResponse(boolean value);
}
public interface onCountryCallback {
void onCountryResponse(boolean value, Search country);
}
public interface onLabelCallback {
void onLabelResponse(boolean value, Search label);
}
public interface onCategoryCallback {
void onCategoryResponse(boolean value, Search category);
}
public interface onContributorCallback {
void onContributorResponse(boolean value, Search contributor);
}
public interface OnIngredientListCallback {
void onIngredientListResponse(boolean value, ArrayList<ProductIngredient> productIngredients);
}
/**
* Create a history product asynchronously
*/
private class HistoryTask extends AsyncTask<Product, Void, Void> {
@Override
protected Void doInBackground(Product... products) {
Product product = products[0];
List<HistoryProduct> historyProducts = mHistoryProductDao.queryBuilder().where(HistoryProductDao.Properties.Barcode.eq(product.getCode())).list();
HistoryProduct hp;
if (historyProducts.size() == 1) {
hp = historyProducts.get(0);
hp.setLastSeen(new Date());
} else {
hp = new HistoryProduct(product.getProductName(), product.getBrands(), product.getImageSmallUrl(), product.getCode(), product
.getQuantity(), product.getNutritionGradeFr());
}
mHistoryProductDao.insertOrReplace(hp);
return null;
}
}
public void uploadOfflineImages(Context context, boolean cancel, JobParameters job, SavedProductUploadJob service) {
if (!cancel) {
// an AsyncTask instance can only be executed once, so create a fresh task for each request
task = new OfflineUploadingTask();
task.job = job;
task.service = new WeakReference<>(service);
task.execute(context);
} else {
task.cancel(true);
}
}
public class OfflineUploadingTask extends AsyncTask<Context, Void, Void> {
JobParameters job;
WeakReference<SavedProductUploadJob> service;
@Override
protected Void doInBackground(Context... context) {
List<ToUploadProduct> toUploadProductList = mToUploadProductDao.queryBuilder().where(ToUploadProductDao.Properties.Uploaded.eq(false)
).list();
int totalSize = toUploadProductList.size();
for (int i = 0; i < totalSize; i++) {
ToUploadProduct uploadProduct = toUploadProductList.get(i);
File imageFile;
try {
imageFile = new File(uploadProduct.getImageFilePath());
} catch (Exception e) {
e.printStackTrace();
continue;
}
ProductImage productImage = new ProductImage(uploadProduct.getBarcode(),
uploadProduct.getProductField(), imageFile);
apiService.saveImage(getUploadableMap(productImage, context[0]))
.enqueue(new Callback<JsonNode>() {
@Override
public void onResponse(@NonNull Call<JsonNode> call, @NonNull Response<JsonNode> response) {
if (!response.isSuccessful()) {
Toast.makeText(context[0], response.toString(), Toast.LENGTH_LONG).show();
return;
}
JsonNode body = response.body();
Log.d("onResponse", body.toString());
// once the server has answered, the queued entry is stale whether the upload
// succeeded or the server reported "status not ok", so remove it in both cases
if (body.isObject()) {
mToUploadProductDao.delete(uploadProduct);
}
}
@Override
public void onFailure(@NonNull Call<JsonNode> call, @NonNull Throwable t) {
}
});
}
return null;
}
@Override
protected void onPostExecute(Void aVoid) {
super.onPostExecute(aVoid);
// the service is held via a WeakReference and may already have been garbage collected
SavedProductUploadJob uploadService = service.get();
if (uploadService != null) {
Log.d("serviceValue", uploadService.toString());
uploadService.jobFinished(job, false);
}
}
}
public void getProductsByBrand(final String brand, final int page, final OnBrandCallback onBrandCallback) {
apiService.getProductByBrands(brand, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onBrandCallback.onBrandResponse(true, response.body());
} else {
onBrandCallback.onBrandResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onBrandCallback.onBrandResponse(false, null);
}
});
}
public interface OnEditImageCallback {
void onEditResponse(boolean value, String response);
}
public void editImage(String code, Map<String, String> imgMap, OnEditImageCallback onEditImageCallback) {
apiService.editImages(code, imgMap).enqueue(new Callback<String>() {
@Override
public void onResponse(Call<String> call, Response<String> response) {
onEditImageCallback.onEditResponse(true, response.body());
}
@Override
public void onFailure(Call<String> call, Throwable t) {
onEditImageCallback.onEditResponse(false, null);
}
});
}
public void getProductsByPackaging(final String packaging, final int page, final OnPackagingCallback onPackagingCallback) {
apiService.getProductByPackaging(packaging, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onPackagingCallback.onPackagingResponse(true, response.body());
} else {
onPackagingCallback.onPackagingResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onPackagingCallback.onPackagingResponse(false, null);
}
});
}
public void syncOldHistory() {
new SyncOldHistoryTask().execute();
}
public class SyncOldHistoryTask extends AsyncTask<Void, Void, Void> {
boolean success = true;
@Override
protected Void doInBackground(Void... voids) {
List<HistoryProduct> historyProducts = mHistoryProductDao.loadAll();
int size = historyProducts.size();
for (int i = 0; i < size; i++) {
HistoryProduct historyProduct = historyProducts.get(i);
apiService.getShortProductByBarcode(historyProduct.getBarcode(), Utils.getUserAgent(Utils.HEADER_USER_AGENT_SEARCH)).enqueue(new Callback<State>() {
@Override
public void onResponse(@NonNull Call<State> call, @NonNull Response<State> response) {
final State s = response.body();
if (s.getStatus() != 0) {
Product product = s.getProduct();
HistoryProduct hp = new HistoryProduct(product.getProductName(), product.getBrands(), product.getImageSmallUrl(),
product.getCode(), product.getQuantity(), product.getNutritionGradeFr());
Log.d("syncOldHistory", hp.toString());
hp.setLastSeen(historyProduct.getLastSeen());
mHistoryProductDao.insertOrReplace(hp);
}
}
@Override
public void onFailure(@NonNull Call<State> call, @NonNull Throwable t) {
success = false;
}
});
}
return null;
}
@Override
protected void onPostExecute(Void aVoid) {
super.onPostExecute(aVoid);
if (success) {
mActivity.getSharedPreferences("prefs", 0).edit().putBoolean("is_old_history_data_synced", true).apply();
}
}
}
public void getProductsByStore(final String store, final int page, final OnStoreCallback onStoreCallback) {
apiService.getProductByStores(store, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onStoreCallback.onStoreResponse(true, response.body());
} else {
onStoreCallback.onStoreResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onStoreCallback.onStoreResponse(false, null);
}
});
}
public void getProductsByCountry(String country, final int page, final onCountryCallback onCountryCallback) {
apiService.getProductsByCountry(country, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onCountryCallback.onCountryResponse(true, response.body());
} else {
onCountryCallback.onCountryResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onCountryCallback.onCountryResponse(false, null);
}
});
}
public void getProductsByAdditive(final String additive, final int page, final OnAdditiveCallback onAdditiveCallback) {
apiService.getProductsByAdditive(additive, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onAdditiveCallback.onAdditiveResponse(true, response.body());
} else {
onAdditiveCallback.onAdditiveResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onAdditiveCallback.onAdditiveResponse(false, null);
}
});
}
public void getProductsByAllergen(final String allergen, final int page, final OnAllergensCallback onAllergensCallback) {
apiService.getProductsByAllergen(allergen, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(Call<Search> call, Response<Search> response) {
if (response.isSuccessful()) {
onAllergensCallback.onAllergensResponse(true, response.body());
} else {
onAllergensCallback.onAllergensResponse(false, null);
}
}
@Override
public void onFailure(Call<Search> call, Throwable t) {
onAllergensCallback.onAllergensResponse(false, null);
}
});
}
public void getProductsByLabel(String label, final int page, final onLabelCallback onLabelCallback) {
apiService.getProductByLabel(label, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onLabelCallback.onLabelResponse(true, response.body());
} else {
onLabelCallback.onLabelResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onLabelCallback.onLabelResponse(false, null);
}
});
}
public void getProductsByCategory(String category, final int page, final onCategoryCallback onCategoryCallback) {
apiService.getProductByCategory(category, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onCategoryCallback.onCategoryResponse(true, response.body());
} else {
onCategoryCallback.onCategoryResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onCategoryCallback.onCategoryResponse(false, null);
}
});
}
public void getProductsByContributor(String contributor, final int page, final onContributorCallback onContributorCallback) {
apiService.searchProductsByContributor(contributor, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onContributorCallback.onContributorResponse(true, response.body());
} else {
onContributorCallback.onContributorResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onContributorCallback.onContributorResponse(false, null);
}
});
}
public interface OnIncompleteCallback {
void onIncompleteResponse(boolean value, Search incompleteProducts);
}
public void getIncompleteProducts(int page, OnIncompleteCallback onIncompleteCallback) {
apiService.getIncompleteProducts(page).enqueue(new Callback<Search>() {
@Override
public void onResponse(Call<Search> call, Response<Search> response) {
if (response.isSuccessful()) {
onIncompleteCallback.onIncompleteResponse(true, response.body());
} else {
onIncompleteCallback.onIncompleteResponse(false, null);
}
}
@Override
public void onFailure(Call<Search> call, Throwable t) {
onIncompleteCallback.onIncompleteResponse(false, null);
}
});
}
public void getToBeCompletedProductsByContributor(String contributor, final int page, final onContributorCallback onContributorCallback) {
apiService.getToBeCompletedProductsByContributor(contributor, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onContributorCallback.onContributorResponse(true, response.body());
} else {
onContributorCallback.onContributorResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onContributorCallback.onContributorResponse(false, null);
}
});
}
public void getPicturesContributedProducts(String contributor, final int page, final onContributorCallback onContributorCallback) {
apiService.getPicturesContributedProducts(contributor, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onContributorCallback.onContributorResponse(true, response.body());
} else {
onContributorCallback.onContributorResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onContributorCallback.onContributorResponse(false, null);
}
});
}
public void getPicturesContributedIncompleteProducts(String contributor, final int page, final onContributorCallback onContributorCallback) {
apiService.getPicturesContributedIncompleteProducts(contributor, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onContributorCallback.onContributorResponse(true, response.body());
} else {
onContributorCallback.onContributorResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onContributorCallback.onContributorResponse(false, null);
}
});
}
public void getInfoAddedProducts(String contributor, final int page, final onContributorCallback onContributorCallback) {
apiService.getInfoAddedProducts(contributor, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onContributorCallback.onContributorResponse(true, response.body());
} else {
onContributorCallback.onContributorResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onContributorCallback.onContributorResponse(false, null);
}
});
}
public void getInfoAddedIncompleteProducts(String contributor, final int page, final onContributorCallback onContributorCallback) {
apiService.getInfoAddedIncompleteProducts(contributor, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onContributorCallback.onContributorResponse(true, response.body());
} else {
onContributorCallback.onContributorResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onContributorCallback.onContributorResponse(false, null);
}
});
}
public interface onStateCallback {
void onStateResponse(boolean value, Search state);
}
public void getProductsByStates(String state, final int page, final onStateCallback onStateCallback) {
apiService.getProductsByState(state, page).enqueue(new Callback<Search>() {
@Override
public void onResponse(@NonNull Call<Search> call, @NonNull Response<Search> response) {
if (response.isSuccessful()) {
onStateCallback.onStateResponse(true, response.body());
} else {
onStateCallback.onStateResponse(false, null);
}
}
@Override
public void onFailure(@NonNull Call<Search> call, @NonNull Throwable t) {
onStateCallback.onStateResponse(false, null);
}
});
}
/**
     * Handles the save-product response for an upload triggered from a notification,
     * then posts any locally stored product images.
*/
public void onResponseCallForNotificationPostFunction(Call<State> call, Response<State> response, Context context, final OnProductSentCallback productSentCallback, SendProduct product) {
        if (!response.isSuccessful() || response.body() == null || response.body().getStatus() == 0) {
productSentCallback.onProductSentResponse(false);
return;
}
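        // The product itself was saved; now post whichever images were stored locally.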
String imguploadFront = product.getImgupload_front();
        if (StringUtils.isNotBlank(imguploadFront)) {
ProductImage image = new ProductImage(product.getBarcode(), FRONT, new File(imguploadFront));
postImg(context, image, null);
}
String imguploadIngredients = product.getImgupload_ingredients();
        if (StringUtils.isNotBlank(imguploadIngredients)) {
postImg(context, new ProductImage(product.getBarcode(), INGREDIENTS, new File(imguploadIngredients)), null);
}
String imguploadNutrition = product.getImgupload_nutrition();
if (StringUtils.isNotBlank(imguploadNutrition)) {
postImg(context, new ProductImage(product.getBarcode(), NUTRITION, new File(imguploadNutrition)), null);
}
productSentCallback.onProductSentResponse(true);
}
/**
     * Posts a product for an upload triggered from a notification.
*/
public void postForNotification(final Context context, final SendProduct product, final OnProductSentCallback productSentCallback) {
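        // The API exposes a separate save endpoint for each combination of missing
        // optional fields (name, brands, quantity), so the branches below call the
        // endpoint matching whichever fields the user left empty.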
if (product.getName().equals("") && product.getBrands().equals("") && product.getQuantity() == null) {
apiService.saveProductWithoutNameBrandsAndQuantity(product.getBarcode(), product.getLang(), product.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForNotificationPostFunction(call, response, context, productSentCallback, product);
}
@Override
public void onFailure(Call<State> call, Throwable t) {
productSentCallback.onProductSentResponse(false);
}
});
} else if (product.getName().equals("") && product.getBrands().equals("")) {
apiService.saveProductWithoutNameAndBrands(product.getBarcode(), product.getLang(), product.getQuantity(), product.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForNotificationPostFunction(call, response, context, productSentCallback, product);
}
@Override
public void onFailure(Call<State> call, Throwable t) {
productSentCallback.onProductSentResponse(false);
}
});
} else if (product.getName().equals("") && product.getQuantity() == null) {
apiService.saveProductWithoutNameAndQuantity(product.getBarcode(), product.getLang(), product.getBrands(), product.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForNotificationPostFunction(call, response, context, productSentCallback, product);
}
@Override
public void onFailure(Call<State> call, Throwable t) {
productSentCallback.onProductSentResponse(false);
}
});
} else if (product.getBrands().equals("") && product.getQuantity() == null) {
apiService.saveProductWithoutBrandsAndQuantity(product.getBarcode(), product.getLang(), product.getName(), product.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForNotificationPostFunction(call, response, context, productSentCallback, product);
}
@Override
public void onFailure(Call<State> call, Throwable t) {
productSentCallback.onProductSentResponse(false);
}
});
} else {
apiService.saveProduct(product.getBarcode(), product.getLang(), product.getName(), product.getBrands(), product.getQuantity(), product
.getUserId(), product.getPassword(), PRODUCT_API_COMMENT).enqueue(new Callback<State>() {
@Override
public void onResponse(Call<State> call, Response<State> response) {
onResponseCallForNotificationPostFunction(call, response, context, productSentCallback, product);
}
@Override
public void onFailure(Call<State> call, Throwable t) {
productSentCallback.onProductSentResponse(false);
}
});
}
}
}
| 1 | 66,411 | Remove these unnecessary imports that you've added. | openfoodfacts-openfoodfacts-androidapp | java |
@@ -28,8 +28,8 @@ from qutebrowser.utils import usertypes
# Note this has entries for success/error/warn from widgets.webview:LoadStatus
-UrlType = usertypes.enum('UrlType', ['success', 'error', 'warn', 'hover',
- 'normal'])
+UrlType = usertypes.enum('UrlType', ['success', 'success_https', ' error',
+ 'warn', 'hover', 'normal'])
class UrlText(textbase.TextBase): | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2014-2015 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""URL displayed in the statusbar."""
from PyQt5.QtCore import pyqtSlot, pyqtProperty, Qt
from qutebrowser.browser import webview
from qutebrowser.mainwindow.statusbar import textbase
from qutebrowser.config import style
from qutebrowser.utils import usertypes
# Note this has entries for success/error/warn from widgets.webview:LoadStatus
UrlType = usertypes.enum('UrlType', ['success', 'error', 'warn', 'hover',
'normal'])
class UrlText(textbase.TextBase):
"""URL displayed in the statusbar.
Attributes:
        _normal_url: The normal URL to be displayed, as a string.
_normal_url_type: The type of the normal URL as a UrlType instance.
_hover_url: The URL we're currently hovering over.
_ssl_errors: Whether SSL errors occurred while loading.
Class attributes:
        _urltype: The URL type to show currently
                  (normal/success/error/warn/hover).
Accessed via the urltype property.
For some reason we need to have this as class attribute so
pyqtProperty works correctly.
"""
_urltype = None
STYLESHEET = """
QLabel#UrlText[urltype="normal"] {
{{ color['statusbar.url.fg'] }}
}
QLabel#UrlText[urltype="success"] {
{{ color['statusbar.url.fg.success'] }}
}
QLabel#UrlText[urltype="error"] {
{{ color['statusbar.url.fg.error'] }}
}
QLabel#UrlText[urltype="warn"] {
{{ color['statusbar.url.fg.warn'] }}
}
QLabel#UrlText[urltype="hover"] {
{{ color['statusbar.url.fg.hover'] }}
}
"""
def __init__(self, parent=None):
"""Override TextBase.__init__ to elide in the middle by default."""
super().__init__(parent, Qt.ElideMiddle)
self.setObjectName(self.__class__.__name__)
style.set_register_stylesheet(self)
self._hover_url = None
self._normal_url = None
self._normal_url_type = UrlType.normal
@pyqtProperty(str)
def urltype(self):
"""Getter for self.urltype, so it can be used as Qt property.
Return:
The urltype as a string (!)
"""
if self._urltype is None:
return ""
else:
return self._urltype.name
def _update_url(self):
"""Update the displayed URL if the url or the hover url changed."""
if self._hover_url is not None:
self.setText(self._hover_url)
self._urltype = UrlType.hover
elif self._normal_url is not None:
self.setText(self._normal_url)
self._urltype = self._normal_url_type
else:
self.setText('')
self._urltype = UrlType.normal
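        # Re-apply the stylesheet so Qt re-evaluates the [urltype="..."]
        # selectors against the updated property value.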
self.setStyleSheet(style.get_stylesheet(self.STYLESHEET))
@pyqtSlot(str)
def on_load_status_changed(self, status_str):
"""Slot for load_status_changed. Sets URL color accordingly.
Args:
status_str: The LoadStatus as string.
"""
status = webview.LoadStatus[status_str]
if status in (webview.LoadStatus.success, webview.LoadStatus.error,
webview.LoadStatus.warn):
self._normal_url_type = UrlType[status_str]
else:
self._normal_url_type = UrlType.normal
self._update_url()
@pyqtSlot(str)
def set_url(self, s):
"""Setter to be used as a Qt slot.
Args:
s: The URL to set as string.
"""
self._normal_url = s
self._normal_url_type = UrlType.normal
self._update_url()
@pyqtSlot(str, str, str)
def set_hover_url(self, link, _title, _text):
"""Setter to be used as a Qt slot.
        Stores the hovered link in self._hover_url, and restores the normal
        URL when the link is "un-hovered", i.e. when this gets called with
        empty parameters.
Args:
link: The link which was hovered (string)
_title: The title of the hovered link (string)
_text: The text of the hovered link (string)
"""
if link:
self._hover_url = link
else:
self._hover_url = None
self._update_url()
@pyqtSlot(int)
def on_tab_changed(self, tab):
"""Update URL if the tab changed."""
self._hover_url = None
self._normal_url = tab.cur_url.toDisplayString()
self.on_load_status_changed(tab.load_status.name)
self._update_url()
| 1 | 13,759 | That space shouldn't be here :wink: This lead to an exception whenever a page with an error was loaded, e.g. an inexistent host - I just fixed that :smile: | qutebrowser-qutebrowser | py |
@@ -735,7 +735,7 @@ class TLSServerAutomaton(Automaton):
self.local_port)
return
self.socket, addr = s.accept()
- self.remote_ip, self.remote_port = addr
+ self.remote_ip, self.remote_port = addr, self.local_port
raise self.WAITING_FOR_ClientHello()
| 1 | ## This file is part of Scapy
## Copyright (C) 2007, 2008, 2009 Arnaud Ebalard
## 2015, 2016 Maxence Tury
## This program is published under a GPLv2 license
"""
TLS automatons. This makes for a primitive TLS stack.
SSLv3 is not guaranteed, and SSLv2 is not supported.
Obviously you need rights for network access.
Launch a server on tcp/4433:
from scapy.all import *
t = TLSServerAutomaton(mycert='<cert.pem>', mykey='<key.pem>')
t.run()
Launch a client to tcp/50000 with one cipher suite of your choice:
from scapy.all import *
ch = TLSClientHello(ciphers=<int code of the cipher suite>)
t = TLSClientAutomaton(dport=50000, client_hello=ch)
t.run()
"""
import socket
import struct
from scapy.error import warning
from scapy.automaton import Automaton, ATMT
from scapy.layers.tls.cert import Cert, PrivKey, PrivKeyRSA, PrivKeyECDSA
from scapy.layers.tls.basefields import _tls_version
from scapy.layers.tls.session import tlsSession
from scapy.layers.tls.handshake import *
from scapy.layers.tls.record import (TLS, TLSAlert, TLSChangeCipherSpec,
TLSApplicationData)
from scapy.layers.tls.crypto.suites import (_tls_cipher_suites_cls,
_tls_cipher_suites,
get_usable_ciphersuites)
###############################################################################
### Client automaton ###
###############################################################################
class TLSClientAutomaton(Automaton):
"""
The TLS client automaton.
- server : default value is '127.0.0.1';
- dport : default value is 4433;
- server_name : default value is None;
- mycert : optional when there is no client authentication;
- mykey : optional when there is no client authentication;
- client_hello : optional definition of the ClientHello to be sent to the
server, this is faster than automaton overloading and enables quick
cipher suite choice (make sure it is usable, though);
- data : optional application_data to be sent after the handshake, if this
is not defined we send a simple GET request.
"""
def parse_args(self, server="127.0.0.1", dport=4433,
server_name=None, mycert=None, mykey=None,
client_hello=None, data=None, **kargs):
Automaton.parse_args(self, **kargs)
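        # Resolve the target: if 'server' is not a literal IPv4/IPv6 address,
        # resolve it as a hostname and remember the name.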
tmp = socket.getaddrinfo(server, dport)
self.remote_name = None
try:
if ':' in server:
socket.inet_pton(socket.AF_INET6, server)
else:
socket.inet_pton(socket.AF_INET, server)
except:
self.remote_name = socket.getfqdn(server)
if self.remote_name != server:
tmp = socket.getaddrinfo(self.remote_name, dport)
if server_name:
self.remote_name = server_name
self.remote_family = tmp[0][0]
self.remote_ip = tmp[0][4][0]
self.remote_port = dport
self.local_ip = None
self.local_port = None
self.cur_pkt = None
self.cur_session = None
self.msg_list = []
self.remain = ""
self.socket = None
self.cert_req = None
self.client_hello = client_hello
self.data = data
if mycert and mykey:
self.mycert = Cert(mycert)
self.mykey = PrivKey(mykey)
else:
self.mycert = None
self.mykey = None
def get_next_msg(self, socket_timeout=5, retry=5):
"""
The purpose of the function is to make next message(s) available in
self.msg_list. If the list is not empty, nothing is done. If not, in
order to fill it, the function uses the data already available in
self.remain from a previous call and waits till there are enough to
dissect a TLS packet (expected length is in the 5 first bytes of the
packet). Once dissected, the content of the TLS packet (carried
messages) is appended to self.msg_list.
We have to grab enough data to dissect a TLS packet, i.e. at least
5 bytes in order to access the expected length of the TLS packet.
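        For instance, a hypothetical record header "\x16\x03\x03\x00\x2a"
        announces a 42-byte payload, since
        struct.unpack('!H', "\x16\x03\x03\x00\x2a"[3:5])[0] + 5 == 47,
        i.e. 5 header bytes plus 42 payload bytes must be grabbed.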
"""
if self.msg_list: # a message is already available
return
self.socket.settimeout(socket_timeout)
grablen = 5
while retry and (grablen == 5 or len(self.remain) < grablen):
if grablen == 5 and len(self.remain) >= 5:
grablen = struct.unpack('!H', self.remain[3:5])[0] + 5
if grablen == len(self.remain):
break
try:
tmp = self.socket.recv(grablen - len(self.remain))
if not tmp:
retry -= 1
else:
self.remain += tmp
except:
retry -= 1
        if len(self.remain) < 5 or len(self.remain) != grablen:
# Remote peer is not willing to respond
return
# Instantiate the TLS packet (record header only, at this point)
p = TLS(self.remain, tls_session=self.cur_session)
self.cur_session = p.tls_session
self.remain = ""
self.msg_list += p.msg
while p.payload:
if isinstance(p.payload, Raw):
self.remain += p.payload.load
p = p.payload
elif isinstance(p.payload, TLS):
p = p.payload
self.msg_list += p.msg
@ATMT.state(initial=True)
def INITIAL(self):
raise self.INIT_TLS_SESSION()
@ATMT.state()
def INIT_TLS_SESSION(self):
self.cur_session = tlsSession()
self.cur_session.client_certs = self.mycert
self.cur_session.client_key = self.mykey
raise self.CONNECT()
@ATMT.state()
def CONNECT(self):
s = socket.socket(self.remote_family, socket.SOCK_STREAM)
s.connect((self.remote_ip, self.remote_port))
self.socket = s
self.local_ip, self.local_port = self.socket.getsockname()[:2]
raise self.PREPARE_FIRST_PKT()
@ATMT.state()
def PREPARE_FIRST_PKT(self):
self.cur_pkt = TLS(tls_session=self.cur_session)
@ATMT.condition(PREPARE_FIRST_PKT)
def should_add_ClientHello(self):
raise self.ADDED_ClientHello()
@ATMT.action(should_add_ClientHello, prio=1)
def add_ClientHello(self):
"""
Default TLSClientHello() offers only TLS_DHE_RSA_WITH_AES_128_CBC_SHA.
For fast server testing, typical alternatives (DHE only, RSAkx with CBC
only, ECDHE with appropriate extensions) may be found in tls.uts,
and then brought here through the client_hello argument.
"""
p = self.client_hello or TLSClientHello()
self.cur_pkt.msg.append(p)
@ATMT.state()
def ADDED_ClientHello(self):
pass
@ATMT.condition(ADDED_ClientHello)
def should_send_ClientHello(self):
raise self.SENT_ClientHello()
@ATMT.action(should_send_ClientHello, prio=1)
def send_ClientHello(self):
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
@ATMT.state()
def SENT_ClientHello(self):
raise self.WAITING_FOR_ServerHello()
@ATMT.state()
def WAITING_FOR_ServerHello(self):
self.get_next_msg()
raise self.PREPROCESS_ServerHello()
@ATMT.state()
def PREPROCESS_ServerHello(self):
pass
@ATMT.condition(PREPROCESS_ServerHello, prio=1)
def should_HANDLE_ServerHello(self):
"""
XXX We should check the ServerHello attributes for discrepancies with
our own ClientHello.
"""
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSServerHello)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_SH()
@ATMT.state()
def HANDLE_SH(self):
pass
@ATMT.condition(PREPROCESS_ServerHello, prio=2)
def missing_server_hello(self):
raise self.MISSING_SH()
@ATMT.state(final=True)
def MISSING_SH(self):
print "Missing TLS Server Hello message"
@ATMT.condition(HANDLE_SH, prio=1)
def should_HANDLE_CERT(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSCertificate)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_CERT()
@ATMT.state()
def HANDLE_CERT(self):
pass
@ATMT.condition(HANDLE_SH, prio=2)
def missing_certificate(self):
raise self.MISSING_CERT()
@ATMT.state(final=True)
def MISSING_CERT(self):
print "Missing TLS Certificate message"
@ATMT.state()
def HANDLE_CERT_REQ(self):
pass
@ATMT.condition(HANDLE_CERT, prio=1)
def should_HANDLE_SKE_from_CERT(self):
"""
XXX We should check the ServerKeyExchange attributes for discrepancies
with our own ClientHello, along with the ServerHello and Certificate.
"""
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSServerKeyExchange)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_SKE()
@ATMT.state(final=True)
def MISSING_SKE(self):
pass
@ATMT.condition(HANDLE_CERT, prio=2)
def expected_server_key_exchange(self):
if self.cur_session.prcs.key_exchange.server_kx_msg_cls:
# Should have received a SKE
raise self.MISSING_SKE()
@ATMT.state()
def HANDLE_SKE(self):
# XXX Move that refill code somewhere else
self.get_next_msg()
@ATMT.condition(HANDLE_SKE, prio=2)
def should_HANDLE_CERT_REQ_from_SKE(self):
        """
        XXX We should check the CertificateRequest attributes for discrepancies
        with the cipher suite, etc.
        """
        self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSCertificateRequest)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
self.cert_req = p
raise self.HANDLE_CERT_REQ()
@ATMT.condition(HANDLE_CERT, prio=3)
def should_HANDLE_CERT_REQ(self):
"""
XXX We should check the CertificateRequest attributes for discrepancies
with the cipher suite, etc.
"""
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSCertificateRequest)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
self.cert_req = p
raise self.HANDLE_CERT_REQ()
@ATMT.condition(HANDLE_SKE, prio=1)
def should_HANDLE_SHD(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSServerHelloDone)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_SHD()
@ATMT.condition(HANDLE_CERT_REQ, prio=4)
def should_HANDLE_SHD_from_CERT_REQ(self):
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSServerHelloDone)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_SHD()
@ATMT.condition(HANDLE_CERT)
def should_HANDLE_SHD_from_CERT(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSServerHelloDone)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_SHD()
@ATMT.state()
def HANDLE_SHD(self):
raise self.PREPARE_PKT2()
# Second packet sent by us
@ATMT.state()
def PREPARE_PKT2(self):
pass
@ATMT.condition(PREPARE_PKT2, prio=1)
def should_ADD_CLIENT_CERT(self):
"""
If the server sent a CertificateRequest, we send a Certificate message.
If no certificate is available, an empty Certificate message is sent:
- this is a SHOULD in RFC 4346 (Section 7.4.6)
- this is a MUST in RFC 5246 (Section 7.4.6)
XXX We may want to add a complete chain.
"""
if not self.cert_req:
return
certs = []
if self.mycert:
certs = [self.mycert]
self.cur_pkt = TLS(tls_session=self.cur_session, msg=[])
p = TLSCertificate(certs=certs)
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.ADD_CLIENT_CERT()
@ATMT.state()
def ADD_CLIENT_CERT(self):
pass
@ATMT.condition(PREPARE_PKT2, prio=2)
def should_ADD_CKE_from_PREPARE_PKT2(self):
self.cur_pkt = TLS(tls_session=self.cur_session, msg=[])
p = TLSClientKeyExchange()
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.ADD_CKE()
@ATMT.condition(ADD_CLIENT_CERT, prio=2)
def should_ADD_CKE_from_ADD_CLIENT_CERT(self):
self.cur_pkt = TLS(tls_session=self.cur_session, msg=[])
p = TLSClientKeyExchange()
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.ADD_CKE()
@ATMT.state()
def ADD_CKE(self):
pass
@ATMT.condition(ADD_CKE, prio=1)
def should_ADD_CV_from_ADD_CKE(self):
"""
XXX Section 7.4.7.1 of RFC 5246 states that the CertificateVerify
message is only sent following a client certificate that has signing
capability (i.e. not those containing fixed DH params).
We should verify that before adding the message. We should also handle
the case when the Certificate message was empty.
"""
if (not self.cert_req or
self.mycert is None or
self.mykey is None):
return
self.cur_pkt = TLS(tls_session=self.cur_session, msg=[])
p = TLSCertificateVerify()
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.ADD_CV()
@ATMT.state()
def ADD_CV(self):
pass
@ATMT.condition(ADD_CV)
def should_ADD_CCS_from_ADD_CV(self):
self.cur_pkt = TLS(type=20, tls_session=self.cur_session, msg=[])
p = TLSChangeCipherSpec()
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.ADD_CCS()
@ATMT.condition(ADD_CKE, prio=2)
def should_ADD_CCS_from_ADD_CKE(self):
self.cur_pkt = TLS(type=20, tls_session=self.cur_session, msg=[])
p = TLSChangeCipherSpec()
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.ADD_CCS()
@ATMT.state()
def ADD_CCS(self):
pass
@ATMT.condition(ADD_CCS)
def should_ADD_FINISHED(self):
self.cur_pkt = TLS(tls_session=self.cur_session, msg=[])
p = TLSFinished()
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.ADD_FINISHED()
@ATMT.state()
def ADD_FINISHED(self):
pass
@ATMT.condition(ADD_FINISHED)
def should_SEND_SECOND_PKT(self):
raise self.SEND_SECOND_PKT()
@ATMT.state()
def SEND_SECOND_PKT(self):
raise self.WAIT_FOR_RESP2()
@ATMT.state()
def WAIT_FOR_RESP2(self):
self.socket.settimeout(10)
s = self.socket.recv(100000)
p = TLS(s, tls_session=self.cur_session)
self.msg_list = p.msg
while p.payload:
if isinstance(p.payload, Raw):
self.remain += p.payload.load
p = p.payload
elif isinstance(p.payload, TLS):
p = p.payload
self.msg_list += p.msg
raise self.PREPROCESS_RESP2()
# Second response from the server
@ATMT.state()
def PREPROCESS_RESP2(self):
pass
@ATMT.condition(PREPROCESS_RESP2)
def should_HANDLE_CCS(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSChangeCipherSpec)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_CCS()
@ATMT.state()
def HANDLE_CCS(self):
pass
@ATMT.condition(HANDLE_CCS)
def should_HANDLE_FINISHED(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSFinished)):
return
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_FINISHED()
@ATMT.state()
def HANDLE_FINISHED(self):
pass
@ATMT.condition(HANDLE_FINISHED)
def should_test_connection(self):
raise self.TESTED_CONNECTION()
@ATMT.action(should_test_connection, prio=1)
def send_recv_data(self):
"""
XXX No live input from the user ; one unique send for now.
XXX We might want not to send any ApplicationData message.
XXX We do not wait very long for server answer.
"""
        txt = self.data or "GET /\r\n\r\n"  # or e.g. "GET / HTTP/1.1\r\n\r\n"
p = TLS(type=23, tls_session=self.cur_session, msg=[Raw(load=txt)])
self.socket.send(str(p))
print "Sent to server: \n%r" % txt
self.get_next_msg(1, 0)
if self.msg_list:
p = self.msg_list[0]
self.msg_list = self.msg_list[1:]
if isinstance(p, Raw):
print "Received from server: \n%s" % p.load
else:
print "Received from server: \n%s" % p
@ATMT.state()
def TESTED_CONNECTION(self):
pass
@ATMT.condition(TESTED_CONNECTION)
def should_close_session(self):
raise self.CLOSED_TLS_SESSION()
@ATMT.action(should_close_session, prio=1)
def close_session(self):
"""
We end the session properly after 2 seconds,
with a TLS Alert (warning, close_notify).
"""
time.sleep(2)
self.cur_pkt = TLS(type=21, msg=[], tls_session=self.cur_session)
p = TLSAlert(level=1, descr=0)
self.cur_pkt.msg.append(p)
try:
self.socket.send(str(self.cur_pkt))
except:
print "Could not send termination Alert (maybe the server stopped)"
self.cur_pkt = None
@ATMT.state()
def CLOSED_TLS_SESSION(self):
raise self.FINAL()
@ATMT.state(final=True)
def FINAL(self):
"""
We might call shutdown, but it may happen that the server
did not wait for us to shutdown after answering our data query.
#self.socket.shutdown(1)
"""
self.socket.close()
###############################################################################
### Server automaton ###
###############################################################################
class TLSServerAutomaton(Automaton):
"""
    The TLS server automaton.
- server : default value is '127.0.0.1';
- sport : default value is 4433;
    - mycert : the certificate to present to clients (required);
    - mykey : the associated private key (required);
- preferred_ciphersuite : optional cipher suite to be selected should the
client offer it through its ClientHello.
"""
def parse_args(self, server="127.0.0.1", sport=4433,
mycert=None, mykey=None,
preferred_ciphersuite=None, **kargs):
Automaton.parse_args(self, **kargs)
self.mycert = Cert(mycert)
self.mykey = PrivKey(mykey)
try:
if ':' in server:
socket.inet_pton(socket.AF_INET6, server)
else:
socket.inet_pton(socket.AF_INET, server)
tmp = socket.getaddrinfo(server, sport)
except:
tmp = socket.getaddrinfo(socket.getfqdn(server), sport)
self.ip_family = tmp[0][0]
self.local_ip = tmp[0][4][0]
self.local_port = sport
self.remote_ip = None
self.remote_port = None
self.cur_pkt = None
self.cur_session = None
self.msg_list = []
self.remain = ""
self.socket = None
self.cert_req = None
self.preferred_ciphersuite = preferred_ciphersuite
def get_next_msg(self):
"""
The purpose of the function is to make next message(s) available
in self.msg_list. If the list is not empty, nothing is done. If
not, in order to fill it, the function uses the data already
available in self.remain from a previous call and waits till there
are enough to dissect a TLS packet (expected length is in the 5
first bytes of the packet). Once dissected, the content of the
TLS packet (carried messages) is appended to self.msg_list.
We have to grab enough data to dissect a TLS packet, i.e. at least
5 bytes in order to access the expected length of the TLS packet.
"""
if self.msg_list: # a message is already available
return
self.socket.settimeout(5)
retry = 5
grablen = 5
while retry and (grablen == 5 or len(self.remain) < grablen):
if grablen == 5 and len(self.remain) >= 5:
grablen = struct.unpack('!H', self.remain[3:5])[0] + 5
if grablen == len(self.remain):
break
try:
tmp = self.socket.recv(grablen - len(self.remain))
if not tmp:
retry -= 1
else:
self.remain += tmp
except:
retry -= 1
        if len(self.remain) < 5 or len(self.remain) != grablen:
# Remote peer is not willing to respond
return
# Instantiate TLS packet (record header only, at this point)
p = TLS(self.remain, tls_session=self.cur_session)
self.cur_session = p.tls_session
self.remain = ""
self.msg_list += p.msg
while p.payload:
if isinstance(p.payload, Raw):
self.remain += p.payload.load
p = p.payload
elif isinstance(p.payload, TLS):
p = p.payload
self.msg_list += p.msg
@ATMT.state(initial=True)
def INITIAL(self):
raise self.INIT_TLS_SESSION()
@ATMT.state()
def INIT_TLS_SESSION(self):
"""
XXX We should offer the right key according to the client's suites. For
now server_rsa_key is only used for RSAkx, but we should try to replace
every server_key with both server_rsa_key and server_ecdsa_key.
"""
self.cur_session = tlsSession(connection_end="server")
self.cur_session.server_certs = [self.mycert]
self.cur_session.server_key = self.mykey
if isinstance(self.mykey, PrivKeyRSA):
self.cur_session.server_rsa_key = self.mykey
#elif isinstance(self.mykey, PrivKeyECDSA):
# self.cur_session.server_ecdsa_key = self.mykey
raise self.BIND_AND_WAIT()
@ATMT.state()
def BIND_AND_WAIT(self):
s = socket.socket(self.ip_family, socket.SOCK_STREAM)
s.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
s.bind((self.local_ip, self.local_port))
s.listen(1)
except:
print "Unable to bind on address %s and port %d" % (self.local_ip,
self.local_port)
return
self.socket, addr = s.accept()
self.remote_ip, self.remote_port = addr
raise self.WAITING_FOR_ClientHello()
@ATMT.state()
def WAITING_FOR_ClientHello(self):
self.get_next_msg()
raise self.PREPROCESS_ClientHello()
@ATMT.state()
def PREPROCESS_ClientHello(self):
pass
@ATMT.condition(PREPROCESS_ClientHello, prio=1)
def should_HANDLE_ClientHello(self):
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSClientHello)):
return
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_CH()
@ATMT.state()
def HANDLE_CH(self):
pass
@ATMT.condition(HANDLE_CH, prio=1)
def should_NO_USABLE_CIPHERSUITE(self):
"""
We extract cipher suites candidates from the client's proposition.
"""
l = self.cur_pkt.ciphers
if isinstance(self.mykey, PrivKeyRSA):
kx = "RSA"
elif isinstance(self.mykey, PrivKeyECDSA):
kx = "ECDSA"
l = get_usable_ciphersuites(l, kx)
if l:
return
raise self.NO_USABLE_CIPHERSUITE()
@ATMT.state(final=True)
def NO_USABLE_CIPHERSUITE(self):
"""
If there is no available cipher suite, close the session with an Alert.
"""
print "No usable cipher suite, closing connection"
self.cur_pkt = TLS(type=21, msg=[], tls_session=self.cur_session)
p = TLSAlert(level=1, descr=0)
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
@ATMT.condition(PREPROCESS_ClientHello, prio=2)
def missing_client_hello(self):
raise self.MISSING_CH()
@ATMT.state(final=True)
def MISSING_CH(self):
print "Missing TLS Client Hello message"
@ATMT.condition(HANDLE_CH, prio=2)
def should_REPLY_TO_CH(self):
"""
XXX Several enhancements needed here.
Selecting a cipher suite should be no trouble as we already caught the
None case previously. However, regarding the protocol version, we
might want to try resending a ClientHello when the advertised
version is not deemed satisfying.
Then, the sending of ServerHello, Certificate, ServerKeyExchange and
ServerHelloDone should be split into multiple states, in order for the
user to overload only the ones he's interested in.
Also, we do not manage extensions at all.
"""
if isinstance(self.mykey, PrivKeyRSA):
kx = "RSA"
elif isinstance(self.mykey, PrivKeyECDSA):
kx = "ECDSA"
usable_suites = get_usable_ciphersuites(self.cur_pkt.ciphers, kx)
c = usable_suites[0]
if self.preferred_ciphersuite in usable_suites:
c = self.preferred_ciphersuite
comp = 0
if self.cur_pkt.comp and 1 in self.cur_pkt.comp:
comp = 1
self.cur_session.advertised_tls_version = self.cur_pkt.version
self.cur_session.tls_version = self.cur_pkt.version
#XXX there should be some checks on this version from the ClientHello
v = self.cur_session.tls_version
print "\nVersion: " + _tls_version[v]
print "Cipher suite: " + _tls_cipher_suites[c]
self.cur_pkt = TLS(tls_session=self.cur_session, msg=[])
p = TLSServerHello(cipher=c, comp=[comp])
self.cur_pkt.msg.append(p)
p = TLSCertificate(certs=self.cur_session.server_certs)
self.cur_pkt.msg.append(p)
if not _tls_cipher_suites_cls[c].kx_alg.no_ske:
p = TLSServerKeyExchange()
self.cur_pkt.msg.append(p)
p = TLSServerHelloDone()
self.cur_pkt.msg.append(p)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.SENT_SH()
@ATMT.state()
def SENT_SH(self):
pass
@ATMT.condition(SENT_SH, prio=1)
def should_HANDLE_CKE(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSClientKeyExchange)):
return
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_CKE()
@ATMT.state()
def HANDLE_CKE(self):
pass
@ATMT.condition(SENT_SH, prio=2)
def should_HANDLE_ALERT_INSTEAD_OF_CKE(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSAlert)):
return
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_ALERT_INSTEAD_OF_CKE()
@ATMT.state()
def HANDLE_ALERT_INSTEAD_OF_CKE(self):
print "Received Alert message instead of CKE"
@ATMT.condition(SENT_SH, prio=3)
def should_HANDLE_MISSING_CKE(self):
raise self.HANDLE_MISSING_CKE()
@ATMT.state()
def HANDLE_MISSING_CKE(self):
print "Missing CKE in client's reply"
@ATMT.condition(HANDLE_CKE, prio=1)
def should_HANDLE_CCS(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSChangeCipherSpec)):
return
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_CCS()
@ATMT.state()
def HANDLE_CCS(self):
pass
@ATMT.condition(HANDLE_CKE, prio=2)
def should_HANDLE_ALERT_INSTEAD_OF_CCS(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSAlert)):
return
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_ALERT_INSTEAD_OF_CCS()
@ATMT.state()
def HANDLE_ALERT_INSTEAD_OF_CCS(self):
print "Received Alert message instead of CCS"
@ATMT.condition(HANDLE_CKE, prio=3)
def should_HANDLE_MISSING_CCS(self):
raise self.HANDLE_MISSING_CCS()
@ATMT.state()
def HANDLE_MISSING_CCS(self):
print "Missing CCS in client's reply"
@ATMT.condition(HANDLE_CCS, prio=1)
def should_HANDLE_Finished(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSFinished)):
return
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_FINISHED()
@ATMT.state()
def HANDLE_FINISHED(self):
pass
@ATMT.condition(HANDLE_CCS, prio=2)
def should_HANDLE_ALERT_INSTEAD_OF_Finished(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSAlert)):
return
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
raise self.HANDLE_ALERT_INSTEAD_OF_FINISHED()
@ATMT.state()
def HANDLE_ALERT_INSTEAD_OF_FINISHED(self):
print "Received Alert message instead of Finished"
@ATMT.condition(HANDLE_CCS, prio=3)
def should_HANDLE_MISSING_FINISHED(self):
raise self.HANDLE_MISSING_FINISHED()
@ATMT.state()
def HANDLE_MISSING_FINISHED(self):
print "Missing Finished in client's reply"
@ATMT.condition(HANDLE_FINISHED, prio=1)
def should_SEND_CCS(self):
ccs = TLSChangeCipherSpec()
self.cur_pkt = TLS(type=20, msg=[ccs], tls_session=self.cur_session)
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.SEND_CCS()
@ATMT.state()
def SEND_CCS(self):
pass
@ATMT.condition(SEND_CCS, prio=2)
def should_SEND_FINISHED(self):
p = TLSFinished()
self.cur_pkt = TLS(tls_session=self.cur_session, msg=[p])
self.socket.send(str(self.cur_pkt))
self.cur_pkt = None
raise self.FINISHED_SENT()
@ATMT.state()
def FINISHED_SENT(self):
pass
@ATMT.condition(FINISHED_SENT, prio=0)
def should_HANDLE_NO_CLIENT(self):
self.get_next_msg()
if self.msg_list:
return
print "Client left. Closing connection..."
raise self.FINAL()
@ATMT.condition(FINISHED_SENT, prio=1)
def should_HANDLE_ALERT_FROM_FINISHED(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSAlert)):
return
raise self.HANDLE_ALERT_FROM_FINISHED_SENT()
@ATMT.state()
def HANDLE_ALERT_FROM_FINISHED_SENT(self):
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
print "Received Alert Message after sending Finished"
print "Closing connection"
#XXX no support for new connections, for now
raise self.FINAL()
@ATMT.condition(FINISHED_SENT, prio=2)
def should_WAIT_DATA(self):
self.get_next_msg()
if self.msg_list:
return
# Client did not send anything, let's wait
raise self.FINISHED_SENT()
@ATMT.condition(FINISHED_SENT, prio=3)
def should_PROCESS_DATA(self):
self.get_next_msg()
if (not self.msg_list or
not isinstance(self.msg_list[0], TLSApplicationData)):
return
raise self.PROCESS_DATA()
@ATMT.state()
def PROCESS_DATA(self):
"""
In the beginning, we return a small page with useful information.
Then, we act as an echo server.
"""
self.cur_pkt = self.msg_list[0]
self.msg_list = self.msg_list[1:]
recv_data = self.cur_pkt.data
print "Received %s" % repr(recv_data)
if recv_data.startswith("GET / HTTP/1."):
header = "HTTP/1.1 200 OK\r\n"
header += "Server: Scapy TLS Extension\r\n"
header += "Content-type: text/html\r\n"
header += "Content-length: %d\r\n\r\n"
s = "Information on current TLS session:\n\n"
s += "Local end : %s:%d\n" % (self.local_ip, self.local_port)
s += "Remote end : %s:%d\n" % (self.remote_ip, self.remote_port)
v = self.cur_session.advertised_tls_version
v = "%s (0x%04x)" % (_tls_version[v], v)
s += "TLS version : %s\n" % v
s += repr(self.cur_session.wcs)
body = "<html><body><pre>%s</pre></body></html>\r\n\r\n" % s
page = (header+body) % len(body)
else:
page = recv_data
p = Raw(load=page)
self.cur_pkt = TLS(type=23, msg=[p], tls_session=self.cur_session)
self.socket.send(str(self.cur_pkt))
raise self.FINISHED_SENT()
@ATMT.state(final=True)
def FINAL(self):
"""
We might call shutdown, but unit tests with s_client fail with this.
#self.socket.shutdown(1)
"""
self.socket.close()
| 1 | 9,412 | It seems this is not the way to fix issue #505. | secdev-scapy | py |
@@ -104,7 +104,7 @@ def binary_cross_entropy(pred,
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
- pred, label.float(), pos_weight=class_weight, reduction='none')
+ pred, label.float(), weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor) | 1 | import torch
import torch.nn as nn
import torch.nn.functional as F
from ..builder import LOSSES
from .utils import weight_reduce_loss
def cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100):
"""Calculate the CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, C), C is the number
of classes.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
Returns:
torch.Tensor: The calculated loss
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
# element-wise losses
loss = F.cross_entropy(
pred,
label,
weight=class_weight,
reduction='none',
ignore_index=ignore_index)
# apply weights and do the reduction
if weight is not None:
weight = weight.float()
loss = weight_reduce_loss(
loss, weight=weight, reduction=reduction, avg_factor=avg_factor)
return loss
def _expand_onehot_labels(labels, label_weights, label_channels, ignore_index):
"""Expand onehot labels to match the size of prediction."""
bin_labels = labels.new_full((labels.size(0), label_channels), 0)
valid_mask = (labels >= 0) & (labels != ignore_index)
inds = torch.nonzero(
valid_mask & (labels < label_channels), as_tuple=False)
if inds.numel() > 0:
bin_labels[inds, labels[inds]] = 1
valid_mask = valid_mask.view(-1, 1).expand(labels.size(0),
label_channels).float()
if label_weights is None:
bin_label_weights = valid_mask
else:
bin_label_weights = label_weights.view(-1, 1).repeat(1, label_channels)
bin_label_weights *= valid_mask
return bin_labels, bin_label_weights
def binary_cross_entropy(pred,
label,
weight=None,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=-100):
"""Calculate the binary CrossEntropy loss.
Args:
pred (torch.Tensor): The prediction with shape (N, 1).
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
ignore_index (int | None): The label index to be ignored.
If None, it will be set to default value. Default: -100.
Returns:
torch.Tensor: The calculated loss.
"""
# The default value of ignore_index is the same as F.cross_entropy
ignore_index = -100 if ignore_index is None else ignore_index
if pred.dim() != label.dim():
label, weight = _expand_onehot_labels(label, weight, pred.size(-1),
ignore_index)
# weighted element-wise losses
if weight is not None:
weight = weight.float()
loss = F.binary_cross_entropy_with_logits(
pred, label.float(), pos_weight=class_weight, reduction='none')
# do the reduction for the weighted loss
loss = weight_reduce_loss(
loss, weight, reduction=reduction, avg_factor=avg_factor)
return loss
def mask_cross_entropy(pred,
target,
label,
reduction='mean',
avg_factor=None,
class_weight=None,
ignore_index=None):
"""Calculate the CrossEntropy loss for masks.
Args:
pred (torch.Tensor): The prediction with shape (N, C, *), C is the
number of classes. The trailing * indicates arbitrary shape.
target (torch.Tensor): The learning label of the prediction.
label (torch.Tensor): ``label`` indicates the class label of the mask
            corresponding object. This will be used to select the mask of
            the class which the object belongs to when the mask prediction
            is not class-agnostic.
reduction (str, optional): The method used to reduce the loss.
Options are "none", "mean" and "sum".
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
class_weight (list[float], optional): The weight for each class.
        ignore_index (None): Placeholder, to be consistent with other losses.
Default: None.
Returns:
torch.Tensor: The calculated loss
Example:
>>> N, C = 3, 11
>>> H, W = 2, 2
>>> pred = torch.randn(N, C, H, W) * 1000
>>> target = torch.rand(N, H, W)
>>> label = torch.randint(0, C, size=(N,))
>>> reduction = 'mean'
>>> avg_factor = None
>>> class_weights = None
>>> loss = mask_cross_entropy(pred, target, label, reduction,
>>> avg_factor, class_weights)
>>> assert loss.shape == (1,)
"""
assert ignore_index is None, 'BCE loss does not support ignore_index'
# TODO: handle these two reserved arguments
assert reduction == 'mean' and avg_factor is None
num_rois = pred.size()[0]
inds = torch.arange(0, num_rois, dtype=torch.long, device=pred.device)
pred_slice = pred[inds, label].squeeze(1)
return F.binary_cross_entropy_with_logits(
pred_slice, target, weight=class_weight, reduction='mean')[None]
@LOSSES.register_module()
class CrossEntropyLoss(nn.Module):
def __init__(self,
use_sigmoid=False,
use_mask=False,
reduction='mean',
class_weight=None,
ignore_index=None,
loss_weight=1.0):
"""CrossEntropyLoss.
Args:
use_sigmoid (bool, optional): Whether the prediction uses sigmoid
                or softmax. Defaults to False.
use_mask (bool, optional): Whether to use mask cross entropy loss.
Defaults to False.
            reduction (str, optional): The method used to reduce the loss.
                Options are "none", "mean" and "sum". Defaults to 'mean'.
class_weight (list[float], optional): Weight of each class.
Defaults to None.
ignore_index (int | None): The label index to be ignored.
Defaults to None.
loss_weight (float, optional): Weight of the loss. Defaults to 1.0.
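        Example:
            >>> # A minimal usage sketch with random inputs, for illustration:
            >>> loss_fn = CrossEntropyLoss(use_sigmoid=False, loss_weight=1.0)
            >>> cls_score = torch.randn(4, 11)
            >>> label = torch.randint(0, 11, (4,))
            >>> loss = loss_fn(cls_score, label)  # scalar, 'mean' reduction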
"""
super(CrossEntropyLoss, self).__init__()
assert (use_sigmoid is False) or (use_mask is False)
self.use_sigmoid = use_sigmoid
self.use_mask = use_mask
self.reduction = reduction
self.loss_weight = loss_weight
self.class_weight = class_weight
self.ignore_index = ignore_index
if self.use_sigmoid:
self.cls_criterion = binary_cross_entropy
elif self.use_mask:
self.cls_criterion = mask_cross_entropy
else:
self.cls_criterion = cross_entropy
def forward(self,
cls_score,
label,
weight=None,
avg_factor=None,
reduction_override=None,
ignore_index=None,
**kwargs):
"""Forward function.
Args:
cls_score (torch.Tensor): The prediction.
label (torch.Tensor): The learning label of the prediction.
weight (torch.Tensor, optional): Sample-wise loss weight.
avg_factor (int, optional): Average factor that is used to average
the loss. Defaults to None.
reduction_override (str, optional): The method used to reduce the
loss. Options are "none", "mean" and "sum".
ignore_index (int | None): The label index to be ignored.
If not None, it will override the default value. Default: None.
Returns:
torch.Tensor: The calculated loss.
"""
assert reduction_override in (None, 'none', 'mean', 'sum')
reduction = (
reduction_override if reduction_override else self.reduction)
if ignore_index is None:
ignore_index = self.ignore_index
if self.class_weight is not None:
class_weight = cls_score.new_tensor(
self.class_weight, device=cls_score.device)
else:
class_weight = None
loss_cls = self.loss_weight * self.cls_criterion(
cls_score,
label,
weight,
class_weight=class_weight,
reduction=reduction,
avg_factor=avg_factor,
ignore_index=ignore_index,
**kwargs)
return loss_cls
| 1 | 24,946 | Please take a look at the docstring of `F.binary_cross_entropy_with_logits`. `weight` should be a tensor that matches the input tensor shape. It is Not the class-aware weight. `pos_weight` should be a vector with a length equal to the number of classes. | open-mmlab-mmdetection | py |
@@ -85,12 +85,15 @@ func TestSuite(
testSuiteName, fmt.Sprintf("[%v] %v", testType, "Windows 2012 R2 two disks, Network setting (path)"))
machineImageImportStorageLocationTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", testType, "Centos 7.4, Storage location"))
+ machineImageImportNetworkTagsTestCase := junitxml.NewTestCase(
+ testSuiteName, fmt.Sprintf("[%v] %v", testType, "Import with network tags"))
testsMap[testType] = map[*junitxml.TestCase]func(
context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project, e2e.CLITestType){}
testsMap[testType][machineImageImportUbuntu3DisksNetworkSettingsNameTestCase] = runOVFMachineImageImportUbuntu3DisksNetworkSettingsName
testsMap[testType][machineImageImportWindows2012R2TwoDisksNetworkSettingsPathTestCase] = runOVFMachineImageImportWindows2012R2TwoDisksNetworkSettingsPath
testsMap[testType][machineImageImportStorageLocationTestCase] = runOVFMachineImageImportCentos74StorageLocation
+ testsMap[testType][machineImageImportNetworkTagsTestCase] = runOVFMachineImageImportWithNetworkTags
}
// gcloud only tests | 1 | // Copyright 2020 Google Inc. All Rights Reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
// Package ovfmachineimageimporttestsuite contains e2e tests for machine image
// import cli tools
package ovfmachineimageimporttestsuite
import (
"context"
"fmt"
"log"
"regexp"
"strings"
"sync"
ovfimporttestsuite "github.com/GoogleCloudPlatform/compute-image-tools/cli_tools_tests/e2e/gce_ovf_import/test_suites"
"github.com/GoogleCloudPlatform/compute-image-tools/common/gcp"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/paramhelper"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools/common/utils/path"
"github.com/GoogleCloudPlatform/compute-image-tools/cli_tools_tests/e2e"
daisyCompute "github.com/GoogleCloudPlatform/compute-image-tools/daisy/compute"
"github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/junitxml"
testconfig "github.com/GoogleCloudPlatform/compute-image-tools/go/e2e_test_utils/test_config"
)
const (
testSuiteName = "OVFMachineImageImportTests"
ovaBucket = "compute-image-tools-test-resources"
)
var (
cmds = map[e2e.CLITestType]string{
e2e.Wrapper: "./gce_ovf_import",
e2e.GcloudBetaProdWrapperLatest: "gcloud",
e2e.GcloudBetaLatestWrapperLatest: "gcloud",
e2e.GcloudGaLatestWrapperRelease: "gcloud",
}
// Apply this as instance metadata if the OS config agent is not
// supported for the platform or version being imported.
skipOSConfigMetadata = map[string]string{"osconfig_not_supported": "true"}
// argMap stores test args from e2e test CLI.
argMap map[string]string
)
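// ovfMachineImageImportTestProperties holds the per-test-case settings for a
// machine image import, on top of the shared OVF import test properties.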
type ovfMachineImageImportTestProperties struct {
machineImageName string
storageLocation string
ovfimporttestsuite.OvfImportTestProperties
}
// TestSuite is the OVF machine image import test suite.
func TestSuite(
ctx context.Context, tswg *sync.WaitGroup, testSuites chan *junitxml.TestSuite,
logger *log.Logger, testSuiteRegex, testCaseRegex *regexp.Regexp,
testProjectConfig *testconfig.Project, argMapInput map[string]string) {
argMap = argMapInput
testsMap := map[e2e.CLITestType]map[*junitxml.TestCase]func(
context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project, e2e.CLITestType){}
testTypes := []e2e.CLITestType{
e2e.Wrapper,
e2e.GcloudBetaProdWrapperLatest,
e2e.GcloudBetaLatestWrapperLatest,
}
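	// Register the shared test cases once per CLI flavor so each flavor
	// runs the same scenarios independently.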
for _, testType := range testTypes {
machineImageImportUbuntu3DisksNetworkSettingsNameTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", testType, "Ubuntu 3 disks, one data disk larger than 10GB, Network setting (name only)"))
machineImageImportWindows2012R2TwoDisksNetworkSettingsPathTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", testType, "Windows 2012 R2 two disks, Network setting (path)"))
machineImageImportStorageLocationTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", testType, "Centos 7.4, Storage location"))
testsMap[testType] = map[*junitxml.TestCase]func(
context.Context, *junitxml.TestCase, *log.Logger, *testconfig.Project, e2e.CLITestType){}
testsMap[testType][machineImageImportUbuntu3DisksNetworkSettingsNameTestCase] = runOVFMachineImageImportUbuntu3DisksNetworkSettingsName
testsMap[testType][machineImageImportWindows2012R2TwoDisksNetworkSettingsPathTestCase] = runOVFMachineImageImportWindows2012R2TwoDisksNetworkSettingsPath
testsMap[testType][machineImageImportStorageLocationTestCase] = runOVFMachineImageImportCentos74StorageLocation
}
// gcloud only tests
machineImageImportDisabledDefaultServiceAccountSuccessTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", e2e.GcloudBetaLatestWrapperLatest, "Machine image import without default service account, success by specifying a custom Compute service account"))
machineImageImportDefaultServiceAccountWithMissingPermissionsSuccessTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", e2e.GcloudBetaLatestWrapperLatest, "Machine image import without permission on default service account, success by specifying a custom Compute service account"))
machineImageImportDisabledDefaultServiceAccountFailTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", e2e.GcloudBetaLatestWrapperLatest, "Machine image import without default service account failed"))
machineImageImportDefaultServiceAccountWithMissingPermissionsFailTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", e2e.GcloudBetaLatestWrapperLatest, "Machine image import without permission on default service account failed"))
machineImageImportDefaultServiceAccountCustomAccessScopeTestCase := junitxml.NewTestCase(
testSuiteName, fmt.Sprintf("[%v] %v", e2e.GcloudBetaLatestWrapperLatest, "Machine image import with default service account custom access scopes set"))
testsMap[e2e.Wrapper][machineImageImportDisabledDefaultServiceAccountSuccessTestCase] = runMachineImageImportDisabledDefaultServiceAccountSuccessTest
testsMap[e2e.Wrapper][machineImageImportDefaultServiceAccountWithMissingPermissionsSuccessTestCase] = runMachineImageImportOSDefaultServiceAccountWithMissingPermissionsSuccessTest
testsMap[e2e.Wrapper][machineImageImportDisabledDefaultServiceAccountFailTestCase] = runMachineImageImportWithDisabledDefaultServiceAccountFailTest
testsMap[e2e.Wrapper][machineImageImportDefaultServiceAccountWithMissingPermissionsFailTestCase] = runMachineImageImportDefaultServiceAccountWithMissingPermissionsFailTest
testsMap[e2e.Wrapper][machineImageImportDefaultServiceAccountCustomAccessScopeTestCase] = runMachineImageImportDefaultServiceAccountCustomAccessScope
e2e.CLITestSuite(ctx, tswg, testSuites, logger, testSuiteRegex, testCaseRegex,
testProjectConfig, testSuiteName, testsMap)
}
func runOVFMachineImageImportUbuntu3DisksNetworkSettingsName(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
testProjectConfig *testconfig.Project, testType e2e.CLITestType) {
suffix := path.RandString(5)
props := &ovfMachineImageImportTestProperties{
machineImageName: fmt.Sprintf("test-machine-image-ubuntu-3-disks-%v", suffix),
OvfImportTestProperties: ovfimporttestsuite.OvfImportTestProperties{
VerificationStartupScript: ovfimporttestsuite.LoadScriptContent(
"scripts/ovf_import_test_ubuntu_3_disks.sh", logger),
Zone: testProjectConfig.TestZone,
ExpectedStartupOutput: "All tests passed!",
FailureMatches: []string{"TestFailed:"},
SourceURI: fmt.Sprintf("gs://%v/ova/ubuntu-1604-three-disks", ovaBucket),
Os: "ubuntu-1604",
InstanceMetadata: skipOSConfigMetadata,
MachineType: "n1-standard-4",
Network: fmt.Sprintf("%v-vpc-1", testProjectConfig.TestProjectID),
Subnet: fmt.Sprintf("%v-subnet-1", testProjectConfig.TestProjectID),
},
}
runOVFMachineImageImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props)
}
func runOVFMachineImageImportWindows2012R2TwoDisksNetworkSettingsPath(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
testProjectConfig *testconfig.Project, testType e2e.CLITestType) {
suffix := path.RandString(5)
region, _ := paramhelper.GetRegion(testProjectConfig.TestZone)
props := &ovfMachineImageImportTestProperties{
machineImageName: fmt.Sprintf("test-machine-image-w2k12-r2-%v", suffix),
OvfImportTestProperties: ovfimporttestsuite.OvfImportTestProperties{VerificationStartupScript: ovfimporttestsuite.LoadScriptContent(
"scripts/ovf_import_test_windows_two_disks.ps1", logger),
Zone: testProjectConfig.TestZone,
ExpectedStartupOutput: "All Tests Passed",
FailureMatches: []string{"Test Failed:"},
SourceURI: fmt.Sprintf("gs://%v/ova/w2k12-r2", ovaBucket),
Os: "windows-2012r2",
MachineType: "n1-standard-8",
IsWindows: true,
Network: fmt.Sprintf("global/networks/%v-vpc-1", testProjectConfig.TestProjectID),
Subnet: fmt.Sprintf("projects/%v/regions/%v/subnetworks/%v-subnet-1", testProjectConfig.TestProjectID, region, testProjectConfig.TestProjectID),
}}
runOVFMachineImageImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props)
}
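// runOVFMachineImageImportCentos74StorageLocation imports a CentOS 7.4 OVA
// with an explicit machine image storage location.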
func runOVFMachineImageImportCentos74StorageLocation(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
testProjectConfig *testconfig.Project, testType e2e.CLITestType) {
suffix := path.RandString(5)
props := &ovfMachineImageImportTestProperties{
machineImageName: fmt.Sprintf("test-gmi-storage-location-%v", suffix),
storageLocation: "us-west2",
OvfImportTestProperties: ovfimporttestsuite.OvfImportTestProperties{
VerificationStartupScript: ovfimporttestsuite.LoadScriptContent(
"daisy_integration_tests/scripts/post_translate_test.sh", logger),
Zone: testProjectConfig.TestZone,
ExpectedStartupOutput: "All tests passed!",
FailureMatches: []string{"FAILED:", "TestFailed:"},
SourceURI: fmt.Sprintf("gs://%v/ova/centos-7.4/", ovaBucket),
Os: "centos-7",
MachineType: "n2-standard-2",
}}
runOVFMachineImageImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props)
}
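// With the default service account disabled, import succeeds when a custom
// compute service account is specified.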
func runMachineImageImportDisabledDefaultServiceAccountSuccessTest(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
testProjectConfig *testconfig.Project, testType e2e.CLITestType) {
testVariables, ok := e2e.GetServiceAccountTestVariables(argMap, true)
if !ok {
e2e.Failure(testCase, logger, fmt.Sprintln("Failed to get service account test args"))
return
}
suffix := path.RandString(5)
props := &ovfMachineImageImportTestProperties{
machineImageName: fmt.Sprintf("test-without-service-account-%v", suffix),
OvfImportTestProperties: ovfimporttestsuite.OvfImportTestProperties{VerificationStartupScript: ovfimporttestsuite.LoadScriptContent(
"daisy_integration_tests/scripts/post_translate_test.sh", logger),
Zone: testProjectConfig.TestZone,
ExpectedStartupOutput: "All tests passed!",
FailureMatches: []string{"FAILED:", "TestFailed:"},
SourceURI: fmt.Sprintf("gs://%v/ova/centos-7.4/", ovaBucket),
Os: "centos-7",
MachineType: "n1-standard-4",
Project: testVariables.ProjectID,
ComputeServiceAccount: testVariables.ComputeServiceAccount,
InstanceServiceAccount: testVariables.InstanceServiceAccount,
InstanceAccessScopes: "https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/datastore",
}}
runOVFMachineImageImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props)
}
// With insufficient permissions on the default service account, import succeeds by specifying a custom service account.
func runMachineImageImportOSDefaultServiceAccountWithMissingPermissionsSuccessTest(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
testProjectConfig *testconfig.Project, testType e2e.CLITestType) {
testVariables, ok := e2e.GetServiceAccountTestVariables(argMap, false)
if !ok {
e2e.Failure(testCase, logger, fmt.Sprintln("Failed to get service account test args"))
return
}
suffix := path.RandString(5)
props := &ovfMachineImageImportTestProperties{
machineImageName: fmt.Sprintf("test-missing-cse-permissions-%v", suffix),
OvfImportTestProperties: ovfimporttestsuite.OvfImportTestProperties{VerificationStartupScript: ovfimporttestsuite.LoadScriptContent(
"daisy_integration_tests/scripts/post_translate_test.sh", logger),
Zone: testProjectConfig.TestZone,
ExpectedStartupOutput: "All tests passed!",
FailureMatches: []string{"FAILED:", "TestFailed:"},
SourceURI: fmt.Sprintf("gs://%v/ova/centos-7.4/", ovaBucket),
Os: "centos-7",
MachineType: "n1-standard-4",
Project: testVariables.ProjectID,
ComputeServiceAccount: testVariables.ComputeServiceAccount,
InstanceServiceAccount: testVariables.InstanceServiceAccount,
}}
runOVFMachineImageImportTest(ctx, buildTestArgs(props, testProjectConfig)[testType], testType, testProjectConfig, logger, testCase, props)
}
// With the default service account disabled, import fails.
func runMachineImageImportWithDisabledDefaultServiceAccountFailTest(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
testProjectConfig *testconfig.Project, testType e2e.CLITestType) {
testVariables, ok := e2e.GetServiceAccountTestVariables(argMap, true)
if !ok {
e2e.Failure(testCase, logger, fmt.Sprintln("Failed to get service account test args"))
return
}
suffix := path.RandString(5)
props := &ovfMachineImageImportTestProperties{
machineImageName: fmt.Sprintf("test-missing-permn-on-default-csa-fail-%v", suffix),
OvfImportTestProperties: ovfimporttestsuite.OvfImportTestProperties{VerificationStartupScript: ovfimporttestsuite.LoadScriptContent(
"daisy_integration_tests/scripts/post_translate_test.sh", logger),
Zone: testProjectConfig.TestZone,
ExpectedStartupOutput: "All tests passed!",
FailureMatches: []string{"FAILED:", "TestFailed:"},
SourceURI: fmt.Sprintf("gs://%v/ova/centos-7.4/", ovaBucket),
Os: "centos-7",
MachineType: "n1-standard-4",
Project: testVariables.ProjectID,
}}
e2e.RunTestCommandAssertErrorMessage(cmds[testType], buildTestArgs(props, testProjectConfig)[testType], "Failed to download GCS path", logger, testCase)
}
// With insufficient permissions on the default service account, import fails.
func runMachineImageImportDefaultServiceAccountWithMissingPermissionsFailTest(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
testProjectConfig *testconfig.Project, testType e2e.CLITestType) {
testVariables, ok := e2e.GetServiceAccountTestVariables(argMap, false)
if !ok {
e2e.Failure(testCase, logger, fmt.Sprintln("Failed to get service account test args"))
return
}
suffix := path.RandString(5)
props := &ovfMachineImageImportTestProperties{
machineImageName: fmt.Sprintf("test-insufficient-perm-default-csa-fail-%v", suffix),
OvfImportTestProperties: ovfimporttestsuite.OvfImportTestProperties{VerificationStartupScript: ovfimporttestsuite.LoadScriptContent(
"daisy_integration_tests/scripts/post_translate_test.sh", logger),
Zone: testProjectConfig.TestZone,
ExpectedStartupOutput: "All tests passed!",
FailureMatches: []string{"FAILED:", "TestFailed:"},
SourceURI: fmt.Sprintf("gs://%v/ova/centos-7.4/", ovaBucket),
Os: "centos-7",
MachineType: "n1-standard-4",
Project: testVariables.ProjectID,
}}
e2e.RunTestCommandAssertErrorMessage(cmds[testType], buildTestArgs(props, testProjectConfig)[testType], "Failed to download GCS path", logger, testCase)
}
// Ensure custom access scopes are set on the machine image even when the default service account is used.
func runMachineImageImportDefaultServiceAccountCustomAccessScope(ctx context.Context, testCase *junitxml.TestCase, logger *log.Logger,
testProjectConfig *testconfig.Project, testType e2e.CLITestType) {
testVariables, ok := e2e.GetServiceAccountTestVariables(argMap, false)
if !ok {
e2e.Failure(testCase, logger, fmt.Sprintln("Failed to get service account test args"))
return
}
suffix := path.RandString(5)
props := &ovfMachineImageImportTestProperties{
machineImageName: fmt.Sprintf("test-custom-sc-default-sa-%v", suffix),
OvfImportTestProperties: ovfimporttestsuite.OvfImportTestProperties{VerificationStartupScript: ovfimporttestsuite.LoadScriptContent(
"daisy_integration_tests/scripts/post_translate_test.sh", logger),
Zone: testProjectConfig.TestZone,
ExpectedStartupOutput: "All tests passed!",
FailureMatches: []string{"FAILED:", "TestFailed:"},
SourceURI: fmt.Sprintf("gs://%v/ova/centos-7.4/", ovaBucket),
Os: "centos-7",
MachineType: "n1-standard-4",
Project: testVariables.ProjectID,
InstanceAccessScopes: "https://www.googleapis.com/auth/compute,https://www.googleapis.com/auth/datastore",
}}
e2e.RunTestCommandAssertErrorMessage(cmds[testType], buildTestArgs(props, testProjectConfig)[testType], "Failed to download GCS path", logger, testCase)
}
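// buildTestArgs assembles the command-line arguments for each CLI test type
// (gcloud beta, gcloud, and the wrapper binary) from the test properties.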
func buildTestArgs(props *ovfMachineImageImportTestProperties, testProjectConfig *testconfig.Project) map[e2e.CLITestType][]string {
gcloudBetaArgs := []string{
"beta", "compute", "machine-images", "import", props.machineImageName, "--quiet",
"--docker-image-tag=latest"}
gcloudArgs := []string{"compute", "machine-images", "import", props.machineImageName, "--quiet"}
wrapperArgs := []string{
"-client-id=e2e",
fmt.Sprintf("-machine-image-name=%s", props.machineImageName),
}
if props.storageLocation != "" {
gcloudBetaArgs = append(gcloudBetaArgs, fmt.Sprintf("--storage-location=%v", props.storageLocation))
gcloudArgs = append(gcloudArgs, fmt.Sprintf("--storage-location=%v", props.storageLocation))
wrapperArgs = append(wrapperArgs, fmt.Sprintf("-machine-image-storage-location=%v", props.storageLocation))
}
return ovfimporttestsuite.BuildArgsMap(&props.OvfImportTestProperties, testProjectConfig, gcloudBetaArgs, gcloudArgs, wrapperArgs)
}
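// runOVFMachineImageImportTest runs the import command and, on success,
// verifies the resulting machine image.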
func runOVFMachineImageImportTest(ctx context.Context, args []string, testType e2e.CLITestType,
testProjectConfig *testconfig.Project, logger *log.Logger, testCase *junitxml.TestCase,
props *ovfMachineImageImportTestProperties) {
if e2e.RunTestForTestType(cmds[testType], args, testType, logger, testCase) {
verifyImportedMachineImage(ctx, testCase, testProjectConfig, logger, props)
}
}
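// verifyImportedMachineImage validates the imported machine image: it checks
// the storage location (when one was requested) and boots a test instance
// from the image to run the verification startup script.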
func verifyImportedMachineImage(
ctx context.Context, testCase *junitxml.TestCase, testProjectConfig *testconfig.Project,
logger *log.Logger, props *ovfMachineImageImportTestProperties) {
project := ovfimporttestsuite.GetProject(&props.OvfImportTestProperties, testProjectConfig)
client, err := daisyCompute.NewClient(ctx)
if err != nil {
e2e.Failure(testCase, logger, fmt.Sprintf("Error creating client: %v", err))
return
}
logger.Printf("Verifying imported machine image...")
testInstanceName := props.machineImageName + "-test-instance"
logger.Printf("Creating `%v` test instance.", testInstanceName)
// Verify storage location
if props.storageLocation != "" {
gmi, gmiErr := gcp.CreateMachineImageObject(ctx, project, props.machineImageName)
if gmiErr != nil {
e2e.Failure(testCase, logger, fmt.Sprintf("Error when loading machine image '%v': %v", props.machineImageName, err))
return
}
storageLocationMatch := false
for _, storageLocation := range gmi.StorageLocations {
storageLocationMatch = storageLocationMatch || strings.Contains(storageLocation, props.storageLocation)
}
if !storageLocationMatch {
e2e.Failure(testCase, logger,
fmt.Sprintf("Machine image storage locations (%v) do not contain storage location from the flag:: %v",
strings.Join(gmi.StorageLocations, ","), props.storageLocation))
return
}
}
instance, err := gcp.CreateInstanceBeta(
ctx, project, props.Zone, testInstanceName, props.IsWindows, props.machineImageName)
if err != nil {
e2e.Failure(testCase, logger, fmt.Sprintf("Error when creating test instance `%v` from machine image '%v': %v", testInstanceName, props.machineImageName, err))
return
}
// Clean-up
defer func() {
logger.Printf("Deleting instance `%v`", testInstanceName)
if err := instance.Cleanup(); err != nil {
logger.Printf("Instance '%v' failed to clean up: %v", testInstanceName, err)
} else {
logger.Printf("Instance '%v' cleaned up.", testInstanceName)
}
logger.Printf("Deleting machine image `%v`", props.machineImageName)
if err := client.DeleteMachineImage(project, props.machineImageName); err != nil {
logger.Printf("Machine image '%v' failed to clean up: %v", props.machineImageName, err)
} else {
logger.Printf("Machine image '%v' cleaned up.", props.machineImageName)
}
}()
ovfimporttestsuite.VerifyInstance(instance, client, testCase, project, logger, &props.OvfImportTestProperties)
}
| 1 | 13,629 | Same here, merge with an existing test. | GoogleCloudPlatform-compute-image-tools | go |
@@ -28,7 +28,7 @@ EFI_DRIVER_BINDING_PROTOCOL gSdMmcPciHcDriverBinding = {
NULL
};
-#define SLOT_INIT_TEMPLATE {0, UnknownSlot, 0, 0, 0, 0,\
+#define SLOT_INIT_TEMPLATE {0, UnknownSlot, 0, 0, 0,\
{EDKII_SD_MMC_BUS_WIDTH_IGNORE,\
EDKII_SD_MMC_CLOCK_FREQ_IGNORE,\
{EDKII_SD_MMC_DRIVER_STRENGTH_IGNORE}}} | 1 | /** @file
This driver is used to manage SD/MMC PCI host controllers which are compliant
with the SD Host Controller Simplified Specification version 3.00 plus the 64-bit
System Addressing support in the SD Host Controller Simplified Specification version
4.20.
It exposes EFI_SD_MMC_PASS_THRU_PROTOCOL for upper layer use.
Copyright (c) 2018-2019, NVIDIA CORPORATION. All rights reserved.
Copyright (c) 2015 - 2020, Intel Corporation. All rights reserved.<BR>
SPDX-License-Identifier: BSD-2-Clause-Patent
**/
#include "SdMmcPciHcDxe.h"
EDKII_SD_MMC_OVERRIDE *mOverride;
//
// Driver Global Variables
//
EFI_DRIVER_BINDING_PROTOCOL gSdMmcPciHcDriverBinding = {
SdMmcPciHcDriverBindingSupported,
SdMmcPciHcDriverBindingStart,
SdMmcPciHcDriverBindingStop,
0x10,
NULL,
NULL
};
#define SLOT_INIT_TEMPLATE {0, UnknownSlot, 0, 0, 0, 0,\
{EDKII_SD_MMC_BUS_WIDTH_IGNORE,\
EDKII_SD_MMC_CLOCK_FREQ_IGNORE,\
{EDKII_SD_MMC_DRIVER_STRENGTH_IGNORE}}}
//
// Template for SD/MMC host controller private data.
//
SD_MMC_HC_PRIVATE_DATA gSdMmcPciHcTemplate = {
SD_MMC_HC_PRIVATE_SIGNATURE, // Signature
NULL, // ControllerHandle
NULL, // PciIo
{ // PassThru
sizeof (UINT32),
SdMmcPassThruPassThru,
SdMmcPassThruGetNextSlot,
SdMmcPassThruBuildDevicePath,
SdMmcPassThruGetSlotNumber,
SdMmcPassThruResetDevice
},
0, // PciAttributes
0, // PreviousSlot
NULL, // TimerEvent
NULL, // ConnectEvent
// Queue
INITIALIZE_LIST_HEAD_VARIABLE (gSdMmcPciHcTemplate.Queue),
{ // Slot
SLOT_INIT_TEMPLATE,
SLOT_INIT_TEMPLATE,
SLOT_INIT_TEMPLATE,
SLOT_INIT_TEMPLATE,
SLOT_INIT_TEMPLATE,
SLOT_INIT_TEMPLATE
},
{ // Capability
{ 0 },
},
{ // MaxCurrent
0,
},
{
0 // ControllerVersion
}
};
SD_DEVICE_PATH mSdDpTemplate = {
{
MESSAGING_DEVICE_PATH,
MSG_SD_DP,
{
(UINT8)(sizeof (SD_DEVICE_PATH)),
(UINT8)((sizeof (SD_DEVICE_PATH)) >> 8)
}
},
0
};
EMMC_DEVICE_PATH mEmmcDpTemplate = {
{
MESSAGING_DEVICE_PATH,
MSG_EMMC_DP,
{
(UINT8)(sizeof (EMMC_DEVICE_PATH)),
(UINT8)((sizeof (EMMC_DEVICE_PATH)) >> 8)
}
},
0
};
//
// Prioritized function list to detect card type.
// User could add other card detection logic here.
//
CARD_TYPE_DETECT_ROUTINE mCardTypeDetectRoutineTable[] = {
EmmcIdentification,
SdCardIdentification,
NULL
};
/**
The entry point for the SD host controller driver, used to install this driver on the ImageHandle.
@param[in] ImageHandle The firmware allocated handle for this driver image.
@param[in] SystemTable Pointer to the EFI system table.
@retval EFI_SUCCESS Driver loaded.
@retval other Driver not loaded.
**/
EFI_STATUS
EFIAPI
InitializeSdMmcPciHcDxe (
IN EFI_HANDLE ImageHandle,
IN EFI_SYSTEM_TABLE *SystemTable
)
{
EFI_STATUS Status;
Status = EfiLibInstallDriverBindingComponentName2 (
ImageHandle,
SystemTable,
&gSdMmcPciHcDriverBinding,
ImageHandle,
&gSdMmcPciHcComponentName,
&gSdMmcPciHcComponentName2
);
ASSERT_EFI_ERROR (Status);
return Status;
}
/**
Call back function when the timer event is signaled.
@param[in] Event The Event this notify function registered to.
@param[in] Context Pointer to the context data registered to the
Event.
**/
VOID
EFIAPI
ProcessAsyncTaskList (
IN EFI_EVENT Event,
IN VOID *Context
)
{
SD_MMC_HC_PRIVATE_DATA *Private;
LIST_ENTRY *Link;
SD_MMC_HC_TRB *Trb;
EFI_STATUS Status;
EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet;
BOOLEAN InfiniteWait;
EFI_EVENT TrbEvent;
Private = (SD_MMC_HC_PRIVATE_DATA *)Context;
//
// Check if the first entry in the async I/O queue is done or not.
//
Status = EFI_SUCCESS;
Trb = NULL;
Link = GetFirstNode (&Private->Queue);
if (!IsNull (&Private->Queue, Link)) {
Trb = SD_MMC_HC_TRB_FROM_THIS (Link);
if (!Private->Slot[Trb->Slot].MediaPresent) {
Status = EFI_NO_MEDIA;
goto Done;
}
if (!Trb->Started) {
//
// Check whether the cmd/data line is ready for transfer.
//
Status = SdMmcCheckTrbEnv (Private, Trb);
if (!EFI_ERROR (Status)) {
Trb->Started = TRUE;
Status = SdMmcExecTrb (Private, Trb);
if (EFI_ERROR (Status)) {
goto Done;
}
} else {
goto Done;
}
}
Status = SdMmcCheckTrbResult (Private, Trb);
}
Done:
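//
// Evaluate the TRB outcome: count down the timeout for requests that are
// not ready yet, retry on CRC errors, or complete the transaction and
// signal the caller's event.
//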
if ((Trb != NULL) && (Status == EFI_NOT_READY)) {
Packet = Trb->Packet;
if (Packet->Timeout == 0) {
InfiniteWait = TRUE;
} else {
InfiniteWait = FALSE;
}
if ((!InfiniteWait) && (Trb->Timeout-- == 0)) {
RemoveEntryList (Link);
Trb->Packet->TransactionStatus = EFI_TIMEOUT;
TrbEvent = Trb->Event;
SdMmcFreeTrb (Trb);
DEBUG ((DEBUG_VERBOSE, "ProcessAsyncTaskList(): Signal Event %p EFI_TIMEOUT\n", TrbEvent));
gBS->SignalEvent (TrbEvent);
return;
}
} else if ((Trb != NULL) && (Status == EFI_CRC_ERROR) && (Trb->Retries > 0)) {
Trb->Retries--;
Trb->Started = FALSE;
} else if (Trb != NULL) {
RemoveEntryList (Link);
Trb->Packet->TransactionStatus = Status;
TrbEvent = Trb->Event;
SdMmcFreeTrb (Trb);
DEBUG ((DEBUG_VERBOSE, "ProcessAsyncTaskList(): Signal Event %p with %r\n", TrbEvent, Status));
gBS->SignalEvent (TrbEvent);
}
return;
}
/**
Sd removable device enumeration callback function when the timer event is signaled.
@param[in] Event The Event this notify function registered to.
@param[in] Context Pointer to the context data registered to the
Event.
**/
VOID
EFIAPI
SdMmcPciHcEnumerateDevice (
IN EFI_EVENT Event,
IN VOID *Context
)
{
SD_MMC_HC_PRIVATE_DATA *Private;
EFI_STATUS Status;
UINT8 Slot;
BOOLEAN MediaPresent;
UINT32 RoutineNum;
CARD_TYPE_DETECT_ROUTINE *Routine;
UINTN Index;
LIST_ENTRY *Link;
LIST_ENTRY *NextLink;
SD_MMC_HC_TRB *Trb;
EFI_TPL OldTpl;
Private = (SD_MMC_HC_PRIVATE_DATA *)Context;
for (Slot = 0; Slot < SD_MMC_HC_MAX_SLOT; Slot++) {
if ((Private->Slot[Slot].Enable) && (Private->Slot[Slot].SlotType == RemovableSlot)) {
Status = SdMmcHcCardDetect (Private->PciIo, Slot, &MediaPresent);
if ((Status == EFI_MEDIA_CHANGED) && !MediaPresent) {
DEBUG ((DEBUG_INFO, "SdMmcPciHcEnumerateDevice: device disconnected at slot %d of pci %p\n", Slot, Private->PciIo));
Private->Slot[Slot].MediaPresent = FALSE;
Private->Slot[Slot].Initialized = FALSE;
//
// Signal all async task events at the slot with EFI_NO_MEDIA status.
//
OldTpl = gBS->RaiseTPL (TPL_NOTIFY);
for (Link = GetFirstNode (&Private->Queue);
!IsNull (&Private->Queue, Link);
Link = NextLink)
{
NextLink = GetNextNode (&Private->Queue, Link);
Trb = SD_MMC_HC_TRB_FROM_THIS (Link);
if (Trb->Slot == Slot) {
RemoveEntryList (Link);
Trb->Packet->TransactionStatus = EFI_NO_MEDIA;
gBS->SignalEvent (Trb->Event);
SdMmcFreeTrb (Trb);
}
}
gBS->RestoreTPL (OldTpl);
//
// Notify the upper layer the connect state change through ReinstallProtocolInterface.
//
gBS->ReinstallProtocolInterface (
Private->ControllerHandle,
&gEfiSdMmcPassThruProtocolGuid,
&Private->PassThru,
&Private->PassThru
);
}
if ((Status == EFI_MEDIA_CHANGED) && MediaPresent) {
DEBUG ((DEBUG_INFO, "SdMmcPciHcEnumerateDevice: device connected at slot %d of pci %p\n", Slot, Private->PciIo));
//
// Reset the specified slot of the SD/MMC Pci Host Controller
//
Status = SdMmcHcReset (Private, Slot);
if (EFI_ERROR (Status)) {
continue;
}
//
// Reinitialize slot and restart identification process for the new attached device
//
Status = SdMmcHcInitHost (Private, Slot);
if (EFI_ERROR (Status)) {
continue;
}
Private->Slot[Slot].MediaPresent = TRUE;
Private->Slot[Slot].Initialized = TRUE;
RoutineNum = sizeof (mCardTypeDetectRoutineTable) / sizeof (CARD_TYPE_DETECT_ROUTINE);
for (Index = 0; Index < RoutineNum; Index++) {
Routine = &mCardTypeDetectRoutineTable[Index];
if (*Routine != NULL) {
Status = (*Routine)(Private, Slot);
if (!EFI_ERROR (Status)) {
break;
}
}
}
//
// This card doesn't get initialized correctly.
//
if (Index == RoutineNum) {
Private->Slot[Slot].Initialized = FALSE;
}
//
// Notify the upper layer the connect state change through ReinstallProtocolInterface.
//
gBS->ReinstallProtocolInterface (
Private->ControllerHandle,
&gEfiSdMmcPassThruProtocolGuid,
&Private->PassThru,
&Private->PassThru
);
}
}
}
return;
}
/**
Tests to see if this driver supports a given controller. If a child device is provided,
it further tests to see if this driver supports creating a handle for the specified child device.
This function checks to see if the driver specified by This supports the device specified by
ControllerHandle. Drivers will typically use the device path attached to
ControllerHandle and/or the services from the bus I/O abstraction attached to
ControllerHandle to determine if the driver supports ControllerHandle. This function
may be called many times during platform initialization. In order to reduce boot times, the tests
performed by this function must be very small, and take as little time as possible to execute. This
function must not change the state of any hardware devices, and this function must be aware that the
device specified by ControllerHandle may already be managed by the same driver or a
different driver. This function must match its calls to AllocatePages() with FreePages(),
AllocatePool() with FreePool(), and OpenProtocol() with CloseProtocol().
Since ControllerHandle may have been previously started by the same driver, if a protocol is
already in the opened state, then it must not be closed with CloseProtocol(). This is required
to guarantee the state of ControllerHandle is not modified by this function.
@param[in] This A pointer to the EFI_DRIVER_BINDING_PROTOCOL instance.
@param[in] ControllerHandle The handle of the controller to test. This handle
must support a protocol interface that supplies
an I/O abstraction to the driver.
@param[in] RemainingDevicePath A pointer to the remaining portion of a device path. This
parameter is ignored by device drivers, and is optional for bus
drivers. For bus drivers, if this parameter is not NULL, then
the bus driver must determine if the bus controller specified
by ControllerHandle and the child controller specified
by RemainingDevicePath are both supported by this
bus driver.
@retval EFI_SUCCESS The device specified by ControllerHandle and
RemainingDevicePath is supported by the driver specified by This.
@retval EFI_ALREADY_STARTED The device specified by ControllerHandle and
RemainingDevicePath is already being managed by the driver
specified by This.
@retval EFI_ACCESS_DENIED The device specified by ControllerHandle and
RemainingDevicePath is already being managed by a different
driver or an application that requires exclusive access.
Currently not implemented.
@retval EFI_UNSUPPORTED The device specified by ControllerHandle and
RemainingDevicePath is not supported by the driver specified by This.
**/
EFI_STATUS
EFIAPI
SdMmcPciHcDriverBindingSupported (
IN EFI_DRIVER_BINDING_PROTOCOL *This,
IN EFI_HANDLE Controller,
IN EFI_DEVICE_PATH_PROTOCOL *RemainingDevicePath
)
{
EFI_STATUS Status;
EFI_DEVICE_PATH_PROTOCOL *ParentDevicePath;
EFI_PCI_IO_PROTOCOL *PciIo;
PCI_TYPE00 PciData;
PciIo = NULL;
ParentDevicePath = NULL;
//
// SdMmcPciHcDxe is a device driver, and should ignore the
// "RemainingDevicePath" according to the EFI spec.
//
Status = gBS->OpenProtocol (
Controller,
&gEfiDevicePathProtocolGuid,
(VOID *)&ParentDevicePath,
This->DriverBindingHandle,
Controller,
EFI_OPEN_PROTOCOL_BY_DRIVER
);
if (EFI_ERROR (Status)) {
//
// EFI_ALREADY_STARTED is also an error.
//
return Status;
}
//
// Close the protocol because we don't use it here.
//
gBS->CloseProtocol (
Controller,
&gEfiDevicePathProtocolGuid,
This->DriverBindingHandle,
Controller
);
//
// Now test the EfiPciIoProtocol.
//
Status = gBS->OpenProtocol (
Controller,
&gEfiPciIoProtocolGuid,
(VOID **)&PciIo,
This->DriverBindingHandle,
Controller,
EFI_OPEN_PROTOCOL_BY_DRIVER
);
if (EFI_ERROR (Status)) {
return Status;
}
//
// Now further check the PCI header: Base class (offset 0x0B) and
// Sub Class (offset 0x0A). This controller should be an SD/MMC PCI
// Host Controller.
//
Status = PciIo->Pci.Read (
PciIo,
EfiPciIoWidthUint8,
0,
sizeof (PciData),
&PciData
);
if (EFI_ERROR (Status)) {
gBS->CloseProtocol (
Controller,
&gEfiPciIoProtocolGuid,
This->DriverBindingHandle,
Controller
);
return EFI_UNSUPPORTED;
}
//
// Since we already got the PciData, we can close the protocol now to avoid
// carrying it across multiple exit points.
//
gBS->CloseProtocol (
Controller,
&gEfiPciIoProtocolGuid,
This->DriverBindingHandle,
Controller
);
//
// Examine SD PCI Host Controller PCI Configuration table fields.
//
if ((PciData.Hdr.ClassCode[2] == PCI_CLASS_SYSTEM_PERIPHERAL) &&
(PciData.Hdr.ClassCode[1] == PCI_SUBCLASS_SD_HOST_CONTROLLER) &&
((PciData.Hdr.ClassCode[0] == 0x00) || (PciData.Hdr.ClassCode[0] == 0x01)))
{
return EFI_SUCCESS;
}
return EFI_UNSUPPORTED;
}
/**
Starts a device controller or a bus controller.
The Start() function is designed to be invoked from the EFI boot service ConnectController().
As a result, much of the error checking on the parameters to Start() has been moved into this
common boot service. It is legal to call Start() from other locations,
but the following calling restrictions must be followed or the system behavior will not be deterministic.
1. ControllerHandle must be a valid EFI_HANDLE.
2. If RemainingDevicePath is not NULL, then it must be a pointer to a naturally aligned
EFI_DEVICE_PATH_PROTOCOL.
3. Prior to calling Start(), the Supported() function for the driver specified by This must
have been called with the same calling parameters, and Supported() must have returned EFI_SUCCESS.
@param[in] This A pointer to the EFI_DRIVER_BINDING_PROTOCOL instance.
@param[in] ControllerHandle The handle of the controller to start. This handle
must support a protocol interface that supplies
an I/O abstraction to the driver.
@param[in] RemainingDevicePath A pointer to the remaining portion of a device path. This
parameter is ignored by device drivers, and is optional for bus
drivers. For a bus driver, if this parameter is NULL, then handles
for all the children of Controller are created by this driver.
If this parameter is not NULL and the first Device Path Node is
not the End of Device Path Node, then only the handle for the
child device specified by the first Device Path Node of
RemainingDevicePath is created by this driver.
If the first Device Path Node of RemainingDevicePath is
the End of Device Path Node, no child handle is created by this
driver.
@retval EFI_SUCCESS The device was started.
@retval EFI_DEVICE_ERROR The device could not be started due to a device error. Currently not implemented.
@retval EFI_OUT_OF_RESOURCES The request could not be completed due to a lack of resources.
@retval Others The driver failed to start the device.
**/
EFI_STATUS
EFIAPI
SdMmcPciHcDriverBindingStart (
IN EFI_DRIVER_BINDING_PROTOCOL *This,
IN EFI_HANDLE Controller,
IN EFI_DEVICE_PATH_PROTOCOL *RemainingDevicePath
)
{
EFI_STATUS Status;
SD_MMC_HC_PRIVATE_DATA *Private;
EFI_PCI_IO_PROTOCOL *PciIo;
UINT64 Supports;
UINT64 PciAttributes;
UINT8 SlotNum;
UINT8 FirstBar;
UINT8 Slot;
UINT8 Index;
CARD_TYPE_DETECT_ROUTINE *Routine;
UINT32 RoutineNum;
BOOLEAN MediaPresent;
BOOLEAN Support64BitDma;
DEBUG ((DEBUG_INFO, "SdMmcPciHcDriverBindingStart: Start\n"));
//
// Open PCI I/O Protocol and save pointer to open protocol
// in private data area.
//
PciIo = NULL;
Status = gBS->OpenProtocol (
Controller,
&gEfiPciIoProtocolGuid,
(VOID **)&PciIo,
This->DriverBindingHandle,
Controller,
EFI_OPEN_PROTOCOL_BY_DRIVER
);
if (EFI_ERROR (Status)) {
return Status;
}
//
// Enable the SD Host Controller MMIO space
//
Private = NULL;
Status = PciIo->Attributes (
PciIo,
EfiPciIoAttributeOperationGet,
0,
&PciAttributes
);
if (EFI_ERROR (Status)) {
goto Done;
}
Status = PciIo->Attributes (
PciIo,
EfiPciIoAttributeOperationSupported,
0,
&Supports
);
if (!EFI_ERROR (Status)) {
Supports &= (UINT64)EFI_PCI_DEVICE_ENABLE;
Status = PciIo->Attributes (
PciIo,
EfiPciIoAttributeOperationEnable,
Supports,
NULL
);
} else {
goto Done;
}
Private = AllocateCopyPool (sizeof (SD_MMC_HC_PRIVATE_DATA), &gSdMmcPciHcTemplate);
if (Private == NULL) {
Status = EFI_OUT_OF_RESOURCES;
goto Done;
}
Private->ControllerHandle = Controller;
Private->PciIo = PciIo;
Private->PciAttributes = PciAttributes;
InitializeListHead (&Private->Queue);
//
// Get SD/MMC Pci Host Controller Slot info
//
Status = SdMmcHcGetSlotInfo (PciIo, &FirstBar, &SlotNum);
if (EFI_ERROR (Status)) {
goto Done;
}
//
// Attempt to locate the singleton instance of the SD/MMC override protocol,
// which implements platform specific workarounds for non-standard SDHCI
// implementations.
//
if (mOverride == NULL) {
Status = gBS->LocateProtocol (
&gEdkiiSdMmcOverrideProtocolGuid,
NULL,
(VOID **)&mOverride
);
if (!EFI_ERROR (Status)) {
DEBUG ((
DEBUG_INFO,
"%a: found SD/MMC override protocol\n",
__FUNCTION__
));
}
}
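//
// Assume 64-bit DMA is supported until a slot's capabilities show otherwise.
//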
Support64BitDma = TRUE;
for (Slot = FirstBar; Slot < (FirstBar + SlotNum); Slot++) {
Private->Slot[Slot].Enable = TRUE;
//
// Get SD/MMC Pci Host Controller Version
//
Status = SdMmcHcGetControllerVersion (PciIo, Slot, &Private->ControllerVersion[Slot]);
if (EFI_ERROR (Status)) {
continue;
}
Status = SdMmcHcGetCapability (PciIo, Slot, &Private->Capability[Slot]);
if (EFI_ERROR (Status)) {
continue;
}
Private->BaseClkFreq[Slot] = Private->Capability[Slot].BaseClkFreq;
if (mOverride != NULL) {
if (mOverride->Capability != NULL) {
Status = mOverride->Capability (
Controller,
Slot,
&Private->Capability[Slot],
&Private->BaseClkFreq[Slot]
);
if (EFI_ERROR (Status)) {
DEBUG ((
DEBUG_WARN,
"%a: Failed to override capability - %r\n",
__FUNCTION__,
Status
));
continue;
}
}
if (mOverride->NotifyPhase != NULL) {
Status = mOverride->NotifyPhase (
Controller,
Slot,
EdkiiSdMmcGetOperatingParam,
(VOID *)&Private->Slot[Slot].OperatingParameters
);
if (EFI_ERROR (Status)) {
DEBUG ((DEBUG_WARN, "%a: Failed to get operating parameters, using defaults\n", __FUNCTION__));
}
}
}
DumpCapabilityReg (Slot, &Private->Capability[Slot]);
DEBUG ((
DEBUG_INFO,
"Slot[%d] Base Clock Frequency: %dMHz\n",
Slot,
Private->BaseClkFreq[Slot]
));
//
// If any of the slots does not support 64b system bus
// do not enable 64b DMA in the PCI layer.
//
if (((Private->ControllerVersion[Slot] == SD_MMC_HC_CTRL_VER_300) &&
(Private->Capability[Slot].SysBus64V3 == 0)) ||
((Private->ControllerVersion[Slot] == SD_MMC_HC_CTRL_VER_400) &&
(Private->Capability[Slot].SysBus64V3 == 0)) ||
((Private->ControllerVersion[Slot] >= SD_MMC_HC_CTRL_VER_410) &&
(Private->Capability[Slot].SysBus64V4 == 0)))
{
Support64BitDma = FALSE;
}
Status = SdMmcHcGetMaxCurrent (PciIo, Slot, &Private->MaxCurrent[Slot]);
if (EFI_ERROR (Status)) {
continue;
}
Private->Slot[Slot].SlotType = Private->Capability[Slot].SlotType;
if ((Private->Slot[Slot].SlotType != RemovableSlot) && (Private->Slot[Slot].SlotType != EmbeddedSlot)) {
DEBUG ((DEBUG_INFO, "SdMmcPciHcDxe doesn't support the slot type [%d]!!!\n", Private->Slot[Slot].SlotType));
continue;
}
//
// Reset the specified slot of the SD/MMC Pci Host Controller
//
Status = SdMmcHcReset (Private, Slot);
if (EFI_ERROR (Status)) {
continue;
}
//
// Check whether there is a SD/MMC card attached
//
if (Private->Slot[Slot].SlotType == RemovableSlot) {
Status = SdMmcHcCardDetect (PciIo, Slot, &MediaPresent);
if (EFI_ERROR (Status) && (Status != EFI_MEDIA_CHANGED)) {
continue;
} else if (!MediaPresent) {
DEBUG ((
DEBUG_INFO,
"SdMmcHcCardDetect: No device attached in Slot[%d]!!!\n",
Slot
));
continue;
}
}
Status = SdMmcHcInitHost (Private, Slot);
if (EFI_ERROR (Status)) {
continue;
}
Private->Slot[Slot].MediaPresent = TRUE;
Private->Slot[Slot].Initialized = TRUE;
RoutineNum = sizeof (mCardTypeDetectRoutineTable) / sizeof (CARD_TYPE_DETECT_ROUTINE);
for (Index = 0; Index < RoutineNum; Index++) {
Routine = &mCardTypeDetectRoutineTable[Index];
if (*Routine != NULL) {
Status = (*Routine)(Private, Slot);
if (!EFI_ERROR (Status)) {
break;
}
}
}
//
// This card doesn't get initialized correctly.
//
if (Index == RoutineNum) {
Private->Slot[Slot].Initialized = FALSE;
}
}
//
// Enable 64-bit DMA support in the PCI layer if this controller
// supports it.
//
if (Support64BitDma) {
Status = PciIo->Attributes (
PciIo,
EfiPciIoAttributeOperationEnable,
EFI_PCI_IO_ATTRIBUTE_DUAL_ADDRESS_CYCLE,
NULL
);
if (EFI_ERROR (Status)) {
DEBUG ((DEBUG_WARN, "SdMmcPciHcDriverBindingStart: failed to enable 64-bit DMA (%r)\n", Status));
}
}
//
// Start the asynchronous I/O monitor
//
Status = gBS->CreateEvent (
EVT_TIMER | EVT_NOTIFY_SIGNAL,
TPL_NOTIFY,
ProcessAsyncTaskList,
Private,
&Private->TimerEvent
);
if (EFI_ERROR (Status)) {
goto Done;
}
Status = gBS->SetTimer (Private->TimerEvent, TimerPeriodic, SD_MMC_HC_ASYNC_TIMER);
if (EFI_ERROR (Status)) {
goto Done;
}
//
// Start the Sd removable device connection enumeration
//
Status = gBS->CreateEvent (
EVT_TIMER | EVT_NOTIFY_SIGNAL,
TPL_CALLBACK,
SdMmcPciHcEnumerateDevice,
Private,
&Private->ConnectEvent
);
if (EFI_ERROR (Status)) {
goto Done;
}
Status = gBS->SetTimer (Private->ConnectEvent, TimerPeriodic, SD_MMC_HC_ENUM_TIMER);
if (EFI_ERROR (Status)) {
goto Done;
}
Status = gBS->InstallMultipleProtocolInterfaces (
&Controller,
&gEfiSdMmcPassThruProtocolGuid,
&(Private->PassThru),
NULL
);
DEBUG ((DEBUG_INFO, "SdMmcPciHcDriverBindingStart: %r End on %x\n", Status, Controller));
Done:
if (EFI_ERROR (Status)) {
if ((Private != NULL) && (Private->PciAttributes != 0)) {
//
// Restore original PCI attributes
//
PciIo->Attributes (
PciIo,
EfiPciIoAttributeOperationSet,
Private->PciAttributes,
NULL
);
}
gBS->CloseProtocol (
Controller,
&gEfiPciIoProtocolGuid,
This->DriverBindingHandle,
Controller
);
if ((Private != NULL) && (Private->TimerEvent != NULL)) {
gBS->CloseEvent (Private->TimerEvent);
}
if ((Private != NULL) && (Private->ConnectEvent != NULL)) {
gBS->CloseEvent (Private->ConnectEvent);
}
if (Private != NULL) {
FreePool (Private);
}
}
return Status;
}
/**
Stops a device controller or a bus controller.
The Stop() function is designed to be invoked from the EFI boot service DisconnectController().
As a result, much of the error checking on the parameters to Stop() has been moved
into this common boot service. It is legal to call Stop() from other locations,
but the following calling restrictions must be followed or the system behavior will not be deterministic.
1. ControllerHandle must be a valid EFI_HANDLE that was used on a previous call to this
same driver's Start() function.
2. The first NumberOfChildren handles of ChildHandleBuffer must all be a valid
EFI_HANDLE. In addition, all of these handles must have been created in this driver's
Start() function, and the Start() function must have called OpenProtocol() on
ControllerHandle with an Attribute of EFI_OPEN_PROTOCOL_BY_CHILD_CONTROLLER.
@param[in] This A pointer to the EFI_DRIVER_BINDING_PROTOCOL instance.
@param[in] ControllerHandle A handle to the device being stopped. The handle must
support a bus specific I/O protocol for the driver
to use to stop the device.
@param[in] NumberOfChildren The number of child device handles in ChildHandleBuffer.
@param[in] ChildHandleBuffer An array of child handles to be freed. May be NULL
if NumberOfChildren is 0.
@retval EFI_SUCCESS The device was stopped.
@retval EFI_DEVICE_ERROR The device could not be stopped due to a device error.
**/
EFI_STATUS
EFIAPI
SdMmcPciHcDriverBindingStop (
IN EFI_DRIVER_BINDING_PROTOCOL *This,
IN EFI_HANDLE Controller,
IN UINTN NumberOfChildren,
IN EFI_HANDLE *ChildHandleBuffer
)
{
EFI_STATUS Status;
EFI_SD_MMC_PASS_THRU_PROTOCOL *PassThru;
SD_MMC_HC_PRIVATE_DATA *Private;
EFI_PCI_IO_PROTOCOL *PciIo;
LIST_ENTRY *Link;
LIST_ENTRY *NextLink;
SD_MMC_HC_TRB *Trb;
DEBUG ((DEBUG_INFO, "SdMmcPciHcDriverBindingStop: Start\n"));
Status = gBS->OpenProtocol (
Controller,
&gEfiSdMmcPassThruProtocolGuid,
(VOID **)&PassThru,
This->DriverBindingHandle,
Controller,
EFI_OPEN_PROTOCOL_GET_PROTOCOL
);
if (EFI_ERROR (Status)) {
return Status;
}
Private = SD_MMC_HC_PRIVATE_FROM_THIS (PassThru);
//
// Close Non-Blocking timer and free Task list.
//
if (Private->TimerEvent != NULL) {
gBS->CloseEvent (Private->TimerEvent);
Private->TimerEvent = NULL;
}
if (Private->ConnectEvent != NULL) {
gBS->CloseEvent (Private->ConnectEvent);
Private->ConnectEvent = NULL;
}
//
// As the timer is closed, there is no need to use a TPL lock to
// protect the critical region "queue".
//
for (Link = GetFirstNode (&Private->Queue);
!IsNull (&Private->Queue, Link);
Link = NextLink)
{
NextLink = GetNextNode (&Private->Queue, Link);
RemoveEntryList (Link);
Trb = SD_MMC_HC_TRB_FROM_THIS (Link);
Trb->Packet->TransactionStatus = EFI_ABORTED;
gBS->SignalEvent (Trb->Event);
SdMmcFreeTrb (Trb);
}
//
// Uninstall the SD/MMC Pass Thru protocol from the device handle
//
Status = gBS->UninstallProtocolInterface (
Controller,
&gEfiSdMmcPassThruProtocolGuid,
&(Private->PassThru)
);
if (EFI_ERROR (Status)) {
return Status;
}
gBS->CloseProtocol (
Controller,
&gEfiPciIoProtocolGuid,
This->DriverBindingHandle,
Controller
);
//
// Restore original PCI attributes
//
PciIo = Private->PciIo;
Status = PciIo->Attributes (
PciIo,
EfiPciIoAttributeOperationSet,
Private->PciAttributes,
NULL
);
ASSERT_EFI_ERROR (Status);
FreePool (Private);
DEBUG ((DEBUG_INFO, "SdMmcPciHcDriverBindingStop: End with %r\n", Status));
return Status;
}
/**
Execute TRB synchronously.
@param[in] Private Pointer to driver private data.
@param[in] Trb Pointer to TRB to execute.
@retval EFI_SUCCESS TRB executed successfully.
@retval Other TRB failed.
**/
EFI_STATUS
SdMmcPassThruExecSyncTrb (
IN SD_MMC_HC_PRIVATE_DATA *Private,
IN SD_MMC_HC_TRB *Trb
)
{
EFI_STATUS Status;
EFI_TPL OldTpl;
//
// Wait until the async I/O list is empty before executing the sync I/O operation.
//
while (TRUE) {
OldTpl = gBS->RaiseTPL (TPL_NOTIFY);
if (IsListEmpty (&Private->Queue)) {
gBS->RestoreTPL (OldTpl);
break;
}
gBS->RestoreTPL (OldTpl);
}
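//
// Execute the TRB, retrying the whole sequence on CRC errors until the
// retry budget is exhausted.
//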
while (Trb->Retries) {
Status = SdMmcWaitTrbEnv (Private, Trb);
if (EFI_ERROR (Status)) {
return Status;
}
Status = SdMmcExecTrb (Private, Trb);
if (EFI_ERROR (Status)) {
return Status;
}
Status = SdMmcWaitTrbResult (Private, Trb);
if (Status == EFI_CRC_ERROR) {
Trb->Retries--;
} else {
return Status;
}
}
return Status;
}
/**
Sends SD command to an SD card that is attached to the SD controller.
The PassThru() function sends the SD command specified by Packet to the SD card
specified by Slot.
If Packet is successfully sent to the SD card, then EFI_SUCCESS is returned.
If a device error occurs while sending the Packet, then EFI_DEVICE_ERROR is returned.
If Slot is not in a valid range for the SD controller, then EFI_INVALID_PARAMETER
is returned.
If Packet defines a data command but both InDataBuffer and OutDataBuffer are NULL,
EFI_INVALID_PARAMETER is returned.
@param[in] This A pointer to the EFI_SD_MMC_PASS_THRU_PROTOCOL instance.
@param[in] Slot The slot number of the SD card to send the command to.
@param[in,out] Packet A pointer to the SD command data structure.
@param[in] Event If Event is NULL, blocking I/O is performed. If Event is
not NULL, then nonblocking I/O is performed, and Event
will be signaled when the Packet completes.
@retval EFI_SUCCESS The SD Command Packet was sent by the host.
@retval EFI_DEVICE_ERROR A device error occurred while attempting to send the SD
command Packet.
@retval EFI_INVALID_PARAMETER Packet, Slot, or the contents of the Packet is invalid.
@retval EFI_INVALID_PARAMETER Packet defines a data command but both InDataBuffer and
OutDataBuffer are NULL.
@retval EFI_NO_MEDIA SD Device not present in the Slot.
@retval EFI_UNSUPPORTED The command described by the SD Command Packet is not
supported by the host controller.
@retval EFI_BAD_BUFFER_SIZE The InTransferLength or OutTransferLength exceeds the
limit supported by the SD card (i.e. if the number of bytes
exceeds the last LBA).
**/
EFI_STATUS
EFIAPI
SdMmcPassThruPassThru (
IN EFI_SD_MMC_PASS_THRU_PROTOCOL *This,
IN UINT8 Slot,
IN OUT EFI_SD_MMC_PASS_THRU_COMMAND_PACKET *Packet,
IN EFI_EVENT Event OPTIONAL
)
{
EFI_STATUS Status;
SD_MMC_HC_PRIVATE_DATA *Private;
SD_MMC_HC_TRB *Trb;
if ((This == NULL) || (Packet == NULL)) {
return EFI_INVALID_PARAMETER;
}
if ((Packet->SdMmcCmdBlk == NULL) || (Packet->SdMmcStatusBlk == NULL)) {
return EFI_INVALID_PARAMETER;
}
if ((Packet->OutDataBuffer == NULL) && (Packet->OutTransferLength != 0)) {
return EFI_INVALID_PARAMETER;
}
if ((Packet->InDataBuffer == NULL) && (Packet->InTransferLength != 0)) {
return EFI_INVALID_PARAMETER;
}
Private = SD_MMC_HC_PRIVATE_FROM_THIS (This);
if (!Private->Slot[Slot].Enable) {
return EFI_INVALID_PARAMETER;
}
if (!Private->Slot[Slot].MediaPresent) {
return EFI_NO_MEDIA;
}
if (!Private->Slot[Slot].Initialized) {
return EFI_DEVICE_ERROR;
}
Trb = SdMmcCreateTrb (Private, Slot, Packet, Event);
if (Trb == NULL) {
return EFI_OUT_OF_RESOURCES;
}
//
// Immediately return for async I/O.
//
if (Event != NULL) {
return EFI_SUCCESS;
}
Status = SdMmcPassThruExecSyncTrb (Private, Trb);
SdMmcFreeTrb (Trb);
return Status;
}
/**
Used to retrieve next slot numbers supported by the SD controller. The function
returns information about all available slots (populated or not-populated).
The GetNextSlot() function retrieves the next slot number on an SD controller.
If on input Slot is 0xFF, then the slot number of the first slot on the SD controller
is returned.
If Slot is a slot number that was returned on a previous call to GetNextSlot(), then
the slot number of the next slot on the SD controller is returned.
If Slot is not 0xFF and Slot was not returned on a previous call to GetNextSlot(),
EFI_INVALID_PARAMETER is returned.
If Slot is the slot number of the last slot on the SD controller, then EFI_NOT_FOUND
is returned.
@param[in] This A pointer to the EFI_SD_MMC_PASS_THRU_PROTOCOL instance.
@param[in,out] Slot On input, a pointer to a slot number on the SD controller.
On output, a pointer to the next slot number on the SD controller.
An input value of 0xFF retrieves the first slot number on the SD
controller.
@retval EFI_SUCCESS The next slot number on the SD controller was returned in Slot.
@retval EFI_NOT_FOUND There are no more slots on this SD controller.
@retval EFI_INVALID_PARAMETER Slot is not 0xFF and Slot was not returned on a previous call
to GetNextSlot().
**/
EFI_STATUS
EFIAPI
SdMmcPassThruGetNextSlot (
IN EFI_SD_MMC_PASS_THRU_PROTOCOL *This,
IN OUT UINT8 *Slot
)
{
SD_MMC_HC_PRIVATE_DATA *Private;
UINT8 Index;
if ((This == NULL) || (Slot == NULL)) {
return EFI_INVALID_PARAMETER;
}
Private = SD_MMC_HC_PRIVATE_FROM_THIS (This);
if (*Slot == 0xFF) {
for (Index = 0; Index < SD_MMC_HC_MAX_SLOT; Index++) {
if (Private->Slot[Index].Enable) {
*Slot = Index;
Private->PreviousSlot = Index;
return EFI_SUCCESS;
}
}
return EFI_NOT_FOUND;
} else if (*Slot == Private->PreviousSlot) {
for (Index = *Slot + 1; Index < SD_MMC_HC_MAX_SLOT; Index++) {
if (Private->Slot[Index].Enable) {
*Slot = Index;
Private->PreviousSlot = Index;
return EFI_SUCCESS;
}
}
return EFI_NOT_FOUND;
} else {
return EFI_INVALID_PARAMETER;
}
}
/**
Used to allocate and build a device path node for an SD card on the SD controller.
The BuildDevicePath() function allocates and builds a single device node for the SD
card specified by Slot.
If the SD card specified by Slot is not present on the SD controller, then EFI_NOT_FOUND
is returned.
If DevicePath is NULL, then EFI_INVALID_PARAMETER is returned.
If there are not enough resources to allocate the device path node, then EFI_OUT_OF_RESOURCES
is returned.
Otherwise, DevicePath is allocated with the boot service AllocatePool(), the contents of
DevicePath are initialized to describe the SD card specified by Slot, and EFI_SUCCESS is
returned.
@param[in] This A pointer to the EFI_SD_MMC_PASS_THRU_PROTOCOL instance.
@param[in] Slot Specifies the slot number of the SD card for which a device
path node is to be allocated and built.
@param[in,out] DevicePath A pointer to a single device path node that describes the SD
card specified by Slot. This function is responsible for
allocating the buffer DevicePath with the boot service
AllocatePool(). It is the caller's responsibility to free
DevicePath when the caller is finished with DevicePath.
@retval EFI_SUCCESS The device path node that describes the SD card specified by
Slot was allocated and returned in DevicePath.
@retval EFI_NOT_FOUND The SD card specified by Slot does not exist on the SD controller.
@retval EFI_INVALID_PARAMETER DevicePath is NULL.
@retval EFI_OUT_OF_RESOURCES There are not enough resources to allocate DevicePath.
**/
EFI_STATUS
EFIAPI
SdMmcPassThruBuildDevicePath (
IN EFI_SD_MMC_PASS_THRU_PROTOCOL *This,
IN UINT8 Slot,
IN OUT EFI_DEVICE_PATH_PROTOCOL **DevicePath
)
{
SD_MMC_HC_PRIVATE_DATA *Private;
SD_DEVICE_PATH *SdNode;
EMMC_DEVICE_PATH *EmmcNode;
if ((This == NULL) || (DevicePath == NULL) || (Slot >= SD_MMC_HC_MAX_SLOT)) {
return EFI_INVALID_PARAMETER;
}
Private = SD_MMC_HC_PRIVATE_FROM_THIS (This);
if ((!Private->Slot[Slot].Enable) || (!Private->Slot[Slot].MediaPresent)) {
return EFI_NOT_FOUND;
}
if (Private->Slot[Slot].CardType == SdCardType) {
SdNode = AllocateCopyPool (sizeof (SD_DEVICE_PATH), &mSdDpTemplate);
if (SdNode == NULL) {
return EFI_OUT_OF_RESOURCES;
}
SdNode->SlotNumber = Slot;
*DevicePath = (EFI_DEVICE_PATH_PROTOCOL *)SdNode;
} else if (Private->Slot[Slot].CardType == EmmcCardType) {
EmmcNode = AllocateCopyPool (sizeof (EMMC_DEVICE_PATH), &mEmmcDpTemplate);
if (EmmcNode == NULL) {
return EFI_OUT_OF_RESOURCES;
}
EmmcNode->SlotNumber = Slot;
*DevicePath = (EFI_DEVICE_PATH_PROTOCOL *)EmmcNode;
} else {
//
// Currently we only support SD and EMMC device nodes.
//
return EFI_NOT_FOUND;
}
return EFI_SUCCESS;
}
/**
This function retrieves an SD card slot number based on the input device path.
The GetSlotNumber() function retrieves slot number for the SD card specified by
the DevicePath node. If DevicePath is NULL, EFI_INVALID_PARAMETER is returned.
If DevicePath is not a device path node type that the SD Pass Thru driver supports,
EFI_UNSUPPORTED is returned.
@param[in] This A pointer to the EFI_SD_MMC_PASS_THRU_PROTOCOL instance.
@param[in] DevicePath A pointer to the device path node that describes a SD
card on the SD controller.
@param[out] Slot On return, points to the slot number of an SD card on
the SD controller.
@retval EFI_SUCCESS SD card slot number is returned in Slot.
@retval EFI_INVALID_PARAMETER Slot or DevicePath is NULL.
@retval EFI_UNSUPPORTED DevicePath is not a device path node type that the SD
Pass Thru driver supports.
**/
EFI_STATUS
EFIAPI
SdMmcPassThruGetSlotNumber (
IN EFI_SD_MMC_PASS_THRU_PROTOCOL *This,
IN EFI_DEVICE_PATH_PROTOCOL *DevicePath,
OUT UINT8 *Slot
)
{
SD_MMC_HC_PRIVATE_DATA *Private;
SD_DEVICE_PATH *SdNode;
EMMC_DEVICE_PATH *EmmcNode;
UINT8 SlotNumber;
if ((This == NULL) || (DevicePath == NULL) || (Slot == NULL)) {
return EFI_INVALID_PARAMETER;
}
Private = SD_MMC_HC_PRIVATE_FROM_THIS (This);
//
// Check whether the DevicePath belongs to SD_DEVICE_PATH or EMMC_DEVICE_PATH
//
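// Note: SD_DEVICE_PATH and EMMC_DEVICE_PATH are each a device path header
// plus a single slot number byte, so their node lengths are identical and
// the two length comparisons below are equivalent.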
if ((DevicePath->Type != MESSAGING_DEVICE_PATH) ||
((DevicePath->SubType != MSG_SD_DP) &&
(DevicePath->SubType != MSG_EMMC_DP)) ||
(DevicePathNodeLength (DevicePath) != sizeof (SD_DEVICE_PATH)) ||
(DevicePathNodeLength (DevicePath) != sizeof (EMMC_DEVICE_PATH)))
{
return EFI_UNSUPPORTED;
}
if (DevicePath->SubType == MSG_SD_DP) {
SdNode = (SD_DEVICE_PATH *)DevicePath;
SlotNumber = SdNode->SlotNumber;
} else {
EmmcNode = (EMMC_DEVICE_PATH *)DevicePath;
SlotNumber = EmmcNode->SlotNumber;
}
if (SlotNumber >= SD_MMC_HC_MAX_SLOT) {
return EFI_NOT_FOUND;
}
if (Private->Slot[SlotNumber].Enable) {
*Slot = SlotNumber;
return EFI_SUCCESS;
} else {
return EFI_NOT_FOUND;
}
}
/**
Resets an SD card that is connected to the SD controller.
The ResetDevice() function resets the SD card specified by Slot.
If this SD controller does not support a device reset operation, EFI_UNSUPPORTED is
returned.
If Slot is not in a valid slot number for this SD controller, EFI_INVALID_PARAMETER
is returned.
If the device reset operation is completed, EFI_SUCCESS is returned.
@param[in] This A pointer to the EFI_SD_MMC_PASS_THRU_PROTOCOL instance.
@param[in] Slot Specifies the slot number of the SD card to be reset.
@retval EFI_SUCCESS The SD card specified by Slot was reset.
@retval EFI_UNSUPPORTED The SD controller does not support a device reset operation.
@retval EFI_INVALID_PARAMETER Slot number is invalid.
@retval EFI_NO_MEDIA SD Device not present in the Slot.
@retval EFI_DEVICE_ERROR The reset command failed due to a device error.
**/
EFI_STATUS
EFIAPI
SdMmcPassThruResetDevice (
IN EFI_SD_MMC_PASS_THRU_PROTOCOL *This,
IN UINT8 Slot
)
{
SD_MMC_HC_PRIVATE_DATA *Private;
LIST_ENTRY *Link;
LIST_ENTRY *NextLink;
SD_MMC_HC_TRB *Trb;
EFI_TPL OldTpl;
if (This == NULL) {
return EFI_INVALID_PARAMETER;
}
Private = SD_MMC_HC_PRIVATE_FROM_THIS (This);
if (!Private->Slot[Slot].Enable) {
return EFI_INVALID_PARAMETER;
}
if (!Private->Slot[Slot].MediaPresent) {
return EFI_NO_MEDIA;
}
if (!Private->Slot[Slot].Initialized) {
return EFI_DEVICE_ERROR;
}
//
// Free all async I/O requests in the queue
//
OldTpl = gBS->RaiseTPL (TPL_NOTIFY);
for (Link = GetFirstNode (&Private->Queue);
!IsNull (&Private->Queue, Link);
Link = NextLink)
{
NextLink = GetNextNode (&Private->Queue, Link);
RemoveEntryList (Link);
Trb = SD_MMC_HC_TRB_FROM_THIS (Link);
Trb->Packet->TransactionStatus = EFI_ABORTED;
gBS->SignalEvent (Trb->Event);
SdMmcFreeTrb (Trb);
}
gBS->RestoreTPL (OldTpl);
return EFI_SUCCESS;
}
| 1 | 17,688 | @aimanrosli23 Could you help to confirm if you do not revert the change in below commit: SHA-1: 643623147a1feaddd734ddd84604e1d8e9dcebee * MdeModulePkg/SdMmcPciHcDxe: Send SEND_STATUS at lower frequency | tianocore-edk2 | c |
@@ -76,8 +76,10 @@ public class AvroSchemaUtil {
return TypeUtil.visit(type, new TypeToSchema(names));
}
- public static Type convert(Schema schema) {
- return AvroSchemaVisitor.visit(schema, new SchemaToType(schema));
+ public static org.apache.iceberg.Schema convert(Schema schema) {
+ final Type type = AvroSchemaVisitor.visit(schema, new SchemaToType(schema));
+ final List<Types.NestedField> fields = type.asNestedType().asStructType().fields();
+ return new org.apache.iceberg.Schema(fields);
}
static boolean hasIds(Schema schema) { | 1 | /*
* Licensed to the Apache Software Foundation (ASF) under one
* or more contributor license agreements. See the NOTICE file
* distributed with this work for additional information
* regarding copyright ownership. The ASF licenses this file
* to you under the Apache License, Version 2.0 (the
* "License"); you may not use this file except in compliance
* with the License. You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing,
* software distributed under the License is distributed on an
* "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
* KIND, either express or implied. See the License for the
* specific language governing permissions and limitations
* under the License.
*/
package org.apache.iceberg.avro;
import com.google.common.base.Preconditions;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.google.common.collect.Lists;
import java.util.List;
import java.util.Map;
import java.util.Set;
import org.apache.avro.JsonProperties;
import org.apache.avro.LogicalType;
import org.apache.avro.LogicalTypes;
import org.apache.avro.Schema;
import org.apache.iceberg.mapping.MappedField;
import org.apache.iceberg.mapping.NameMapping;
import org.apache.iceberg.types.Type;
import org.apache.iceberg.types.TypeUtil;
import org.apache.iceberg.types.Types;
public class AvroSchemaUtil {
private AvroSchemaUtil() {}
// Original Iceberg field name corresponding to a sanitized Avro name
public static final String ICEBERG_FIELD_NAME_PROP = "iceberg-field-name";
public static final String FIELD_ID_PROP = "field-id";
public static final String KEY_ID_PROP = "key-id";
public static final String VALUE_ID_PROP = "value-id";
public static final String ELEMENT_ID_PROP = "element-id";
public static final String ADJUST_TO_UTC_PROP = "adjust-to-utc";
private static final Schema NULL = Schema.create(Schema.Type.NULL);
private static final Schema.Type MAP = Schema.Type.MAP;
private static final Schema.Type ARRAY = Schema.Type.ARRAY;
private static final Schema.Type UNION = Schema.Type.UNION;
private static final Schema.Type RECORD = Schema.Type.RECORD;
public static Schema convert(org.apache.iceberg.Schema schema,
String tableName) {
return convert(schema, ImmutableMap.of(schema.asStruct(), tableName));
}
public static Schema convert(org.apache.iceberg.Schema schema,
Map<Types.StructType, String> names) {
return TypeUtil.visit(schema, new TypeToSchema(names));
}
public static Schema convert(Type type) {
return convert(type, ImmutableMap.of());
}
public static Schema convert(Types.StructType type, String name) {
return convert(type, ImmutableMap.of(type, name));
}
public static Schema convert(Type type, Map<Types.StructType, String> names) {
return TypeUtil.visit(type, new TypeToSchema(names));
}
public static Type convert(Schema schema) {
return AvroSchemaVisitor.visit(schema, new SchemaToType(schema));
}
static boolean hasIds(Schema schema) {
return AvroCustomOrderSchemaVisitor.visit(schema, new HasIds());
}
public static Map<Type, Schema> convertTypes(Types.StructType type, String name) {
TypeToSchema converter = new TypeToSchema(ImmutableMap.of(type, name));
TypeUtil.visit(type, converter);
return ImmutableMap.copyOf(converter.getConversionMap());
}
public static Schema pruneColumns(Schema schema, Set<Integer> selectedIds, NameMapping nameMapping) {
return new PruneColumns(selectedIds, nameMapping).rootSchema(schema);
}
public static Schema buildAvroProjection(Schema schema, org.apache.iceberg.Schema expected,
Map<String, String> renames) {
return AvroCustomOrderSchemaVisitor.visit(schema, new BuildAvroProjection(expected, renames));
}
public static boolean isTimestamptz(Schema schema) {
LogicalType logicalType = schema.getLogicalType();
if (logicalType != null && logicalType instanceof LogicalTypes.TimestampMicros) {
// timestamptz is adjusted to UTC
Object value = schema.getObjectProp(ADJUST_TO_UTC_PROP);
if (value instanceof Boolean) {
return (Boolean) value;
} else if (value instanceof String) {
return Boolean.parseBoolean((String) value);
}
}
return false;
}
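// An option schema is a union of exactly two branches, one of which is null.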
static boolean isOptionSchema(Schema schema) {
if (schema.getType() == UNION && schema.getTypes().size() == 2) {
if (schema.getTypes().get(0).getType() == Schema.Type.NULL) {
return true;
} else if (schema.getTypes().get(1).getType() == Schema.Type.NULL) {
return true;
}
}
return false;
}
static Schema toOption(Schema schema) {
if (schema.getType() == UNION) {
Preconditions.checkArgument(isOptionSchema(schema),
"Union schemas are not supported: %s", schema);
return schema;
} else {
return Schema.createUnion(NULL, schema);
}
}
static Schema fromOption(Schema schema) {
Preconditions.checkArgument(schema.getType() == UNION,
"Expected union schema but was passed: %s", schema);
Preconditions.checkArgument(schema.getTypes().size() == 2,
"Expected optional schema, but was passed: %s", schema);
if (schema.getTypes().get(0).getType() == Schema.Type.NULL) {
return schema.getTypes().get(1);
} else {
return schema.getTypes().get(0);
}
}
static Schema fromOptions(List<Schema> options) {
Preconditions.checkArgument(options.size() == 2,
"Expected two schemas, but was passed: %s options", options.size());
if (options.get(0).getType() == Schema.Type.NULL) {
return options.get(1);
} else {
return options.get(0);
}
}
static boolean isKeyValueSchema(Schema schema) {
return schema.getType() == RECORD && schema.getFields().size() == 2;
}
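// Builds the array-of-key/value-records representation (a logical map) that
// Iceberg uses for maps that cannot be stored as Avro's string-keyed MAP type;
// the key and value field ids are attached as Avro field props.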
static Schema createMap(int keyId, Schema keySchema,
int valueId, Schema valueSchema) {
String keyValueName = "k" + keyId + "_v" + valueId;
Schema.Field keyField = new Schema.Field("key", keySchema, null, (Object) null);
keyField.addProp(FIELD_ID_PROP, keyId);
Schema.Field valueField = new Schema.Field("value", valueSchema, null,
isOptionSchema(valueSchema) ? JsonProperties.NULL_VALUE : null);
valueField.addProp(FIELD_ID_PROP, valueId);
return LogicalMap.get().addToSchema(Schema.createArray(Schema.createRecord(
keyValueName, null, null, false, ImmutableList.of(keyField, valueField))));
}
static Schema createProjectionMap(String recordName,
int keyId, String keyName, Schema keySchema,
int valueId, String valueName, Schema valueSchema) {
String keyValueName = "k" + keyId + "_v" + valueId;
Schema.Field keyField = new Schema.Field("key", keySchema, null, (Object) null);
if (!"key".equals(keyName)) {
keyField.addAlias(keyName);
}
keyField.addProp(FIELD_ID_PROP, keyId);
Schema.Field valueField = new Schema.Field("value", valueSchema, null,
isOptionSchema(valueSchema) ? JsonProperties.NULL_VALUE : null);
valueField.addProp(FIELD_ID_PROP, valueId);
if (!"value".equals(valueName)) {
valueField.addAlias(valueName);
}
Schema keyValueRecord = Schema.createRecord(
keyValueName, null, null, false, ImmutableList.of(keyField, valueField));
if (!keyValueName.equals(recordName)) {
keyValueRecord.addAlias(recordName);
}
return LogicalMap.get().addToSchema(Schema.createArray(keyValueRecord));
}
private static Integer getId(Schema schema, String propertyName) {
Integer id = getId(schema, propertyName, null, null);
Preconditions.checkNotNull(id, "Missing expected '%s' property", propertyName);
return id;
}
private static Integer getId(Schema schema, String propertyName, NameMapping nameMapping, List<String> names) {
if (schema.getType() == UNION) {
return getId(fromOption(schema), propertyName, nameMapping, names);
}
Object id = schema.getObjectProp(propertyName);
if (id != null) {
return toInt(id);
} else if (nameMapping != null) {
MappedField mappedField = nameMapping.find(names);
if (mappedField != null) {
return mappedField.id();
}
}
return null;
}
static boolean hasProperty(Schema schema, String propertyName) {
if (schema.getType() == UNION) {
return hasProperty(fromOption(schema), propertyName);
}
return schema.getObjectProp(propertyName) != null;
}
public static int getKeyId(Schema schema) {
Preconditions.checkArgument(schema.getType() == MAP,
"Cannot get map key id for non-map schema: %s", schema);
return getId(schema, KEY_ID_PROP);
}
static Integer getKeyId(Schema schema, NameMapping nameMapping, Iterable<String> parentFieldNames) {
Preconditions.checkArgument(schema.getType() == MAP,
"Cannot get map key id for non-map schema: %s", schema);
List<String> names = Lists.newArrayList(parentFieldNames);
names.add("key");
return getId(schema, KEY_ID_PROP, nameMapping, names);
}
public static int getValueId(Schema schema) {
Preconditions.checkArgument(schema.getType() == MAP,
"Cannot get map value id for non-map schema: %s", schema);
return getId(schema, VALUE_ID_PROP);
}
static Integer getValueId(Schema schema, NameMapping nameMapping, Iterable<String> parentFieldNames) {
Preconditions.checkArgument(schema.getType() == MAP,
"Cannot get map value id for non-map schema: %s", schema);
List<String> names = Lists.newArrayList(parentFieldNames);
names.add("value");
return getId(schema, VALUE_ID_PROP, nameMapping, names);
}
public static int getElementId(Schema schema) {
Preconditions.checkArgument(schema.getType() == ARRAY,
"Cannot get array element id for non-array schema: %s", schema);
return getId(schema, ELEMENT_ID_PROP);
}
static Integer getElementId(Schema schema, NameMapping nameMapping, Iterable<String> parentFieldNames) {
Preconditions.checkArgument(schema.getType() == ARRAY,
"Cannot get array element id for non-array schema: %s", schema);
List<String> names = Lists.newArrayList(parentFieldNames);
names.add("element");
return getId(schema, ELEMENT_ID_PROP, nameMapping, names);
}
public static int getFieldId(Schema.Field field) {
Integer id = getFieldId(field, null, null);
Preconditions.checkNotNull(id, "Missing expected '%s' property", FIELD_ID_PROP);
return id;
}
static Integer getFieldId(Schema.Field field, NameMapping nameMapping, Iterable<String> parentFieldNames) {
Object id = field.getObjectProp(FIELD_ID_PROP);
if (id != null) {
return toInt(id);
} else if (nameMapping != null) {
List<String> names = Lists.newArrayList(parentFieldNames);
names.add(field.name());
MappedField mappedField = nameMapping.find(names);
if (mappedField != null) {
return mappedField.id();
}
}
return null;
}
public static boolean hasFieldId(Schema.Field field) {
return field.getObjectProp(FIELD_ID_PROP) != null;
}
private static int toInt(Object value) {
if (value instanceof Number) {
return ((Number) value).intValue();
} else if (value instanceof String) {
return Integer.parseInt((String) value);
}
throw new UnsupportedOperationException("Cannot coerce value to int: " + value);
}
static Schema copyRecord(Schema record, List<Schema.Field> newFields, String newName) {
Schema copy;
if (newName != null) {
copy = Schema.createRecord(newName, record.getDoc(), null, record.isError(), newFields);
// the namespace is defaulted to the record's namespace if it is null, which causes renames
// without the namespace to fail. using "" instead of null changes this behavior to match the
// original schema.
copy.addAlias(record.getName(), record.getNamespace() == null ? "" : record.getNamespace());
} else {
copy = Schema.createRecord(record.getName(),
record.getDoc(), record.getNamespace(), record.isError(), newFields);
}
for (Map.Entry<String, Object> prop : record.getObjectProps().entrySet()) {
copy.addProp(prop.getKey(), prop.getValue());
}
return copy;
}
static Schema.Field copyField(Schema.Field field, Schema newSchema, String newName) {
Schema.Field copy = new Schema.Field(newName,
newSchema, field.doc(), field.defaultVal(), field.order());
for (Map.Entry<String, Object> prop : field.getObjectProps().entrySet()) {
copy.addProp(prop.getKey(), prop.getValue());
}
if (!newName.equals(field.name())) {
copy.addAlias(field.name());
}
return copy;
}
public static String makeCompatibleName(String name) {
if (!validAvroName(name)) {
return sanitize(name);
}
return name;
}
static boolean validAvroName(String name) {
int length = name.length();
Preconditions.checkArgument(length > 0, "Empty name");
char first = name.charAt(0);
if (!(Character.isLetter(first) || first == '_')) {
return false;
}
for (int i = 1; i < length; i++) {
char character = name.charAt(i);
if (!(Character.isLetterOrDigit(character) || character == '_')) {
return false;
}
}
return true;
}
static String sanitize(String name) {
int length = name.length();
StringBuilder sb = new StringBuilder(name.length());
char first = name.charAt(0);
if (!(Character.isLetter(first) || first == '_')) {
sb.append(sanitize(first));
} else {
sb.append(first);
}
for (int i = 1; i < length; i++) {
char character = name.charAt(i);
if (!(Character.isLetterOrDigit(character) || character == '_')) {
sb.append(sanitize(character));
} else {
sb.append(character);
}
}
return sb.toString();
}
private static String sanitize(char character) {
if (Character.isDigit(character)) {
return "_" + character;
}
return "_x" + Integer.toHexString(character).toUpperCase();
}
}
| 1 | 16,319 | Can we do this without a breaking API change? What about adding a different name to convert directly to a Schema? | apache-iceberg | java |
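A hypothetical sketch of the alternative the reviewer floats: keep the existing method untouched and add a differently named entry point that converts directly to an Avro Schema, so current callers keep compiling. The name `projectToSchema` is assumed for illustration and does not come from the PR.

  // Hypothetical non-breaking addition (name assumed): a separate method that
  // converts directly to a Schema while the existing signature stays as-is.
  public static Schema projectToSchema(Schema schema, org.apache.iceberg.Schema expected,
                                       Map<String, String> renames) {
    return AvroCustomOrderSchemaVisitor.visit(schema, new BuildAvroProjection(expected, renames));
  }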
@@ -1,7 +1,8 @@
'use strict';
-var MongoClient = require('../../').MongoClient,
- expect = require('chai').expect;
+const Promise = require('bluebird');
+const MongoClient = require('../../').MongoClient;
+const expect = require('chai').expect;
function connectToDb(url, db, options, callback) {
if (typeof options === 'function') { | 1 | 'use strict';
var MongoClient = require('../../').MongoClient,
expect = require('chai').expect;
function connectToDb(url, db, options, callback) {
if (typeof options === 'function') {
callback = options;
options = {};
}
MongoClient.connect(url, options || {}, function(err, client) {
if (err) return callback(err);
callback(null, client.db(db), client);
});
}
function setupDatabase(configuration, dbsToClean) {
dbsToClean = Array.isArray(dbsToClean) ? dbsToClean : [];
var configDbName = configuration.db;
var client = configuration.newClient(configuration.writeConcernMax(), {
poolSize: 1
});
dbsToClean.push(configDbName);
return client.connect().then(function() {
var cleanPromises = [];
dbsToClean.forEach(function(dbName) {
var cleanPromise = client
.db(dbName)
.command({
dropAllUsersFromDatabase: 1,
writeConcern: { w: 1 }
})
.then(function() {
return client.db(dbName).dropDatabase();
});
cleanPromises.push(cleanPromise);
});
return Promise.all(cleanPromises);
});
}
var assert = {
equal: function(a, b) {
expect(a).to.equal(b);
},
deepEqual: function(a, b) {
expect(a).to.eql(b);
},
strictEqual: function(a, b) {
    expect(a).to.equal(b);
},
notEqual: function(a, b) {
expect(a).to.not.equal(b);
},
ok: function(a) {
expect(a).to.be.ok;
},
throws: function(func) {
expect(func).to.throw;
}
};
var delay = function(timeout) {
return new Promise(function(resolve) {
setTimeout(function() {
resolve();
}, timeout);
});
};
module.exports = {
connectToDb: connectToDb,
setupDatabase: setupDatabase,
assert: assert,
delay: delay
};
| 1 | 14,229 | Why do we need to pull bluebird in here? | mongodb-node-mongodb-native | js |
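A note on the reviewer's question: the only Promise usage in this helper file is the `new Promise(...)` constructor in `delay` and `Promise.all` in `setupDatabase`, both provided by the built-in global Promise on supported Node.js versions, so the file appears to work without bluebird. A minimal sketch under that assumption:

// Sketch: the same `delay` helper on the native global Promise, i.e. with the
// `const Promise = require('bluebird');` line from the patch above removed.
const delay = function(timeout) {
  return new Promise(function(resolve) {
    setTimeout(resolve, timeout);
  });
};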
@@ -121,9 +121,10 @@ public class FlowPreparer extends AbstractFlowPreparer {
(flowPrepCompletionTime - criticalSectionStartTime) / 1000,
flow.getExecutionId(), execDir.getPath());
} catch (final Exception ex) {
- FileIOUtils.deleteDirectorySilently(tempDir);
LOGGER.error("Error in preparing flow execution {}", flow.getExecutionId(), ex);
throw new ExecutorManagerException(ex);
+ } finally {
+ FileIOUtils.deleteDirectorySilently(tempDir);
}
}
| 1 | /*
* Copyright 2017 LinkedIn Corp.
*
* Licensed under the Apache License, Version 2.0 (the "License"); you may not
* use this file except in compliance with the License. You may obtain a copy of
* the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
* WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
* License for the specific language governing permissions and limitations under
* the License.
*
*/
package azkaban.execapp;
import azkaban.execapp.metric.ProjectCacheHitRatio;
import azkaban.executor.ExecutableFlow;
import azkaban.executor.ExecutorManagerException;
import azkaban.storage.ProjectStorageManager;
import azkaban.utils.DependencyTransferManager;
import azkaban.utils.FileIOUtils;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Preconditions;
import java.io.File;
import java.io.IOException;
import java.nio.file.Files;
import java.nio.file.Path;
import java.nio.file.Paths;
import java.nio.file.attribute.FileTime;
import java.util.Optional;
import org.slf4j.Logger;
import org.slf4j.LoggerFactory;
/**
 * FlowPreparer implementation for the ExecutorServer. It enables the project cache for
* optimal performance.
*/
public class FlowPreparer extends AbstractFlowPreparer {
private static final Logger LOGGER = LoggerFactory.getLogger(FlowPreparer.class);
// TODO spyne: move to config class
private final File executionsDir;
// TODO spyne: move to config class
private final File projectCacheDir;
// Null if cache clean-up is disabled
private final Optional<ProjectCacheCleaner> projectCacheCleaner;
private final ProjectCacheHitRatio projectCacheHitRatio;
FlowPreparer(final ProjectStorageManager projectStorageManager,
final DependencyTransferManager dependencyTransferManager, final File projectsDir,
final ProjectCacheCleaner cleaner, final ProjectCacheHitRatio projectCacheHitRatio,
final File executionsDir) {
super(projectStorageManager, dependencyTransferManager);
Preconditions.checkNotNull(executionsDir);
Preconditions.checkNotNull(projectsDir);
Preconditions.checkNotNull(projectCacheHitRatio);
Preconditions.checkArgument(projectsDir.exists());
Preconditions.checkArgument(executionsDir.exists());
this.executionsDir = executionsDir;
this.projectCacheDir = projectsDir;
this.projectCacheCleaner = Optional.ofNullable(cleaner);
this.projectCacheHitRatio = projectCacheHitRatio;
}
/**
* Prepare the flow directory for execution.
*
* @param flow Executable Flow instance.
*/
@Override
public void setup(final ExecutableFlow flow) throws ExecutorManagerException {
File tempDir = null;
try {
final ProjectDirectoryMetadata project = new ProjectDirectoryMetadata(
flow.getProjectId(),
flow.getVersion());
final long flowPrepStartTime = System.currentTimeMillis();
tempDir = downloadProjectIfNotExists(project, flow.getExecutionId());
LOGGER.info("Project is setup for execution {}", flow.getExecutionId());
// With synchronization, only one thread is allowed to proceed to avoid complicated race
// conditions which could arise when multiple threads are downloading/deleting/hard-linking
      // the same project. But it doesn't prevent multiple executor processes from interfering with
      // each other and triggering race conditions. So it's important to operationally make sure that only
// one executor process is setting up flow execution against the shared project directory.
long criticalSectionStartTime = -1;
File execDir = null;
synchronized (this) {
LOGGER.info("Setting up execution dir for {}", flow.getExecutionId());
criticalSectionStartTime = System.currentTimeMillis();
if (!project.getInstalledDir().exists() && tempDir != null) {
// If new project is downloaded and project dir cache clean-up feature is enabled, then
// perform clean-up if size of all project dirs exceeds the cache size.
this.projectCacheCleaner.ifPresent(cacheCleaner -> cacheCleaner
.deleteProjectDirsIfNecessary(project.getDirSizeInByte()));
// Rename temp dir to a proper project directory name.
Files.move(tempDir.toPath(), project.getInstalledDir().toPath());
}
final long start = System.currentTimeMillis();
execDir = setupExecutionDir(project.getInstalledDir().toPath(), flow);
final long end = System.currentTimeMillis();
LOGGER.info("Setting up execution dir {} took {} sec(s)", execDir, (end - start) / 1000);
}
final long flowPrepCompletionTime = System.currentTimeMillis();
LOGGER.info("Flow preparation completed in {} sec(s), out of which {} sec(s) was spent inside "
+ "critical section. [execid: {}, path: {}]",
(flowPrepCompletionTime - flowPrepStartTime) / 1000,
(flowPrepCompletionTime - criticalSectionStartTime) / 1000,
flow.getExecutionId(), execDir.getPath());
} catch (final Exception ex) {
FileIOUtils.deleteDirectorySilently(tempDir);
LOGGER.error("Error in preparing flow execution {}", flow.getExecutionId(), ex);
throw new ExecutorManagerException(ex);
}
}
@Override
protected File setupExecutionDir(final Path dir, final ExecutableFlow flow)
throws ExecutorManagerException {
File execDir = null;
try {
execDir = createExecDir(flow);
// Create hardlinks from the project
FileIOUtils.createDeepHardlink(dir.toFile(), execDir);
return execDir;
} catch (final Exception ex) {
FileIOUtils.deleteDirectorySilently(execDir);
throw new ExecutorManagerException(ex);
}
}
/**
* Update last modified time of the file if it exists.
*
* @param path path to the target file
*/
@VisibleForTesting
void updateLastModifiedTime(final Path path) {
try {
Files.setLastModifiedTime(path, FileTime.fromMillis(System.currentTimeMillis()));
} catch (final IOException ex) {
LOGGER.warn("Error when updating last modified time for {}", path, ex);
}
}
/**
* @return the project directory name of a project
*/
private String generateProjectDirName(final ProjectDirectoryMetadata proj) {
return String.valueOf(proj.getProjectId()) + "." + String.valueOf(proj.getVersion());
}
private File createTempDir(final ProjectDirectoryMetadata proj) {
final String projectDir = generateProjectDirName(proj);
final File tempDir = new File(this.projectCacheDir,
"_temp." + projectDir + "." + System.currentTimeMillis());
tempDir.mkdirs();
return tempDir;
}
/**
* Download project zip and unzip it if not exists locally.
*
* @param proj project to download
* @param execId execution id number
* @return the temp dir where the new project is downloaded to, null if no project is downloaded.
* @throws IOException if downloading or unzipping fails.
*/
@VisibleForTesting
File downloadProjectIfNotExists(final ProjectDirectoryMetadata proj, final int execId)
throws IOException {
final String projectDir = generateProjectDirName(proj);
if (proj.getInstalledDir() == null) {
proj.setInstalledDir(new File(this.projectCacheDir, projectDir));
}
// If directory exists, assume it's prepared and skip.
if (proj.getInstalledDir().exists()) {
LOGGER.info("Project {} already cached. Skipping download. ExecId: {}", proj, execId);
// Hit the local cache.
this.projectCacheHitRatio.markHit();
      // Update the last modified time of the file that records the project dir size whenever
      // the project is accessed. This timestamp is used to determine the least recently
      // used projects when performing project directory clean-up.
updateLastModifiedTime(
Paths.get(proj.getInstalledDir().getPath(), PROJECT_DIR_SIZE_FILE_NAME));
return null;
}
this.projectCacheHitRatio.markMiss();
// Download project to a temp dir if not exists in local cache.
final File tempDir = createTempDir(proj);
downloadAndUnzipProject(proj, execId, tempDir);
return tempDir;
}
private File createExecDir(final ExecutableFlow flow) {
final int execId = flow.getExecutionId();
final File execDir = new File(this.executionsDir, String.valueOf(execId));
flow.setExecutionPath(execDir.getPath());
execDir.mkdirs();
return execDir;
}
public void shutdown() {
if (projectCacheCleaner.isPresent()) {
this.projectCacheCleaner.get().shutdown();
}
}
}
 | 1 | 22,120 | If *two* threads successfully download the same project into two different temp dirs, and one of them successfully renames one to its final destination, the `if` on line 102 will be false _in the other thread_, and it would leave its temp dir behind. | azkaban-azkaban | java
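A minimal Java sketch of the interleaving the reviewer describes; the thread labels and the simplified bodies are illustrative, while the names match `FlowPreparer.setup` above:

// Threads A and B both miss the cache and download into distinct temp dirs.
File tempDir = downloadProjectIfNotExists(project, flow.getExecutionId()); // A: tempDirA, B: tempDirB
synchronized (this) {
  if (!project.getInstalledDir().exists() && tempDir != null) {
    // Only the first thread through takes this branch and moves its dir; for the
    // second thread the `if` is false and its own temp dir would stay on disk.
    Files.move(tempDir.toPath(), project.getInstalledDir().toPath());
  }
}
// Unconditional cleanup (what the patch's `finally` achieves): removes the losing
// thread's leftover temp dir, and is a silent no-op after a successful move.
FileIOUtils.deleteDirectorySilently(tempDir);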
@@ -37,6 +37,7 @@ struct pool_entry_t {
h2o_socket_export_t sockinfo;
h2o_socketpool_target_t *target;
h2o_linklist_t link;
+ h2o_linklist_t target_link;
uint64_t added_at;
};
| 1 | /*
* Copyright (c) 2014-2016 DeNA Co., Ltd., Kazuho Oku
*
* Permission is hereby granted, free of charge, to any person obtaining a copy
* of this software and associated documentation files (the "Software"), to
* deal in the Software without restriction, including without limitation the
* rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
* sell copies of the Software, and to permit persons to whom the Software is
* furnished to do so, subject to the following conditions:
*
* The above copyright notice and this permission notice shall be included in
* all copies or substantial portions of the Software.
*
* THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
* IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
* FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
* AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
* LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
* FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
* IN THE SOFTWARE.
*/
#include <assert.h>
#include <errno.h>
#include <netdb.h>
#include <stdlib.h>
#include <sys/socket.h>
#include <sys/types.h>
#include <sys/un.h>
#include <netinet/in.h>
#include "h2o/hostinfo.h"
#include "h2o/linklist.h"
#include "h2o/socketpool.h"
#include "h2o/string_.h"
#include "h2o/timeout.h"
struct pool_entry_t {
h2o_socket_export_t sockinfo;
h2o_socketpool_target_t *target;
h2o_linklist_t link;
uint64_t added_at;
};
struct st_h2o_socketpool_connect_request_t {
void *data;
h2o_socketpool_connect_cb cb;
h2o_socketpool_t *pool;
h2o_loop_t *loop;
h2o_hostinfo_getaddr_req_t *getaddr_req;
h2o_socket_t *sock;
struct {
h2o_multithread_receiver_t *getaddr_receiver;
h2o_socketpool_target_vector_t *targets;
size_t selected;
int *tried;
size_t try_count;
} lb;
};
struct on_close_data_t {
h2o_socketpool_t *pool;
h2o_socketpool_target_t *target;
};
struct round_robin_t {
size_t next_pos;
pthread_mutex_t mutex;
};
static void start_connect(h2o_socketpool_connect_request_t *req, struct sockaddr *addr, socklen_t addrlen);
static void on_getaddr(h2o_hostinfo_getaddr_req_t *getaddr_req, const char *errstr, struct addrinfo *res, void *_req);
static void destroy_detached(struct pool_entry_t *entry)
{
h2o_socket_dispose_export(&entry->sockinfo);
free(entry);
}
static void destroy_attached(struct pool_entry_t *entry)
{
h2o_linklist_unlink(&entry->link);
destroy_detached(entry);
}
static void destroy_expired(h2o_socketpool_t *pool)
{
/* caller should lock the mutex */
uint64_t expire_before = h2o_now(pool->_interval_cb.loop) - pool->timeout;
while (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
struct pool_entry_t *entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
if (entry->added_at > expire_before)
break;
destroy_attached(entry);
__sync_sub_and_fetch(&pool->_shared.count, 1);
}
}
static void on_timeout(h2o_timeout_entry_t *timeout_entry)
{
/* FIXME decrease the frequency of this function being called; the expiration
     * check can be (should be) performed in the `connect` function as well
*/
h2o_socketpool_t *pool = H2O_STRUCT_FROM_MEMBER(h2o_socketpool_t, _interval_cb.entry, timeout_entry);
if (pthread_mutex_trylock(&pool->_shared.mutex) == 0) {
destroy_expired(pool);
pthread_mutex_unlock(&pool->_shared.mutex);
}
h2o_timeout_link(pool->_interval_cb.loop, &pool->_interval_cb.timeout, &pool->_interval_cb.entry);
}
static void common_init(h2o_socketpool_t *pool, h2o_socketpool_target_vector_t targets, size_t capacity,
h2o_socketpool_lb_initializer lb_init, h2o_socketpool_lb_selector lb_selector,
h2o_socketpool_lb_dispose_cb lb_dispose)
{
memset(pool, 0, sizeof(*pool));
pool->capacity = capacity;
pool->timeout = UINT64_MAX;
pthread_mutex_init(&pool->_shared.mutex, NULL);
h2o_linklist_init_anchor(&pool->_shared.sockets);
memcpy(&pool->targets, &targets, sizeof(targets));
lb_init(&pool->targets, &pool->_lb.data);
pool->_lb.selector = lb_selector;
pool->_lb.dispose = lb_dispose;
}
static void lb_rr_init(h2o_socketpool_target_vector_t *targets, void **data)
{
struct round_robin_t *self = h2o_mem_alloc(sizeof(*self));
self->next_pos = 0;
pthread_mutex_init(&self->mutex, NULL);
*data = self;
}
static size_t lb_rr_selector(h2o_socketpool_target_vector_t *targets, void *_data, int *tried)
{
size_t i;
size_t result = 0;
struct round_robin_t *self = _data;
pthread_mutex_lock(&self->mutex);
for (i = 0; i < targets->size; i++) {
if (!tried[self->next_pos]) {
result = self->next_pos;
break;
}
self->next_pos++;
self->next_pos %= targets->size;
}
assert(i < targets->size);
self->next_pos++;
self->next_pos %= targets->size;
pthread_mutex_unlock(&self->mutex);
return result;
}
static void lb_rr_dispose(void *data)
{
struct round_robin_t *self = data;
pthread_mutex_destroy(&self->mutex);
free(data);
}
void h2o_socketpool_init_target_by_address(h2o_socketpool_target_t *target, struct sockaddr *sa, socklen_t salen, int is_ssl,
h2o_url_t *url)
{
char host[NI_MAXHOST];
size_t host_len;
assert(salen <= sizeof(target->peer.sockaddr.bytes));
if ((host_len = h2o_socket_getnumerichost(sa, salen, host)) == SIZE_MAX) {
if (sa->sa_family != AF_UNIX)
h2o_fatal("failed to convert a non-unix socket address to a numerical representation");
/* use the sockaddr_un::sun_path as the SNI indicator (is that the right thing to do?) */
strcpy(host, ((struct sockaddr_un *)sa)->sun_path);
host_len = strlen(host);
}
target->is_ssl = is_ssl;
target->type = H2O_SOCKETPOOL_TYPE_SOCKADDR;
target->peer.host = h2o_strdup(NULL, host, host_len);
memcpy(&target->peer.sockaddr.bytes, sa, salen);
target->peer.sockaddr.len = salen;
if (url != NULL) {
target->url = h2o_mem_alloc(sizeof(*target->url));
h2o_url_copy(NULL, target->url, url);
} else {
target->url = NULL;
}
}
void h2o_socketpool_init_by_address(h2o_socketpool_t *pool, struct sockaddr *sa, socklen_t salen, int is_ssl, size_t capacity)
{
h2o_socketpool_target_vector_t targets = {};
h2o_vector_reserve(NULL, &targets, 1);
h2o_socketpool_init_target_by_address(&targets.entries[0], sa, salen, is_ssl, NULL);
targets.size = 1;
common_init(pool, targets, capacity, lb_rr_init, lb_rr_selector, lb_rr_dispose);
}
void h2o_socketpool_init_target_by_hostport(h2o_socketpool_target_t *target, h2o_iovec_t host, uint16_t port, int is_ssl,
h2o_url_t *url)
{
struct sockaddr_in sin;
memset(&sin, 0, sizeof(sin));
if (h2o_hostinfo_aton(host, &sin.sin_addr) == 0) {
sin.sin_family = AF_INET;
sin.sin_port = htons(port);
h2o_socketpool_init_target_by_address(target, (void *)&sin, sizeof(sin), is_ssl, url);
return;
}
target->is_ssl = is_ssl;
target->type = H2O_SOCKETPOOL_TYPE_NAMED;
target->peer.host = h2o_strdup(NULL, host.base, host.len);
target->peer.named_serv.base = h2o_mem_alloc(sizeof(H2O_UINT16_LONGEST_STR));
target->peer.named_serv.len = sprintf(target->peer.named_serv.base, "%u", (unsigned)port);
if (url != NULL) {
target->url = h2o_mem_alloc(sizeof(*target->url));
h2o_url_copy(NULL, target->url, url);
} else {
target->url = NULL;
}
}
void h2o_socketpool_init_by_hostport(h2o_socketpool_t *pool, h2o_iovec_t host, uint16_t port, int is_ssl, size_t capacity)
{
h2o_socketpool_target_vector_t targets = {};
h2o_vector_reserve(NULL, &targets, 1);
h2o_socketpool_init_target_by_hostport(&targets.entries[0], host, port, is_ssl, NULL);
targets.size = 1;
common_init(pool, targets, capacity, lb_rr_init, lb_rr_selector, lb_rr_dispose);
}
void h2o_socketpool_init_by_targets(h2o_socketpool_t *pool, h2o_socketpool_target_vector_t targets, size_t capacity)
{
assert(targets.size > 0);
common_init(pool, targets, capacity, lb_rr_init, lb_rr_selector, lb_rr_dispose);
}
void h2o_socketpool_dispose(h2o_socketpool_t *pool)
{
size_t i;
pthread_mutex_lock(&pool->_shared.mutex);
while (!h2o_linklist_is_empty(&pool->_shared.sockets)) {
struct pool_entry_t *entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
destroy_attached(entry);
__sync_sub_and_fetch(&pool->_shared.count, 1);
}
pthread_mutex_unlock(&pool->_shared.mutex);
pthread_mutex_destroy(&pool->_shared.mutex);
pool->_lb.dispose(pool->_lb.data);
if (pool->_interval_cb.loop != NULL) {
h2o_timeout_unlink(&pool->_interval_cb.entry);
h2o_timeout_dispose(pool->_interval_cb.loop, &pool->_interval_cb.timeout);
}
for (i = 0; i < pool->targets.size; i++) {
h2o_socketpool_target_t *target = &pool->targets.entries[i];
free(target->peer.host.base);
switch (target->type) {
case H2O_SOCKETPOOL_TYPE_NAMED:
free(target->peer.named_serv.base);
break;
case H2O_SOCKETPOOL_TYPE_SOCKADDR:
break;
}
if (target->url != NULL) {
free(target->url->authority.base);
free(target->url->host.base);
free(target->url->path.base);
free(target->url);
}
}
free(pool->targets.entries);
}
void h2o_socketpool_set_timeout(h2o_socketpool_t *pool, h2o_loop_t *loop, uint64_t msec)
{
pool->timeout = msec;
pool->_interval_cb.loop = loop;
h2o_timeout_init(loop, &pool->_interval_cb.timeout, 1000);
pool->_interval_cb.entry.cb = on_timeout;
h2o_timeout_link(loop, &pool->_interval_cb.timeout, &pool->_interval_cb.entry);
}
static void call_connect_cb(h2o_socketpool_connect_request_t *req, const char *errstr)
{
h2o_socketpool_connect_cb cb = req->cb;
h2o_socket_t *sock = req->sock;
void *data = req->data;
h2o_socketpool_target_vector_t *targets = req->lb.targets;
size_t selected = req->lb.selected;
free(req->lb.tried);
free(req);
cb(sock, errstr, data, &targets->entries[selected]);
}
static void try_connect(h2o_socketpool_connect_request_t *req)
{
h2o_socketpool_target_t *target;
req->lb.selected = req->pool->_lb.selector(&req->pool->targets, req->pool->_lb.data, req->lb.tried);
assert(!req->lb.tried[req->lb.selected]);
req->lb.try_count++;
req->lb.tried[req->lb.selected] = 1;
target = &req->pool->targets.entries[req->lb.selected];
switch (target->type) {
case H2O_SOCKETPOOL_TYPE_NAMED:
/* resolve the name, and connect */
req->getaddr_req = h2o_hostinfo_getaddr(req->lb.getaddr_receiver, target->peer.host, target->peer.named_serv, AF_UNSPEC,
SOCK_STREAM, IPPROTO_TCP, AI_ADDRCONFIG | AI_NUMERICSERV, on_getaddr, req);
break;
case H2O_SOCKETPOOL_TYPE_SOCKADDR:
/* connect (using sockaddr_in) */
start_connect(req, (void *)&target->peer.sockaddr.bytes, target->peer.sockaddr.len);
break;
}
}
static void on_connect(h2o_socket_t *sock, const char *err)
{
h2o_socketpool_connect_request_t *req = sock->data;
const char *errstr = NULL;
assert(req->sock == sock);
if (err != NULL) {
h2o_socket_close(sock);
if (req->lb.try_count == req->lb.targets->size) {
req->sock = NULL;
errstr = "connection failed";
} else {
try_connect(req);
return;
}
}
call_connect_cb(req, errstr);
}
static void on_close(void *data)
{
struct on_close_data_t *close_data = data;
h2o_socketpool_t *pool = close_data->pool;
free(close_data);
__sync_sub_and_fetch(&pool->_shared.count, 1);
}
static void start_connect(h2o_socketpool_connect_request_t *req, struct sockaddr *addr, socklen_t addrlen)
{
struct on_close_data_t *close_data;
req->sock = h2o_socket_connect(req->loop, addr, addrlen, on_connect);
if (req->sock == NULL) {
__sync_sub_and_fetch(&req->pool->_shared.count, 1);
call_connect_cb(req, "failed to connect to host");
return;
}
close_data = h2o_mem_alloc(sizeof(*close_data));
close_data->pool = req->pool;
close_data->target = &req->lb.targets->entries[req->lb.selected];
req->sock->data = req;
req->sock->on_close.cb = on_close;
req->sock->on_close.data = close_data;
}
static void on_getaddr(h2o_hostinfo_getaddr_req_t *getaddr_req, const char *errstr, struct addrinfo *res, void *_req)
{
h2o_socketpool_connect_request_t *req = _req;
assert(getaddr_req == req->getaddr_req);
req->getaddr_req = NULL;
if (errstr != NULL) {
__sync_sub_and_fetch(&req->pool->_shared.count, 1);
call_connect_cb(req, errstr);
return;
}
struct addrinfo *selected = h2o_hostinfo_select_one(res);
start_connect(req, selected->ai_addr, selected->ai_addrlen);
}
void h2o_socketpool_connect(h2o_socketpool_connect_request_t **_req, h2o_socketpool_t *pool, h2o_loop_t *loop,
h2o_multithread_receiver_t *getaddr_receiver, h2o_socketpool_connect_cb cb, void *data)
{
struct pool_entry_t *entry = NULL;
struct on_close_data_t *close_data;
if (_req != NULL)
*_req = NULL;
/* fetch an entry and return it */
pthread_mutex_lock(&pool->_shared.mutex);
destroy_expired(pool);
while (1) {
if (h2o_linklist_is_empty(&pool->_shared.sockets))
break;
entry = H2O_STRUCT_FROM_MEMBER(struct pool_entry_t, link, pool->_shared.sockets.next);
h2o_linklist_unlink(&entry->link);
pthread_mutex_unlock(&pool->_shared.mutex);
/* test if the connection is still alive */
char buf[1];
ssize_t rret = recv(entry->sockinfo.fd, buf, 1, MSG_PEEK);
if (rret == -1 && (errno == EAGAIN || errno == EWOULDBLOCK)) {
/* yes! return it */
h2o_socketpool_target_t *target = entry->target;
h2o_socket_t *sock = h2o_socket_import(loop, &entry->sockinfo);
free(entry);
close_data = h2o_mem_alloc(sizeof(*close_data));
close_data->pool = pool;
close_data->target = target;
sock->on_close.cb = on_close;
sock->on_close.data = close_data;
cb(sock, NULL, data, target);
return;
}
/* connection is dead, report, close, and retry */
if (rret <= 0) {
static long counter = 0;
if (__sync_fetch_and_add(&counter, 1) == 0)
fprintf(stderr, "[WARN] detected close by upstream before the expected timeout (see issue #679)\n");
} else {
static long counter = 0;
if (__sync_fetch_and_add(&counter, 1) == 0)
fprintf(stderr, "[WARN] unexpectedly received data to a pooled socket (see issue #679)\n");
}
destroy_detached(entry);
pthread_mutex_lock(&pool->_shared.mutex);
}
pthread_mutex_unlock(&pool->_shared.mutex);
    /* FIXME respect `capacity` */
__sync_add_and_fetch(&pool->_shared.count, 1);
/* prepare request object */
h2o_socketpool_connect_request_t *req = h2o_mem_alloc(sizeof(*req));
*req = (h2o_socketpool_connect_request_t){data, cb, pool, loop};
if (_req != NULL)
*_req = req;
assert(pool->targets.size != 0);
req->lb.getaddr_receiver = getaddr_receiver;
req->lb.targets = &pool->targets;
req->lb.tried = h2o_mem_alloc(sizeof(int) * pool->targets.size);
memset(req->lb.tried, 0, sizeof(int) * pool->targets.size);
req->lb.selected = 0;
req->lb.try_count = 0;
try_connect(req);
}
void h2o_socketpool_cancel_connect(h2o_socketpool_connect_request_t *req)
{
if (req->getaddr_req != NULL) {
h2o_hostinfo_getaddr_cancel(req->getaddr_req);
req->getaddr_req = NULL;
}
if (req->sock != NULL)
h2o_socket_close(req->sock);
free(req->lb.tried);
free(req);
}
int h2o_socketpool_return(h2o_socketpool_t *pool, h2o_socket_t *sock)
{
struct pool_entry_t *entry;
struct on_close_data_t *close_data;
h2o_socketpool_target_t *target;
close_data = sock->on_close.data;
target = close_data->target;
/* reset the on_close callback */
assert(close_data->pool == pool);
free(close_data);
sock->on_close.cb = NULL;
sock->on_close.data = NULL;
entry = h2o_mem_alloc(sizeof(*entry));
if (h2o_socket_export(sock, &entry->sockinfo) != 0) {
free(entry);
__sync_sub_and_fetch(&pool->_shared.count, 1);
return -1;
}
memset(&entry->link, 0, sizeof(entry->link));
entry->added_at = h2o_now(h2o_socket_get_loop(sock));
entry->target = target;
pthread_mutex_lock(&pool->_shared.mutex);
destroy_expired(pool);
h2o_linklist_insert(&pool->_shared.sockets, &entry->link);
pthread_mutex_unlock(&pool->_shared.mutex);
return 0;
}
| 1 | 12,523 | I would appreciate it if you could rename `link` to `all_link` so that the roles of the two links (the other is `target_link`) become clearer. | h2o-h2o | c |
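A minimal sketch of the rename the reviewer asks for, assuming the struct is otherwise unchanged:

/* `link` renamed to `all_link`, making its role as the pool-wide list entry
 * clear next to the per-target `target_link` added by the patch above. */
struct pool_entry_t {
    h2o_socket_export_t sockinfo;
    h2o_socketpool_target_t *target;
    h2o_linklist_t all_link;    /* entry in the pool-wide socket list */
    h2o_linklist_t target_link; /* entry in the per-target socket list */
    uint64_t added_at;
};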
@@ -826,10 +826,16 @@ class InitialDevelopmentTests(unittest.TestCase):
"Testing testing ",
speech.PitchCommand(offset=100),
"1 2 3 4",
- smi.create_ConfigProfileTriggerCommand(t1, True, expectedToBecomeIndex=1),
- smi.create_ConfigProfileTriggerCommand(t2, True, expectedToBecomeIndex=2),
+ ExpectedIndex(1),
+			# The preceding index is expected,
+ # as the following profile trigger commands will cause the utterance to be split here.
+ ConfigProfileTriggerCommand(t1, True),
+ ConfigProfileTriggerCommand(t2, True),
"5 6 7 8",
- smi.create_ConfigProfileTriggerCommand(t1, False, expectedToBecomeIndex=3),
+ ExpectedIndex(2),
+			# The preceding index is expected,
+ # as the following profile trigger commands will cause the utterance to be split here.
+ ConfigProfileTriggerCommand(t1, False),
"9 10 11 12"
]
with smi.expectation(): | 1 | # A part of NonVisual Desktop Access (NVDA)
# This file is covered by the GNU General Public License.
# See the file COPYING for more details.
# Copyright (C) 2020 NV Access Limited
"""Unit tests for speech/manager module
"""
import unittest
import config
import speech
from unittest import mock
from unittest.mock import (
patch,
)
import speech.manager
from speech.commands import (
PitchCommand,
ConfigProfileTriggerCommand,
_CancellableSpeechCommand,
)
from .speechManagerTestHarness import (
_IndexT,
ExpectedIndex,
ExpectedProsody,
SpeechManagerInteractions,
)
#: Enable logging used to aid in the development of unit
# Hard coding this to True where it is defined makes creating new unit tests easier, since all interactions
# can be tracked.
speech.manager.IS_UNIT_TEST_LOG_ENABLED = True
_isIndexABeforeIndexB = speech.manager.SpeechManager._isIndexABeforeIndexB
MAX_INDEX = speech.manager.SpeechManager.MAX_INDEX
WRAPPED_INDEX_MAGNITUDE = speech.manager.SpeechManager._WRAPPED_INDEX_MAGNITUDE
class TestSpeechIndexComparison(unittest.TestCase):
"""Speech indexes wrap around after a limit which must be considered when comparing relative positions of
indexes
"""
def testSimple_before(self):
self.assertTrue(_isIndexABeforeIndexB(2, 3))
def testSimple_equal(self):
self.assertFalse(_isIndexABeforeIndexB(2, 2))
def testSimple_after(self):
self.assertFalse(_isIndexABeforeIndexB(3, 2))
def testWrapped_beforeAfter(self):
lastIndex = MAX_INDEX # last index
firstIndex: _IndexT = 1 # first index after wrapping.
self.assertTrue(_isIndexABeforeIndexB(lastIndex, firstIndex))
self.assertFalse(_isIndexABeforeIndexB(firstIndex, lastIndex))
def testExtents_relativeToFirstIndex(self):
"""How far ahead of the first index can b get before a is no longer considered before
"""
firstIndex: _IndexT = 1
boundaryAfterFirstIndex = WRAPPED_INDEX_MAGNITUDE + 1
boundaryBeforeFirstIndex = boundaryAfterFirstIndex + 1
# Relative to firstIndex
self.assertTrue(_isIndexABeforeIndexB(firstIndex, boundaryAfterFirstIndex))
self.assertFalse(_isIndexABeforeIndexB(firstIndex, boundaryBeforeFirstIndex))
# And ensure symmetry
self.assertTrue(_isIndexABeforeIndexB(boundaryBeforeFirstIndex, firstIndex))
self.assertFalse(_isIndexABeforeIndexB(boundaryAfterFirstIndex, firstIndex))
def testExtents_relativeToLastIndex(self):
"""How far ahead of the "last index" can b get before a is no longer considered before
"""
lastIndex = MAX_INDEX
boundaryAfterLastIndex = WRAPPED_INDEX_MAGNITUDE
boundaryBeforeLastIndex = boundaryAfterLastIndex + 1
self.assertTrue(_isIndexABeforeIndexB(lastIndex, boundaryAfterLastIndex))
self.assertFalse(_isIndexABeforeIndexB(lastIndex, boundaryBeforeLastIndex))
# And ensure symmetry
self.assertFalse(_isIndexABeforeIndexB(boundaryAfterLastIndex, lastIndex))
self.assertTrue(_isIndexABeforeIndexB(boundaryBeforeLastIndex, lastIndex))
def testExtents_relativeToMidPoint(self):
""" Despite the largest possible index being odd, there is an even number of possible indexes since
zero is not used as an index. For a given A value there will be a B value (in a space of 9998
		values) that is equal, leaving 9997 values which cannot be split evenly into before / after.
"""
lastIndex = MAX_INDEX
firstIndex: _IndexT = 1
self.assertTrue(_isIndexABeforeIndexB(firstIndex, WRAPPED_INDEX_MAGNITUDE))
self.assertTrue(_isIndexABeforeIndexB(WRAPPED_INDEX_MAGNITUDE + 1, lastIndex))
# And ensure symmetry
self.assertFalse(_isIndexABeforeIndexB(lastIndex, WRAPPED_INDEX_MAGNITUDE + 1))
self.assertFalse(_isIndexABeforeIndexB(WRAPPED_INDEX_MAGNITUDE, firstIndex))
def testAllValuesHaveSymmetry(self):
"""Test all B values for a given A value.
Confirm the number of equivalent, A-before, B-before, and assert no B values are both before and after!
"""
stationary: _IndexT = WRAPPED_INDEX_MAGNITUDE
stationaryBeforeCount = 0
movingBeforeCount = 0
bothBefore = [] # a before b and b before a should never happen
indexesWithEquivalence = []
for moving in range(1, MAX_INDEX):
isStationaryBefore = _isIndexABeforeIndexB(stationary, moving)
isMovingBefore = _isIndexABeforeIndexB(moving, stationary)
if isMovingBefore and isStationaryBefore:
bothBefore.append((stationary, moving))
elif isStationaryBefore:
stationaryBeforeCount = stationaryBeforeCount + 1
elif isMovingBefore:
movingBeforeCount = movingBeforeCount + 1
else:
indexesWithEquivalence.append((stationary, moving))
# There should only be one pair with equivalence (the equal pair)
self.assertEqual(
len(indexesWithEquivalence), 1,
msg=f"Indexes with neither true: {indexesWithEquivalence!r}"
)
# Ensure equivalent indexes really are equal
self.assertEqual(indexesWithEquivalence[0][0], indexesWithEquivalence[0][1])
# None should be: A < B < A
self.assertEqual(
len(bothBefore), 0,
msg=f"Indexes with both true: {bothBefore!r}"
)
# Check that the number of B values before and after is as expected.
self.assertAlmostEqual(
stationaryBeforeCount,
movingBeforeCount,
delta=1 # Odd number of available indexes since 0 is excluded and one pair is equivalent.
)
class TestExpectedClasses(unittest.TestCase):
def test_expectedProsodyEqualsMatching(self):
p = speech.PitchCommand(offset=2)
e = ExpectedProsody(speech.PitchCommand(offset=2))
self.assertEqual(p, e)
def test_expectedProsodyNotMatching(self):
p = speech.PitchCommand(offset=2)
e = ExpectedProsody(speech.PitchCommand(offset=5))
self.assertNotEqual(p, e)
class CancellableSpeechTests(unittest.TestCase):
"""Tests behaviour related to CancellableSpeech"""
def setUp(self):
import speechDictHandler
speechDictHandler.initialize() # setting the synth depends on dictionary["voice"]
config.conf['featureFlag']['cancelExpiredFocusSpeech'] = 1 # yes
def test_validSpeechSpoken(self):
"""Tests the outcome when speech stays valid. It should not be cancelled."""
smi = SpeechManagerInteractions(self)
with smi.expectation():
text = "This stays valid"
smi.speak([text, _CancellableSpeechCommand(lambda: True)])
smi.expect_synthSpeak(sequence=[text, smi.create_ExpectedIndex(expectedToBecomeIndex=1)])
def test_invalidSpeechNotSpoken(self):
"""Ensure that invalid speech is not sent to the synth"""
smi = SpeechManagerInteractions(self)
with smi.expectation():
text = "This stays invalid"
smi.speak([text, _CancellableSpeechCommand(lambda: False)])
def test_invalidated_indexHit(self):
"""Hitting an index should cause a cancellation of speech that has become
invalid after being sent to the synth.
For particularly long utterances with many indexes, the speech can be
stopped sooner.
"""
smi = SpeechManagerInteractions(self)
isSpeechNumberValid = {}
def _checkIfValid(speechNumber):
return isSpeechNumberValid.get(speechNumber, False)
with smi.expectation():
initiallyValidSequence = [
"text 1",
smi.create_CallBackCommand(expectedToBecomeIndex=1),
"text 2",
smi.create_ExpectedIndex(expectedToBecomeIndex=2),
_CancellableSpeechCommand(lambda: _checkIfValid(1)),
]
isSpeechNumberValid[1] = True # initially valid
smi.speak(initiallyValidSequence)
smi.expect_synthSpeak(sequence=initiallyValidSequence[:4])
with smi.expectation():
isSpeechNumberValid[1] = False # becomes invalid
smi.indexReached(1)
smi.expect_indexReachedCallback(forIndex=1)
smi.pumpAll()
smi.expect_synthCancel()
def test_invalidated_newSpeech(self):
"""New speech should cause a cancellation of speech that has become
invalid after being sent to the synth.
Ensure that new speech can be started as soon as possible to reduce
the stammering of the synth.
"""
smi = SpeechManagerInteractions(self)
isSpeechNumberValid = {}
def _checkIfValid(speechNumber):
return isSpeechNumberValid.get(speechNumber, False)
with smi.expectation():
initiallyValidSequence = [
"text 1",
smi.create_CallBackCommand(expectedToBecomeIndex=1),
"text 2",
smi.create_ExpectedIndex(expectedToBecomeIndex=2),
_CancellableSpeechCommand(lambda: _checkIfValid(1)),
]
isSpeechNumberValid[1] = True # initially valid
smi.speak(initiallyValidSequence)
smi.expect_synthSpeak(sequence=initiallyValidSequence[:4])
isSpeechNumberValid[1] = False
with smi.expectation():
newSequence = [
"text 3",
smi.create_CallBackCommand(expectedToBecomeIndex=3),
"text 4",
smi.create_ExpectedIndex(expectedToBecomeIndex=4),
]
smi.speak(newSequence)
smi.expect_synthCancel()
smi.expect_synthSpeak(sequence=newSequence[:4])
def test_invalidated_newSpeechWithCancellable(self):
"""New speech that contains a cancellableSpeechCommand should cause a
cancellation of speech that has become invalid after being sent to the
synth.
Similar to test_invalidated_newSpeech, but ensure that the
cancellableSpeechCommand does not affect the behaviour.
"""
smi = SpeechManagerInteractions(self)
isSpeechNumberValid = {}
def _checkIfValid(speechNumber):
return isSpeechNumberValid.get(speechNumber, False)
with smi.expectation():
initiallyValidSequence = [
"text 1",
smi.create_CallBackCommand(expectedToBecomeIndex=1),
"text 2",
smi.create_ExpectedIndex(expectedToBecomeIndex=2),
_CancellableSpeechCommand(lambda: _checkIfValid(1)),
]
isSpeechNumberValid[1] = True # initially valid
smi.speak(initiallyValidSequence)
smi.expect_synthSpeak(sequence=initiallyValidSequence[:4])
isSpeechNumberValid[1] = False
isSpeechNumberValid[2] = True
with smi.expectation():
newValidSequence = [
"text 3",
smi.create_CallBackCommand(expectedToBecomeIndex=3),
"text 4",
smi.create_ExpectedIndex(expectedToBecomeIndex=4),
_CancellableSpeechCommand(lambda: _checkIfValid(2)),
]
smi.speak(newValidSequence)
smi.expect_synthCancel()
smi.expect_synthSpeak(sequence=newValidSequence[:4])
def test_validSpeechAfterInvalid(self):
"""Tests that calling speak with invalid speech will not lock up the SpeechManager.
Under certain circumstances this resulted in NVDA going silent when more speech was in the queue.
"""
smi = SpeechManagerInteractions(self)
with smi.expectation():
smi.speak([
"Stays invalid",
_CancellableSpeechCommand(lambda: False),
smi.create_ExpectedIndex(expectedToBecomeIndex=1)
])
with smi.expectation():
smi.speak(["Stays valid", _CancellableSpeechCommand(lambda: True)])
smi.expect_synthSpeak(sequence=[
"Stays valid", smi.create_ExpectedIndex(expectedToBecomeIndex=2)
])
class SayAllEmulatedTests(unittest.TestCase):
"""Tests for speechManager operations."""
def setUp(self):
import speechDictHandler
speechDictHandler.initialize() # setting the synth depends on dictionary["voice"]
config.conf['featureFlag']['cancelExpiredFocusSpeech'] = 2 # no
def test_simpleSpeech(self):
smi = SpeechManagerInteractions(self)
with smi.expectation():
seqIndex = smi.speak(["hello ", "world", ExpectedIndex(1)])
smi.expect_synthSpeak(seqIndex)
def test_moreSpeechOnIndexReached(self):
smi = SpeechManagerInteractions(self)
with smi.expectation():
seqIndex1 = smi.speak(["hello ", "world", ExpectedIndex(1)])
seqIndex2 = smi.speak(["goodBye ", "earth", ExpectedIndex(2)])
smi.expect_synthSpeak(seqIndex1)
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
smi.expect_synthSpeak(seqIndex2)
def test_standardSayAll(self):
"""See #11144: Mimic behaviour that caused repeated speech during say-all - Full example.
"""
smi = SpeechManagerInteractions(self)
callBack = smi.create_CallBackCommand
expectIndex = smi.create_ExpectedIndex
# First sayAllHandler queues up a number of utterances.
with smi.expectation():
seqNum = smi.speak([
callBack(expectedToBecomeIndex=1),
'sequence 0 ',
callBack(expectedToBecomeIndex=2)
])
self.assertEqual(seqNum, 0)
# Speech manager is expected to get started immediately
smi.expect_synthSpeak(seqNum)
seqNum = smi.speak([
'sequence 1 before call back ',
callBack(expectedToBecomeIndex=3),
'sequence 1 after call back ',
callBack(expectedToBecomeIndex=4)
])
self.assertEqual(seqNum, 1)
			# Synth has already reached the first index; the callback shouldn't be called until
# pump all is called.
smi.indexReached(1) # start of sequenceNumber 0
seqNum = smi.speak([
'sequence 2 ',
callBack(expectedToBecomeIndex=5)
])
self.assertEqual(seqNum, 2)
seqNum = smi.speak([
# for some reason say-all handler does not give this sequence callback commands
'sequence 3 ',
expectIndex(expectedToBecomeIndex=6)
])
self.assertEqual(seqNum, 3)
with smi.expectation():
smi.pumpAll()
# no side effect, sayAllHandler does not send more speech until the final callback index is hit.
smi.expect_indexReachedCallback(forIndex=1, sideEffect=None)
with smi.expectation():
smi.indexReached(2) # end of sequenceNumber 0
smi.pumpAll()
smi.expect_indexReachedCallback(forIndex=2, sideEffect=None)
smi.expect_synthSpeak(1) # new speech expected after reaching endOfUtterance
with smi.expectation():
smi.indexReached(3) # in sequenceNumber 1
smi.pumpAll()
smi.expect_indexReachedCallback(forIndex=3, sideEffect=None)
# Don't expect new speech to be sent to the synth, index is not at the end of the utterance.
with smi.expectation():
smi.indexReached(4) # in sequenceNumber 1
smi.pumpAll()
smi.expect_indexReachedCallback(forIndex=4, sideEffect=None)
smi.expect_synthSpeak(2)
with smi.expectation():
smi.indexReached(5) # in sequenceNumber 2
			# Only 1 pending speech sequence from say all; this results in new speech being sent during the
# callback which triggers the duplicate speech.
smi.pumpAllAndSendSpeechOnCallback(
expectCallbackForIndex=5,
expectedSendSequenceNumber=4,
seq=[
callBack(expectedToBecomeIndex=7),
'sequence 4 ', expectIndex(expectedToBecomeIndex=8)
]
)
# Now ensure there is no double speaking!
smi.expect_synthSpeak(3)
		smi.indexReached(6)  # in sequenceNumber 3, does not have a callback
with smi.expectation():
smi.pumpAll() # No callback, so no handle index call
smi.expect_synthSpeak(4)
def test_speechNotRepeated(self):
"""Minimal test for #11144: Repeated speech during say-all.
Ensure that adding new speech during a callback does not result in double speaking.
"""
smi = SpeechManagerInteractions(self)
callBack = smi.create_CallBackCommand
expectIndex = smi.create_ExpectedIndex
with smi.expectation():
seq0 = smi.speak(['sequence 0 ', callBack(expectedToBecomeIndex=1)])
seq1 = smi.speak(['sequence 1 ', callBack(expectedToBecomeIndex=2)])
smi.expect_synthSpeak(seq0)
with smi.expectation():
			smi.indexReached(1)  # in sequenceNumber 0
			# Only 1 pending speech sequence from say all; this results in new speech being sent during the
# callback which triggers the duplicate speech.
smi.pumpAllAndSendSpeechOnCallback(
expectCallbackForIndex=1,
expectedSendSequenceNumber=2,
seq=[
callBack(expectedToBecomeIndex=3),
'sequence 2 ', expectIndex(expectedToBecomeIndex=4)
]
)
# Now ensure there is no double speaking!
smi.expect_synthSpeak(seq1)
class SayAllEmulatedTests_withCancellableSpeechEnabled(SayAllEmulatedTests):
"""Note, while cancellable speech is configurable test with and without it enabled."""
def setUp(self):
super().setUp()
config.conf['featureFlag']['cancelExpiredFocusSpeech'] = 1 # yes
class InitialDevelopmentTests(unittest.TestCase):
"""These tests were run manually during the initial development of speechManager.
See PR #7599 for source of tests.
	Test numbers match the order of tests defined in the original PR; however, they are grouped in this file based on
the features they test.
	Manual test steps are kept in each unit test's docstring; they can be run in the NVDA python console after
the following imports:
import sayAllHandler, appModuleHandler
"""
def setUp(self):
import speechDictHandler
speechDictHandler.initialize() # setting the synth depends on dictionary["voice"]
config.conf['featureFlag']['cancelExpiredFocusSpeech'] = 2 # no
@patch.object(speech.commands.WaveFileCommand, 'run')
@patch.object(speech.commands.BeepCommand, 'run')
def test_1(self, mock_BeepCommand_run, mock_WaveFileCommand_run):
r"""Text, beep, beep, sound, text.
Manual Test (in NVDA python console):
wx.CallLater(500, speech.speak, [
u"This is some speech and then comes a", speech.BeepCommand(440, 10),
u"beep. If you liked that, let's ", speech.BeepCommand(880, 10),
u"beep again. I'll speak the rest of this in a ", speech.PitchCommand(offset=50),
u"higher pitch. And for the finale, let's ",
speech.WaveFileCommand(r"waves\browseMode.wav"), u"play a sound."
])
"""
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring([mock_BeepCommand_run, mock_WaveFileCommand_run])
_beepCommand = smi.create_BeepCommand
_waveFileCommand = smi.create_WaveFileCommand
sequence = [
"This is some speech and then comes a",
_beepCommand(440, 10, expectedToBecomeIndex=1),
"beep. If you liked that, let's ",
_beepCommand(880, 10, expectedToBecomeIndex=2),
"beep again. I'll speak the rest of this in a ",
speech.PitchCommand(offset=50),
"higher pitch. And for the finale, let's ",
_waveFileCommand(r"waves\browseMode.wav", expectedToBecomeIndex=3),
"play a sound.",
smi.create_ExpectedIndex(expectedToBecomeIndex=4)
]
with smi.expectation():
smi.speak(sequence)
smi.expect_synthSpeak(0)
for i in range(1, 5):
with smi.expectation():
smi.indexReached(i)
smi.pumpAll()
if i in [1, 2, ]:
smi.expect_mockCall(mock_BeepCommand_run)
if i in [3, ]:
smi.expect_mockCall(mock_WaveFileCommand_run)
def test2(self):
"""Text, end utterance, text.
Manual Test (in NVDA python console):
wx.CallLater(500, speech.speak, [
u"This is the first utterance", speech.EndUtteranceCommand(), u"And this is the second"
])
"""
smi = SpeechManagerInteractions(self)
sequence = [
u"This is the first utterance",
smi.create_EndUtteranceCommand(expectedToBecomeIndex=1),
u"And this is the second",
smi.create_ExpectedIndex(expectedToBecomeIndex=2),
]
with smi.expectation():
smi.speak(sequence)
smi.expect_synthSpeak(0)
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
smi.expect_synthSpeak(1)
def test3(self):
"""Change pitch, text, end utterance, text.
wx.CallLater(500, speech.speak, [
speech.PitchCommand(offset=50),
u"This is the first utterance in a higher pitch",
speech.EndUtteranceCommand(), u"And this is the second"
])
Expected: All should be higher pitch.
"""
smi = SpeechManagerInteractions(self)
sequence = [
speech.PitchCommand(offset=50),
u"This is the first utterance in a higher pitch",
# EndUtterance effectively splits the sequence, two sequence numbers are returned
# from smi.speak for ease of adding expectations.
smi.create_EndUtteranceCommand(expectedToBecomeIndex=1),
smi.create_ExpectedProsodyCommand(speech.PitchCommand(offset=50)),
u"And this is the second",
smi.create_ExpectedIndex(expectedToBecomeIndex=2),
]
with smi.expectation():
smi.speak(sequence)
smi.expect_synthSpeak(0)
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
smi.expect_synthSpeak(1)
def test_6_SPRI(self):
"""Two utterances at SPRI_NORMAL in same sequence. Two separate sequences at SPRI_NEXT.
Manual Test (in NVDA python console):
wx.CallLater(500, speech.speak, [
u"1 2 3 ", u"4 5", speech.EndUtteranceCommand(), u"16 17 18 19 20"
])
wx.CallLater(510, speech.speak, [u"6 7 8 9 10"], priority=speech.SPRI_NEXT)
wx.CallLater(520, speech.speak, [u"11 12 13 14 15"], priority=speech.SPRI_NEXT)
Expected result: numbers in order from 1 to 20.
"""
smi = SpeechManagerInteractions(self)
with smi.expectation():
first, last = smi.speak([
"1 2 3 ", "4 5",
# EndUtterance effectively splits the sequence, two sequence numbers are returned
# for ease of adding expectations.
smi.create_EndUtteranceCommand(expectedToBecomeIndex=1),
"16 17 18 19 20",
smi.create_ExpectedIndex(expectedToBecomeIndex=2),
])
smi.expect_synthSpeak(first)
interrupt1 = smi.speak(priority=speech.Spri.NEXT, seq=[
"6 7 8 9 10",
smi.create_ExpectedIndex(expectedToBecomeIndex=3)
])
interrupt2 = smi.speak(priority=speech.Spri.NEXT, seq=[
"11 12 13 14 15",
smi.create_ExpectedIndex(expectedToBecomeIndex=4)
])
with smi.expectation():
smi.indexReached(1) # endUtterance
smi.pumpAll()
smi.expect_synthSpeak(interrupt1)
with smi.expectation():
smi.indexReached(3) # end of interrupt1
smi.pumpAll()
smi.expect_synthSpeak(interrupt2)
with smi.expectation():
smi.indexReached(4) # end of interrupt2
smi.pumpAll()
smi.expect_synthSpeak(last)
fin = smi.speak(["finally", smi.create_ExpectedIndex(expectedToBecomeIndex=5)])
with smi.expectation():
# End of second utterance in first speak call.
			# Note: indexes can be out of order because they are in different queues!
smi.indexReached(2)
smi.pumpAll()
smi.expect_synthSpeak(fin)
@patch.object(speech.commands.BeepCommand, 'run')
def test_7_SPRI(self, mock_BeepCommand_run):
"""Utterance at SPRI_NORMAL including a beep. Utterance at SPRI_NOW.
Manual Test (in NVDA python console):
wx.CallLater(500, speech.speak, [
u"Text before the beep ", speech.BeepCommand(440, 10),
u"text after the beep, text, text, text, text"
])
wx.CallLater(1500, speech.speak, [u"This is an interruption"], priority=speech.SPRI_NOW)
Expected:
Text before the beep, beep, Text after..., This is an interruption., Text after the beep, text...
"""
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring(mock_BeepCommand_run)
_beepCommand = smi.create_BeepCommand
toBeInterrupted = [
"Text before the beep ",
_beepCommand(440, 10, expectedToBecomeIndex=1),
"text after the beep, text, text, text, text",
smi.create_ExpectedIndex(expectedToBecomeIndex=2)
]
postInterruption = toBeInterrupted[2:]
with smi.expectation():
first = smi.speak(toBeInterrupted)
smi.expect_synthSpeak(first)
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
smi.expect_mockCall(mock_BeepCommand_run)
with smi.expectation():
interrupt = smi.speak(
priority=speech.Spri.NOW,
seq=["This is an interruption", smi.create_ExpectedIndex(expectedToBecomeIndex=3)]
)
smi.expect_synthCancel()
smi.expect_synthSpeak(interrupt)
with smi.expectation():
smi.indexReached(3)
smi.pumpAll()
smi.expect_synthSpeak(
sequence=postInterruption
)
def test_8_SPRI(self):
"""Utterance with two sequences at SPRI_NOW. Utterance at SPRI_NOW.
Manual Test (in NVDA python console):
wx.CallLater(500, speech.speak, [u"First ", u"utterance"], priority=speech.SPRI_NOW)
wx.CallLater(510, speech.speak, [u"Second ", u"utterance"], priority=speech.SPRI_NOW)
Expected result: First utterance, second utterance
"""
smi = SpeechManagerInteractions(self)
with smi.expectation():
first = smi.speak(priority=speech.Spri.NOW, seq=[
"First ", "utterance", smi.create_ExpectedIndex(expectedToBecomeIndex=1)
])
smi.expect_synthSpeak(first)
smi.expect_synthCancel()
with smi.expectation():
second = smi.speak(priority=speech.Spri.NOW, seq=[
"Second ", "utterance", smi.create_ExpectedIndex(expectedToBecomeIndex=2)
])
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
smi.expect_synthSpeak(second)
def test_9_SPRI(self):
"""Utterance with two sequences at SPRI_NOW. Utterance at SPRI_NEXT.
Manual Test (in NVDA python console):
wx.CallLater(500, speech.speak, [u"First ", u"utterance"], priority=speech.SPRI_NOW)
wx.CallLater(501, speech.speak, [u"Second ", u"utterance"], priority=speech.SPRI_NEXT)
Expected result: First utterance, second utterance
"""
smi = SpeechManagerInteractions(self)
with smi.expectation():
first = smi.speak(priority=speech.Spri.NOW, seq=[
"First ", "utterance", smi.create_ExpectedIndex(expectedToBecomeIndex=1)
])
smi.expect_synthSpeak(first)
smi.expect_synthCancel()
with smi.expectation():
second = smi.speak(priority=speech.Spri.NEXT, seq=[
"Second ", "utterance", smi.create_ExpectedIndex(expectedToBecomeIndex=2)
])
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
smi.expect_synthSpeak(second)
@patch.object(speech.commands.BeepCommand, 'run')
def test_13_SPRI_interruptBeforeIndexReached(self, mock_BeepCommand_run):
"""The same as the other test_13, but the first index is not reached before the interruption.
		In this case the speech manager is expected to finish the first utterance before interrupting.
"""
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring([mock_BeepCommand_run])
firstSeq = [
PitchCommand(offset=100),
"Text before the beep ",
smi.create_BeepCommand(440, 10, expectedToBecomeIndex=1),
"text after the beep, text, text, text, text",
smi.create_ExpectedIndex(expectedToBecomeIndex=2),
]
with smi.expectation():
first = smi.speak(priority=speech.Spri.NORMAL, seq=firstSeq)
smi.expect_synthSpeak(first)
with smi.expectation():
interrupt = smi.speak(
priority=speech.Spri.NOW,
seq=[
"This is an interruption",
smi.create_ExpectedIndex(expectedToBecomeIndex=3)
]
)
smi.expect_synthSpeak(interrupt)
smi.expect_synthCancel()
with smi.expectation():
# The first sequence was canceled; after reaching the end of the interruption sequence, expect to
# return to the first sequence. Note that it speaks the whole first utterance again.
smi.indexReached(3)
smi.pumpAll()
smi.expect_synthSpeak(first)
@patch.object(speech.commands.BeepCommand, 'run')
def test_13_SPRI_interruptAfterIndexReached(self, mock_BeepCommand_run):
"""Utterance at SPRI_NORMAL including a pitch change and beep. Utterance at SPRI_NOW.
Manual Test (in NVDA python console):
wx.CallLater(500, speech.speak, [
speech.PitchCommand(offset=100),
u"Text before the beep ",
speech.BeepCommand(440, 10),
u"text after the beep, text, text, text, text"
])
wx.CallLater(1500, speech.speak, [u"This is an interruption"], priority=speech.SPRI_NOW)
Expected: Text speaks with higher pitch, beep, text gets interrupted,
interruption speaks with normal pitch, text after the beep speaks again with higher pitch
"""
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring([mock_BeepCommand_run])
firstSeq = [
PitchCommand(offset=100),
"Text before the beep ",
smi.create_BeepCommand(440, 10, expectedToBecomeIndex=1),
"text after the beep, text, text, text, text",
smi.create_ExpectedIndex(expectedToBecomeIndex=2),
]
with smi.expectation():
first = smi.speak(priority=speech.Spri.NORMAL, seq=firstSeq)
smi.expect_synthSpeak(first)
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
smi.expect_mockCall(mock_BeepCommand_run)
with smi.expectation():
interrupt = smi.speak(priority=speech.Spri.NOW, seq=[
"This is an interruption",
smi.create_ExpectedIndex(expectedToBecomeIndex=3)
])
smi.expect_synthSpeak(interrupt)
smi.expect_synthCancel()
with smi.expectation():
smi.indexReached(3)
smi.pumpAll()
resume = [
smi.create_ExpectedProsodyCommand(firstSeq[0]),
*firstSeq[3:]
]
smi.expect_synthSpeak(sequence=resume)
class FakeProfileTrigger(config.ProfileTrigger):
spec = "fakeProfileTriggerSpec"
def __init__(self, name):
self.spec = f"{name} - {self.spec}"
self.enter = mock.Mock(name=f"enter {self.spec}")
self.exit = mock.Mock(name=f"exit {self.spec}")
@property
def hasProfile(self):
return True
def test_4_profiles(self):
"""Text, pitch, text, enter profile1, enter profile2, text, exit profile1, text.
Manual Test (in NVDA python console):
import sayAllHandler, appModuleHandler
t1 = sayAllHandler.SayAllProfileTrigger()
t2 = appModuleHandler.AppProfileTrigger("notepad")
wx.CallLater(500, speech.speak, [
u"Testing testing ", speech.PitchCommand(offset=100), "1 2 3 4",
speech.ConfigProfileTriggerCommand(t1, True), speech.ConfigProfileTriggerCommand(t2, True),
u"5 6 7 8", speech.ConfigProfileTriggerCommand(t1, False), u"9 10 11 12"
])
Expected:
All text after 1 2 3 4 should be higher pitch.
5 6 7 8 should have profile 1 and 2.
9 10 11 12 should be just profile 2.
"""
t1 = InitialDevelopmentTests.FakeProfileTrigger("t1")
t2 = InitialDevelopmentTests.FakeProfileTrigger("t2")
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring([t1.enter, t1.exit, t2.enter, t2.exit])
seq = [
"Testing testing ",
speech.PitchCommand(offset=100),
"1 2 3 4",
smi.create_ConfigProfileTriggerCommand(t1, True, expectedToBecomeIndex=1),
smi.create_ConfigProfileTriggerCommand(t2, True, expectedToBecomeIndex=2),
"5 6 7 8",
smi.create_ConfigProfileTriggerCommand(t1, False, expectedToBecomeIndex=3),
"9 10 11 12"
]
with smi.expectation():
smi.speak(seq)
smi.expect_synthSpeak(sequence=seq[:4])
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_synthSpeak(sequence=[
seq[1], # PitchCommand
seq[4], # IndexCommand index=2 (derived from ConfigProfileTriggerCommand)
])
smi.expect_mockCall(t1.enter)
with smi.expectation():
smi.indexReached(2)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_synthSpeak(sequence=[
seq[1], # PitchCommand
'5 6 7 8',
seq[6], # IndexCommand index=3 (derived from ConfigProfileTriggerCommand)
])
smi.expect_mockCall(t2.enter)
with smi.expectation():
smi.indexReached(3)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_synthSpeak(sequence=[
seq[1], # PitchCommand
'9 10 11 12',
smi.create_ExpectedIndex(expectedToBecomeIndex=4)
])
smi.expect_mockCall(t1.exit)
with smi.expectation():
smi.indexReached(4)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_mockCall(t2.exit)
def test_5_profiles(self):
"""Enter profile, text, exit profile.
Manual Test (in NVDA python console):
import sayAllHandler
trigger = sayAllHandler.SayAllProfileTrigger()
wx.CallLater(500, speech.speak, [
speech.ConfigProfileTriggerCommand(trigger, True), u"5 6 7 8",
speech.ConfigProfileTriggerCommand(trigger, False),
u"9 10 11 12"
])
Expected: 5 6 7 8 in different profile, 9 10 11 12 with base config.
"""
t1 = InitialDevelopmentTests.FakeProfileTrigger("t1")
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring([t1.enter, t1.exit])
seq = [
ConfigProfileTriggerCommand(t1, True),
"5 6 7 8",
smi.create_ConfigProfileTriggerCommand(t1, False, expectedToBecomeIndex=1),
"9 10 11 12",
]
with smi.expectation():
smi.speak(seq)
smi.expect_synthSpeak(sequence=seq[1:3])
smi.expect_synthCancel()
smi.expect_mockCall(t1.enter)
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthSpeak(sequence=['9 10 11 12', smi.create_ExpectedIndex(expectedToBecomeIndex=2)])
smi.expect_synthCancel()
smi.expect_mockCall(t1.exit)
with smi.expectation():
smi.indexReached(2)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
def test_10_SPRI_profiles(self):
"""Utterance at SPRI_NORMAL. Utterance at SPRI_NOW with profile switch.
Manual Test (in NVDA python console):
import sayAllHandler
trigger = sayAllHandler.SayAllProfileTrigger()
wx.CallLater(500, speech.speak, [u"This is a normal utterance, text, text,"])
wx.CallLater(1000, speech.speak, [
speech.ConfigProfileTriggerCommand(trigger, True),
u"This is an interruption with a different profile"
], priority=speech.SPRI_NOW)
Expected: Normal speaks but gets interrupted, interruption with different profile, normal speaks again
"""
t1 = InitialDevelopmentTests.FakeProfileTrigger("t1")
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring([t1.enter, t1.exit])
with smi.expectation():
first = smi.speak([
"This is a normal utterance, text, text,",
smi.create_ExpectedIndex(expectedToBecomeIndex=1)
])
smi.expect_synthSpeak(first)
# before the first utterance can finish, it is interrupted.
with smi.expectation():
interrupt = [
speech.ConfigProfileTriggerCommand(t1, True),
"This is an interruption with a different profile",
smi.create_ExpectedIndex(expectedToBecomeIndex=2)
]
smi.speak(priority=speech.Spri.NOW, seq=interrupt)
smi.expect_synthCancel() # twice ??
smi.expect_synthCancel()
smi.expect_mockCall(t1.enter)
smi.expect_synthSpeak(sequence=interrupt[1:])
with smi.expectation():
smi.indexReached(2)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_synthSpeak(first)
smi.expect_mockCall(t1.exit)
def test_11_SPRI_Profile(self):
"""Utterance at SPRI_NORMAL with profile switch. Utterance at SPRI_NOW.
Manual Test (in NVDA python console):
import sayAllHandler
trigger = sayAllHandler.SayAllProfileTrigger()
wx.CallLater(500, speech.speak, [
speech.ConfigProfileTriggerCommand(trigger, True),
u"This is a normal utterance with a different profile"
])
wx.CallLater(1000, speech.speak, [u"This is an interruption"], priority=speech.SPRI_NOW)
Expected:
Normal speaks with different profile but gets interrupted, interruption speaks with base config,
normal speaks again with different profile
"""
t1 = InitialDevelopmentTests.FakeProfileTrigger("t1")
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring([t1.enter, t1.exit])
with smi.expectation():
first = [
speech.ConfigProfileTriggerCommand(t1, True),
"This is a normal utterance with a different profile",
smi.create_ExpectedIndex(expectedToBecomeIndex=1)
]
smi.speak(first)
smi.expect_synthSpeak(sequence=first[1:])
smi.expect_mockCall(t1.enter)
smi.expect_synthCancel()
# Before the first index is reached, there is an interruption
with smi.expectation():
interrupt = [
"This is an interruption",
smi.create_ExpectedIndex(expectedToBecomeIndex=2)
]
interruptIndex = smi.speak(priority=speech.Spri.NOW, seq=interrupt)
smi.expect_synthCancel() # 2 calls ??
smi.expect_synthCancel()
smi.expect_synthSpeak(interruptIndex)
smi.expect_mockCall(t1.exit)
# Reach the end of the interruption speech sequence
with smi.expectation():
smi.indexReached(2)
smi.pumpAll()
# Once done speaking, expect to return to the lower priority speech, including swapping back to the
# initial profile trigger.
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_synthSpeak(sequence=first[1:])
smi.expect_mockCall(t1.enter)
# Reach the end of the lower priority (initial) speech
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_mockCall(t1.exit)
def test_12_SPRI_profile(self):
"""Utterance at SPRI_NORMAL with profile 1. Utterance at SPRI_NOW with profile 2.
Manual Test (in NVDA python console):
import sayAllHandler, appModuleHandler
t1 = sayAllHandler.SayAllProfileTrigger()
t2 = appModuleHandler.AppProfileTrigger("notepad")
wx.CallLater(500, speech.speak, [
speech.ConfigProfileTriggerCommand(t1, True),
u"This is a normal utterance with profile 1"
])
wx.CallLater(1000, speech.speak, [
speech.ConfigProfileTriggerCommand(t2, True),
u"This is an interruption with profile 2"
], priority=speech.SPRI_NOW)
Expected: Normal speaks with profile 1 but gets interrupted, interruption speaks with profile 2,
normal speaks again with profile 1
"""
t1 = InitialDevelopmentTests.FakeProfileTrigger("t1")
t2 = InitialDevelopmentTests.FakeProfileTrigger("t2")
smi = SpeechManagerInteractions(self)
smi.addMockCallMonitoring([t1.enter, t1.exit, t2.enter, t2.exit])
with smi.expectation():
first = [
speech.ConfigProfileTriggerCommand(t1, True),
"This is a normal utterance with profile 1",
smi.create_ExpectedIndex(expectedToBecomeIndex=1)
]
smi.speak(first)
smi.expect_synthSpeak(sequence=first[1:])
smi.expect_mockCall(t1.enter)
smi.expect_synthCancel()
# Before the first index is reached, there is an interruption
with smi.expectation():
interrupt = [
speech.ConfigProfileTriggerCommand(t2, True),
"This is an interruption with profile 2",
smi.create_ExpectedIndex(expectedToBecomeIndex=2)
]
smi.speak(priority=speech.Spri.NOW, seq=interrupt)
smi.expect_synthCancel() # 3 calls ??
smi.expect_synthCancel()
smi.expect_synthCancel()
smi.expect_synthSpeak(sequence=interrupt[1:])
smi.expect_mockCall(t1.exit)
smi.expect_mockCall(t2.enter)
# Reach the end of the interruption speech sequence
with smi.expectation():
smi.indexReached(2)
smi.pumpAll()
# Once done speaking, expect to return to the lower priority speech, including swapping back to the
# initial profile trigger.
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_synthCancel()
smi.expect_synthSpeak(sequence=first[1:])
smi.expect_mockCall(t2.exit)
smi.expect_mockCall(t1.enter)
# Reach the end of the lower priority (initial) speech
with smi.expectation():
smi.indexReached(1)
smi.pumpAll()
with smi.expectation():
smi.doneSpeaking()
smi.pumpAll()
smi.expect_synthCancel()
smi.expect_mockCall(t1.exit)
class InitialDevelopmentTests_withCancellableSpeechEnabled(InitialDevelopmentTests):
"""Note, while cancellable speech is configurable test with and without it enabled."""
def setUp(self):
super().setUp()
config.conf['featureFlag']['cancelExpiredFocusSpeech'] = 1 # yes
| 1 | 30,834 | Is there a reason not to use `smi.create_expectedIndex` here (and in other places)? The `smi.create_expectedIndex` function will also check to make sure that you don't accidentally skip or duplicate any indexes. With the goal of avoiding errors in the test. It does mean that you have to be diligent with how the expected indexes are created, which can be annoying too. My logic was that it is better to be annoyed by constraints during development time than to find out some time later that the test didn't do what I thought. | nvaccess-nvda | py |
@@ -43,8 +43,8 @@ class StripeSubscription
def create_customer
new_stripe_customer = Stripe::Customer.create(
card: @checkout.stripe_token,
- description: @checkout.email,
- email: @checkout.email
+ description: @checkout.user_email,
+ email: @checkout.user_email,
)
@checkout.stripe_customer_id = new_stripe_customer.id
end | 1 | class StripeSubscription
attr_reader :id
def initialize(checkout)
@checkout = checkout
end
def create
rescue_stripe_exception do
ensure_customer_exists
update_subscription
end
end
private
def rescue_stripe_exception
yield
true
rescue Stripe::StripeError => exception
@checkout.errors[:base] <<
I18n.t("checkout.problem_with_card", message: exception.message.downcase)
false
end
def ensure_customer_exists
if customer_exists?
update_card
else
create_customer
end
end
def update_card
stripe_customer.card = @checkout.stripe_token
stripe_customer.save
end
def customer_exists?
@checkout.stripe_customer_id.present?
end
def create_customer
new_stripe_customer = Stripe::Customer.create(
card: @checkout.stripe_token,
description: @checkout.email,
email: @checkout.email
)
@checkout.stripe_customer_id = new_stripe_customer.id
end
def update_subscription
if stripe_customer.subscriptions.total_count == 0
subscription =
stripe_customer.subscriptions.create(subscription_attributes)
else
subscription = stripe_customer.subscriptions.first
subscription_attributes.each { |key, value| subscription[key] = value }
subscription.save
end
@id = subscription.id
end
def subscription_attributes
base_subscription_attributes.merge(coupon_attributes)
end
def base_subscription_attributes
{
plan: @checkout.plan_sku,
quantity: @checkout.quantity
}
end
def coupon_attributes
if @checkout.stripe_coupon_id.present?
{ coupon: @checkout.stripe_coupon_id }
else
{}
end
end
def stripe_customer
@stripe_customer ||=
Stripe::Customer.retrieve(@checkout.stripe_customer_id)
end
end
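# A sketch of typical usage (the checkout shape is assumed, not part of this file):
#
#   subscription = StripeSubscription.new(checkout)
#   if subscription.create
#     subscription.id         # => the Stripe subscription id
#   else
#     checkout.errors[:base]  # => card problem message(s)
#   end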
| 1 | 15,659 | Put a comma after the last parameter of a multiline method call. | thoughtbot-upcase | rb |
@@ -29,7 +29,7 @@ type ImageWithPort struct {
type LBFargateConfig struct {
RoutingRule `yaml:"http,flow"`
ContainersConfig `yaml:",inline"`
- Public bool `yaml:"public"`
+ Public *bool `yaml:"public"` // A pointer because we don't want the environment override to default to false.
Scaling *AutoScalingConfig `yaml:",flow"`
}
| 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package manifest
import (
"bytes"
"text/template"
"github.com/aws/amazon-ecs-cli-v2/templates"
)
// LBFargateManifest holds the configuration to build a container image with an exposed port that receives
// requests through a load balancer with AWS Fargate as the compute engine.
type LBFargateManifest struct {
AppManifest `yaml:",inline"`
Image ImageWithPort `yaml:",flow"`
LBFargateConfig `yaml:",inline"`
Environments map[string]LBFargateConfig `yaml:",flow"` // Fields to override per environment.
}
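// For illustration only: a sketch of a manifest document that exercises the
// per-environment override field above. Key names are assumed from the yaml
// tags (and default lowercased field names); the values are made up.
//
//	name: frontend
//	type: Load Balanced Web App
//	image:
//	  build: frontend/Dockerfile
//	  port: 80
//	environments:
//	  production:
//	    count: 2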
// ImageWithPort represents a container image with an exposed port.
type ImageWithPort struct {
AppImage `yaml:",inline"`
Port int `yaml:"port"`
}
// LBFargateConfig represents a load balanced web application with AWS Fargate as compute.
type LBFargateConfig struct {
RoutingRule `yaml:"http,flow"`
ContainersConfig `yaml:",inline"`
Public bool `yaml:"public"`
Scaling *AutoScalingConfig `yaml:",flow"`
}
// ContainersConfig represents the resource boundaries and environment variables for the containers in the service.
type ContainersConfig struct {
CPU int `yaml:"cpu"`
Memory int `yaml:"memory"`
Count int `yaml:"count"`
Variables map[string]string `yaml:"variables"`
Secrets map[string]string `yaml:"secrets"`
}
// RoutingRule holds the path to route requests to the service.
type RoutingRule struct {
Path string `yaml:"path"`
}
// AutoScalingConfig is the configuration to scale the service with target tracking scaling policies.
type AutoScalingConfig struct {
MinCount int `yaml:"minCount"`
MaxCount int `yaml:"maxCount"`
TargetCPU float64 `yaml:"targetCPU"`
TargetMemory float64 `yaml:"targetMemory"`
}
// NewLoadBalancedFargateManifest creates a new public load balanced web service with an exposed port of 80, receives
// all the requests from the load balancer and has a single task with minimal CPU and Memory thresholds.
func NewLoadBalancedFargateManifest(appName string, dockerfile string) *LBFargateManifest {
return &LBFargateManifest{
AppManifest: AppManifest{
Name: appName,
Type: LoadBalancedWebApplication,
},
Image: ImageWithPort{
AppImage: AppImage{
Build: dockerfile,
},
Port: 80,
},
LBFargateConfig: LBFargateConfig{
RoutingRule: RoutingRule{
Path: "*",
},
ContainersConfig: ContainersConfig{
CPU: 256,
Memory: 512,
Count: 1,
},
},
}
}
// Marshal serializes the manifest object into a YAML document.
func (m *LBFargateManifest) Marshal() ([]byte, error) {
box := templates.Box()
content, err := box.FindString("lb-fargate-service/manifest.yml")
if err != nil {
return nil, err
}
tpl, err := template.New("template").Parse(content)
if err != nil {
return nil, err
}
var buf bytes.Buffer
if err := tpl.Execute(&buf, *m); err != nil {
return nil, err
}
return buf.Bytes(), nil
}
// CFNTemplate serializes the manifest object into a CloudFormation template.
func (m *LBFargateManifest) CFNTemplate() (string, error) {
return "", nil
}
| 1 | 10,757 | I don't know if we even need this parameter. | aws-copilot-cli | go |
@@ -1,5 +1,5 @@
"""Test that we are emitting arguments-differ when the arguments are different."""
-# pylint: disable=missing-docstring, too-few-public-methods, unused-argument,useless-super-delegation, useless-object-inheritance
+# pylint: disable= arguments-renamed, missing-docstring, too-few-public-methods, unused-argument,useless-super-delegation, useless-object-inheritance
class Parent(object):
| 1 | """Test that we are emitting arguments-differ when the arguments are different."""
# pylint: disable=missing-docstring, too-few-public-methods, unused-argument,useless-super-delegation, useless-object-inheritance
class Parent(object):
def test(self):
pass
class Child(Parent):
def test(self, arg): # [arguments-differ]
pass
class ParentDefaults(object):
def test(self, arg=None, barg=None):
pass
class ChildDefaults(ParentDefaults):
def test(self, arg=None): # [arguments-differ]
pass
class Classmethod(object):
@classmethod
def func(cls, data):
return data
@classmethod
def func1(cls):
return cls
class ClassmethodChild(Classmethod):
@staticmethod
def func(): # [arguments-differ]
pass
@classmethod
def func1(cls):
return cls()
class Builtins(dict):
"""Ignore for builtins, for which we don't know the number of required args."""
@classmethod
def fromkeys(cls, arg, arg1):
pass
class Varargs(object):
def has_kwargs(self, arg, **kwargs):
pass
def no_kwargs(self, args):
pass
class VarargsChild(Varargs):
def has_kwargs(self, arg): # [arguments-differ]
"Not okay to lose capabilities."
def no_kwargs(self, arg, **kwargs): # [arguments-differ]
"Not okay to add extra capabilities."
class Super(object):
def __init__(self):
pass
def __private(self):
pass
def __private2_(self):
pass
def ___private3(self):
pass
def method(self, param):
raise NotImplementedError
class Sub(Super):
# pylint: disable=unused-argument
def __init__(self, arg):
super().__init__()
def __private(self, arg):
pass
def __private2_(self, arg):
pass
def ___private3(self, arg):
pass
def method(self, param='abc'):
pass
class Staticmethod(object):
@staticmethod
def func(data):
return data
class StaticmethodChild(Staticmethod):
@classmethod
def func(cls, data):
return data
class Property(object):
@property
def close(self):
pass
class PropertySetter(Property):
@property
def close(self):
pass
@close.setter
def close(self, attr):
return attr
class StaticmethodChild2(Staticmethod):
def func(self, data):
super().func(data)
class SuperClass(object):
@staticmethod
def impl(arg1, arg2, **kwargs):
return arg1 + arg2
class MyClass(SuperClass):
def impl(self, *args, **kwargs):
"""
Acceptable use of vararg in subclass because it does not violate LSP.
"""
super().impl(*args, **kwargs)
class FirstHasArgs(object):
def test(self, *args):
pass
class SecondChangesArgs(FirstHasArgs):
def test(self, first, second, *args): # [arguments-differ]
pass
class Positional(object):
def test(self, first, second):
pass
class PositionalChild(Positional):
def test(self, *args):
"""
Acceptable use of vararg in subclass because it does not violate LSP.
"""
super().test(args[0], args[1])
class Mixed(object):
def mixed(self, first, second, *, third, fourth):
pass
class MixedChild1(Mixed):
def mixed(self, first, *args, **kwargs):
"""
Acceptable use of vararg in subclass because it does not violate LSP.
"""
super().mixed(first, *args, **kwargs)
class MixedChild2(Mixed):
def mixed(self, first, *args, third, **kwargs):
"""
Acceptable use of vararg in subclass because it does not violate LSP.
"""
super().mixed(first, *args, third, **kwargs)
class HasSpecialMethod(object):
def __getitem__(self, key):
return key
class OverridesSpecialMethod(HasSpecialMethod):
def __getitem__(self, cheie):
return cheie + 1
class ParentClass(object):
def meth(self, arg, arg1):
raise NotImplementedError
class ChildClass(ParentClass):
def meth(self, _arg, dummy):
pass
| 1 | 13,534 | What is the new result of this file if we keep it the same than before? It's just to picture easily what changed in this MR :) (thinking is hard) | PyCQA-pylint | py |
@@ -165,7 +165,7 @@ namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests
using (var reader = new StreamReader(stream, Encoding.ASCII))
{
- var response = reader.ReadToEnd();
+ var response = await reader.ReadToEndAsync();
Assert.Contains($"bytesRead: {data.Length}", response);
}
} | 1 | // Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.IO;
using System.Linq;
using System.Net;
using System.Net.Sockets;
using System.Text;
using System.Threading;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Builder;
using Microsoft.AspNetCore.Hosting;
using Microsoft.AspNetCore.Http;
using Microsoft.AspNetCore.Testing;
using Xunit;
namespace Microsoft.AspNetCore.Server.Kestrel.FunctionalTests
{
public class MaxRequestBufferSizeTests
{
private const int _dataLength = 20 * 1024 * 1024;
private static readonly string[] _requestLines = new[]
{
"POST / HTTP/1.0\r\n",
$"Content-Length: {_dataLength}\r\n",
"\r\n"
};
public static IEnumerable<object[]> LargeUploadData
{
get
{
var maxRequestBufferSizeValues = new Tuple<long?, bool>[] {
// Smallest buffer that can hold a test request line without causing
// the server to hang waiting for the end of the request line or
// a header line.
Tuple.Create((long?)(_requestLines.Max(line => line.Length)), true),
// Small buffer, but large enough to hold all request headers.
Tuple.Create((long?)16 * 1024, true),
// Default buffer.
Tuple.Create((long?)1024 * 1024, true),
// Larger than default, but still significantly lower than data, so client should be paused.
// On Windows, the client is usually paused around (MaxRequestBufferSize + 700,000).
// On Linux, the client is usually paused around (MaxRequestBufferSize + 10,000,000).
Tuple.Create((long?)5 * 1024 * 1024, true),
// Even though maxRequestBufferSize < _dataLength, client should not be paused since the
// OS-level buffers in client and/or server will handle the overflow.
Tuple.Create((long?)_dataLength - 1, false),
// Buffer is exactly the same size as data. Exposed race condition where
// IConnectionControl.Resume() was called after socket was disconnected.
Tuple.Create((long?)_dataLength, false),
// Largest possible buffer, should never trigger backpressure.
Tuple.Create((long?)long.MaxValue, false),
// Disables all code related to computing and limiting the size of the input buffer.
Tuple.Create((long?)null, false)
};
var sslValues = new[] { true, false };
return from maxRequestBufferSize in maxRequestBufferSizeValues
from ssl in sslValues
select new object[] {
maxRequestBufferSize.Item1,
ssl,
maxRequestBufferSize.Item2
};
}
}
[Theory]
[MemberData(nameof(LargeUploadData))]
public async Task LargeUpload(long? maxRequestBufferSize, bool connectionAdapter, bool expectPause)
{
// Parameters
var data = new byte[_dataLength];
var bytesWrittenTimeout = TimeSpan.FromMilliseconds(100);
var bytesWrittenPollingInterval = TimeSpan.FromMilliseconds(bytesWrittenTimeout.TotalMilliseconds / 10);
var maxSendSize = 4096;
// Initialize data with random bytes
(new Random()).NextBytes(data);
var startReadingRequestBody = new TaskCompletionSource<object>();
var clientFinishedSendingRequestBody = new TaskCompletionSource<object>();
var lastBytesWritten = DateTime.MaxValue;
using (var host = StartWebHost(maxRequestBufferSize, data, connectionAdapter, startReadingRequestBody, clientFinishedSendingRequestBody))
{
var port = host.GetPort();
using (var socket = CreateSocket(port))
using (var stream = new NetworkStream(socket))
{
await WritePostRequestHeaders(stream, data.Length);
var bytesWritten = 0;
Func<Task> sendFunc = async () =>
{
while (bytesWritten < data.Length)
{
var size = Math.Min(data.Length - bytesWritten, maxSendSize);
await stream.WriteAsync(data, bytesWritten, size).ConfigureAwait(false);
bytesWritten += size;
lastBytesWritten = DateTime.Now;
}
Assert.Equal(data.Length, bytesWritten);
clientFinishedSendingRequestBody.TrySetResult(null);
};
var sendTask = sendFunc();
if (expectPause)
{
// The minimum is (maxRequestBufferSize - maxSendSize + 1), since if bytesWritten is
// (maxRequestBufferSize - maxSendSize) or smaller, the client should be able to
// complete another send.
var minimumExpectedBytesWritten = maxRequestBufferSize.Value - maxSendSize + 1;
// The maximum is harder to determine, since there can be OS-level buffers in both the client
// and server, which allow the client to send more than maxRequestBufferSize before getting
// paused. We assume the combined buffers are smaller than the difference between
// data.Length and maxRequestBufferSize.
var maximumExpectedBytesWritten = data.Length - 1;
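// For illustration (numbers only, not asserted anywhere): with maxRequestBufferSize = 5 MB
// and maxSendSize = 4096, minimumExpectedBytesWritten = 5 * 1024 * 1024 - 4096 + 1 = 5,238,785.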
// Block until the send task has gone a while without writing bytes AND
// the bytes written exceeds the minimum expected. This indicates the server buffer
// is full.
//
// If the send task is paused before the expected number of bytes have been
// written, keep waiting since the pause may have been caused by something else
// like a slow machine.
while ((DateTime.Now - lastBytesWritten) < bytesWrittenTimeout ||
bytesWritten < minimumExpectedBytesWritten)
{
await Task.Delay(bytesWrittenPollingInterval);
}
// Verify the number of bytes written before the client was paused.
Assert.InRange(bytesWritten, minimumExpectedBytesWritten, maximumExpectedBytesWritten);
// Tell server to start reading request body
startReadingRequestBody.TrySetResult(null);
// Wait for sendTask to finish sending the remaining bytes
await sendTask;
}
else
{
// Ensure all bytes can be sent before the server starts reading
await sendTask;
// Tell server to start reading request body
startReadingRequestBody.TrySetResult(null);
}
using (var reader = new StreamReader(stream, Encoding.ASCII))
{
var response = reader.ReadToEnd();
Assert.Contains($"bytesRead: {data.Length}", response);
}
}
}
}
[Fact]
public async Task ServerShutsDownGracefullyWhenMaxRequestBufferSizeExceeded()
{
// Parameters
var data = new byte[_dataLength];
var bytesWrittenTimeout = TimeSpan.FromMilliseconds(100);
var bytesWrittenPollingInterval = TimeSpan.FromMilliseconds(bytesWrittenTimeout.TotalMilliseconds / 10);
var maxSendSize = 4096;
var startReadingRequestBody = new TaskCompletionSource<object>();
var clientFinishedSendingRequestBody = new TaskCompletionSource<object>();
var lastBytesWritten = DateTime.MaxValue;
using (var host = StartWebHost(16 * 1024, data, false, startReadingRequestBody, clientFinishedSendingRequestBody))
{
var port = host.GetPort();
using (var socket = CreateSocket(port))
using (var stream = new NetworkStream(socket))
{
await WritePostRequestHeaders(stream, data.Length);
var bytesWritten = 0;
Func<Task> sendFunc = async () =>
{
while (bytesWritten < data.Length)
{
var size = Math.Min(data.Length - bytesWritten, maxSendSize);
await stream.WriteAsync(data, bytesWritten, size).ConfigureAwait(false);
bytesWritten += size;
lastBytesWritten = DateTime.Now;
}
clientFinishedSendingRequestBody.TrySetResult(null);
};
var ignore = sendFunc();
// The minimum is (maxRequestBufferSize - maxSendSize + 1), since if bytesWritten is
// (maxRequestBufferSize - maxSendSize) or smaller, the client should be able to
// complete another send.
var minimumExpectedBytesWritten = (16 * 1024) - maxSendSize + 1;
// The maximum is harder to determine, since there can be OS-level buffers in both the client
// and server, which allow the client to send more than maxRequestBufferSize before getting
// paused. We assume the combined buffers are smaller than the difference between
// data.Length and maxRequestBufferSize.
var maximumExpectedBytesWritten = data.Length - 1;
// Block until the send task has gone a while without writing bytes AND
// the bytes written exceeds the minimum expected. This indicates the server buffer
// is full.
//
// If the send task is paused before the expected number of bytes have been
// written, keep waiting since the pause may have been caused by something else
// like a slow machine.
while ((DateTime.Now - lastBytesWritten) < bytesWrittenTimeout ||
bytesWritten < minimumExpectedBytesWritten)
{
await Task.Delay(bytesWrittenPollingInterval);
}
// Verify the number of bytes written before the client was paused.
Assert.InRange(bytesWritten, minimumExpectedBytesWritten, maximumExpectedBytesWritten);
// Dispose host prior to closing connection to verify the server doesn't throw during shutdown
// if a connection no longer has alloc and read callbacks configured.
host.Dispose();
}
}
}
private static IWebHost StartWebHost(long? maxRequestBufferSize,
byte[] expectedBody,
bool useConnectionAdapter,
TaskCompletionSource<object> startReadingRequestBody,
TaskCompletionSource<object> clientFinishedSendingRequestBody)
{
var host = new WebHostBuilder()
.UseKestrel(options =>
{
options.Listen(new IPEndPoint(IPAddress.Loopback, 0), listenOptions =>
{
if (useConnectionAdapter)
{
listenOptions.ConnectionAdapters.Add(new PassThroughConnectionAdapter());
}
});
options.Limits.MaxRequestBufferSize = maxRequestBufferSize;
if (maxRequestBufferSize.HasValue &&
maxRequestBufferSize.Value < options.Limits.MaxRequestLineSize)
{
options.Limits.MaxRequestLineSize = (int)maxRequestBufferSize;
}
if (maxRequestBufferSize.HasValue &&
maxRequestBufferSize.Value < options.Limits.MaxRequestHeadersTotalSize)
{
options.Limits.MaxRequestHeadersTotalSize = (int)maxRequestBufferSize;
}
})
.UseContentRoot(Directory.GetCurrentDirectory())
.Configure(app => app.Run(async context =>
{
await startReadingRequestBody.Task.TimeoutAfter(TimeSpan.FromSeconds(30));
var buffer = new byte[expectedBody.Length];
var bytesRead = 0;
while (bytesRead < buffer.Length)
{
bytesRead += await context.Request.Body.ReadAsync(buffer, bytesRead, buffer.Length - bytesRead);
}
await clientFinishedSendingRequestBody.Task.TimeoutAfter(TimeSpan.FromSeconds(30));
// Verify client didn't send extra bytes
if (context.Request.Body.ReadByte() != -1)
{
context.Response.StatusCode = StatusCodes.Status500InternalServerError;
await context.Response.WriteAsync("Client sent more bytes than expectedBody.Length");
return;
}
// Verify bytes received match expectedBody
for (int i = 0; i < expectedBody.Length; i++)
{
if (buffer[i] != expectedBody[i])
{
context.Response.StatusCode = StatusCodes.Status500InternalServerError;
await context.Response.WriteAsync($"Bytes received do not match expectedBody at position {i}");
return;
}
}
await context.Response.WriteAsync($"bytesRead: {bytesRead.ToString()}");
}))
.Build();
host.Start();
return host;
}
private static Socket CreateSocket(int port)
{
var socket = new Socket(AddressFamily.InterNetwork, SocketType.Stream, ProtocolType.Tcp);
// Timeouts large enough to prevent false positives, but small enough to fail quickly.
socket.SendTimeout = 10 * 1000;
socket.ReceiveTimeout = 30 * 1000;
socket.Connect(IPAddress.Loopback, port);
return socket;
}
private static async Task WritePostRequestHeaders(Stream stream, int contentLength)
{
using (var writer = new StreamWriter(stream, Encoding.ASCII, bufferSize: 1024, leaveOpen: true))
{
foreach (var line in _requestLines)
{
await writer.WriteAsync(line).ConfigureAwait(false);
}
}
}
}
}
| 1 | 12,834 | This needs to be synchronous for the timeout to work. | aspnet-KestrelHttpServer | .cs |
@@ -23,11 +23,7 @@ Where
<% if purchaseable.city.present? %><%= purchaseable.city %>, <% end %><%= purchaseable.state %> <%= purchaseable.zip %>
<% end -%>
-<% unless purchaseable.starts_immediately? -%>
-We will be in touch before <%= purchase.starts_on.to_s(:simple) %> with a reminder and any further instructions. However, please don't hesitate to get in touch with us at [email protected] should you have any questions or concerns.
-<% end -%>
-
-<%= purchaseable.reminder_email %>
+We will be in touch before the workshop with a reminder and any further instructions. However, please don't hesitate to get in touch with us at [email protected] should you have any questions or concerns.
<% if purchaseable.workshop.announcement.present? -%>
<%= purchaseable.workshop.announcement.message %> | 1 | Thank you! <%= purchase.name %> is now registered for <%= purchaseable.name %>.
You can view the workshop lessons and other workshop resources here:
<%= purchase_url(purchase) %>
<% if purchaseable.in_person? -%>
<% if purchase.comments.present? -%>
You provided us with the following comments:
<%= purchase.comments %>
<% else %>
If you have any special dietary restrictions, and you have not done so already, please let us know by replying to this email.
<% end -%>
<% end -%>
<% unless purchaseable.starts_immediately? -%>
When
<%= purchase_date_range(purchaseable) %>
<%= purchaseable.time_range %>
<% end -%>
<% if purchaseable.in_person? -%>
Where
<%= purchaseable.address %>
<% if purchaseable.city.present? %><%= purchaseable.city %>, <% end %><%= purchaseable.state %> <%= purchaseable.zip %>
<% end -%>
<% unless purchaseable.starts_immediately? -%>
We will be in touch before <%= purchase.starts_on.to_s(:simple) %> with a reminder and any further instructions. However, please don't hesitate to get in touch with us at [email protected] should you have any questions or concerns.
<% end -%>
<%= purchaseable.reminder_email %>
<% if purchaseable.workshop.announcement.present? -%>
<%= purchaseable.workshop.announcement.message %>
<% end -%>
<% unless purchaseable.starts_immediately? -%>
Thanks very much, we'll see you on <%= purchase.starts_on.to_s(:simple) %>!
<% end -%>
| 1 | 8,399 | Can you break this onto multiple lines? | thoughtbot-upcase | rb |
@@ -298,11 +298,11 @@ const instr_info_t A32_ext_bit7[][2] = {
*/
const instr_info_t A32_ext_bit19[][2] = {
{ /* 0 */
- {EXT_SIMD8, 0xf2800000, "(ext simd8 0)", xx, xx, xx, xx, xx, no, x, 0},
- {EXT_SIMD5, 0xf2880000, "(ext simd5 0)", xx, xx, xx, xx, xx, no, x, 0},
+ {EXT_SIMD8, 0xf2800010, "(ext simd8 0)", xx, xx, xx, xx, xx, no, x, 0},
+ {EXT_SIMD5, 0xf2880010, "(ext simd5 0)", xx, xx, xx, xx, xx, no, x, 0},
}, { /* 1 */
- {EXT_SIMD8, 0xf3800000, "(ext simd8 1)", xx, xx, xx, xx, xx, no, x, 1},
- {EXT_SIMD5, 0xf3880000, "(ext simd5 1)", xx, xx, xx, xx, xx, no, x, 1},
+ {EXT_SIMD8, 0xf3800010, "(ext simd8 1)", xx, xx, xx, xx, xx, no, x, 1},
+ {EXT_SIMD5, 0xf3880010, "(ext simd5 1)", xx, xx, xx, xx, xx, no, x, 1},
},
};
| 1 | /* **********************************************************
* Copyright (c) 2014-2015 Google, Inc. All rights reserved.
* **********************************************************/
/*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions are met:
*
* * Redistributions of source code must retain the above copyright notice,
* this list of conditions and the following disclaimer.
*
* * Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
*
* * Neither the name of Google, Inc. nor the names of its contributors may be
* used to endorse or promote products derived from this software without
* specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL VMWARE, INC. OR CONTRIBUTORS BE LIABLE
* FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
* DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
* SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
* CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
* LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
* OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
* DAMAGE.
*/
#include "../globals.h" /* need this to include decode.h (uint, etc.) */
#include "arch.h" /* need this to include decode.h (byte, etc. */
#include "decode.h"
#include "decode_private.h"
#include "table_private.h"
/* Indexed by bits 27:23,21:20.
* The D bit (0x04) has been removed. We are including the U bit. Is
* there some other order we could do to group the related instrs better
* while still minimizing the number of table lookup steps and keeping
* dense tables?
*/
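/* A sketch (not part of this file) of how the 7-bit index into this table
 * could be computed from an encoded instruction word, assuming bits 27:23
 * supply the upper five bits and bits 21:20 the lower two:
 *     uint idx = (((enc >> 23) & 0x1f) << 2) | ((enc >> 20) & 0x3);
 * E.g., enc = 0xf2800000 yields idx = (0x05 << 2) | 0x0 = 0x14, the
 * "(ext bit4 13)" entry below.
 */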
const instr_info_t A32_unpred_opc7[] = {
/* {op/type, op encoding, name, dst1, dst2, src1, src2, src3, flags, eflags, code} */
/* 00 */
{INVALID, 0xf0000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf0100000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf0200000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf0300000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf0800000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf0900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf0a00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf0b00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 10 */
{EXT_BITS16, 0xf1000000, "(ext bits16 8)", xx, xx, xx, xx, xx, no, x, 8},
{INVALID, 0xf1100000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf1200000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf1300000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf1800000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf1900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf1a00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf1b00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 20 */
{EXT_SIMD6, 0xf2000000, "(ext simd6 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_SIMD6, 0xf2100000, "(ext simd6 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_SIMD6, 0xf2200000, "(ext simd6 2)", xx, xx, xx, xx, xx, no, x, 2},
{EXT_SIMD6, 0xf2300000, "(ext simd6 3)", xx, xx, xx, xx, xx, no, x, 3},
{EXT_BIT4, 0xf2800000, "(ext bit4 13)", xx, xx, xx, xx, xx, no, x, 13},
{EXT_BIT4, 0xf2900000, "(ext bit4 14)", xx, xx, xx, xx, xx, no, x, 14},
{EXT_BIT4, 0xf2a00000, "(ext bit4 15)", xx, xx, xx, xx, xx, no, x, 15},
{EXT_BIT4, 0xf2b00000, "(ext bit4 16)", xx, xx, xx, xx, xx, no, x, 16},
/* 30 */
{EXT_SIMD6, 0xf3000000, "(ext simd6 6)", xx, xx, xx, xx, xx, no, x, 6},
{EXT_SIMD6, 0xf3100000, "(ext simd6 7)", xx, xx, xx, xx, xx, no, x, 7},
{EXT_SIMD6, 0xf3200000, "(ext simd6 8)", xx, xx, xx, xx, xx, no, x, 8},
{EXT_SIMD6, 0xf3300000, "(ext simd6 9)", xx, xx, xx, xx, xx, no, x, 9},
{EXT_BIT4, 0xf3800000, "(ext bit4 17)", xx, xx, xx, xx, xx, no, x, 17},
{EXT_BIT4, 0xf3900000, "(ext bit4 18)", xx, xx, xx, xx, xx, no, x, 18},
{EXT_BIT4, 0xf3a00000, "(ext bit4 19)", xx, xx, xx, xx, xx, no, x, 19},
{EXT_BIT4, 0xf3b00000, "(ext bit4 20)", xx, xx, xx, xx, xx, no, x, 20},
/* 40 */
{EXT_VLDA, 0xf4000000, "(ext vldA 0)", xx, xx, xx, xx, xx, no, x, 0},
{OP_pli, 0xf450f000, "pli", xx, xx, MN12z, xx, xx, no, x, top7[0x35]},/*PUW=000*/
{EXT_VLDA, 0xf4200000, "(ext vldA 1)", xx, xx, xx, xx, xx, no, x, 1},
{INVALID, 0xf4300000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EXT_VLDB, 0xf4800000, "(ext vldB 0)", xx, xx, xx, xx, xx, no, x, 0},
{OP_pli, 0xf4d0f000, "pli", xx, xx, MP12z, xx, xx, no, x, top7[0x21]},/*PUW=010*/
{EXT_VLDB, 0xf4a00000, "(ext vldB 1)", xx, xx, xx, xx, xx, no, x, 1},
{INVALID, 0xf4b00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 50 */
{INVALID, 0xf5000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EXT_BIT22, 0xf5100000, "(ext bit22 0)", xx, xx, xx, xx, xx, no, x, 0},
{INVALID, 0xf5200000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EXT_FPB, 0xf5300000, "(ext fpb 14)", xx, xx, xx, xx, xx, no, x, 14},
{INVALID, 0xf5800000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EXT_BIT22, 0xf5900000, "(ext bit22 1)", xx, xx, xx, xx, xx, no, x, 1},
{INVALID, 0xf5a00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf5b00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 60 */
{INVALID, 0xf6000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pli, 0xf650f000, "pli", xx, xx, MNSz, xx, xx, no, x, END_LIST},/*PUW=000*/
{INVALID, 0xf6200000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf6300000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf6800000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_pli, 0xf6d0f000, "pli", xx, xx, MPSz, xx, xx, no, x, top7[0x31]},/*PUW=010*/
{INVALID, 0xf6a00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf6b00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 70 */
{INVALID, 0xf7000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EXT_BIT22, 0xf7100000, "(ext bit22 2)", xx, xx, xx, xx, xx, no, x, 2},
{INVALID, 0xf7200000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf7300000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf7800000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{EXT_BIT22, 0xf7900000, "(ext bit22 3)", xx, xx, xx, xx, xx, no, x, 3},
{INVALID, 0xf7a00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf7b00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 80 */
{OP_srsda, 0xf84d0500, "srsda", Mq, xx, i5, LRw, SPSR, no, x, top7[0x42]},/*PUW=000*/
{OP_rfeda, 0xf8100a00, "rfeda", CPSR, xx, Mq, xx, xx, no, fWNZCVQG, END_LIST},/*PUW=000*/
{OP_srsda, 0xf86d0500, "srsda", Mq, SPw, i5, SPw, LRw, xop, x, exop[0x6]},/*PUW=001*/
{OP_rfeda, 0xf8300a00, "rfeda", RAw, CPSR, Mq, RAw, xx, no, fWNZCVQG, top7[0x41]},/*PUW=001*/
{OP_srs, 0xf8cd0500, "srs", Mq, xx, i5, LRw, SPSR, no, x, top7[0x46]},/*PUW=010*/
{OP_rfe, 0xf8900a00, "rfe", CPSR, xx, Mq, xx, xx, no, fWNZCVQG, END_LIST},/*PUW=010*/
{OP_srs, 0xf8ed0500, "srs", Mq, SPw, i5, SPw, LRw, xop, x, exop[0x6]},/*PUW=011*/
{OP_rfe, 0xf8b00a00, "rfe", RAw, CPSR, Mq, RAw, xx, no, fWNZCVQG, top7[0x45]},/*PUW=011*/
/* 90 */
{OP_srsdb, 0xf94d0500, "srsdb", Mq, xx, i5, LRw, SPSR, no, x, top7[0x4a]},/*PUW=100*/
{OP_rfedb, 0xf9100a00, "rfedb", CPSR, xx, Mq, xx, xx, no, fWNZCVQG, top7[0x4b]},/*PUW=100*/
{OP_srsdb, 0xf96d0500, "srsdb", Mq, SPw, i5, SPw, LRw, xop, x, exop[0x6]},/*PUW=101*/
{OP_rfedb, 0xf9300a00, "rfedb", RAw, CPSR, Mq, RAw, xx, no, fWNZCVQG, END_LIST},/*PUW=101*/
{OP_srsib, 0xf9cd0500, "srsib", Mq, xx, i5, LRw, SPSR, no, x, top7[0x4e]},/*PUW=110*/
{OP_rfeib, 0xf9900a00, "rfeib", CPSR, xx, Mq, xx, xx, no, fWNZCVQG, top7[0x4f]},/*PUW=110*/
{OP_srsib, 0xf9ed0500, "srsib", Mq, SPw, i5, SPw, LRw, xop, x, exop[0x6]},/*PUW=111*/
{OP_rfeib, 0xf9b00a00, "rfeib", RAw, CPSR, Mq, RAw, xx, no, fWNZCVQG, END_LIST},/*PUW=111*/
/* a0 */
{OP_blx, 0xfa000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, END_LIST},
{OP_blx, 0xfa100000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfa200000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfa300000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfa800000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfa900000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfaa00000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfab00000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
/* b0 */
{OP_blx, 0xfb000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfb000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfb000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfb000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfb000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfb000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfb000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
{OP_blx, 0xfb000000, "blx", LRw, xx, j25x0_24, xx, xx, no, x, DUP_ENTRY},
/* c0 */
{EXT_BITS20, 0xfc000000, "(ext bits20 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_BITS20, 0xfc100000, "(ext bits20 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_BITS20, 0xfc200000, "(ext bits20 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_BITS20, 0xfc300000, "(ext bits20 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_BITS20, 0xfc800000, "(ext bits20 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_BITS20, 0xfc900000, "(ext bits20 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_BITS20, 0xfca00000, "(ext bits20 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_BITS20, 0xfcb00000, "(ext bits20 0)", xx, xx, xx, xx, xx, no, x, 0},
/* d0 */
{EXT_BITS20, 0xfd000000, "(ext bits20 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_BITS20, 0xfd100000, "(ext bits20 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_BITS20, 0xfd200000, "(ext bits20 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_BITS20, 0xfd300000, "(ext bits20 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_BITS20, 0xfd800000, "(ext bits20 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_BITS20, 0xfd900000, "(ext bits20 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_BITS20, 0xfda00000, "(ext bits20 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_BITS20, 0xfdb00000, "(ext bits20 1)", xx, xx, xx, xx, xx, no, x, 1},
/* e0 */
{EXT_FP, 0xfe000000, "(ext fp 46)", xx, xx, xx, xx, xx, no, x, 46},
{EXT_FP, 0xfe100000, "(ext fp 47)", xx, xx, xx, xx, xx, no, x, 47},
{EXT_FP, 0xfe200000, "(ext fp 48)", xx, xx, xx, xx, xx, no, x, 48},
{EXT_FP, 0xfe300000, "(ext fp 49)", xx, xx, xx, xx, xx, no, x, 49},
{EXT_FP, 0xfe800000, "(ext fp 50)", xx, xx, xx, xx, xx, no, x, 50},
{EXT_FP, 0xfe900000, "(ext fp 51)", xx, xx, xx, xx, xx, no, x, 51},
{EXT_FP, 0xfea00000, "(ext fp 52)", xx, xx, xx, xx, xx, no, x, 52},
{EXT_FP, 0xfeb00000, "(ext fp 53)", xx, xx, xx, xx, xx, no, x, 53},
/* f0 */
{INVALID, 0xff000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xff100000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xff200000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xff300000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xff800000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xff900000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xffa00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xffb00000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
};
/* Indexed by bits 23:20 */
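/* Sketch (assumed, for illustration): idx = (enc >> 20) & 0xf. E.g.,
 * enc = 0xfc300000 gives idx = 0x3, the ldc2 entry with PUW=001 below.
 */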
const instr_info_t A32_ext_bits20[][16] = {
{ /* 0 */
{INVALID, 0xfc000000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},/*PUW=000*/
{INVALID, 0xfc100000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},/*PUW=000*/
{OP_stc2, 0xfc200000, "stc2", Mw, RAw, i4_8, CRBw, n8x4, xop_wb, x, END_LIST},/*PUW=001*/
{OP_ldc2, 0xfc300000, "ldc2", CRBw, RAw, Mw, i4_8, n8x4, xop_wb, x, END_LIST},/*PUW=001*/
{OP_mcrr2, 0xfc400000, "mcrr2", CRDw, RAw, RBw, i4_8, i4_7, srcX4, x, END_LIST},
{OP_mrrc2, 0xfc500000, "mrrc2", RBw, RAw, i4_8, i4_7, CRDw, no, x, END_LIST},
{OP_stc2l, 0xfc600000, "stc2l", Mw, RAw, i4_8, CRBw, n8x4, xop_wb, x, END_LIST},/*PUW=001*/
{OP_ldc2l, 0xfc700000, "ldc2l", CRBw, RAw, Mw, i4_8, n8x4, xop_wb, x, END_LIST},/*PUW=001*/
{OP_stc2, 0xfc800000, "stc2", Mw, xx, i4_8, CRBw, i8, no, x, tb20[0][0x02]},/*PUW=010*/
{OP_ldc2, 0xfc900000, "ldc2", CRBw, xx, Mw, i4_8, i8, no, x, tb20[0][0x03]},/*PUW=010*/
{OP_stc2, 0xfca00000, "stc2", Mw, RAw, i4_8, CRBw, i8x4, xop_wb, x, tb20[0][0x08]},/*PUW=011*/
{OP_ldc2, 0xfcb00000, "ldc2", CRBw, RAw, Mw, i4_8, i8x4, xop_wb, x, tb20[0][0x09]},/*PUW=011*/
{OP_stc2l, 0xfcc00000, "stc2l", Mw, xx, i4_8, CRBw, i8, no, x, tb20[0][0x06]},/*PUW=010*/
{OP_ldc2l, 0xfcd00000, "ldc2l", CRBw, xx, Mw, i4_8, i8, no, x, tb20[0][0x07]},/*PUW=010*/
{OP_stc2l, 0xfce00000, "stc2l", Mw, RAw, i4_8, CRBw, i8x4, xop_wb, x, tb20[0][0x0c]},/*PUW=011*/
{OP_ldc2l, 0xfcf00000, "ldc2l", CRBw, RAw, Mw, i4_8, i8x4, xop_wb, x, tb20[0][0x0d]},/*PUW=011*/
}, { /* 1 */
{OP_stc2, 0xfd000000, "stc2", MN8Xw, xx, i4_8, CRBw, n8x4, no, x, tb20[1][0x0a]},/*PUW=100*/
{OP_ldc2, 0xfd100000, "ldc2", CRBw, xx, MN8Xw, i4_8, i8x4, no, x, tb20[1][0x0b]},/*PUW=100*/
{OP_stc2, 0xfd200000, "stc2", MN8Xw, RAw, i4_8, CRBw, n8x4, xop_wb, x, tb20[0][0x0a]},/*PUW=101*/
{OP_ldc2, 0xfd300000, "ldc2", CRBw, RAw, MN8Xw, i4_8, n8x4, xop_wb, x, tb20[0][0x0b]},/*PUW=101*/
{OP_stc2l, 0xfd400000, "stc2l", MN8Xw, xx, i4_8, CRBw, n8x4, no, x, tb20[1][0x0e]},/*PUW=100*/
{OP_ldc2l, 0xfd500000, "ldc2l", CRBw, xx, MN8Xw, i4_8, i8x4, no, x, tb20[1][0x0f]},/*PUW=100*/
{OP_stc2l, 0xfd600000, "stc2l", MN8Xw, RAw, i4_8, CRBw, n8x4, xop_wb, x, tb20[0][0x0e]},/*PUW=101*/
{OP_ldc2l, 0xfd700000, "ldc2l", CRBw, RAw, MN8Xw, i4_8, n8x4, xop_wb, x, tb20[0][0x0f]},/*PUW=101*/
{OP_stc2, 0xfd800000, "stc2", MP8Xw, xx, i4_8, CRBw, i8x4, no, x, tb20[1][0x00]},/*PUW=110*/
{OP_ldc2, 0xfd900000, "ldc2", CRBw, xx, MP8Xw, i4_8, i8x4, no, x, tb20[1][0x01]},/*PUW=110*/
{OP_stc2, 0xfda00000, "stc2", MP8Xw, RAw, i4_8, CRBw, i8x4, xop_wb, x, tb20[1][0x02]},/*PUW=111*/
{OP_ldc2, 0xfdb00000, "ldc2", CRBw, RAw, MP8Xw, i4_8, i8x4, xop_wb, x, tb20[1][0x03]},/*PUW=111*/
{OP_stc2l, 0xfdc00000, "stc2l", MP8Xw, xx, i4_8, CRBw, i8x4, no, x, tb20[1][0x04]},/*PUW=110*/
{OP_ldc2l, 0xfdd00000, "ldc2l", CRBw, xx, MP8Xw, i4_8, i8x4, no, x, tb20[1][0x05]},/*PUW=110*/
{OP_stc2l, 0xfde00000, "stc2l", MP8Xw, RAw, i4_8, CRBw, i8x4, xop_wb, x, tb20[1][0x06]},/*PUW=111*/
{OP_ldc2l, 0xfdf00000, "ldc2l", CRBw, RAw, MP8Xw, i4_8, i8x4, xop_wb, x, tb20[1][0x07]},/*PUW=111*/
},
};
/* Indexed by whether imm4 in 20:16 is zero or not */
const instr_info_t A32_ext_imm2016[][2] = {
{ /* 0 */
{OP_vmovl_s32, 0xf2a00a10, "vmovl.s32", VBdq, xx, VCq, xx, xx, no, x, END_LIST},
{OP_vshll_s32, 0xf2a00a10, "vshll.s32", VBdq, xx, VCq, i5_16, xx, no, x, END_LIST},/*20:16 cannot be 0*/
}, { /* 1 */
{OP_vmovl_u32, 0xf3a00a10, "vmovl.u32", VBdq, xx, VCq, xx, xx, no, x, END_LIST},
{OP_vshll_u32, 0xf3a00a10, "vshll.u32", VBdq, xx, VCq, i5_16, xx, no, x, END_LIST},/*20:16 cannot be 0*/
},
};
/* Indexed by whether imm4 in 18:16 is zero or not */
const instr_info_t A32_ext_imm1816[][2] = {
{ /* 0 */
{OP_vmovl_s8, 0xf2880a10, "vmovl.s8", VBdq, xx, VCq, xx, xx, no, x, END_LIST},
{OP_vshll_s8, 0xf2880a10, "vshll.s8", VBdq, xx, VCq, i3_16, xx, no, x, END_LIST},/*18:16 cannot be 0*/
}, { /* 1 */
{OP_vmovl_u8, 0xf3880a10, "vmovl.u8", VBdq, xx, VCq, xx, xx, no, x, END_LIST},
{OP_vshll_u8, 0xf3880a10, "vshll.u8", VBdq, xx, VCq, i3_16, xx, no, x, END_LIST},/*18:16 cannot be 0*/
},
};
/* Indexed by bit 6 */
const instr_info_t A32_ext_bit6[][2] = {
{ /* 0 */
{OP_vext, 0xf2b00000, "vext.8", VBq, xx, VAq, VCq, i4_8, no, x, tb6[0][0x01]},/*XXX: reads from part of srcs, but complex which part*/
{OP_vext, 0xf2b00040, "vext.8", VBdq, xx, VAdq, VCdq, i4_8, no, x, END_LIST},/*XXX: reads from part of srcs, but complex which part*/
}, { /* 1 */
{OP_vmaxnm_f32, 0xfe800a00, "vmaxnm.f32", WBd, xx, WAd, WCd, xx, v8|vfp, x, END_LIST},
{OP_vminnm_f32, 0xfe800a40, "vminnm.f32", WBd, xx, WAd, WCd, xx, v8|vfp, x, END_LIST},
}, { /* 2 */
{OP_vmaxnm_f64, 0xfe800b00, "vmaxnm.f64", VBq, xx, VAq, VCq, xx, v8|vfp, x, END_LIST},
{OP_vminnm_f64, 0xfe800b40, "vminnm.f64", VBq, xx, VAq, VCq, xx, v8|vfp, x, END_LIST},
},
};
/* Indexed by bit 7 */
const instr_info_t A32_ext_bit7[][2] = {
{ /* 0 */
{EXT_BIT19, 0xf2800010, "(ext bit19 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_IMM6L, 0xf2800090, "(ext imm6L 0)", xx, xx, xx, xx, xx, no, x, 0},
}, { /* 1 */
{EXT_SIMD6, 0xf2900010, "(ext simd6 4)", xx, xx, xx, xx, xx, no, x, 4},
{EXT_IMM6L, 0xf2900090, "(ext imm6L 0)", xx, xx, xx, xx, xx, no, x, 0},
}, { /* 2 */
/* The .*32 versions of the high-immed instrs can be 0xf2a or 0xf3b so we
* point at the same simd6[5], w/ bit4=1 ensuring we skip the right entries
* that we should not hit if we went there w/o checking bit4 first.
*/
{EXT_SIMD6, 0xf2a00010, "(ext simd6 5)", xx, xx, xx, xx, xx, no, x, 5},
{EXT_IMM6L, 0xf2a00090, "(ext imm6L 0)", xx, xx, xx, xx, xx, no, x, 0},
}, { /* 3 */
{EXT_BIT19, 0xf3800010, "(ext bit19 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_IMM6L, 0xf3800090, "(ext imm6L 1)", xx, xx, xx, xx, xx, no, x, 1},
}, { /* 4 */
{EXT_SIMD6, 0xf3900010, "(ext simd6 10)", xx, xx, xx, xx, xx, no, x, 10},
{EXT_IMM6L, 0xf3900090, "(ext imm6L 1)", xx, xx, xx, xx, xx, no, x, 1},
}, { /* 5 */
/* Similarly, we need to share 0xffa with 0xffb when bit4 is set. */
{EXT_SIMD6, 0xf3a00010, "(ext simd6 11)", xx, xx, xx, xx, xx, no, x, 11},
{EXT_IMM6L, 0xf3a00090, "(ext imm6L 1)", xx, xx, xx, xx, xx, no, x, 1},
},
};
/* Indexed by bit 19. This up-front split is simpler than having to split
* 37+ entries inside A32_ext_simd5[] into 2-entry members of this table.
*/
const instr_info_t A32_ext_bit19[][2] = {
{ /* 0 */
{EXT_SIMD8, 0xf2800000, "(ext simd8 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_SIMD5, 0xf2880000, "(ext simd5 0)", xx, xx, xx, xx, xx, no, x, 0},
}, { /* 1 */
{EXT_SIMD8, 0xf3800000, "(ext simd8 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_SIMD5, 0xf3880000, "(ext simd5 1)", xx, xx, xx, xx, xx, no, x, 1},
},
};
/* Indexed by bit 22 */
const instr_info_t A32_ext_bit22[][2] = {
{ /* 0 */
{OP_pldw, 0xf510f000, "pldw", xx, xx, MN12z, xx, xx, no, x, tb22[3][0x00]},/*PUW=100*/
{OP_pld, 0xf550f000, "pld", xx, xx, MN12z, xx, xx, no, x, tb22[3][0x01]},/*PUW=100*/
}, { /* 1 */
{OP_pldw, 0xf590f000, "pldw", xx, xx, MP12z, xx, xx, no, x, tb22[0][0x00]},/*PUW=110*/
{OP_pld, 0xf5d0f000, "pld", xx, xx, MP12z, xx, xx, no, x, tb22[0][0x01]},/*PUW=110*/
}, { /* 2 */
{OP_pldw, 0xf710f000, "pldw", xx, xx, MNSz, xx, xx, no, x, END_LIST},/*PUW=100*/
{OP_pld, 0xf750f000, "pld", xx, xx, MNSz, xx, xx, no, x, END_LIST},/*PUW=100*/
}, { /* 3 */
{OP_pldw, 0xf790f000, "pldw", xx, xx, MPSz, xx, xx, no, x, tb22[2][0x00]},/*PUW=110*/
{OP_pld, 0xf7d0f000, "pld", xx, xx, MPSz, xx, xx, no, x, tb22[2][0x01]},/*PUW=110*/
},
};
/* Indexed by 6 bits 11:8,6,4 (thus: a-f | 0,1,4,5) */
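/* Sketch (assumed, for illustration):
 *     uint idx = (((enc >> 8) & 0xf) << 2) | (((enc >> 6) & 1) << 1) | ((enc >> 4) & 1);
 * E.g., enc = 0xf2000410 gives idx = (0x4 << 2) | 0x1 = 0x11, the vqshl.s8
 * entry below.
 */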
const instr_info_t A32_ext_simd6[][64] = {
{ /* 0 */
{OP_vhadd_s8, 0xf2000000, "vhadd.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x02]},
{OP_vqadd_s8, 0xf2000010, "vqadd.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x03]},
{OP_vhadd_s8, 0xf2000040, "vhadd.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqadd_s8, 0xf2000050, "vqadd.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrhadd_s8, 0xf2000100, "vrhadd.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x06]},
{OP_vand, 0xf2000110, "vand", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x07]},
{OP_vrhadd_s8, 0xf2000140, "vrhadd.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vand, 0xf2000150, "vand", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vhsub_s8, 0xf2000200, "vhsub.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x0a]},
{OP_vqsub_s8, 0xf2000210, "vqsub.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x0b]},
{OP_vhsub_s8, 0xf2000240, "vhsub.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqsub_s8, 0xf2000250, "vqsub.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcgt_s8, 0xf2000300, "vcgt.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6b[0][0x01]},
{OP_vcge_s8, 0xf2000310, "vcge.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6b[0][0x03]},
{OP_vcgt_s8, 0xf2000340, "vcgt.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcge_s8, 0xf2000350, "vcge.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vshl_s8, 0xf2000400, "vshl.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x12]},
{OP_vqshl_s8, 0xf2000410, "vqshl.s8", VBq, xx, VAq, VCq, xx, no, x, tsi5[0][0x0f]},
{OP_vshl_s8, 0xf2000440, "vshl.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshl_s8, 0xf2000450, "vqshl.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrshl_s8, 0xf2000500, "vrshl.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x16]},
{OP_vqrshl_s8, 0xf2000510, "vqrshl.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x17]},
{OP_vrshl_s8, 0xf2000540, "vrshl.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqrshl_s8, 0xf2000550, "vqrshl.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmax_s8, 0xf2000600, "vmax.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x1a]},
{OP_vmin_s8, 0xf2000610, "vmin.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x1b]},
{OP_vmax_s8, 0xf2000640, "vmax.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmin_s8, 0xf2000650, "vmin.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vabd_s8, 0xf2000700, "vabd.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x1e]},
{OP_vaba_s8, 0xf2000710, "vaba.s8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x1f]},
{OP_vabd_s8, 0xf2000740, "vabd.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vaba_s8, 0xf2000750, "vaba.s8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
/* 0x80 */
{OP_vadd_i8, 0xf2000800, "vadd.i8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x22]},
{OP_vtst_8, 0xf2000810, "vtst.8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x23]},
{OP_vadd_i8, 0xf2000840, "vadd.i8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vtst_8, 0xf2000850, "vtst.8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmla_i8, 0xf2000900, "vmla.i8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x26]},
{OP_vmul_i8, 0xf2000910, "vmul.i8", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x27]},
{OP_vmla_i8, 0xf2000940, "vmla.i8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmul_i8, 0xf2000950, "vmul.i8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vpmax_s8, 0xf2000a00, "vpmax.s8", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vpmin_s8, 0xf2000a10, "vpmin.s8", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{INVALID, 0xf2000a40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2000a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2000b00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpadd_i8, 0xf2000b10, "vpadd.i8", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{INVALID, 0xf2000b40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2000b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2000c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vfma_f32, 0xf2000c10, "vfma.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x33]},
{OP_sha1c_32, 0xf2000c40, "sha1c.32", VBdq, xx, VAdq, VCdq, xx, v8, x, END_LIST},
{OP_vfma_f32, 0xf2000c50, "vfma.f32", VBdq, xx, VAdq, VCdq, xx, no, x, tfpA[10][0x00]},
{OP_vadd_f32, 0xf2000d00, "vadd.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x36]},
{OP_vmla_f32, 0xf2000d10, "vmla.f32", VBq, xx, VAq, VCq, xx, v8, x, tsi6[0][0x37]},
{OP_vadd_f32, 0xf2000d40, "vadd.f32", VBdq, xx, VAdq, VCdq, xx, no, x, tfpA[3][0x00]},
{OP_vmla_f32, 0xf2000d50, "vmla.f32", VBdq, xx, VAdq, VCdq, xx, v8, x, tfpA[0][0x00]},
{OP_vceq_f32, 0xf2000e00, "vceq.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6b[6][0x15]},
{INVALID, 0xf2000e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vceq_f32, 0xf2000e40, "vceq.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2000e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmax_f32, 0xf2000f00, "vmax.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x3e]},
{OP_vrecps_f32, 0xf2000f10, "vrecps.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[0][0x3f]},
{OP_vmax_f32, 0xf2000f40, "vmax.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrecps_f32, 0xf2000f50, "vrecps.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
}, { /* 1 */
{OP_vhadd_s16, 0xf2100000, "vhadd.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x02]},
{OP_vqadd_s16, 0xf2100010, "vqadd.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x03]},
{OP_vhadd_s16, 0xf2100040, "vhadd.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqadd_s16, 0xf2100050, "vqadd.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrhadd_s16, 0xf2100100, "vrhadd.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x06]},
{OP_vbic, 0xf2100110, "vbic", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x07]},
{OP_vrhadd_s16, 0xf2100140, "vrhadd.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vbic, 0xf2100150, "vbic", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vhsub_s16, 0xf2100200, "vhsub.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x0a]},
{OP_vqsub_s16, 0xf2100210, "vqsub.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x0b]},
{OP_vhsub_s16, 0xf2100240, "vhsub.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqsub_s16, 0xf2100250, "vqsub.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcgt_s16, 0xf2100300, "vcgt.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6b[3][0x01]},
{OP_vcge_s16, 0xf2100310, "vcge.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6b[3][0x03]},
{OP_vcgt_s16, 0xf2100340, "vcgt.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcge_s16, 0xf2100350, "vcge.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vshl_s16, 0xf2100400, "vshl.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x12]},
{OP_vqshl_s16, 0xf2100410, "vqshl.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[4][0x1f]},
{OP_vshl_s16, 0xf2100440, "vshl.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshl_s16, 0xf2100450, "vqshl.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrshl_s16, 0xf2100500, "vrshl.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x16]},
{OP_vqrshl_s16, 0xf2100510, "vqrshl.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x17]},
{OP_vrshl_s16, 0xf2100540, "vrshl.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqrshl_s16, 0xf2100550, "vqrshl.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmax_s16, 0xf2100600, "vmax.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x1a]},
{OP_vmin_s16, 0xf2100610, "vmin.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x1b]},
{OP_vmax_s16, 0xf2100640, "vmax.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmin_s16, 0xf2100650, "vmin.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vabd_s16, 0xf2100700, "vabd.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x1e]},
{OP_vaba_s16, 0xf2100710, "vaba.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x1f]},
{OP_vabd_s16, 0xf2100740, "vabd.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vaba_s16, 0xf2100750, "vaba.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
/* 0x80 */
{OP_vadd_i16, 0xf2100800, "vadd.i16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x22]},
{OP_vtst_16, 0xf2100810, "vtst.16", VBq, xx, VAq, VCq, xx, no, x, tsi6[1][0x23]},
{OP_vadd_i16, 0xf2100840, "vadd.i16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vtst_16, 0xf2100850, "vtst.16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmla_i16, 0xf2100900, "vmla.i16", VBq, xx, VAq, VCq, xx, no, x, tsi6[10][0x02]},
{OP_vmul_i16, 0xf2100910, "vmul.i16", VBq, xx, VAq, VCq, xx, no, x, tsi6[10][0x22]},
{OP_vmla_i16, 0xf2100940, "vmla.i16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmul_i16, 0xf2100950, "vmul.i16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vpmax_s16, 0xf2100a00, "vpmax.s16", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vpmin_s16, 0xf2100a10, "vpmin.s16", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{INVALID, 0xf2100a40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmulh_s16, 0xf2100b00, "vqdmulh.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[10][0x32]},
{OP_vpadd_i16, 0xf2100b10, "vpadd.i16", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vqdmulh_s16, 0xf2100b40, "vqdmulh.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2100b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_sha1p_32, 0xf2100c40, "sha1p.32", VBdq, xx, VAdq, VCdq, xx, v8, x, END_LIST},
{INVALID, 0xf2100c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100d40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2100f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 2 */
{OP_vhadd_s32, 0xf2200000, "vhadd.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x02]},
{OP_vqadd_s32, 0xf2200010, "vqadd.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x03]},
{OP_vhadd_s32, 0xf2200040, "vhadd.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqadd_s32, 0xf2200050, "vqadd.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrhadd_s32, 0xf2200100, "vrhadd.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x06]},
{OP_vorr, 0xf2200110, "vorr", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x07]},/*XXX: if src1==src2 then "vmov"*/
{OP_vrhadd_s32, 0xf2200140, "vrhadd.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vorr, 0xf2200150, "vorr", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},/*XXX: if src1==src2 then "vmov"*/
{OP_vhsub_s32, 0xf2200200, "vhsub.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x0a]},
{OP_vqsub_s32, 0xf2200210, "vqsub.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x0b]},
{OP_vhsub_s32, 0xf2200240, "vhsub.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqsub_s32, 0xf2200250, "vqsub.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcgt_s32, 0xf2200300, "vcgt.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6b[6][0x01]},
{OP_vcge_s32, 0xf2200310, "vcge.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6b[6][0x03]},
{OP_vcgt_s32, 0xf2200340, "vcgt.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcge_s32, 0xf2200350, "vcge.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vshl_s32, 0xf2200400, "vshl.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x12]},
{OP_vqshl_s32, 0xf2200410, "vqshl.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[5][0x1f]},
{OP_vshl_s32, 0xf2200440, "vshl.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshl_s32, 0xf2200450, "vqshl.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrshl_s32, 0xf2200500, "vrshl.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x16]},
{OP_vqrshl_s32, 0xf2200510, "vqrshl.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x17]},
{OP_vrshl_s32, 0xf2200540, "vrshl.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqrshl_s32, 0xf2200550, "vqrshl.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmax_s32, 0xf2200600, "vmax.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x1a]},
{OP_vmin_s32, 0xf2200610, "vmin.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x1b]},
{OP_vmax_s32, 0xf2200640, "vmax.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmin_s32, 0xf2200650, "vmin.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vabd_s32, 0xf2200700, "vabd.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x1e]},
{OP_vaba_s32, 0xf2200710, "vaba.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x1f]},
{OP_vabd_s32, 0xf2200740, "vabd.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vaba_s32, 0xf2200750, "vaba.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vadd_i32, 0xf2200800, "vadd.i32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x22]},
{OP_vtst_32, 0xf2200810, "vtst.32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x23]},
{OP_vadd_i32, 0xf2200840, "vadd.i32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vtst_32, 0xf2200850, "vtst.32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmla_i32, 0xf2200900, "vmla.i32", VBq, xx, VAq, VCq, xx, no, x, tsi6[11][0x02]},
{OP_vmul_i32, 0xf2200910, "vmul.i32", VBq, xx, VAq, VCq, xx, no, x, tsi6[11][0x22]},
{OP_vmla_i32, 0xf2200940, "vmla.i32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmul_i32, 0xf2200950, "vmul.i32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vpmax_s32, 0xf2200a00, "vpmax.s32", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vpmin_s32, 0xf2200a10, "vpmin.s32", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{INVALID, 0xf2200a40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2200a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmulh_s32, 0xf2200b00, "vqdmulh.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[11][0x32]},
{OP_vpadd_i32, 0xf2200b10, "vpadd.i32", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vqdmulh_s32, 0xf2200b40, "vqdmulh.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2200b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2200c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vfms_f32, 0xf2200c10, "vfms.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x33]},
{OP_sha1m_32, 0xf2200c40, "sha1m.32", VBdq, xx, VAdq, VCdq, xx, v8, x, END_LIST},
{OP_vfms_f32, 0xf2200c50, "vfms.f32", VBdq, xx, VAdq, VCdq, xx, no, x, tfpA[10][0x02]},
{OP_vsub_f32, 0xf2200d00, "vsub.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x36]},
{OP_vmls_f32, 0xf2200d10, "vmls.f32", VBq, xx, VAq, VCq, xx, v8, x, tsi6[2][0x37]},
{OP_vsub_f32, 0xf2200d40, "vsub.f32", VBdq, xx, VAdq, VCdq, xx, no, x, tfpA[3][0x02]},
{OP_vmls_f32, 0xf2200d50, "vmls.f32", VBdq, xx, VAdq, VCdq, xx, v8, x, tfpA[0][0x02]},
{INVALID, 0xf2200e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2200e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2200e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2200e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmin_f32, 0xf2200f00, "vmin.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x3e]},
{OP_vrsqrts_f32, 0xf2200f10, "vrsqrts.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[2][0x3f]},
{OP_vmin_f32, 0xf2200f40, "vmin.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrsqrts_f32, 0xf2200f50, "vrsqrts.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
}, { /* 3 */
/* XXX: this entry is sparse: should we make a new table to somehow compress it? */
{INVALID, 0xf2300000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqadd_s64, 0xf2300010, "vqadd.s64", VBq, xx, VAq, VCq, xx, no, x, tsi6[3][0x03]},
{INVALID, 0xf2300040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqadd_s64, 0xf2300050, "vqadd.s64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2300100, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorn, 0xf2300110, "vorn", VBq, xx, VAq, VCq, xx, no, x, tsi6[3][0x07]},
{INVALID, 0xf2300140, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorn, 0xf2300150, "vorn", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2300200, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqsub_s64, 0xf2300210, "vqsub.s64", VBq, xx, VAq, VCq, xx, no, x, tsi6[3][0x0b]},
{INVALID, 0xf2300240, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqsub_s64, 0xf2300250, "vqsub.s64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2300300, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300350, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshl_s64, 0xf2300400, "vshl.s64", VBq, xx, VAq, VCq, xx, no, x, tsi6[3][0x12]},
{OP_vqshl_s64, 0xf2300410, "vqshl.s64", VBq, xx, VAq, VCq, xx, no, x, ti6l[0][0x0f]},
{OP_vshl_s64, 0xf2300440, "vshl.s64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshl_s64, 0xf2300450, "vqshl.s64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrshl_s64, 0xf2300500, "vrshl.s64", VBq, xx, VAq, VCq, xx, no, x, tsi6[3][0x16]},
{OP_vqrshl_s64, 0xf2300510, "vqrshl.s64", VBq, xx, VAq, VCq, xx, no, x, tsi6[3][0x17]},
{OP_vrshl_s64, 0xf2300540, "vrshl.s64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqrshl_s64, 0xf2300550, "vqrshl.s64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2300600, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300640, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300650, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300700, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300750, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 0x80 */
{OP_vadd_i64, 0xf2300800, "vadd.i64", VBq, xx, VAq, VCq, xx, no, x, tsi6[3][0x22]},
{INVALID, 0xf2300810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vadd_i64, 0xf2300840, "vadd.i64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2300850, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300900, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300940, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300950, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300a00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300a40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300b00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300b40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_sha1su0_32, 0xf2300c40, "sha1su0.32", VBdq, xx, VAdq, VCdq, xx, v8, x, END_LIST},
{INVALID, 0xf2300c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300d40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2300f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 4 */
{OP_vaddl_s16, 0xf2900000, "vaddl.s16", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vshr_s16, 0xf2900010, "vshr.s16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[4][0x03]},/*XXX: imm = 16-imm*/
{OP_vmla_i16, 0xf2900040, "vmla.i16", VBq, xx, VAq, VC3h_q, i2x5_3, no, x, tsi6[1][0x24]},
{OP_vshr_s16, 0xf2900050, "vshr.s16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vaddw_s16, 0xf2900100, "vaddw.s16", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vsra_s16, 0xf2900110, "vsra.s16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[4][0x07]},/*XXX: imm = 16-imm*/
{INVALID, 0xf2900140, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vsra_s16, 0xf2900150, "vsra.s16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vsubl_s16, 0xf2900200, "vsubl.s16", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vrshr_s16, 0xf2900210, "vrshr.s16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[4][0x0b]},/*XXX: imm = 16-imm*/
{OP_vmlal_s16, 0xf2900240, "vmlal.s16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{OP_vrshr_s16, 0xf2900250, "vrshr.s16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vsubw_s16, 0xf2900300, "vsubw.s16", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vrsra_s16, 0xf2900310, "vrsra.s16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[4][0x0f]},/*XXX: imm = 16-imm*/
{OP_vqdmlal_s16, 0xf2900340, "vqdmlal.s16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{OP_vrsra_s16, 0xf2900350, "vrsra.s16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vaddhn_i32, 0xf2900400, "vaddhn.i32", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2900410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmls_i16, 0xf2900440, "vmls.i16", VBq, xx, VAq, VC3h_q, i2x5_3, no, x, tsi6[10][0x12]},
{INVALID, 0xf2900450, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vabal_s16, 0xf2900500, "vabal.s16", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vshl_i16, 0xf2900510, "vshl.i16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[4][0x17]},/*XXX: imm = 16-imm?*/
{INVALID, 0xf2900540, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshl_i16, 0xf2900550, "vshl.i16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm?*/
{OP_vsubhn_i32, 0xf2900600, "vsubhn.i32", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2900610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmlsl_s16, 0xf2900640, "vmlsl.s16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{INVALID, 0xf2900650, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vabdl_s16, 0xf2900700, "vabdl.s16", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vqshl_s16, 0xf2900710, "vqshl.s16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[1][0x11]},/*XXX: imm = imm-16*/
{OP_vqdmlsl_s16, 0xf2900740, "vqdmlsl.s16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{OP_vqshl_s16, 0xf2900750, "vqshl.s16", VBdq, xx, VCdq, i4_16, xx, no, x, tsi6[1][0x13]},/*XXX: imm = imm-16*/
/* 0x80 */
{OP_vmlal_s16, 0xf2900800, "vmlal.s16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[4][0x0a]},
{OP_vshrn_i32, 0xf2900810, "vshrn.i32", VBq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vmul_i16, 0xf2900840, "vmul.i16", VBq, xx, VAq, VC3h_q, i2x5_3, no, x, tsi6[1][0x25]},
{OP_vrshrn_i32, 0xf2900850, "vrshrn.i32", VBq, xx, VCq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vqdmlal_s16, 0xf2900900, "vqdmlal.s16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[4][0x0e]},
{OP_vqshrn_s32, 0xf2900910, "vqshrn.s32", VBq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{INVALID, 0xf2900940, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrshrn_s32, 0xf2900950, "vqrshrn.s32", VBq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vmlsl_s16, 0xf2900a00, "vmlsl.s16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[4][0x1a]},
{EXT_IMM1916, 0xf2900a10, "(ext imm1916 1)", xx, xx, xx, xx, xx, no, x, 1},
{OP_vmull_s16, 0xf2900a40, "vmull.s16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{INVALID, 0xf2900a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmlsl_s16, 0xf2900b00, "vqdmlsl.s16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[4][0x1e]},
{INVALID, 0xf2900b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmull_s16, 0xf2900b40, "vqdmull.s16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{INVALID, 0xf2900b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmull_s16, 0xf2900c00, "vmull.s16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[4][0x2a]},
{INVALID, 0xf2900c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmulh_s16, 0xf2900c40, "vqdmulh.s16", VBq, xx, VAq, VC3h_q, i2x5_3, no, x, tsi6[1][0x2c]},
{INVALID, 0xf2900c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmull_s16, 0xf2900d00, "vqdmull.s16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[4][0x2e]},
{INVALID, 0xf2900d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrdmulh_s16, 0xf2900d40, "vqrdmulh.s16", VBq, xx, VAq, VC3h_q, i2x5_3, no, x, tsi6[10][0x36]},
{INVALID, 0xf2900d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2900f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 5 */
{OP_vaddl_s32, 0xf2a00000, "vaddl.s32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vshr_s32, 0xf2a00010, "vshr.s32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[5][0x03]},/*XXX: imm = 32-imm*/
{OP_vmla_i32, 0xf2a00040, "vmla.i32", VBq, xx, VAq, VC4d_q, i1_5, no, x, tsi6[2][0x24]},
{OP_vshr_s32, 0xf2a00050, "vshr.s32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vaddw_s32, 0xf2a00100, "vaddw.s32", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vsra_s32, 0xf2a00110, "vsra.s32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[5][0x07]},/*XXX: imm = 32-imm*/
{OP_vmla_f32, 0xf2a00140, "vmla.f32", VBq, xx, VAq, VC4d_q, i1_5, no, x, tsi6[11][0x06]},
{OP_vsra_s32, 0xf2a00150, "vsra.s32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vsubl_s32, 0xf2a00200, "vsubl.s32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vrshr_s32, 0xf2a00210, "vrshr.s32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[5][0x0b]},/*XXX: imm = 32-imm*/
{OP_vmlal_s32, 0xf2a00240, "vmlal.s32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{OP_vrshr_s32, 0xf2a00250, "vrshr.s32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vsubw_s32, 0xf2a00300, "vsubw.s32", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vrsra_s32, 0xf2a00310, "vrsra.s32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[5][0x0f]},/*XXX: imm = 32-imm*/
{OP_vqdmlal_s32, 0xf2a00340, "vqdmlal.s32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{OP_vrsra_s32, 0xf2a00350, "vrsra.s32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vaddhn_i64, 0xf2a00400, "vaddhn.i64", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2a00410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmls_i32, 0xf2a00440, "vmls.i32", VBq, xx, VAq, VC4d_q, i1_5, no, x, tsi6[11][0x12]},
{INVALID, 0xf2a00450, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vabal_s32, 0xf2a00500, "vabal.s32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vshl_i32, 0xf2a00510, "vshl.i32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[5][0x17]},/*XXX: imm = 32-imm?*/
{OP_vmls_f32, 0xf2a00540, "vmls.f32", VBq, xx, VAq, VC4d_q, i1_5, no, x, tsi6[11][0x16]},
{OP_vshl_i32, 0xf2a00550, "vshl.i32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm?*/
{OP_vsubhn_i64, 0xf2a00600, "vsubhn.i64", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf2a00610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmlsl_s32, 0xf2a00640, "vmlsl.s32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{INVALID, 0xf2a00650, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vabdl_s32, 0xf2a00700, "vabdl.s32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vqshl_s32, 0xf2a00710, "vqshl.s32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[2][0x11]},/*XXX: imm = imm-32*/
{OP_vqdmlsl_s32, 0xf2a00740, "vqdmlsl.s32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{OP_vqshl_s32, 0xf2a00750, "vqshl.s32", VBdq, xx, VCdq, i5_16, xx, no, x, tsi6[2][0x13]},/*XXX: imm = imm-32*/
/* 0x80 */
{OP_vmlal_s32, 0xf2a00800, "vmlal.s32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[5][0x0a]},
{OP_vshrn_i64, 0xf2a00810, "vshrn.i64", VBq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vmul_i32, 0xf2a00840, "vmul.i32", VBq, xx, VAq, VC4d_q, i1_5, no, x, tsi6[2][0x25]},
{OP_vrshrn_i64, 0xf2a00850, "vrshrn.i64", VBq, xx, VCq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vqdmlal_s32, 0xf2a00900, "vqdmlal.s32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[5][0x0e]},
{OP_vqshrn_s64, 0xf2a00910, "vqshrn.s64", VBq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vmul_f32, 0xf2a00940, "vmul.f32", VBq, xx, VAq, VC4d_q, i1_5, no, x, tsi6[11][0x26]},
{OP_vqrshrn_s64, 0xf2a00950, "vqrshrn.s64", VBq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vmlsl_s32, 0xf2a00a00, "vmlsl.s32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[5][0x1a]},
{EXT_IMM2016, 0xf2a00a10, "(ext imm2016 0)", xx, xx, xx, xx, xx, no, x, 0},
{OP_vmull_s32, 0xf2a00a40, "vmull.s32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{INVALID, 0xf2a00a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmlsl_s32, 0xf2a00b00, "vqdmlsl.s32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[5][0x1e]},
{INVALID, 0xf2a00b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmull_s32, 0xf2a00b40, "vqdmull.s32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{INVALID, 0xf2a00b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmull_s32, 0xf2a00c00, "vmull.s32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[5][0x2a]},
{INVALID, 0xf2a00c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmulh_s32, 0xf2a00c40, "vqdmulh.s32", VBq, xx, VAq, VC4d_q, i1_5, no, x, tsi6[2][0x2c]},
{INVALID, 0xf2a00c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmull_s32, 0xf2a00d00, "vqdmull.s32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[5][0x2e]},
{INVALID, 0xf2a00d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrdmulh_s32, 0xf2a00d40, "vqrdmulh.s32", VBq, xx, VAq, VC4d_q, i1_5, no, x, tsi6[11][0x36]},
{INVALID, 0xf2a00d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmull_p32, 0xf2a00e00, "vmull.p32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vcvt_f32_s32, 0xf2a00e10, "vcvt.f32.s32", VBq, xx, VCq, i6_16, xx, no, x, tsi6b[8][0x19]},
{INVALID, 0xf2a00e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_f32_s32, 0xf2a00e50, "vcvt.f32.s32", VBdq, xx, VCdq, i6_16, xx, no, x, t16[1][0x0a]},
{INVALID, 0xf2a00f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_s32_f32, 0xf2a00f10, "vcvt.s32.f32", VBq, xx, VCq, i6_16, xx, no, x, tsi6b[8][0x1d]},
{INVALID, 0xf2a00f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_s32_f32, 0xf2a00f50, "vcvt.s32.f32", VBdq, xx, VCdq, i6_16, xx, no, x, t16[1][0x0e]},
}, { /* 6 */
{OP_vhadd_u8, 0xf3000000, "vhadd.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x02]},
{OP_vqadd_u8, 0xf3000010, "vqadd.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x03]},
{OP_vhadd_u8, 0xf3000040, "vhadd.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqadd_u8, 0xf3000050, "vqadd.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrhadd_u8, 0xf3000100, "vrhadd.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x06]},
{OP_veor, 0xf3000110, "veor", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x07]},
{OP_vrhadd_u8, 0xf3000140, "vrhadd.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_veor, 0xf3000150, "veor", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vhsub_u8, 0xf3000200, "vhsub.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x0a]},
{OP_vqsub_u8, 0xf3000210, "vqsub.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x0b]},
{OP_vhsub_u8, 0xf3000240, "vhsub.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqsub_u8, 0xf3000250, "vqsub.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcgt_u8, 0xf3000300, "vcgt.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x0e]},
{OP_vcge_u8, 0xf3000310, "vcge.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x0f]},
{OP_vcgt_u8, 0xf3000340, "vcgt.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcge_u8, 0xf3000350, "vcge.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vshl_u8, 0xf3000400, "vshl.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x12]},
{OP_vqshl_u8, 0xf3000410, "vqshl.u8", VBq, xx, VAq, VCq, xx, no, x, tsi5[1][0x0f]},
{OP_vshl_u8, 0xf3000440, "vshl.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshl_u8, 0xf3000450, "vqshl.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrshl_u8, 0xf3000500, "vrshl.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x16]},
{OP_vqrshl_u8, 0xf3000510, "vqrshl.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x17]},
{OP_vrshl_u8, 0xf3000540, "vrshl.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqrshl_u8, 0xf3000550, "vqrshl.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmax_u8, 0xf3000600, "vmax.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x1a]},
{OP_vmin_u8, 0xf3000610, "vmin.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x1b]},
{OP_vmax_u8, 0xf3000640, "vmax.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmin_u8, 0xf3000650, "vmin.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vabd_u8, 0xf3000700, "vabd.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x1e]},
{OP_vaba_u8, 0xf3000710, "vaba.u8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x1f]},
{OP_vabd_u8, 0xf3000740, "vabd.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vaba_u8, 0xf3000750, "vaba.u8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
/* 0x80 */
{OP_vsub_i8, 0xf3000800, "vsub.i8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x22]},
{OP_vceq_i8, 0xf3000810, "vceq.i8", VBq, xx, VAq, VCq, xx, no, x, tsi6b[0][0x05]},
{OP_vsub_i8, 0xf3000840, "vsub.i8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vceq_i8, 0xf3000850, "vceq.i8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmls_i8, 0xf3000900, "vmls.i8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x26]},
{OP_vmul_p8, 0xf3000910, "vmul.p8", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x27]},
{OP_vmls_i8, 0xf3000940, "vmls.i8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmul_p8, 0xf3000950, "vmul.p8", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vpmax_u8, 0xf3000a00, "vpmax.u8", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vpmin_u8, 0xf3000a10, "vpmin.u8", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{INVALID, 0xf3000a40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3000a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3000b00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3000b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3000b40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3000b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3000c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3000c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_sha256h_32, 0xf3000c40, "sha256h.32", VBdq, xx, VAdq, VCdq, xx, v8, x, END_LIST},
{INVALID, 0xf3000c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpadd_f32, 0xf3000d00, "vpadd.f32", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmul_f32, 0xf3000d10, "vmul.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[5][0x26]},
{INVALID, 0xf3000d40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmul_f32, 0xf3000d50, "vmul.f32", VBdq, xx, VAdq, VCdq, xx, no, x, tfpA[2][0x00]},
{OP_vcge_f32, 0xf3000e00, "vcge.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6b[6][0x13]},
{OP_vacge_f32, 0xf3000e10, "vacge.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[6][0x3b]},
{OP_vcge_f32, 0xf3000e40, "vcge.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vacge_f32, 0xf3000e50, "vacge.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vpmax_f32, 0xf3000f00, "vpmax.f32", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmaxnm_f32, 0xf3000f10, "vmaxnm.f32", VBq, xx, VAq, VCq, xx, v8, x, tsi6[6][0x3f]},
{INVALID, 0xf3000f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmaxnm_f32, 0xf3000f50, "vmaxnm.f32", VBdq, xx, VAdq, VCdq, xx, v8, x, tb6[1][0x00]},
}, { /* 7 */
{OP_vhadd_u16, 0xf3100000, "vhadd.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x02]},
{OP_vqadd_u16, 0xf3100010, "vqadd.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x03]},
{OP_vhadd_u16, 0xf3100040, "vhadd.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqadd_u16, 0xf3100050, "vqadd.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrhadd_u16, 0xf3100100, "vrhadd.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x06]},
{OP_vbsl, 0xf3100110, "vbsl", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x07]},
{OP_vrhadd_u16, 0xf3100140, "vrhadd.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vbsl, 0xf3100150, "vbsl", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vhsub_u16, 0xf3100200, "vhsub.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x0a]},
{OP_vqsub_u16, 0xf3100210, "vqsub.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x0b]},
{OP_vhsub_u16, 0xf3100240, "vhsub.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqsub_u16, 0xf3100250, "vqsub.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcgt_u16, 0xf3100300, "vcgt.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x0e]},
{OP_vcge_u16, 0xf3100310, "vcge.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x0f]},
{OP_vcgt_u16, 0xf3100340, "vcgt.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcge_u16, 0xf3100350, "vcge.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vshl_u16, 0xf3100400, "vshl.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x12]},
{OP_vqshl_u16, 0xf3100410, "vqshl.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[10][0x1f]},
{OP_vshl_u16, 0xf3100440, "vshl.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshl_u16, 0xf3100450, "vqshl.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrshl_u16, 0xf3100500, "vrshl.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x16]},
{OP_vqrshl_u16, 0xf3100510, "vqrshl.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x17]},
{OP_vrshl_u16, 0xf3100540, "vrshl.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqrshl_u16, 0xf3100550, "vqrshl.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmax_u16, 0xf3100600, "vmax.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x1a]},
{OP_vmin_u16, 0xf3100610, "vmin.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x1b]},
{OP_vmax_u16, 0xf3100640, "vmax.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmin_u16, 0xf3100650, "vmin.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vabd_u16, 0xf3100700, "vabd.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x1e]},
{OP_vaba_u16, 0xf3100710, "vaba.u16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x1f]},
{OP_vabd_u16, 0xf3100740, "vabd.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vaba_u16, 0xf3100750, "vaba.u16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vsub_i16, 0xf3100800, "vsub.i16", VBq, xx, VAq, VCq, xx, no, x, tsi6[7][0x22]},
{OP_vceq_i16, 0xf3100810, "vceq.i16", VBq, xx, VAq, VCq, xx, no, x, tsi6b[3][0x05]},
{OP_vsub_i16, 0xf3100840, "vsub.i16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vceq_i16, 0xf3100850, "vceq.i16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmls_i16, 0xf3100900, "vmls.i16", VBq, xx, VAq, VCq, xx, no, x, tsi6[4][0x12]},
{INVALID, 0xf3100910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmls_i16, 0xf3100940, "vmls.i16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf3100950, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpmax_u16, 0xf3100a00, "vpmax.u16", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vpmin_u16, 0xf3100a10, "vpmin.u16", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{INVALID, 0xf3100a40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrdmulh_s16, 0xf3100b00, "vqrdmulh.s16", VBq, xx, VAq, VCq, xx, no, x, tsi6[4][0x36]},
{INVALID, 0xf3100b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrdmulh_s16, 0xf3100b40, "vqrdmulh.s16", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf3100b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_sha256h2_32, 0xf3100c40, "sha256h2.32", VBdq, xx, VAdq, VCdq, xx, v8, x, END_LIST},
{INVALID, 0xf3100c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100d40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3100f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 8 */
{OP_vhadd_u32, 0xf3200000, "vhadd.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x02]},
{OP_vqadd_u32, 0xf3200010, "vqadd.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x03]},
{OP_vhadd_u32, 0xf3200040, "vhadd.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqadd_u32, 0xf3200050, "vqadd.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrhadd_u32, 0xf3200100, "vrhadd.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x06]},
{OP_vbit, 0xf3200110, "vbit", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x07]},
{OP_vrhadd_u32, 0xf3200140, "vrhadd.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vbit, 0xf3200150, "vbit", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vhsub_u32, 0xf3200200, "vhsub.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x0a]},
{OP_vqsub_u32, 0xf3200210, "vqsub.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x0b]},
{OP_vhsub_u32, 0xf3200240, "vhsub.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqsub_u32, 0xf3200250, "vqsub.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcgt_u32, 0xf3200300, "vcgt.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x0e]},
{OP_vcge_u32, 0xf3200310, "vcge.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x0f]},
{OP_vcgt_u32, 0xf3200340, "vcgt.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vcge_u32, 0xf3200350, "vcge.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vshl_u32, 0xf3200400, "vshl.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x12]},
{OP_vqshl_u32, 0xf3200410, "vqshl.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[11][0x1f]},
{OP_vshl_u32, 0xf3200440, "vshl.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshl_u32, 0xf3200450, "vqshl.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrshl_u32, 0xf3200500, "vrshl.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x16]},
{OP_vqrshl_u32, 0xf3200510, "vqrshl.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x17]},
{OP_vrshl_u32, 0xf3200540, "vrshl.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqrshl_u32, 0xf3200550, "vqrshl.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmax_u32, 0xf3200600, "vmax.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x1a]},
{OP_vmin_u32, 0xf3200610, "vmin.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x1b]},
{OP_vmax_u32, 0xf3200640, "vmax.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmin_u32, 0xf3200650, "vmin.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vabd_u32, 0xf3200700, "vabd.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x1e]},
{OP_vaba_u32, 0xf3200710, "vaba.u32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x1f]},
{OP_vabd_u32, 0xf3200740, "vabd.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vaba_u32, 0xf3200750, "vaba.u32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
/* 0x80 */
{OP_vsub_i32, 0xf3200800, "vsub.i32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x22]},
{OP_vceq_i32, 0xf3200810, "vceq.i32", VBq, xx, VAq, VCq, xx, no, x, tsi6b[6][0x05]},
{OP_vsub_i32, 0xf3200840, "vsub.i32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vceq_i32, 0xf3200850, "vceq.i32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmls_i32, 0xf3200900, "vmls.i32", VBq, xx, VAq, VCq, xx, no, x, tsi6[5][0x12]},
{OP_vmul_p32, 0xf3200910, "vmul.p32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x27]},
{OP_vmls_i32, 0xf3200940, "vmls.i32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmul_p32, 0xf3200950, "vmul.p32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vpmax_u32, 0xf3200a00, "vpmax.u32", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vpmin_u32, 0xf3200a10, "vpmin.u32", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{INVALID, 0xf3200a40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3200a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrdmulh_s32, 0xf3200b00, "vqrdmulh.s32", VBq, xx, VAq, VCq, xx, no, x, tsi6[5][0x36]},
{INVALID, 0xf3200b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrdmulh_s32, 0xf3200b40, "vqrdmulh.s32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf3200b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3200c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3200c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_sha256su1_32, 0xf3200c40, "sha256su1.32", VBdq, xx, VAdq, VCdq, xx, v8, x, END_LIST},
{INVALID, 0xf3200c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3200d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3200d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3200d40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3200d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcgt_f32, 0xf3200e00, "vcgt.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6b[6][0x11]},
{OP_vacgt_f32, 0xf3200e10, "vacgt.f32", VBq, xx, VAq, VCq, xx, no, x, tsi6[8][0x3b]},
{OP_vcgt_f32, 0xf3200e40, "vcgt.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vacgt_f32, 0xf3200e50, "vacgt.f32", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vpmin_f32, 0xf3200f00, "vpmin.f32", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vminnm_f32, 0xf3200f10, "vminnm.f32", VBq, xx, VAq, VCq, xx, v8, x, tsi6[8][0x3f]},
{INVALID, 0xf3200f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vminnm_f32, 0xf3200f50, "vminnm.f32", VBdq, xx, VAdq, VCdq, xx, v8, x, tb6[1][0x01]},
}, { /* 9 */
/* XXX: this entry is sparse: should we make a new table to somehow compress it? */
{INVALID, 0xf3300000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqadd_u64, 0xf3300010, "vqadd.u64", VBq, xx, VAq, VCq, xx, no, x, tsi6[9][0x03]},
{INVALID, 0xf3300040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqadd_u64, 0xf3300050, "vqadd.u64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf3300100, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbif, 0xf3300110, "vbif", VBq, xx, VAq, VCq, xx, no, x, tsi6[9][0x07]},
{INVALID, 0xf3300140, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vbif, 0xf3300150, "vbif", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf3300200, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqsub_u64, 0xf3300210, "vqsub.u64", VBq, xx, VAq, VCq, xx, no, x, tsi6[9][0x0b]},
{INVALID, 0xf3300240, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqsub_u64, 0xf3300250, "vqsub.u64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf3300300, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300310, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300350, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshl_u64, 0xf3300400, "vshl.u64", VBq, xx, VAq, VCq, xx, no, x, tsi6[9][0x12]},
{OP_vqshl_u64, 0xf3300410, "vqshl.u64", VBq, xx, VAq, VCq, xx, no, x, ti6l[1][0x0f]},
{OP_vshl_u64, 0xf3300440, "vshl.u64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshl_u64, 0xf3300450, "vqshl.u64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vrshl_u64, 0xf3300500, "vrshl.u64", VBq, xx, VAq, VCq, xx, no, x, tsi6[9][0x16]},
{OP_vqrshl_u64, 0xf3300510, "vqrshl.u64", VBq, xx, VAq, VCq, xx, no, x, tsi6[9][0x17]},
{OP_vrshl_u64, 0xf3300540, "vrshl.u64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqrshl_u64, 0xf3300550, "vqrshl.u64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf3300600, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300640, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300650, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300700, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300710, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300750, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 0x80 */
{OP_vsub_i64, 0xf3300800, "vsub.i64", VBq, xx, VAq, VCq, xx, no, x, tsi6[9][0x22]},
{INVALID, 0xf3300810, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vsub_i64, 0xf3300840, "vsub.i64", VBdq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{INVALID, 0xf3300850, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300900, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300910, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300940, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300950, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300a00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300a10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300a40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300b00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300b40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300c40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300d40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3300f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 10 */
{OP_vaddl_u16, 0xf3900000, "vaddl.u16", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vshr_u16, 0xf3900010, "vshr.u16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[10][0x03]},/*XXX: imm = 16-imm*/
{OP_vmla_i16, 0xf3900040, "vmla.i16", VBdq, xx, VAdq, VC3h_q, i2x5_3, no, x, tsi6[1][0x26]},
{OP_vshr_u16, 0xf3900050, "vshr.u16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vaddw_u16, 0xf3900100, "vaddw.u16", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vsra_u16, 0xf3900110, "vsra.u16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[10][0x07]},/*XXX: imm = 16-imm*/
{INVALID, 0xf3900140, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vsra_u16, 0xf3900150, "vsra.u16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vsubl_u16, 0xf3900200, "vsubl.u16", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vrshr_u16, 0xf3900210, "vrshr.u16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[10][0x0b]},/*XXX: imm = 16-imm*/
{OP_vmlal_u16, 0xf3900240, "vmlal.u16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{OP_vrshr_u16, 0xf3900250, "vrshr.u16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vsubw_u16, 0xf3900300, "vsubw.u16", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vrsra_u16, 0xf3900310, "vrsra.u16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[10][0x0f]},/*XXX: imm = 16-imm*/
{INVALID, 0xf3900340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrsra_u16, 0xf3900350, "vrsra.u16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vraddhn_i32, 0xf3900400, "vraddhn.i32", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vsri_16, 0xf3900410, "vsri.16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[10][0x13]},/*XXX: imm = 16-imm?*/
{OP_vmls_i16, 0xf3900440, "vmls.i16", VBdq, xx, VAdq, VC3h_q, i2x5_3, no, x, tsi6[7][0x26]},
{OP_vsri_16, 0xf3900450, "vsri.16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm?*/
{OP_vabal_u16, 0xf3900500, "vabal.u16", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vsli_16, 0xf3900510, "vsli.16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[10][0x17]},/*XXX: imm = 16-imm?*/
{INVALID, 0xf3900540, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vsli_16, 0xf3900550, "vsli.16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm?*/
{OP_vrsubhn_i32, 0xf3900600, "vrsubhn.i32", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshlu_s16, 0xf3900610, "vqshlu.s16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[10][0x1b]},/*XXX: imm = imm-16*/
{OP_vmlsl_u16, 0xf3900640, "vmlsl.u16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{OP_vqshlu_s16, 0xf3900650, "vqshlu.s16", VBdq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = imm-16*/
{OP_vabdl_u16, 0xf3900700, "vabdl.u16", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vqshl_u16, 0xf3900710, "vqshl.u16", VBq, xx, VCq, i4_16, xx, no, x, tsi6[7][0x11]},/*XXX: imm = imm-16*/
{INVALID, 0xf3900740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqshl_u16, 0xf3900750, "vqshl.u16", VBdq, xx, VCdq, i4_16, xx, no, x, tsi6[7][0x13]},/*XXX: imm = imm-16*/
/* 0x80 */
{OP_vmlal_u16, 0xf3900800, "vmlal.u16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[10][0x0a]},
{OP_vqshrun_s32, 0xf3900810, "vqshrun.s32", VBq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vmul_i16, 0xf3900840, "vmul.i16", VBdq, xx, VAdq, VC3h_q, i2x5_3, no, x, tsi6[1][0x27]},
{OP_vqrshrun_s32, 0xf3900850, "vqrshrun.s32", VBq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{INVALID, 0xf3900900, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqshrn_u32, 0xf3900910, "vqshrn.u32", VBq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{INVALID, 0xf3900940, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrshrn_u32, 0xf3900950, "vqrshrn.u32", VBq, xx, VCdq, i4_16, xx, no, x, END_LIST},/*XXX: imm = 16-imm*/
{OP_vmlsl_u16, 0xf3900a00, "vmlsl.u16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[10][0x1a]},
{EXT_IMM1916, 0xf3900a10, "(ext imm1916 2)", xx, xx, xx, xx, xx, no, x, 2},
{OP_vmull_u16, 0xf3900a40, "vmull.u16", VBdq, xx, VAq, VC3h_q, i2x5_3, no, x, END_LIST},
{INVALID, 0xf3900a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900b00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900b40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmull_u16, 0xf3900c00, "vmull.u16", VBdq, xx, VAq, VCq, xx, no, x, tsi6[10][0x2a]},
{INVALID, 0xf3900c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmulh_s16, 0xf3900c40, "vqdmulh.s16", VBdq, xx, VAdq, VC3h_q, i2x5_3, no, x, tsi6[1][0x2e]},
{INVALID, 0xf3900c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrdmulh_s16, 0xf3900d40, "vqrdmulh.s16", VBdq, xx, VAdq, VC3h_q, i2x5_3, no, x, tsi6[7][0x2e]},
{INVALID, 0xf3900d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3900f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 11 */
/* 0xf3b with bit 4 set will also come here */
{OP_vaddl_u32, 0xf3a00000, "vaddl.u32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vshr_u32, 0xf3a00010, "vshr.u32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[11][0x03]},/*XXX: imm = 32-imm*/
{OP_vmla_i32, 0xf3a00040, "vmla.i32", VBdq, xx, VAdq, VC4d_q, i1_5, no, x, tsi6[2][0x26]},
{OP_vshr_u32, 0xf3a00050, "vshr.u32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vaddw_u32, 0xf3a00100, "vaddw.u32", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vsra_u32, 0xf3a00110, "vsra.u32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[11][0x07]},/*XXX: imm = 32-imm*/
{OP_vmla_f32, 0xf3a00140, "vmla.f32", VBdq, xx, VAdq, VC4d_q, i1_5, no, x, tsi6[0][0x35]},
{OP_vsra_u32, 0xf3a00150, "vsra.u32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vsubl_u32, 0xf3a00200, "vsubl.u32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vrshr_u32, 0xf3a00210, "vrshr.u32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[11][0x0b]},/*XXX: imm = 32-imm*/
{OP_vmlal_u32, 0xf3a00240, "vmlal.u32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{OP_vrshr_u32, 0xf3a00250, "vrshr.u32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vsubw_u32, 0xf3a00300, "vsubw.u32", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vrsra_u32, 0xf3a00310, "vrsra.u32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[11][0x0f]},/*XXX: imm = 32-imm*/
{INVALID, 0xf3a00340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrsra_u32, 0xf3a00350, "vrsra.u32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vraddhn_i64, 0xf3a00400, "vraddhn.i64", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vsri_32, 0xf3a00410, "vsri.32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[11][0x13]},/*XXX: imm = 32-imm?*/
{OP_vmls_i32, 0xf3a00440, "vmls.i32", VBdq, xx, VAdq, VC4d_q, i1_5, no, x, tsi6[8][0x26]},
{OP_vsri_32, 0xf3a00450, "vsri.32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm?*/
{OP_vabal_u32, 0xf3a00500, "vabal.u32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vsli_32, 0xf3a00510, "vsli.32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[11][0x17]},/*XXX: imm = 32-imm?*/
{OP_vmls_f32, 0xf3a00540, "vmls.f32", VBdq, xx, VAdq, VC4d_q, i1_5, no, x, tsi6[2][0x35]},
{OP_vsli_32, 0xf3a00550, "vsli.32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm?*/
{OP_vrsubhn_i64, 0xf3a00600, "vrsubhn.i64", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vqshlu_s32, 0xf3a00610, "vqshlu.s32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[11][0x1b]},/*XXX: imm = imm-32*/
{OP_vmlsl_u32, 0xf3a00640, "vmlsl.u32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{OP_vqshlu_s32, 0xf3a00650, "vqshlu.s32", VBdq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = imm-32*/
{OP_vabdl_u32, 0xf3a00700, "vabdl.u32", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vqshl_u32, 0xf3a00710, "vqshl.u32", VBq, xx, VCq, i5_16, xx, no, x, tsi6[8][0x11]},/*XXX: imm = imm-32*/
{INVALID, 0xf3a00740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqshl_u32, 0xf3a00750, "vqshl.u32", VBdq, xx, VCdq, i5_16, xx, no, x, tsi6[8][0x13]},/*XXX: imm = imm-32*/
/* 0x80 */
{OP_vmlal_u32, 0xf3a00800, "vmlal.u32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[11][0x0a]},
{OP_vqshrun_s64, 0xf3a00810, "vqshrun.s64", VBq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vmul_i32, 0xf3a00840, "vmul.i32", VBdq, xx, VAdq, VC4d_q, i1_5, no, x, tsi6[2][0x27]},
{OP_vqrshrun_s64, 0xf3a00850, "vqrshrun.s64", VBq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{INVALID, 0xf3a00900, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqshrn_u64, 0xf3a00910, "vqshrn.u64", VBq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vmul_f32, 0xf3a00940, "vmul.f32", VBdq, xx, VAdq, VC4d_q, i1_5, no, x, tsi6[6][0x37]},
{OP_vqrshrn_u64, 0xf3a00950, "vqrshrn.u64", VBq, xx, VCdq, i5_16, xx, no, x, END_LIST},/*XXX: imm = 32-imm*/
{OP_vmlsl_u32, 0xf3a00a00, "vmlsl.u32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[11][0x1a]},
{EXT_IMM2016, 0xf3a00a10, "(ext imm2016 1)", xx, xx, xx, xx, xx, no, x, 1},
{OP_vmull_u32, 0xf3a00a40, "vmull.u32", VBdq, xx, VAq, VC4d_q, i1_5, no, x, END_LIST},
{INVALID, 0xf3a00a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3a00b00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3a00b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3a00b40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3a00b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmull_u32, 0xf3a00c00, "vmull.u32", VBdq, xx, VAq, VCq, xx, no, x, tsi6[11][0x2a]},
{INVALID, 0xf3a00c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqdmulh_s32, 0xf3a00c40, "vqdmulh.s32", VBdq, xx, VAdq, VC4d_q, i1_5, no, x, tsi6[2][0x2e]},
{INVALID, 0xf3a00c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3a00d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3a00d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqrdmulh_s32, 0xf3a00d40, "vqrdmulh.s32", VBdq, xx, VAdq, VC4d_q, i1_5, no, x, tsi6[8][0x2e]},
{INVALID, 0xf3a00d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3a00e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_f32_u32, 0xf3a00e10, "vcvt.f32.u32", VBq, xx, VCq, i6_16, xx, no, x, tsi6b[8][0x1b]},
{INVALID, 0xf3a00e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_f32_u32, 0xf3a00e50, "vcvt.f32.u32", VBdq, xx, VCdq, i6_16, xx, no, x, t16[1][0x0b]},
{INVALID, 0xf3a00f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_u32_f32, 0xf3a00f10, "vcvt.u32.f32", VBq, xx, VCq, i6_16, xx, no, x, tsi6b[8][0x1f]},
{INVALID, 0xf3a00f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_u32_f32, 0xf3a00f50, "vcvt.u32.f32", VBdq, xx, VCdq, i6_16, xx, no, x, t16[1][0x0f]},
},
};
/* Indexed by bits 11:8,6 */
const instr_info_t A32_ext_simd5[][32] = {
{ /* 0 */
{OP_vshr_s8, 0xf2880010, "vshr.s8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[0][0x01]},/*XXX: imm = 8-imm*/
{OP_vshr_s8, 0xf2880050, "vshr.s8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vsra_s8, 0xf2880110, "vsra.s8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[0][0x03]},/*XXX: imm = 8-imm*/
{OP_vsra_s8, 0xf2880150, "vsra.s8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vrshr_s8, 0xf2880210, "vrshr.s8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[0][0x05]},/*XXX: imm = 8-imm*/
{OP_vrshr_s8, 0xf2880250, "vrshr.s8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vrsra_s8, 0xf2880310, "vrsra.s8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[0][0x07]},/*XXX: imm = 8-imm*/
{OP_vrsra_s8, 0xf2880350, "vrsra.s8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{INVALID, 0xf2880410, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880450, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshl_i8, 0xf2880510, "vshl.i8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[0][0x0b]},/*XXX: imm = 8-imm?*/
{OP_vshl_i8, 0xf2880550, "vshl.i8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm?*/
{INVALID, 0xf2880610, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880650, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqshl_s8, 0xf2880710, "vqshl.s8", VBq, xx, VCq, i3_16, xx, no, x, tsi6[0][0x11]},/*XXX: imm = imm-8*/
{OP_vqshl_s8, 0xf2880750, "vqshl.s8", VBdq, xx, VCdq, i3_16, xx, no, x, tsi6[0][0x13]},/*XXX: imm = imm-8*/
{OP_vshrn_i16, 0xf2880810, "vshrn.i16", VBq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vrshrn_i16, 0xf2880850, "vrshrn.i16", VBq, xx, VCq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vqshrn_s16, 0xf2880910, "vqshrn.s16", VBq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vqrshrn_s16, 0xf2880950, "vqrshrn.s16", VBq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{EXT_IMM1816, 0xf2880a10, "(ext imm1816 0)", xx, xx, xx, xx, xx, no, x, 0},
{INVALID, 0xf2880a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf2880f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 1 */
{OP_vshr_u8, 0xf3880010, "vshr.u8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[1][0x01]},/*XXX: imm = 8-imm*/
{OP_vshr_u8, 0xf3880050, "vshr.u8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vsra_u8, 0xf3880110, "vsra.u8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[1][0x03]},/*XXX: imm = 8-imm*/
{OP_vsra_u8, 0xf3880150, "vsra.u8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vrshr_u8, 0xf3880210, "vrshr.u8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[1][0x05]},/*XXX: imm = 8-imm*/
{OP_vrshr_u8, 0xf3880250, "vrshr.u8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vrsra_u8, 0xf3880310, "vrsra.u8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[1][0x07]},/*XXX: imm = 8-imm*/
{OP_vrsra_u8, 0xf3880350, "vrsra.u8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vsri_8, 0xf3880410, "vsri.8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[1][0x09]},/*XXX: imm = 8-imm?*/
{OP_vsri_8, 0xf3880450, "vsri.8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm?*/
{OP_vsli_8, 0xf3880510, "vsli.8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[1][0x0b]},/*XXX: imm = 8-imm?*/
{OP_vsli_8, 0xf3880550, "vsli.8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm?*/
{OP_vqshlu_s8, 0xf3880610, "vqshlu.s8", VBq, xx, VCq, i3_16, xx, no, x, tsi5[1][0x0d]},/*XXX: imm = imm-8*/
{OP_vqshlu_s8, 0xf3880650, "vqshlu.s8", VBdq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = imm-8*/
{OP_vqshl_u8, 0xf3880710, "vqshl.u8", VBq, xx, VCq, i3_16, xx, no, x, tsi6[6][0x11]},/*XXX: imm = imm-8*/
{OP_vqshl_u8, 0xf3880750, "vqshl.u8", VBdq, xx, VCdq, i3_16, xx, no, x, tsi6[6][0x13]},/*XXX: imm = imm-8*/
{OP_vqshrun_s16, 0xf3880810, "vqshrun.s16", VBq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vqrshrun_s16, 0xf3880850, "vqrshrun.s16", VBq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vqshrn_u16, 0xf3880910, "vqshrn.u16", VBq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{OP_vqrshrn_u16, 0xf3880950, "vqrshrn.u16", VBq, xx, VCdq, i3_16, xx, no, x, END_LIST},/*XXX: imm = 8-imm*/
{EXT_IMM1816, 0xf3880a10, "(ext imm1816 1)", xx, xx, xx, xx, xx, no, x, 1},
{INVALID, 0xf3880a50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880b10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880b50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880c50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880d10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880d50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880e10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880e50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3880f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/* Indexed by bits 18:16,8:7 */
const instr_info_t A32_ext_simd5b[][32] = {
{ /* 0 */
{OP_vrinta_f32_f32, 0xfeb80a40, "vrinta.f32.f32", WBd, xx, WCd, xx, xx, v8, x, END_LIST},
{INVALID, 0xfeb80ac0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrinta_f64_f64, 0xfeb80b40, "vrinta.f64.f64", VBq, xx, VCq, xx, xx, v8, x, END_LIST},
{INVALID, 0xfeb80bc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrintn_f32_f32, 0xfeb90a40, "vrintn.f32.f32", WBd, xx, WCd, xx, xx, v8, x, END_LIST},
{INVALID, 0xfeb90ac0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrintn_f64_f64, 0xfeb90b40, "vrintn.f64.f64", VBq, xx, VCq, xx, xx, v8, x, END_LIST},
{INVALID, 0xfeb90bc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrintp_f32_f32, 0xfeba0a40, "vrintp.f32.f32", WBd, xx, WCd, xx, xx, v8, x, END_LIST},
{INVALID, 0xfeba0ac0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrintp_f64_f64, 0xfeba0b40, "vrintp.f64.f64", VBq, xx, VCq, xx, xx, v8, x, END_LIST},
{INVALID, 0xfeba0bc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrintm_f32_f32, 0xfebb0a40, "vrintm.f32.f32", WBd, xx, WCd, xx, xx, v8, x, END_LIST},
{INVALID, 0xfebb0ac0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrintm_f64_f64, 0xfebb0b40, "vrintm.f64.f64", VBq, xx, VCq, xx, xx, v8, x, END_LIST},
{INVALID, 0xfebb0bc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvta_u32_f32, 0xfebc0a40, "vcvta.u32.f32", WBd, xx, WCd, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvta_s32_f32, 0xfebc0ac0, "vcvta.s32.f32", WBd, xx, WCd, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvta_u32_f64, 0xfebc0b40, "vcvta.u32.f64", WBd, xx, VCq, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvta_s32_f64, 0xfebc0bc0, "vcvta.s32.f64", WBd, xx, VCq, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtn_u32_f32, 0xfebd0a40, "vcvtn.u32.f32", WBd, xx, WCd, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtn_s32_f32, 0xfebd0ac0, "vcvtn.s32.f32", WBd, xx, WCd, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtn_u32_f64, 0xfebd0b40, "vcvtn.u32.f64", WBd, xx, VCq, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtn_s32_f64, 0xfebd0bc0, "vcvtn.s32.f64", WBd, xx, VCq, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtp_u32_f32, 0xfebe0a40, "vcvtp.u32.f32", WBd, xx, WCd, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtp_s32_f32, 0xfebe0ac0, "vcvtp.s32.f32", WBd, xx, WCd, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtp_u32_f64, 0xfebe0b40, "vcvtp.u32.f64", WBd, xx, VCq, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtp_s32_f64, 0xfebe0bc0, "vcvtp.s32.f64", WBd, xx, VCq, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtm_u32_f32, 0xfebf0a40, "vcvtm.u32.f32", WBd, xx, WCd, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtm_s32_f32, 0xfebf0ac0, "vcvtm.s32.f32", WBd, xx, WCd, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtm_u32_f64, 0xfebf0b40, "vcvtm.u32.f64", WBd, xx, VCq, xx, xx, v8|vfp, x, END_LIST},
{OP_vcvtm_s32_f64, 0xfebf0bc0, "vcvtm.s32.f64", WBd, xx, VCq, xx, xx, v8|vfp, x, END_LIST},
},
};
/* Indexed by bits 11:8,6:4, where bits 6:4 are mapped to an offset as follows:
 * + If bit 4 == 0, the offset is 0;
 * + Else, the offset is 1 + bits 6:5.
 * (Thus bits 6:4 take only the value 0 and the odd values below 8,
 * giving five entries per 11:8 value.)
 */
const instr_info_t A32_ext_simd8[][80] = {
{ /* 0 */
{OP_vaddl_s8, 0xf2800000, "vaddl.s8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf2800010, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x03]},
{OP_vmvn_i32, 0xf2800030, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x04]},
{OP_vmov_i32, 0xf2800050, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
{OP_vmvn_i32, 0xf2800070, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
/* 0x10 */
{OP_vaddw_s8, 0xf2800100, "vaddw.s8", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vorr_i32, 0xf2800110, "vorr.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x08]},
{OP_vbic_i32, 0xf2800130, "vbic.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x09]},
{OP_vorr_i32, 0xf2800150, "vorr.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
{OP_vbic_i32, 0xf2800170, "vbic.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
/* 0x20 */
{OP_vsubl_s8, 0xf2800200, "vsubl.s8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf2800210, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800230, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf2800250, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800270, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x30 */
{OP_vsubw_s8, 0xf2800300, "vsubw.s8", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vorr_i32, 0xf2800310, "vorr.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf2800330, "vbic.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vorr_i32, 0xf2800350, "vorr.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf2800370, "vbic.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x40 */
{OP_vaddhn_i16, 0xf2800400, "vaddhn.i16", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf2800410, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800430, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf2800450, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800470, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x50 */
{OP_vabal_s8, 0xf2800500, "vabal.s8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vorr_i32, 0xf2800510, "vorr.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf2800530, "vbic.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vorr_i32, 0xf2800550, "vorr.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf2800570, "vbic.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x60 */
{OP_vsubhn_i16, 0xf2800600, "vsubhn.i16", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf2800610, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800630, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf2800650, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800670, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x70 */
{OP_vabdl_s8, 0xf2800700, "vabdl.s8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vorr_i32, 0xf2800710, "vorr.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf2800730, "vbic.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vorr_i32, 0xf2800750, "vorr.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf2800770, "vbic.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x80 */
{OP_vmlal_s8, 0xf2800800, "vmlal.s8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i16, 0xf2800810, "vmov.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x2b]},
{OP_vmvn_i16, 0xf2800830, "vmvn.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x2c]},
{OP_vmov_i16, 0xf2800850, "vmov.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
{OP_vmvn_i16, 0xf2800870, "vmvn.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
/* 0x90 */
{INVALID, 0xf2800900, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorr_i16, 0xf2800910, "vorr.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x30]},
{OP_vbic_i16, 0xf2800930, "vbic.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x31]},
{OP_vorr_i16, 0xf2800950, "vorr.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
{OP_vbic_i16, 0xf2800970, "vbic.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
/* 0xa0 */
{OP_vmlsl_s8, 0xf2800a00, "vmlsl.s8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i16, 0xf2800a10, "vmov.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i16, 0xf2800a30, "vmvn.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i16, 0xf2800a50, "vmov.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i16, 0xf2800a70, "vmvn.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0xb0 */
{INVALID, 0xf2800b00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorr_i16, 0xf2800b10, "vorr.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i16, 0xf2800b30, "vbic.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vorr_i16, 0xf2800b50, "vorr.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i16, 0xf2800b70, "vbic.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0xc0 */
{OP_vmull_s8, 0xf2800c00, "vmull.s8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf2800c10, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800c30, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf2800c50, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800c70, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0xd0 */
{INVALID, 0xf2800d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmov_i32, 0xf2800d10, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800d30, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf2800d50, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf2800d70, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0xe0 */
{OP_vmull_p8, 0xf2800e00, "vmull.p8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i8, 0xf2800e10, "vmov.i8", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x49]},
{OP_vmov_i64, 0xf2800e30, "vmov.i64", VBq, xx, i12x8_24_16_0, xx, xx, no, x, tsi8[0][0x4a]},
{OP_vmov_i8, 0xf2800e50, "vmov.i8", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
{OP_vmov_i64, 0xf2800e70, "vmov.i64", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, END_LIST},
/* 0xf0 */
{INVALID, 0xf2800f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmov_f32, 0xf2800f10, "vmov.f32", VBq, xx, i12x8_28_16_0, xx, xx, no, x, tsi8[0][0x4e]},
{INVALID, 0xf2800f30, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmov_f32, 0xf2800f50, "vmov.f32", VBdq, xx, i12x8_28_16_0, xx, xx, no, x, top4[7][0x00]},
{INVALID, 0xf2800f70, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 1 */
{OP_vaddl_u8, 0xf3800000, "vaddl.u8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf3800010, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800030, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf3800050, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800070, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x10 */
{OP_vaddw_u8, 0xf3800100, "vaddw.u8", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vorr_i32, 0xf3800110, "vorr.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf3800130, "vbic.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vorr_i32, 0xf3800150, "vorr.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf3800170, "vbic.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x20 */
{OP_vsubl_u8, 0xf3800200, "vsubl.u8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf3800210, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800230, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf3800250, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800270, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x30 */
{OP_vsubw_u8, 0xf3800300, "vsubw.u8", VBdq, xx, VAdq, VCq, xx, no, x, END_LIST},
{OP_vorr_i32, 0xf3800310, "vorr.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf3800330, "vbic.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vorr_i32, 0xf3800350, "vorr.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vbic_i32, 0xf3800370, "vbic.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x40 */
{OP_vraddhn_i16, 0xf3800400, "vraddhn.i16", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf3800410, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800430, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf3800450, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800470, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x50 */
{OP_vabal_u8, 0xf3800500, "vabal.u8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vorr_i32, 0xf3800510, "vorr.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3800530, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorr_i32, 0xf3800550, "vorr.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3800570, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 0x60 */
{OP_vrsubhn_i16, 0xf3800600, "vrsubhn.i16", VBq, xx, VAdq, VCdq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf3800610, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800630, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf3800650, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800670, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x70 */
{OP_vabdl_u8, 0xf3800700, "vabdl.u8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vorr_i32, 0xf3800710, "vorr.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3800730, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorr_i32, 0xf3800750, "vorr.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3800770, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 0x80 */
{OP_vmlal_u8, 0xf3800800, "vmlal.u8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i16, 0xf3800810, "vmov.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i16, 0xf3800830, "vmvn.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i16, 0xf3800850, "vmov.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i16, 0xf3800870, "vmvn.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0x90 */
{INVALID, 0xf3800900, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorr_i16, 0xf3800910, "vorr.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3800930, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorr_i16, 0xf3800950, "vorr.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3800970, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 0xa0 */
{OP_vmlsl_u8, 0xf3800a00, "vmlsl.u8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i16, 0xf3800a10, "vmov.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i16, 0xf3800a30, "vmvn.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i16, 0xf3800a50, "vmov.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i16, 0xf3800a70, "vmvn.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0xb0 */
{INVALID, 0xf3800b00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorr_i16, 0xf3800b10, "vorr.i16", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3800b30, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vorr_i16, 0xf3800b50, "vorr.i16", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3800b70, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
/* 0xc0 */
{OP_vmull_u8, 0xf3800c00, "vmull.u8", VBdq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vmov_i32, 0xf3800c10, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800c30, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf3800c50, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800c70, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0xd0 */
{INVALID, 0xf3800d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmov_i32, 0xf3800d10, "vmov.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800d30, "vmvn.i32", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i32, 0xf3800d50, "vmov.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmvn_i32, 0xf3800d70, "vmvn.i32", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0xe0 */
{INVALID, 0xf3800e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vmov_i8, 0xf3800e10, "vmov.i8", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i64, 0xf3800e30, "vmov.i64", VBq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i8, 0xf3800e50, "vmov.i8", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
{OP_vmov_i64, 0xf3800e70, "vmov.i64", VBdq, xx, i12x8_24_16_0, xx, xx, no, x, DUP_ENTRY},
/* 0xf0 */
{INVALID, 0xf3800f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3800f10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3800f30, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3800f50, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3800f70, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
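/* Illustrative sketch, not used by the decoder: one way the A32_ext_simd8
 * second-level index described above could be computed from an encoded
 * instruction word.  The function name and the plain "unsigned int" type
 * are hypothetical and for exposition only.
 */
static inline unsigned int
simd8_index_sketch(unsigned int enc)
{
    unsigned int bits11_8 = (enc >> 8) & 0xf;
    unsigned int bit4 = (enc >> 4) & 0x1;
    unsigned int bits6_5 = (enc >> 5) & 0x3;
    /* Bit 4 clear => offset 0; bit 4 set => offset 1 + bits 6:5,
     * so each 11:8 value contributes five entries (16 * 5 == 80).
     */
    unsigned int offset = (bit4 == 0) ? 0 : 1 + bits6_5;
    /* E.g., 0xf2800850 (vmov.i16 dq) => 8*5 + 3 == 0x2b, matching the
     * tsi8[0][0x2b] chain reference in the table above.
     */
    return bits11_8 * 5 + offset;
}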
/* Indexed by bits 10:8,7:6, plus one extra set of 7:6 entries used when bit 11 is set */
const instr_info_t A32_ext_simd6b[][36] = {
{ /* 0 */
{OP_vcgt_s8, 0xf3b10000, "vcgt.s8", VBq, xx, VCq, k0, xx, no, x, tsi6[0][0x0c]},
{OP_vcgt_s8, 0xf3b10040, "vcgt.s8", VBdq, xx, VCdq, k0, xx, no, x, tsi6[0][0x0e]},
{OP_vcge_s8, 0xf3b10080, "vcge.s8", VBq, xx, VCq, k0, xx, no, x, tsi6[0][0x0d]},
{OP_vcge_s8, 0xf3b100c0, "vcge.s8", VBdq, xx, VCdq, k0, xx, no, x, tsi6[0][0x0f]},
{OP_vceq_i8, 0xf3b10100, "vceq.i8", VBq, xx, VCq, k0, xx, no, x, tsi6[6][0x21]},
{OP_vceq_i8, 0xf3b10140, "vceq.i8", VBdq, xx, VCdq, k0, xx, no, x, tsi6[6][0x23]},
{OP_vcle_s8, 0xf3b10180, "vcle.s8", VBq, xx, VCq, k0, xx, no, x, tsi6b[0][0x07]},
{OP_vcle_s8, 0xf3b101c0, "vcle.s8", VBdq, xx, VCdq, k0, xx, no, x, END_LIST},
{OP_vclt_s8, 0xf3b10200, "vclt.s8", VBq, xx, VCq, k0, xx, no, x, tsi6b[0][0x09]},
{OP_vclt_s8, 0xf3b10240, "vclt.s8", VBdq, xx, VCdq, k0, xx, no, x, END_LIST},
{INVALID, 0xf3b10280, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b102c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vabs_s8, 0xf3b10300, "vabs.s8", VBq, xx, VCq, xx, xx, no, x, tsi6b[0][0x0d]},
{OP_vabs_s8, 0xf3b10340, "vabs.s8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vneg_s8, 0xf3b10380, "vneg.s8", VBq, xx, VCq, xx, xx, no, x, tsi6b[0][0x0f]},
{OP_vneg_s8, 0xf3b103c0, "vneg.s8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b10400, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10440, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10480, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b104c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10500, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10540, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10580, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b105c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10600, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10640, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10680, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b106c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10700, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10780, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b107c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_8, 0xf3b10c00, "vdup.8", VBq, xx, VCb_q, i3_17, xx, no, x, tsi6b[0][0x21]},
{OP_vdup_8, 0xf3b10c40, "vdup.8", VBdq, xx, VCb_q, i3_17, xx, no, x, tfpB[13][0x01]},
{INVALID, 0xf3b10c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b10cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 1 */
{OP_vswp, 0xf3b20000, "vswp", VBq, xx, VCq, xx, xx, no, x, tsi6b[1][0x01]},
{OP_vswp, 0xf3b20040, "vswp", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vtrn_8, 0xf3b20080, "vtrn.8", VBq, xx, VCq, xx, xx, no, x, tsi6b[1][0x03]},
{OP_vtrn_8, 0xf3b200c0, "vtrn.8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vuzp_8, 0xf3b20100, "vuzp.8", VBq, xx, VCq, xx, xx, no, x, tsi6b[1][0x05]},
{OP_vuzp_8, 0xf3b20140, "vuzp.8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vzip_8, 0xf3b20180, "vzip.8", VBq, xx, VCq, xx, xx, no, x, tsi6b[1][0x07]},
{OP_vzip_8, 0xf3b201c0, "vzip.8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vmovn_i16, 0xf3b20200, "vmovn.i16", VBd, xx, VCdq, xx, xx, no, x, END_LIST},/*XXX: doesn't read entire src*/
{OP_vqmovun_s16, 0xf3b20240, "vqmovun.s16", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqmovn_s16, 0xf3b20280, "vqmovn.s16", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqmovn_u16, 0xf3b202c0, "vqmovn.u16", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vshll_i8, 0xf3b20300, "vshll.i8", VBdq, xx, VCq, k8, xx, no, x, END_LIST},
{INVALID, 0xf3b20340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20380, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b203c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20400, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20440, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20480, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b204c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20500, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20540, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20580, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b205c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20600, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20640, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20680, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b206c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20700, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20780, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b207c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_16, 0xf3b20c00, "vdup.16", VBq, xx, VCh_q, i2_18, xx, no, x, tsi6b[1][0x21]},
{OP_vdup_16, 0xf3b20c40, "vdup.16", VBdq, xx, VCh_q, i2_18, xx, no, x, tfpB[10][0x03]},
{INVALID, 0xf3b20c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b20cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 2 */
{OP_vrev64_16, 0xf3b40000, "vrev64.16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x01]},
{OP_vrev64_16, 0xf3b40040, "vrev64.16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vrev32_16, 0xf3b40080, "vrev32.16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x03]},
{OP_vrev32_16, 0xf3b400c0, "vrev32.16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vrev16_16, 0xf3b40100, "vrev16.16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x05]},
{OP_vrev16_16, 0xf3b40140, "vrev16.16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b40180, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b401c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddl_s16, 0xf3b40200, "vpaddl.s16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x09]},
{OP_vpaddl_s16, 0xf3b40240, "vpaddl.s16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vpaddl_u16, 0xf3b40280, "vpaddl.u16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x0b]},
{OP_vpaddl_u16, 0xf3b402c0, "vpaddl.u16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b40300, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b40340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b40380, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b403c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcls_s16, 0xf3b40400, "vcls.s16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x11]},
{OP_vcls_s16, 0xf3b40440, "vcls.s16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vclz_i16, 0xf3b40480, "vclz.i16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x13]},
{OP_vclz_i16, 0xf3b404c0, "vclz.i16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b40500, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b40540, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b40580, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b405c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpadal_s16, 0xf3b40600, "vpadal.s16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x19]},
{OP_vpadal_s16, 0xf3b40640, "vpadal.s16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vpadal_u16, 0xf3b40680, "vpadal.u16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x1b]},
{OP_vpadal_u16, 0xf3b406c0, "vpadal.u16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqabs_s16, 0xf3b40700, "vqabs.s16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x1d]},
{OP_vqabs_s16, 0xf3b40740, "vqabs.s16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqneg_s16, 0xf3b40780, "vqneg.s16", VBq, xx, VCq, xx, xx, no, x, tsi6b[2][0x1f]},
{OP_vqneg_s16, 0xf3b407c0, "vqneg.s16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vdup_32, 0xf3b40c00, "vdup.32", VBq, xx, VCd_q, i1_19, xx, no, x, tsi6b[2][0x21]},
{OP_vdup_32, 0xf3b40c40, "vdup.32", VBdq, xx, VCd_q, i1_19, xx, no, x, tfpB[10][0x01]},
{INVALID, 0xf3b40c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b40cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 3 */
{OP_vcgt_s16, 0xf3b50000, "vcgt.s16", VBq, xx, VCq, k0, xx, no, x, tsi6[1][0x0c]},
{OP_vcgt_s16, 0xf3b50040, "vcgt.s16", VBdq, xx, VCdq, k0, xx, no, x, tsi6[1][0x0e]},
{OP_vcge_s16, 0xf3b50080, "vcge.s16", VBq, xx, VCq, k0, xx, no, x, tsi6[1][0x0d]},
{OP_vcge_s16, 0xf3b500c0, "vcge.s16", VBdq, xx, VCdq, k0, xx, no, x, tsi6[1][0x0f]},
{OP_vceq_i16, 0xf3b50100, "vceq.i16", VBq, xx, VCq, k0, xx, no, x, tsi6[7][0x21]},
{OP_vceq_i16, 0xf3b50140, "vceq.i16", VBdq, xx, VCdq, k0, xx, no, x, tsi6[7][0x23]},
{OP_vcle_s16, 0xf3b50180, "vcle.s16", VBq, xx, VCq, k0, xx, no, x, tsi6b[3][0x07]},
{OP_vcle_s16, 0xf3b501c0, "vcle.s16", VBdq, xx, VCdq, k0, xx, no, x, END_LIST},
{OP_vclt_s16, 0xf3b50200, "vclt.s16", VBq, xx, VCq, k0, xx, no, x, tsi6b[3][0x09]},
{OP_vclt_s16, 0xf3b50240, "vclt.s16", VBdq, xx, VCdq, k0, xx, no, x, END_LIST},
{INVALID, 0xf3b50280, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b502c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vabs_s16, 0xf3b50300, "vabs.s16", VBq, xx, VCq, xx, xx, no, x, tsi6b[3][0x0d]},
{OP_vabs_s16, 0xf3b50340, "vabs.s16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vneg_s16, 0xf3b50380, "vneg.s16", VBq, xx, VCq, xx, xx, no, x, tsi6b[3][0x0f]},
{OP_vneg_s16, 0xf3b503c0, "vneg.s16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b50400, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50440, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50480, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b504c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50500, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50540, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50580, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b505c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50600, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50640, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50680, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b506c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50700, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50780, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b507c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_8, 0xf3b50c00, "vdup.8", VBq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{OP_vdup_8, 0xf3b50c40, "vdup.8", VBdq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3b50c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b50cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 4 */
{INVALID, 0xf3b60000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vtrn_16, 0xf3b60080, "vtrn.16", VBq, xx, VCq, xx, xx, no, x, tsi6b[4][0x03]},
{OP_vtrn_16, 0xf3b600c0, "vtrn.16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vuzp_16, 0xf3b60100, "vuzp.16", VBq, xx, VCq, xx, xx, no, x, tsi6b[4][0x05]},
{OP_vuzp_16, 0xf3b60140, "vuzp.16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vzip_16, 0xf3b60180, "vzip.16", VBq, xx, VCq, xx, xx, no, x, tsi6b[4][0x07]},
{OP_vzip_16, 0xf3b601c0, "vzip.16", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vmovn_i32, 0xf3b60200, "vmovn.i32", VBd, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqmovun_s32, 0xf3b60240, "vqmovun.s32", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqmovn_s32, 0xf3b60280, "vqmovn.s32", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqmovn_u32, 0xf3b602c0, "vqmovn.u32", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vshll_i16, 0xf3b60300, "vshll.i16", VBdq, xx, VCq, k16, xx, no, x, END_LIST},
{INVALID, 0xf3b60340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60380, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b603c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60400, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60440, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60480, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b604c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60500, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60540, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60580, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b605c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_f16_f32, 0xf3b60600, "vcvt.f16.f32", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b60640, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60680, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b606c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcvt_f32_f16, 0xf3b60700, "vcvt.f32.f16", VBdq, xx, VCq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b60740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60780, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b607c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_16, 0xf3b60c00, "vdup.16", VBq, xx, VCh_q, i2_18, xx, no, x, DUP_ENTRY},
{OP_vdup_16, 0xf3b60c40, "vdup.16", VBdq, xx, VCh_q, i2_18, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3b60c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b60cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 5 */
{OP_vrev64_32, 0xf3b80000, "vrev64.32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x01]},
{OP_vrev64_32, 0xf3b80040, "vrev64.32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vrev32_32, 0xf3b80080, "vrev32.32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x03]},
{OP_vrev32_32, 0xf3b800c0, "vrev32.32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b80100, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80140, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80180, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b801c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddl_s32, 0xf3b80200, "vpaddl.s32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x09]},
{OP_vpaddl_s32, 0xf3b80240, "vpaddl.s32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vpaddl_u32, 0xf3b80280, "vpaddl.u32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x0b]},
{OP_vpaddl_u32, 0xf3b802c0, "vpaddl.u32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b80300, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80380, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b803c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vcls_s32, 0xf3b80400, "vcls.s32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x11]},
{OP_vcls_s32, 0xf3b80440, "vcls.s32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vclz_i32, 0xf3b80480, "vclz.i32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x13]},
{OP_vclz_i32, 0xf3b804c0, "vclz.i32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b80500, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80540, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80580, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b805c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpadal_s32, 0xf3b80600, "vpadal.s32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x19]},
{OP_vpadal_s32, 0xf3b80640, "vpadal.s32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vpadal_u32, 0xf3b80680, "vpadal.u32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x1b]},
{OP_vpadal_u32, 0xf3b806c0, "vpadal.u32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqabs_s32, 0xf3b80700, "vqabs.s32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x1d]},
{OP_vqabs_s32, 0xf3b80740, "vqabs.s32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqneg_s32, 0xf3b80780, "vqneg.s32", VBq, xx, VCq, xx, xx, no, x, tsi6b[5][0x1f]},
{OP_vqneg_s32, 0xf3b807c0, "vqneg.s32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b80c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80c40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b80cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 6 */
{OP_vcgt_s32, 0xf3b90000, "vcgt.s32", VBq, xx, VCq, k0, xx, no, x, tsi6[2][0x0c]},
{OP_vcgt_s32, 0xf3b90040, "vcgt.s32", VBdq, xx, VCdq, k0, xx, no, x, tsi6[2][0x0e]},
{OP_vcge_s32, 0xf3b90080, "vcge.s32", VBq, xx, VCq, k0, xx, no, x, tsi6[2][0x0d]},
{OP_vcge_s32, 0xf3b900c0, "vcge.s32", VBdq, xx, VCdq, k0, xx, no, x, tsi6[2][0x0f]},
{OP_vceq_i32, 0xf3b90100, "vceq.i32", VBq, xx, VCq, k0, xx, no, x, tsi6[8][0x21]},
{OP_vceq_i32, 0xf3b90140, "vceq.i32", VBdq, xx, VCdq, k0, xx, no, x, tsi6[8][0x23]},
{OP_vcle_s32, 0xf3b90180, "vcle.s32", VBq, xx, VCq, k0, xx, no, x, tsi6b[6][0x07]},
{OP_vcle_s32, 0xf3b901c0, "vcle.s32", VBdq, xx, VCdq, k0, xx, no, x, END_LIST},
{OP_vclt_s32, 0xf3b90200, "vclt.s32", VBq, xx, VCq, k0, xx, no, x, tsi6b[6][0x09]},
{OP_vclt_s32, 0xf3b90240, "vclt.s32", VBdq, xx, VCdq, k0, xx, no, x, END_LIST},
{INVALID, 0xf3b90280, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_sha1h_32, 0xf3b902c0, "sha1h.32", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{OP_vabs_s32, 0xf3b90300, "vabs.s32", VBq, xx, VCq, xx, xx, no, x, tsi6b[6][0x0d]},
{OP_vabs_s32, 0xf3b90340, "vabs.s32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vneg_s32, 0xf3b90380, "vneg.s32", VBq, xx, VCq, xx, xx, no, x, tsi6b[6][0x0f]},
{OP_vneg_s32, 0xf3b903c0, "vneg.s32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vcgt_f32, 0xf3b90400, "vcgt.f32", VBq, xx, VCq, k0, xx, no, x, tsi6[8][0x38]},
{OP_vcgt_f32, 0xf3b90440, "vcgt.f32", VBdq, xx, VCdq, k0, xx, no, x, tsi6[8][0x3a]},
{OP_vcge_f32, 0xf3b90480, "vcge.f32", VBq, xx, VCq, k0, xx, no, x, tsi6[6][0x38]},
{OP_vcge_f32, 0xf3b904c0, "vcge.f32", VBdq, xx, VCdq, k0, xx, no, x, tsi6[6][0x3a]},
{OP_vceq_f32, 0xf3b90500, "vceq.f32", VBq, xx, VCq, k0, xx, no, x, tsi6[0][0x38]},
{OP_vceq_f32, 0xf3b90540, "vceq.f32", VBdq, xx, VCdq, k0, xx, no, x, tsi6[0][0x3a]},
{OP_vcle_f32, 0xf3b90580, "vcle.f32", VBq, xx, VCq, k0, xx, no, x, tsi6b[6][0x17]},
{OP_vcle_f32, 0xf3b905c0, "vcle.f32", VBdq, xx, VCdq, k0, xx, no, x, END_LIST},
{OP_vclt_f32, 0xf3b90600, "vclt.f32", VBq, xx, VCq, k0, xx, no, x, tsi6b[6][0x19]},
{OP_vclt_f32, 0xf3b90640, "vclt.f32", VBdq, xx, VCdq, k0, xx, no, x, END_LIST},
{INVALID, 0xf3b90680, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b906c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vabs_f32, 0xf3b90700, "vabs.f32", VBq, xx, VCq, xx, xx, no, x, tsi6b[6][0x1d]},
{OP_vabs_f32, 0xf3b90740, "vabs.f32", VBdq, xx, VCdq, xx, xx, no, x, t16[1][0x00]},
{OP_vneg_f32, 0xf3b90780, "vneg.f32", VBq, xx, VCq, xx, xx, no, x, tsi6b[6][0x1f]},
{OP_vneg_f32, 0xf3b907c0, "vneg.f32", VBdq, xx, VCdq, xx, xx, no, x, t16[0][0x01]},
{OP_vdup_8, 0xf3b90c00, "vdup.8", VBq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{OP_vdup_8, 0xf3b90c40, "vdup.8", VBdq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3b90c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b90cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 7 */
{INVALID, 0xf3ba0000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3ba0040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vtrn_32, 0xf3ba0080, "vtrn.32", VBq, xx, VCq, xx, xx, no, x, tsi6b[7][0x03]},
{OP_vtrn_32, 0xf3ba00c0, "vtrn.32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vuzp_32, 0xf3ba0100, "vuzp.32", VBq, xx, VCq, xx, xx, no, x, tsi6b[7][0x05]},
{OP_vuzp_32, 0xf3ba0140, "vuzp.32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vzip_32, 0xf3ba0180, "vzip.32", VBq, xx, VCq, xx, xx, no, x, tsi6b[7][0x07]},
{OP_vzip_32, 0xf3ba01c0, "vzip.32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vmovn_i64, 0xf3ba0200, "vmovn.i64", VBd, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqmovun_s64, 0xf3ba0240, "vqmovun.s64", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqmovn_s64, 0xf3ba0280, "vqmovn.s64", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqmovn_u64, 0xf3ba02c0, "vqmovn.u64", VBq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vshll_i32, 0xf3ba0300, "vshll.i32", VBdq, xx, VCq, k32, xx, no, x, END_LIST},
{INVALID, 0xf3ba0340, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_sha1su1_32, 0xf3ba0380, "sha1su1.32", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{OP_sha256su0_32, 0xf3ba03c0, "sha256su0.32", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{OP_vrintn_f32_f32, 0xf3ba0400, "vrintn.f32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[7][0x11]},
{OP_vrintn_f32_f32, 0xf3ba0440, "vrintn.f32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x04]},
{OP_vrintx_f32_f32, 0xf3ba0480, "vrintx.f32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[7][0x13]},
{OP_vrintx_f32_f32, 0xf3ba04c0, "vrintx.f32.f32", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{OP_vrinta_f32_f32, 0xf3ba0500, "vrinta.f32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[7][0x15]},
{OP_vrinta_f32_f32, 0xf3ba0540, "vrinta.f32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x00]},
{OP_vrintz_f32_f32, 0xf3ba0580, "vrintz.f32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[7][0x17]},
{OP_vrintz_f32_f32, 0xf3ba05c0, "vrintz.f32.f32", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{INVALID, 0xf3ba0600, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3ba0640, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrintm_f32_f32, 0xf3ba0680, "vrintm.f32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[7][0x1b]},
{OP_vrintm_f32_f32, 0xf3ba06c0, "vrintm.f32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x0c]},
{INVALID, 0xf3ba0700, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3ba0740, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vrintp_f32_f32, 0xf3ba0780, "vrintp.f32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[7][0x1f]},
{OP_vrintp_f32_f32, 0xf3ba07c0, "vrintp.f32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x08]},
{OP_vdup_16, 0xf3ba0c00, "vdup.16", VBq, xx, VCh_q, i2_18, xx, no, x, DUP_ENTRY},
{OP_vdup_16, 0xf3ba0c40, "vdup.16", VBdq, xx, VCh_q, i2_18, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3ba0c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3ba0cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 8 */
{OP_vcvta_s32_f32, 0xf3bb0000, "vcvta.s32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[8][0x01]},
{OP_vcvta_s32_f32, 0xf3bb0040, "vcvta.s32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x11]},
{OP_vcvta_u32_f32, 0xf3bb0080, "vcvta.u32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[8][0x03]},
{OP_vcvta_u32_f32, 0xf3bb00c0, "vcvta.u32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x10]},
{OP_vcvtn_s32_f32, 0xf3bb0100, "vcvtn.s32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[8][0x05]},
{OP_vcvtn_s32_f32, 0xf3bb0140, "vcvtn.s32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x15]},
{OP_vcvtn_u32_f32, 0xf3bb0180, "vcvtn.u32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[8][0x07]},
{OP_vcvtn_u32_f32, 0xf3bb01c0, "vcvtn.u32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x14]},
{OP_vcvtp_s32_f32, 0xf3bb0200, "vcvtp.s32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[8][0x09]},
{OP_vcvtp_s32_f32, 0xf3bb0240, "vcvtp.s32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x19]},
{OP_vcvtp_u32_f32, 0xf3bb0280, "vcvtp.u32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[8][0x0b]},
{OP_vcvtp_u32_f32, 0xf3bb02c0, "vcvtp.u32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x18]},
{OP_vcvtm_s32_f32, 0xf3bb0300, "vcvtm.s32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[8][0x0d]},
{OP_vcvtm_s32_f32, 0xf3bb0340, "vcvtm.s32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x1d]},
{OP_vcvtm_u32_f32, 0xf3bb0380, "vcvtm.u32.f32", VBq, xx, VCq, xx, xx, v8, x, tsi6b[8][0x0f]},
{OP_vcvtm_u32_f32, 0xf3bb03c0, "vcvtm.u32.f32", VBdq, xx, VCdq, xx, xx, v8, x, tsi5b[0][0x1c]},
{OP_vrecpe_u32, 0xf3bb0400, "vrecpe.u32", VBq, xx, VCq, xx, xx, no, x, tsi6b[8][0x11]},
{OP_vrecpe_u32, 0xf3bb0440, "vrecpe.u32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vrsqrte_u32, 0xf3bb0480, "vrsqrte.u32", VBq, xx, VCq, xx, xx, no, x, tsi6b[8][0x13]},
{OP_vrsqrte_u32, 0xf3bb04c0, "vrsqrte.u32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vrecpe_f32, 0xf3bb0500, "vrecpe.f32", VBq, xx, VCq, xx, xx, no, x, tsi6b[8][0x15]},
{OP_vrecpe_f32, 0xf3bb0540, "vrecpe.f32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vrsqrte_f32, 0xf3bb0580, "vrsqrte.f32", VBq, xx, VCq, xx, xx, no, x, tsi6b[8][0x17]},
{OP_vrsqrte_f32, 0xf3bb05c0, "vrsqrte.f32", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vcvt_f32_s32, 0xf3bb0600, "vcvt.f32.s32", VBq, xx, VCq, xx, xx, no, x, tsi6[5][0x39]},
{OP_vcvt_f32_s32, 0xf3bb0640, "vcvt.f32.s32", VBdq, xx, VCdq, xx, xx, no, x, tsi6[5][0x3b]},
{OP_vcvt_f32_u32, 0xf3bb0680, "vcvt.f32.u32", VBq, xx, VCq, xx, xx, no, x, tsi6[11][0x39]},
{OP_vcvt_f32_u32, 0xf3bb06c0, "vcvt.f32.u32", VBdq, xx, VCdq, xx, xx, no, x, tsi6[11][0x3b]},
{OP_vcvt_s32_f32, 0xf3bb0700, "vcvt.s32.f32", VBq, xx, VCq, xx, xx, no, x, tsi6[5][0x3c]},
{OP_vcvt_s32_f32, 0xf3bb0740, "vcvt.s32.f32", VBdq, xx, VCdq, xx, xx, no, x, tsi6[5][0x3e]},
{OP_vcvt_u32_f32, 0xf3bb0780, "vcvt.u32.f32", VBq, xx, VCq, xx, xx, no, x, tsi6[11][0x3d]},
{OP_vcvt_u32_f32, 0xf3bb07c0, "vcvt.u32.f32", VBdq, xx, VCdq, xx, xx, no, x, tsi6[11][0x3f]},
{OP_vdup_8, 0xf3bb0c00, "vdup.8", VBq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{OP_vdup_8, 0xf3bb0c40, "vdup.8", VBdq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{INVALID, 0xf3bb0c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3bb0cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 9 */
{OP_vrev64_8, 0xf3b00000, "vrev64.8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x01]},
{OP_vrev64_8, 0xf3b00040, "vrev64.8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vrev32_8, 0xf3b00080, "vrev32.8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x03]},
{OP_vrev32_8, 0xf3b000c0, "vrev32.8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vrev16_8, 0xf3b00100, "vrev16.8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x05]},
{OP_vrev16_8, 0xf3b00140, "vrev16.8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b00180, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b001c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vpaddl_s8, 0xf3b00200, "vpaddl.s8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x09]},
{OP_vpaddl_s8, 0xf3b00240, "vpaddl.s8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vpaddl_u8, 0xf3b00280, "vpaddl.u8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x0b]},
{OP_vpaddl_u8, 0xf3b002c0, "vpaddl.u8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_aese_8, 0xf3b00300, "aese.8", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{OP_aesd_8, 0xf3b00340, "aesd.8", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{OP_aesmc_8, 0xf3b00380, "aesmc.8", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{OP_aesimc_8, 0xf3b003c0, "aesimc.8", VBdq, xx, VCdq, xx, xx, v8, x, END_LIST},
{OP_vcls_s8, 0xf3b00400, "vcls.s8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x11]},
{OP_vcls_s8, 0xf3b00440, "vcls.s8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vclz_i8, 0xf3b00480, "vclz.i8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x13]},
{OP_vclz_i8, 0xf3b004c0, "vclz.i8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vcnt_8, 0xf3b00500, "vcnt.8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x15]},
{OP_vcnt_8, 0xf3b00540, "vcnt.8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vmvn, 0xf3b00580, "vmvn", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x17]},
{OP_vmvn, 0xf3b005c0, "vmvn", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vpadal_s8, 0xf3b00600, "vpadal.s8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x19]},
{OP_vpadal_s8, 0xf3b00640, "vpadal.s8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vpadal_u8, 0xf3b00680, "vpadal.u8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x1b]},
{OP_vpadal_u8, 0xf3b006c0, "vpadal.u8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqabs_s8, 0xf3b00700, "vqabs.s8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x1d]},
{OP_vqabs_s8, 0xf3b00740, "vqabs.s8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{OP_vqneg_s8, 0xf3b00780, "vqneg.s8", VBq, xx, VCq, xx, xx, no, x, tsi6b[9][0x1f]},
{OP_vqneg_s8, 0xf3b007c0, "vqneg.s8", VBdq, xx, VCdq, xx, xx, no, x, END_LIST},
{INVALID, 0xf3b00c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b00c40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b00c80, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b00cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
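/* Illustrative sketch, not used by the decoder: computing the A32_ext_simd6b
 * index per the "bits 10:8,7:6 plus an extra 7:6 set for bit 11" scheme
 * documented above.  Names are hypothetical.
 */
static inline unsigned int
simd6b_index_sketch(unsigned int enc)
{
    unsigned int bit11 = (enc >> 11) & 0x1;
    unsigned int bits10_8 = (enc >> 8) & 0x7;
    unsigned int bits7_6 = (enc >> 6) & 0x3;
    if (bit11)
        return 32 + bits7_6;       /* the extra 7:6 set: entries 32..35 (vdup) */
    return bits10_8 * 4 + bits7_6; /* entries 0..31 */
}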
/* Indexed by bits 11,6 (0x......840) */
const instr_info_t A32_ext_simd2[][4] = {
{ /* 0 */
{INVALID, 0xf3b30000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b30040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_8, 0xf3b30c00, "vdup.8", VBq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{OP_vdup_8, 0xf3b30c40, "vdup.8", VBdq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
}, { /* 1 */
{INVALID, 0xf3b70000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3b70040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_8, 0xf3b70c00, "vdup.8", VBq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{OP_vdup_8, 0xf3b70c40, "vdup.8", VBdq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
}, { /* 2 */
{INVALID, 0xf3bc0000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3bc0040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_32, 0xf3bc0c00, "vdup.32", VBq, xx, VCd_q, i1_19, xx, no, x, DUP_ENTRY},
{OP_vdup_32, 0xf3bc0c40, "vdup.32", VBdq, xx, VCd_q, i1_19, xx, no, x, DUP_ENTRY},
}, { /* 3 */
{INVALID, 0xf3bd0000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3bd0040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_8, 0xf3bd0c00, "vdup.8", VBq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{OP_vdup_8, 0xf3bd0c40, "vdup.8", VBdq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
}, { /* 4 */
{INVALID, 0xf3be0000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3be0040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_16, 0xf3be0c00, "vdup.16", VBq, xx, VCh_q, i2_18, xx, no, x, DUP_ENTRY},
{OP_vdup_16, 0xf3be0c40, "vdup.16", VBdq, xx, VCh_q, i2_18, xx, no, x, DUP_ENTRY},
}, { /* 5 */
{INVALID, 0xf3bf0000, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf3bf0040, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vdup_8, 0xf3bf0c00, "vdup.8", VBq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
{OP_vdup_8, 0xf3bf0c40, "vdup.8", VBdq, xx, VCb_q, i3_17, xx, no, x, DUP_ENTRY},
},
};
/* Indexed by bits 10:8,6.  Bits 4 and 7 are already set.  These have
 * i6_16 with the L bit, which means their upper bits can vary considerably.
 */
const instr_info_t A32_ext_imm6L[][16] = {
{ /* 0 */
{OP_vshr_s64, 0xf2800090, "vshr.s64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[0][0x01]},/*XXX: imm = 64-imm*/
{OP_vshr_s64, 0xf28000d0, "vshr.s64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm*/
{OP_vsra_s64, 0xf2800190, "vsra.s64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[0][0x03]},/*XXX: imm = 64-imm*/
{OP_vsra_s64, 0xf28001d0, "vsra.s64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm*/
{OP_vrshr_s64, 0xf2800290, "vrshr.s64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[0][0x05]},/*XXX: imm = 64-imm*/
{OP_vrshr_s64, 0xf28002d0, "vrshr.s64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm*/
{OP_vrsra_s64, 0xf2800390, "vrsra.s64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[0][0x07]},/*XXX: imm = 64-imm*/
{OP_vrsra_s64, 0xf28003d0, "vrsra.s64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm*/
{INVALID, 0xf2800490, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf28004d0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vshl_i64, 0xf2800590, "vshl.i64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[0][0x0b]},/*XXX: imm = 64-imm?*/
{OP_vshl_i64, 0xf28005d0, "vshl.i64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm?*/
{INVALID, 0xf2800690, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf28006d0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vqshl_s64, 0xf2800790, "vqshl.s64", VBq, xx, VCq, i6_16, xx, no, x, tsi6[3][0x11]},
{OP_vqshl_s64, 0xf28007d0, "vqshl.s64", VBdq, xx, VCdq, i6_16, xx, no, x, tsi6[3][0x13]},
}, { /* 1 */
{OP_vshr_u64, 0xf3800090, "vshr.u64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[1][0x01]},/*XXX: imm = 64-imm*/
{OP_vshr_u64, 0xf38000d0, "vshr.u64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm*/
{OP_vsra_u64, 0xf3800190, "vsra.u64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[1][0x03]},/*XXX: imm = 64-imm*/
{OP_vsra_u64, 0xf38001d0, "vsra.u64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm*/
{OP_vrshr_u64, 0xf3800290, "vrshr.u64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[1][0x05]},/*XXX: imm = 64-imm*/
{OP_vrshr_u64, 0xf38002d0, "vrshr.u64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm*/
{OP_vrsra_u64, 0xf3800390, "vrsra.u64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[1][0x07]},/*XXX: imm = 64-imm*/
{OP_vrsra_u64, 0xf38003d0, "vrsra.u64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm*/
{OP_vsri_64, 0xf3800490, "vsri.64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[1][0x09]},/*XXX: imm = 64-imm?*/
{OP_vsri_64, 0xf38004d0, "vsri.64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm?*/
{OP_vsli_64, 0xf3800590, "vsli.64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[1][0x0b]},/*XXX: imm = 64-imm?*/
{OP_vsli_64, 0xf38005d0, "vsli.64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},/*XXX: imm = 64-imm?*/
{OP_vqshlu_s64, 0xf3800690, "vqshlu.s64", VBq, xx, VCq, i6_16, xx, no, x, ti6l[1][0x0d]},
{OP_vqshlu_s64, 0xf38006d0, "vqshlu.s64", VBdq, xx, VCdq, i6_16, xx, no, x, END_LIST},
{OP_vqshl_u64, 0xf3800790, "vqshl.u64", VBq, xx, VCq, i6_16, xx, no, x, tsi6[9][0x11]},
{OP_vqshl_u64, 0xf38007d0, "vqshl.u64", VBdq, xx, VCdq, i6_16, xx, no, x, tsi6[9][0x13]},
},
};
/* Indexed by bits (11:8,7:6)*3+X where X is based on the value of 3:0:
* + 0xd => 0
* + 0xf => 1
* + else => 2
 * However, the top bits 11:8 stop at 0xa.
*/
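/* Illustrative sketch of the computation described above (assumed form;
 * the decoder may implement it differently):
 *   x   = ((opc & 0xf) == 0xd) ? 0 : (((opc & 0xf) == 0xf) ? 1 : 2);
 *   idx = ((opc >> 6) & 0x3f) * 3 + x;   (bits 11:6 as one 6-bit field)
 * With 11:8 topping out at 0xa, the largest index is 0x2b*3 + 2 = 131,
 * matching the 132 entries per group.
 */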
const instr_info_t A32_ext_vldA[][132] = {
{ /* 0 */
{OP_vst4_8, 0xf400000d, "vst4.8", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst4_8, 0xf400000f, "vst4.8", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x00]},
{OP_vst4_8, 0xf4000000, "vst4.8", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x01]},
{OP_vst4_16, 0xf400004d, "vst4.16", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst4_16, 0xf400004f, "vst4.16", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x03]},
{OP_vst4_16, 0xf4000040, "vst4.16", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x04]},
{OP_vst4_32, 0xf400008d, "vst4.32", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst4_32, 0xf400008f, "vst4.32", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x06]},
{OP_vst4_32, 0xf4000080, "vst4.32", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x07]},
{INVALID, 0xf40000cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40000cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40000c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vst4_8, 0xf400010d, "vst4.8", Mqq, RAw, LX4Dq, i2_4, RAw, no, x, tvlA[0][0x02]},
{OP_vst4_8, 0xf400010f, "vst4.8", Mqq, xx, LX4Dq, i2_4, xx, no, x, tvlA[0][0x0c]},
{OP_vst4_8, 0xf4000100, "vst4.8", Mqq, RAw, LX4Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x0d]},
{OP_vst4_16, 0xf400014d, "vst4.16", Mqq, RAw, LX4Dq, i2_4, RAw, no, x, tvlA[0][0x05]},
{OP_vst4_16, 0xf400014f, "vst4.16", Mqq, xx, LX4Dq, i2_4, xx, no, x, tvlA[0][0x0f]},
{OP_vst4_16, 0xf4000140, "vst4.16", Mqq, RAw, LX4Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x10]},
{OP_vst4_32, 0xf400018d, "vst4.32", Mqq, RAw, LX4Dq, i2_4, RAw, no, x, tvlA[0][0x08]},
{OP_vst4_32, 0xf400018f, "vst4.32", Mqq, xx, LX4Dq, i2_4, xx, no, x, tvlA[0][0x12]},
{OP_vst4_32, 0xf4000180, "vst4.32", Mqq, RAw, LX4Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x13]},
{INVALID, 0xf40001cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40001cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40001c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vst1_8, 0xf400020d, "vst1.8", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst1_8, 0xf400020f, "vst1.8", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x18]},
{OP_vst1_8, 0xf4000200, "vst1.8", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x19]},
{OP_vst1_16, 0xf400024d, "vst1.16", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst1_16, 0xf400024f, "vst1.16", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x1b]},
{OP_vst1_16, 0xf4000240, "vst1.16", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x1c]},
{OP_vst1_32, 0xf400028d, "vst1.32", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst1_32, 0xf400028f, "vst1.32", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x1e]},
{OP_vst1_32, 0xf4000280, "vst1.32", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x1f]},
{OP_vst1_64, 0xf40002cd, "vst1.64", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst1_64, 0xf40002cf, "vst1.64", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x21]},
{OP_vst1_64, 0xf40002c0, "vst1.64", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x22]},
{OP_vst2_8, 0xf400030d, "vst2.8", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst2_8, 0xf400030f, "vst2.8", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x24]},
{OP_vst2_8, 0xf4000300, "vst2.8", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x25]},
{OP_vst2_16, 0xf400034d, "vst2.16", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst2_16, 0xf400034f, "vst2.16", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x27]},
{OP_vst2_16, 0xf4000340, "vst2.16", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x28]},
{OP_vst2_32, 0xf400038d, "vst2.32", Mqq, RAw, LX4q, i2_4, RAw, no, x, END_LIST},
{OP_vst2_32, 0xf400038f, "vst2.32", Mqq, xx, LX4q, i2_4, xx, no, x, tvlA[0][0x2a]},
{OP_vst2_32, 0xf4000380, "vst2.32", Mqq, RAw, LX4q, i2_4, RDw, xop_wb, x, tvlA[0][0x2b]},
{INVALID, 0xf40003cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40003cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40003c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vst3_8, 0xf400040d, "vst3.8", M24, RAw, LX3q, i2_4, RAw, no, x, END_LIST},
{OP_vst3_8, 0xf400040f, "vst3.8", M24, xx, LX3q, i2_4, xx, no, x, tvlA[0][0x30]},
{OP_vst3_8, 0xf4000400, "vst3.8", M24, RAw, LX3q, i2_4, RDw, xop_wb, x, tvlA[0][0x31]},
{OP_vst3_16, 0xf400044d, "vst3.16", M24, RAw, LX3q, i2_4, RAw, no, x, END_LIST},
{OP_vst3_16, 0xf400044f, "vst3.16", M24, xx, LX3q, i2_4, xx, no, x, tvlA[0][0x33]},
{OP_vst3_16, 0xf4000440, "vst3.16", M24, RAw, LX3q, i2_4, RDw, xop_wb, x, tvlA[0][0x34]},
{OP_vst3_32, 0xf400048d, "vst3.32", M24, RAw, LX3q, i2_4, RAw, no, x, END_LIST},
{OP_vst3_32, 0xf400048f, "vst3.32", M24, xx, LX3q, i2_4, xx, no, x, tvlA[0][0x36]},
{OP_vst3_32, 0xf4000480, "vst3.32", M24, RAw, LX3q, i2_4, RDw, xop_wb, x, tvlA[0][0x37]},
{INVALID, 0xf40004cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40004cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40004c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vst3_8, 0xf400050d, "vst3.8", M24, RAw, LX3Dq, i2_4, RAw, no, x, tvlA[0][0x32]},
{OP_vst3_8, 0xf400050f, "vst3.8", M24, xx, LX3Dq, i2_4, xx, no, x, tvlA[0][0x3c]},
{OP_vst3_8, 0xf4000500, "vst3.8", M24, RAw, LX3Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x3d]},
{OP_vst3_16, 0xf400054d, "vst3.16", M24, RAw, LX3Dq, i2_4, RAw, no, x, tvlA[0][0x35]},
{OP_vst3_16, 0xf400054f, "vst3.16", M24, xx, LX3Dq, i2_4, xx, no, x, tvlA[0][0x3f]},
{OP_vst3_16, 0xf4000540, "vst3.16", M24, RAw, LX3Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x40]},
{OP_vst3_32, 0xf400058d, "vst3.32", M24, RAw, LX3Dq, i2_4, RAw, no, x, tvlA[0][0x38]},
{OP_vst3_32, 0xf400058f, "vst3.32", M24, xx, LX3Dq, i2_4, xx, no, x, tvlA[0][0x42]},
{OP_vst3_32, 0xf4000580, "vst3.32", M24, RAw, LX3Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x43]},
{INVALID, 0xf40005cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40005cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40005c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vst1_8, 0xf400060d, "vst1.8", M24, RAw, LX3q, i2_4, RAw, no, x, tvlA[0][0x1a]},
{OP_vst1_8, 0xf400060f, "vst1.8", M24, xx, LX3q, i2_4, xx, no, x, tvlA[0][0x48]},
{OP_vst1_8, 0xf4000600, "vst1.8", M24, RAw, LX3q, i2_4, RDw, xop_wb, x, tvlA[0][0x49]},
{OP_vst1_16, 0xf400064d, "vst1.16", M24, RAw, LX3q, i2_4, RAw, no, x, tvlA[0][0x1d]},
{OP_vst1_16, 0xf400064f, "vst1.16", M24, xx, LX3q, i2_4, xx, no, x, tvlA[0][0x4b]},
{OP_vst1_16, 0xf4000640, "vst1.16", M24, RAw, LX3q, i2_4, RDw, xop_wb, x, tvlA[0][0x4c]},
{OP_vst1_32, 0xf400068d, "vst1.32", M24, RAw, LX3q, i2_4, RAw, no, x, tvlA[0][0x20]},
{OP_vst1_32, 0xf400068f, "vst1.32", M24, xx, LX3q, i2_4, xx, no, x, tvlA[0][0x4e]},
{OP_vst1_32, 0xf4000680, "vst1.32", M24, RAw, LX3q, i2_4, RDw, xop_wb, x, tvlA[0][0x4f]},
{OP_vst1_64, 0xf40006cd, "vst1.64", M24, RAw, LX3q, i2_4, RAw, no, x, tvlA[0][0x23]},
{OP_vst1_64, 0xf40006cf, "vst1.64", M24, xx, LX3q, i2_4, xx, no, x, tvlA[0][0x51]},
{OP_vst1_64, 0xf40006c0, "vst1.64", M24, RAw, LX3q, i2_4, RDw, xop_wb, x, tvlA[0][0x52]},
{OP_vst1_8, 0xf400070d, "vst1.8", Mq, RAw, VBq, i2_4, RAw, no, x, tvlA[0][0x7a]},/*XXX: some align values => undefined*/
{OP_vst1_8, 0xf400070f, "vst1.8", Mq, xx, VBq, i2_4, xx, no, x, tvlA[0][0x54]},/*XXX: combine align into memop?*/
{OP_vst1_8, 0xf4000700, "vst1.8", Mq, RAw, VBq, i2_4, RDw, xop_wb, x, tvlA[0][0x55]},
{OP_vst1_16, 0xf400074d, "vst1.16", Mq, RAw, VBq, i2_4, RAw, no, x, tvlA[0][0x7d]},
{OP_vst1_16, 0xf400074f, "vst1.16", Mq, xx, VBq, i2_4, xx, no, x, tvlA[0][0x57]},
{OP_vst1_16, 0xf4000740, "vst1.16", Mq, RAw, VBq, i2_4, RDw, xop_wb, x, tvlA[0][0x58]},
{OP_vst1_32, 0xf400078d, "vst1.32", Mq, RAw, VBq, i2_4, RAw, no, x, tvlA[0][0x80]},
{OP_vst1_32, 0xf400078f, "vst1.32", Mq, xx, VBq, i2_4, xx, no, x, tvlA[0][0x5a]},
{OP_vst1_32, 0xf4000780, "vst1.32", Mq, RAw, VBq, i2_4, RDw, xop_wb, x, tvlA[0][0x5b]},
{OP_vst1_64, 0xf40007cd, "vst1.64", Mq, RAw, VBq, i2_4, RAw, no, x, tvlA[0][0x83]},
{OP_vst1_64, 0xf40007cf, "vst1.64", Mq, xx, VBq, i2_4, xx, no, x, tvlA[0][0x5d]},
{OP_vst1_64, 0xf40007c0, "vst1.64", Mq, RAw, VBq, i2_4, RDw, xop_wb, x, tvlA[0][0x5e]},
/* 0x80 */
{OP_vst2_8, 0xf400080d, "vst2.8", Mdq, RAw, LX2q, i2_4, RAw, no, x, tvlA[0][0x26]},
{OP_vst2_8, 0xf400080f, "vst2.8", Mdq, xx, LX2q, i2_4, xx, no, x, tvlA[0][0x60]},
{OP_vst2_8, 0xf4000800, "vst2.8", Mdq, RAw, LX2q, i2_4, RDw, xop_wb, x, tvlA[0][0x61]},
{OP_vst2_16, 0xf400084d, "vst2.16", Mdq, RAw, LX2q, i2_4, RAw, no, x, tvlA[0][0x29]},
{OP_vst2_16, 0xf400084f, "vst2.16", Mdq, xx, LX2q, i2_4, xx, no, x, tvlA[0][0x63]},
{OP_vst2_16, 0xf4000840, "vst2.16", Mdq, RAw, LX2q, i2_4, RDw, xop_wb, x, tvlA[0][0x64]},
{OP_vst2_32, 0xf400088d, "vst2.32", Mdq, RAw, LX2q, i2_4, RAw, no, x, tvlA[0][0x2c]},
{OP_vst2_32, 0xf400088f, "vst2.32", Mdq, xx, LX2q, i2_4, xx, no, x, tvlA[0][0x66]},
{OP_vst2_32, 0xf4000880, "vst2.32", Mdq, RAw, LX2q, i2_4, RDw, xop_wb, x, tvlA[0][0x67]},
{INVALID, 0xf40008cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40008cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40008c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vst2_8, 0xf400090d, "vst2.8", Mdq, RAw, LX2Dq, i2_4, RAw, no, x, tvlA[0][0x62]},
{OP_vst2_8, 0xf400090f, "vst2.8", Mdq, xx, LX2Dq, i2_4, xx, no, x, tvlA[0][0x6c]},
{OP_vst2_8, 0xf4000900, "vst2.8", Mdq, RAw, LX2Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x6d]},
{OP_vst2_16, 0xf400094d, "vst2.16", Mdq, RAw, LX2Dq, i2_4, RAw, no, x, tvlA[0][0x65]},
{OP_vst2_16, 0xf400094f, "vst2.16", Mdq, xx, LX2Dq, i2_4, xx, no, x, tvlA[0][0x6f]},
{OP_vst2_16, 0xf4000940, "vst2.16", Mdq, RAw, LX2Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x70]},
{OP_vst2_32, 0xf400098d, "vst2.32", Mdq, RAw, LX2Dq, i2_4, RAw, no, x, tvlA[0][0x68]},
{OP_vst2_32, 0xf400098f, "vst2.32", Mdq, xx, LX2Dq, i2_4, xx, no, x, tvlA[0][0x72]},
{OP_vst2_32, 0xf4000980, "vst2.32", Mdq, RAw, LX2Dq, i2_4, RDw, xop_wb, x, tvlA[0][0x73]},
{INVALID, 0xf40009cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40009cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf40009c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vst1_8, 0xf4000a0d, "vst1.8", Mdq, RAw, LX2q, i2_4, RAw, no, x, tvlA[0][0x4a]},
{OP_vst1_8, 0xf4000a0f, "vst1.8", Mdq, xx, LX2q, i2_4, xx, no, x, tvlA[0][0x78]},
{OP_vst1_8, 0xf4000a00, "vst1.8", Mdq, RAw, LX2q, i2_4, RDw, xop_wb, x, tvlA[0][0x79]},
{OP_vst1_16, 0xf4000a4d, "vst1.16", Mdq, RAw, LX2q, i2_4, RAw, no, x, tvlA[0][0x4d]},
{OP_vst1_16, 0xf4000a4f, "vst1.16", Mdq, xx, LX2q, i2_4, xx, no, x, tvlA[0][0x7b]},
{OP_vst1_16, 0xf4000a40, "vst1.16", Mdq, RAw, LX2q, i2_4, RDw, xop_wb, x, tvlA[0][0x7c]},
{OP_vst1_32, 0xf4000a8d, "vst1.32", Mdq, RAw, LX2q, i2_4, RAw, no, x, tvlA[0][0x50]},
{OP_vst1_32, 0xf4000a8f, "vst1.32", Mdq, xx, LX2q, i2_4, xx, no, x, tvlA[0][0x7e]},
{OP_vst1_32, 0xf4000a80, "vst1.32", Mdq, RAw, LX2q, i2_4, RDw, xop_wb, x, tvlA[0][0x7f]},
{OP_vst1_64, 0xf4000acd, "vst1.64", Mdq, RAw, LX2q, i2_4, RAw, no, x, tvlA[0][0x53]},
{OP_vst1_64, 0xf4000acf, "vst1.64", Mdq, xx, LX2q, i2_4, xx, no, x, tvlA[0][0x81]},
{OP_vst1_64, 0xf4000ac0, "vst1.64", Mdq, RAw, LX2q, i2_4, RDw, xop_wb, x, tvlA[0][0x82]},
}, { /* 1 */
{OP_vld4_8, 0xf420000d, "vld4.8", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld4_8, 0xf420000f, "vld4.8", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x00]},
{OP_vld4_8, 0xf4200000, "vld4.8", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x01]},
{OP_vld4_16, 0xf420004d, "vld4.16", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld4_16, 0xf420004f, "vld4.16", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x03]},
{OP_vld4_16, 0xf4200040, "vld4.16", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x04]},
{OP_vld4_32, 0xf420008d, "vld4.32", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld4_32, 0xf420008f, "vld4.32", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x06]},
{OP_vld4_32, 0xf4200080, "vld4.32", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x07]},
{INVALID, 0xf42000cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42000cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42000c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld4_8, 0xf420010d, "vld4.8", LX4Dq, RAw, Mqq, i2_4, RAw, no, x, tvlA[1][0x02]},
{OP_vld4_8, 0xf420010f, "vld4.8", LX4Dq, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x0c]},
{OP_vld4_8, 0xf4200100, "vld4.8", LX4Dq, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x0d]},
{OP_vld4_16, 0xf420014d, "vld4.16", LX4Dq, RAw, Mqq, i2_4, RAw, no, x, tvlA[1][0x05]},
{OP_vld4_16, 0xf420014f, "vld4.16", LX4Dq, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x0f]},
{OP_vld4_16, 0xf4200140, "vld4.16", LX4Dq, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x10]},
{OP_vld4_32, 0xf420018d, "vld4.32", LX4Dq, RAw, Mqq, i2_4, RAw, no, x, tvlA[1][0x08]},
{OP_vld4_32, 0xf420018f, "vld4.32", LX4Dq, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x12]},
{OP_vld4_32, 0xf4200180, "vld4.32", LX4Dq, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x13]},
{INVALID, 0xf42001cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42001cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42001c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld1_8, 0xf420020d, "vld1.8", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld1_8, 0xf420020f, "vld1.8", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x18]},
{OP_vld1_8, 0xf4200200, "vld1.8", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x19]},
{OP_vld1_16, 0xf420024d, "vld1.16", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld1_16, 0xf420024f, "vld1.16", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x1b]},
{OP_vld1_16, 0xf4200240, "vld1.16", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x1c]},
{OP_vld1_32, 0xf420028d, "vld1.32", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld1_32, 0xf420028f, "vld1.32", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x1e]},
{OP_vld1_32, 0xf4200280, "vld1.32", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x1f]},
{OP_vld1_64, 0xf42002cd, "vld1.64", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld1_64, 0xf42002cf, "vld1.64", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x21]},
{OP_vld1_64, 0xf42002c0, "vld1.64", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x22]},
{OP_vld2_8, 0xf420030d, "vld2.8", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld2_8, 0xf420030f, "vld2.8", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x24]},
{OP_vld2_8, 0xf4200300, "vld2.8", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x25]},
{OP_vld2_16, 0xf420034d, "vld2.16", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld2_16, 0xf420034f, "vld2.16", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x27]},
{OP_vld2_16, 0xf4200340, "vld2.16", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x28]},
{OP_vld2_32, 0xf420038d, "vld2.32", LX4q, RAw, Mqq, i2_4, RAw, no, x, END_LIST},
{OP_vld2_32, 0xf420038f, "vld2.32", LX4q, xx, Mqq, i2_4, xx, no, x, tvlA[1][0x2a]},
{OP_vld2_32, 0xf4200380, "vld2.32", LX4q, RAw, Mqq, i2_4, RDw, xop_wb, x, tvlA[1][0x2b]},
{INVALID, 0xf42003cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42003cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42003c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld3_8, 0xf420040d, "vld3.8", LX3q, RAw, M24, i2_4, RAw, no, x, END_LIST},
{OP_vld3_8, 0xf420040f, "vld3.8", LX3q, xx, M24, i2_4, xx, no, x, tvlA[1][0x30]},
{OP_vld3_8, 0xf4200400, "vld3.8", LX3q, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x31]},
{OP_vld3_16, 0xf420044d, "vld3.16", LX3q, RAw, M24, i2_4, RAw, no, x, END_LIST},
{OP_vld3_16, 0xf420044f, "vld3.16", LX3q, xx, M24, i2_4, xx, no, x, tvlA[1][0x33]},
{OP_vld3_16, 0xf4200440, "vld3.16", LX3q, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x34]},
{OP_vld3_32, 0xf420048d, "vld3.32", LX3q, RAw, M24, i2_4, RAw, no, x, END_LIST},
{OP_vld3_32, 0xf420048f, "vld3.32", LX3q, xx, M24, i2_4, xx, no, x, tvlA[1][0x36]},
{OP_vld3_32, 0xf4200480, "vld3.32", LX3q, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x37]},
{INVALID, 0xf42004cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42004cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42004c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld3_8, 0xf420050d, "vld3.8", LX3Dq, RAw, M24, i2_4, RAw, no, x, tvlA[1][0x32]},
{OP_vld3_8, 0xf420050f, "vld3.8", LX3Dq, xx, M24, i2_4, xx, no, x, tvlA[1][0x3c]},
{OP_vld3_8, 0xf4200500, "vld3.8", LX3Dq, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x3d]},
{OP_vld3_16, 0xf420054d, "vld3.16", LX3Dq, RAw, M24, i2_4, RAw, no, x, tvlA[1][0x35]},
{OP_vld3_16, 0xf420054f, "vld3.16", LX3Dq, xx, M24, i2_4, xx, no, x, tvlA[1][0x3f]},
{OP_vld3_16, 0xf4200540, "vld3.16", LX3Dq, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x40]},
{OP_vld3_32, 0xf420058d, "vld3.32", LX3Dq, RAw, M24, i2_4, RAw, no, x, tvlA[1][0x38]},
{OP_vld3_32, 0xf420058f, "vld3.32", LX3Dq, xx, M24, i2_4, xx, no, x, tvlA[1][0x42]},
{OP_vld3_32, 0xf4200580, "vld3.32", LX3Dq, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x43]},
{INVALID, 0xf42005cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42005cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42005c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld1_8, 0xf420060d, "vld1.8", LX3q, RAw, M24, i2_4, RAw, no, x, tvlA[1][0x7a]},
{OP_vld1_8, 0xf420060f, "vld1.8", LX3q, xx, M24, i2_4, xx, no, x, tvlA[1][0x48]},
{OP_vld1_8, 0xf4200600, "vld1.8", LX3q, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x49]},
{OP_vld1_16, 0xf420064d, "vld1.16", LX3q, RAw, M24, i2_4, RAw, no, x, tvlA[1][0x7d]},
{OP_vld1_16, 0xf420064f, "vld1.16", LX3q, xx, M24, i2_4, xx, no, x, tvlA[1][0x4b]},
{OP_vld1_16, 0xf4200640, "vld1.16", LX3q, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x4c]},
{OP_vld1_32, 0xf420068d, "vld1.32", LX3q, RAw, M24, i2_4, RAw, no, x, tvlA[1][0x80]},
{OP_vld1_32, 0xf420068f, "vld1.32", LX3q, xx, M24, i2_4, xx, no, x, tvlA[1][0x4e]},
{OP_vld1_32, 0xf4200680, "vld1.32", LX3q, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x4f]},
{OP_vld1_64, 0xf42006cd, "vld1.64", LX3q, RAw, M24, i2_4, RAw, no, x, tvlA[1][0x83]},
{OP_vld1_64, 0xf42006cf, "vld1.64", LX3q, xx, M24, i2_4, xx, no, x, tvlA[1][0x51]},
{OP_vld1_64, 0xf42006c0, "vld1.64", LX3q, RAw, M24, i2_4, RDw, xop_wb, x, tvlA[1][0x52]},
{OP_vld1_8, 0xf420070d, "vld1.8", VBq, RAw, Mq, i2_4, RAw, no, x, tvlA[1][0x4a]},/*XXX: some align values => undefined*/
{OP_vld1_8, 0xf420070f, "vld1.8", VBq, xx, Mq, i2_4, xx, no, x, tvlA[1][0x54]},/*XXX: combine align into memop?*/
{OP_vld1_8, 0xf4200700, "vld1.8", VBq, RAw, Mq, i2_4, RDw, xop_wb, x, tvlA[1][0x55]},
{OP_vld1_16, 0xf420074d, "vld1.16", VBq, RAw, Mq, i2_4, RAw, no, x, tvlA[1][0x4d]},
{OP_vld1_16, 0xf420074f, "vld1.16", VBq, xx, Mq, i2_4, xx, no, x, tvlA[1][0x57]},
{OP_vld1_16, 0xf4200740, "vld1.16", VBq, RAw, Mq, i2_4, RDw, xop_wb, x, tvlA[1][0x58]},
{OP_vld1_32, 0xf420078d, "vld1.32", VBq, RAw, Mq, i2_4, RAw, no, x, tvlA[1][0x50]},
{OP_vld1_32, 0xf420078f, "vld1.32", VBq, xx, Mq, i2_4, xx, no, x, tvlA[1][0x5a]},
{OP_vld1_32, 0xf4200780, "vld1.32", VBq, RAw, Mq, i2_4, RDw, xop_wb, x, tvlA[1][0x5b]},
{OP_vld1_64, 0xf42007cd, "vld1.64", VBq, RAw, Mq, i2_4, RAw, no, x, tvlA[1][0x53]},
{OP_vld1_64, 0xf42007cf, "vld1.64", VBq, xx, Mq, i2_4, xx, no, x, tvlA[1][0x5d]},
{OP_vld1_64, 0xf42007c0, "vld1.64", VBq, RAw, Mq, i2_4, RDw, xop_wb, x, tvlA[1][0x5e]},
/* 0x80 */
{OP_vld2_8, 0xf420080d, "vld2.8", LX2q, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x26]},
{OP_vld2_8, 0xf420080f, "vld2.8", LX2q, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x60]},
{OP_vld2_8, 0xf4200800, "vld2.8", LX2q, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x61]},
{OP_vld2_16, 0xf420084d, "vld2.16", LX2q, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x29]},
{OP_vld2_16, 0xf420084f, "vld2.16", LX2q, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x63]},
{OP_vld2_16, 0xf4200840, "vld2.16", LX2q, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x64]},
{OP_vld2_32, 0xf420088d, "vld2.32", LX2q, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x2c]},
{OP_vld2_32, 0xf420088f, "vld2.32", LX2q, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x66]},
{OP_vld2_32, 0xf4200880, "vld2.32", LX2q, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x67]},
{INVALID, 0xf42008cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42008cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42008c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld2_8, 0xf420090d, "vld2.8", LX2Dq, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x62]},
{OP_vld2_8, 0xf420090f, "vld2.8", LX2Dq, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x6c]},
{OP_vld2_8, 0xf4200900, "vld2.8", LX2Dq, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x6d]},
{OP_vld2_16, 0xf420094d, "vld2.16", LX2Dq, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x65]},
{OP_vld2_16, 0xf420094f, "vld2.16", LX2Dq, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x6f]},
{OP_vld2_16, 0xf4200940, "vld2.16", LX2Dq, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x70]},
{OP_vld2_32, 0xf420098d, "vld2.32", LX2Dq, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x68]},
{OP_vld2_32, 0xf420098f, "vld2.32", LX2Dq, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x72]},
{OP_vld2_32, 0xf4200980, "vld2.32", LX2Dq, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x73]},
{INVALID, 0xf42009cd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42009cf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf42009c0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld1_8, 0xf4200a0d, "vld1.8", LX2q, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x1a]},
{OP_vld1_8, 0xf4200a0f, "vld1.8", LX2q, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x78]},
{OP_vld1_8, 0xf4200a00, "vld1.8", LX2q, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x79]},
{OP_vld1_16, 0xf4200a4d, "vld1.16", LX2q, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x1d]},
{OP_vld1_16, 0xf4200a4f, "vld1.16", LX2q, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x7b]},
{OP_vld1_16, 0xf4200a40, "vld1.16", LX2q, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x7c]},
{OP_vld1_32, 0xf4200a8d, "vld1.32", LX2q, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x20]},
{OP_vld1_32, 0xf4200a8f, "vld1.32", LX2q, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x7e]},
{OP_vld1_32, 0xf4200a80, "vld1.32", LX2q, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x7f]},
{OP_vld1_64, 0xf4200acd, "vld1.64", LX2q, RAw, Mdq, i2_4, RAw, no, x, tvlA[1][0x23]},
{OP_vld1_64, 0xf4200acf, "vld1.64", LX2q, xx, Mdq, i2_4, xx, no, x, tvlA[1][0x81]},
{OP_vld1_64, 0xf4200ac0, "vld1.64", LX2q, RAw, Mdq, i2_4, RDw, xop_wb, x, tvlA[1][0x82]},
},
};
/* Indexed by bits (11:8,Y)*3+X where X is based on the value of 3:0:
* + 0xd => 0
* + 0xf => 1
* + else => 2
* And Y is:
* + If bit 11 (0x.....8..) is set, the value of bit 6 (0x......4.)
* + Else, the value of bit 5 (0x......2.).
*
* This requires some duplicate entries, marked below to make it easier to
* reconfigure the table if we want to try a different arrangement.
* It's just easier to deal w/ dups than tons of separate 2-entry tables
* with indexes.
*/
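/* Illustrative sketch (assumed form, mirroring the description above):
 *   y   = ((opc >> 11) & 1) ? ((opc >> 6) & 1) : ((opc >> 5) & 1);
 *   x   = ((opc & 0xf) == 0xd) ? 0 : (((opc & 0xf) == 0xf) ? 1 : 2);
 *   idx = ((((opc >> 8) & 0xf) << 1) | y) * 3 + x;
 * i.e. 32 (11:8,Y) slots times 3, matching the 96 entries per group.
 */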
const instr_info_t A32_ext_vldB[][96] = {
{ /* 0 */
{OP_vst1_lane_8, 0xf480000d, "vst1.8", Mb, RAw, VBb_q, i3_5, RAw, no, x, END_LIST},
{OP_vst1_lane_8, 0xf480000f, "vst1.8", Mb, xx, VBb_q, i3_5, xx, no, x, tvlB[0][0x00]},/*XXX: combine align into memop?*/
{OP_vst1_lane_8, 0xf4800000, "vst1.8", Mb, RAw, VBb_q, i3_5, RDw, xop_wb, x, tvlB[0][0x01]},
{OP_vst1_lane_8, 0xf480002d, "vst1.8", Mb, RAw, VBb_q, i3_5, RAw, no, x, DUP_ENTRY},
{OP_vst1_lane_8, 0xf480002f, "vst1.8", Mb, xx, VBb_q, i3_5, xx, no, x, DUP_ENTRY},/*XXX: combine align into memop?*/
{OP_vst1_lane_8, 0xf4800020, "vst1.8", Mb, RAw, VBb_q, i3_5, RDw, xop_wb, x, DUP_ENTRY},
{OP_vst2_lane_8, 0xf480010d, "vst2.8", Mh, RAw, LX2b_q, i3_5, i1_4, xop_wb, x, END_LIST},
{OP_vst2_lane_8, 0xf480010f, "vst2.8", Mh, xx, LX2b_q, i3_5, i1_4, no, x, tvlB[0][0x06]},
{OP_vst2_lane_8, 0xf4800100, "vst2.8", Mh, RAw, LX2b_q, i3_5, i1_4, xop_wb2, x, tvlB[0][0x07]},
{OP_vst2_lane_8, 0xf480012d, "vst2.8", Mh, RAw, LX2b_q, i3_5, i1_4, xop_wb, x, DUP_ENTRY},
{OP_vst2_lane_8, 0xf480012f, "vst2.8", Mh, xx, LX2b_q, i3_5, i1_4, no, x, DUP_ENTRY},
{OP_vst2_lane_8, 0xf4800120, "vst2.8", Mh, RAw, LX2b_q, i3_5, i1_4, xop_wb2, x, DUP_ENTRY},
{OP_vst3_lane_8, 0xf480020d, "vst3.8", M3, RAw, LX3b_q, i3_5, RAw, no, x, END_LIST},
{OP_vst3_lane_8, 0xf480020f, "vst3.8", M3, xx, LX3b_q, i3_5, xx, no, x, tvlB[0][0x0c]},
{OP_vst3_lane_8, 0xf4800200, "vst3.8", M3, RAw, LX3b_q, i3_5, RDw, xop_wb, x, tvlB[0][0x0d]},
{OP_vst3_lane_8, 0xf480022d, "vst3.8", M3, RAw, LX3b_q, i3_5, RAw, no, x, DUP_ENTRY},
{OP_vst3_lane_8, 0xf480022f, "vst3.8", M3, xx, LX3b_q, i3_5, xx, no, x, DUP_ENTRY},
{OP_vst3_lane_8, 0xf4800220, "vst3.8", M3, RAw, LX3b_q, i3_5, RDw, xop_wb, x, DUP_ENTRY},
{OP_vst4_lane_8, 0xf480030d, "vst4.8", Md, RAw, LX4b_q, i3_5, i1_4, xop_wb, x, END_LIST},
{OP_vst4_lane_8, 0xf480030f, "vst4.8", Md, xx, LX4b_q, i3_5, i1_4, no, x, tvlB[0][0x12]},
{OP_vst4_lane_8, 0xf4800300, "vst4.8", Md, RAw, LX4b_q, i3_5, i1_4, xop_wb2, x, tvlB[0][0x13]},
{OP_vst4_lane_8, 0xf480032d, "vst4.8", Md, RAw, LX4b_q, i3_5, i1_4, xop_wb, x, DUP_ENTRY},
{OP_vst4_lane_8, 0xf480032f, "vst4.8", Md, xx, LX4b_q, i3_5, i1_4, no, x, DUP_ENTRY},
{OP_vst4_lane_8, 0xf4800320, "vst4.8", Md, RAw, LX4b_q, i3_5, i1_4, xop_wb2, x, DUP_ENTRY},
{OP_vst1_lane_16, 0xf480040d, "vst1.16", Mh, RAw, VBh_q, i2_6, i1_4, xop_wb, x, END_LIST},
{OP_vst1_lane_16, 0xf480040f, "vst1.16", Mh, xx, VBh_q, i2_6, i1_4, no, x, tvlB[0][0x18]},
{OP_vst1_lane_16, 0xf4800400, "vst1.16", Mh, RAw, VBh_q, i2_6, i1_4, xop_wb2, x, tvlB[0][0x19]},
{OP_vst1_lane_16, 0xf480042d, "vst1.16", Mh, RAw, VBh_q, i2_6, i1_4, xop_wb, x, DUP_ENTRY},
{OP_vst1_lane_16, 0xf480042f, "vst1.16", Mh, xx, VBh_q, i2_6, i1_4, no, x, DUP_ENTRY},
{OP_vst1_lane_16, 0xf4800420, "vst1.16", Mh, RAw, VBh_q, i2_6, i1_4, xop_wb2, x, DUP_ENTRY},
{OP_vst2_lane_16, 0xf480050d, "vst2.16", Md, RAw, LX2h_q, i2_6, i1_4, xop_wb, x, END_LIST},
{OP_vst2_lane_16, 0xf480050f, "vst2.16", Md, xx, LX2h_q, i2_6, i1_4, no, x, tvlB[0][0x1e]},
{OP_vst2_lane_16, 0xf4800500, "vst2.16", Md, RAw, LX2h_q, i2_6, i1_4, xop_wb2, x, tvlB[0][0x1f]},
{OP_vst2_lane_16, 0xf480052d, "vst2.16", Md, RAw, LX2Dh_q, i2_6, i1_4, xop_wb, x, tvlB[0][0x20]},
{OP_vst2_lane_16, 0xf480052f, "vst2.16", Md, xx, LX2Dh_q, i2_6, i1_4, no, x, tvlB[0][0x21]},
{OP_vst2_lane_16, 0xf4800520, "vst2.16", Md, RAw, LX2Dh_q, i2_6, i1_4, xop_wb2, x, tvlB[0][0x22]},
{OP_vst3_lane_16, 0xf480060d, "vst3.16", M6, RAw, LX3h_q, i2_6, RAw, no, x, END_LIST},
{OP_vst3_lane_16, 0xf480060f, "vst3.16", M6, xx, LX3h_q, i2_6, xx, no, x, tvlB[0][0x24]},
{OP_vst3_lane_16, 0xf4800600, "vst3.16", M6, RAw, LX3h_q, i2_6, RDw, xop_wb, x, tvlB[0][0x25]},
{OP_vst3_lane_16, 0xf480062d, "vst3.16", M6, RAw, LX3Dh_q, i2_6, RAw, no, x, tvlB[0][0x26]},
{OP_vst3_lane_16, 0xf480062f, "vst3.16", M6, xx, LX3Dh_q, i2_6, xx, no, x, tvlB[0][0x27]},
{OP_vst3_lane_16, 0xf4800620, "vst3.16", M6, RAw, LX3Dh_q, i2_6, RDw, xop_wb, x, tvlB[0][0x28]},
{OP_vst4_lane_16, 0xf480070d, "vst4.16", Mq, RAw, LX4h_q, i2_6, i1_4, xop_wb, x, END_LIST},
{OP_vst4_lane_16, 0xf480070f, "vst4.16", Mq, xx, LX4h_q, i2_6, i1_4, no, x, tvlB[0][0x2a]},
{OP_vst4_lane_16, 0xf4800700, "vst4.16", Mq, RAw, LX4h_q, i2_6, i1_4, xop_wb2, x, tvlB[0][0x2b]},
{OP_vst4_lane_16, 0xf480072d, "vst4.16", Mq, RAw, LX4Dh_q, i2_6, i1_4, xop_wb, x, tvlB[0][0x2c]},
{OP_vst4_lane_16, 0xf480072f, "vst4.16", Mq, xx, LX4Dh_q, i2_6, i1_4, no, x, tvlB[0][0x2d]},
{OP_vst4_lane_16, 0xf4800720, "vst4.16", Mq, RAw, LX4Dh_q, i2_6, i1_4, xop_wb2, x, tvlB[0][0x2e]},
/* 0x80 */
{OP_vst1_lane_32, 0xf480080d, "vst1.32", Md, RAw, VBd_q, i1_7, i2_4, xop_wb, x, END_LIST},
{OP_vst1_lane_32, 0xf480080f, "vst1.32", Md, xx, VBd_q, i1_7, i2_4, no, x, tvlB[0][0x30]},
{OP_vst1_lane_32, 0xf4800800, "vst1.32", Md, RAw, VBd_q, i1_7, i2_4, xop_wb2, x, tvlB[0][0x31]},
{OP_vst1_lane_32, 0xf480084d, "vst1.32", Md, RAw, VBd_q, i1_7, i2_4, xop_wb, x, DUP_ENTRY},
{OP_vst1_lane_32, 0xf480084f, "vst1.32", Md, xx, VBd_q, i1_7, i2_4, no, x, DUP_ENTRY},
{OP_vst1_lane_32, 0xf4800840, "vst1.32", Md, RAw, VBd_q, i1_7, i2_4, xop_wb2, x, DUP_ENTRY},
{OP_vst2_lane_32, 0xf480090d, "vst2.32", Mq, RAw, LX2d_q, i1_7, i2_4, xop_wb, x, END_LIST},
{OP_vst2_lane_32, 0xf480090f, "vst2.32", Mq, xx, LX2d_q, i1_7, i2_4, no, x, tvlB[0][0x36]},
{OP_vst2_lane_32, 0xf4800900, "vst2.32", Mq, RAw, LX2d_q, i1_7, i2_4, xop_wb2, x, tvlB[0][0x37]},
{OP_vst2_lane_32, 0xf480094d, "vst2.32", Mq, RAw, LX2Dd_q, i1_7, i2_4, xop_wb, x, tvlB[0][0x38]},
{OP_vst2_lane_32, 0xf480094f, "vst2.32", Mq, xx, LX2Dd_q, i1_7, i2_4, no, x, tvlB[0][0x39]},
{OP_vst2_lane_32, 0xf4800940, "vst2.32", Mq, RAw, LX2Dd_q, i1_7, i2_4, xop_wb2, x, tvlB[0][0x3a]},
{OP_vst3_lane_32, 0xf4800a0d, "vst3.32", M12, RAw, LX3d_q, i1_7, RAw, no, x, END_LIST},
{OP_vst3_lane_32, 0xf4800a0f, "vst3.32", M12, xx, LX3d_q, i1_7, xx, no, x, tvlB[0][0x3c]},
{OP_vst3_lane_32, 0xf4800a00, "vst3.32", M12, RAw, LX3d_q, i1_7, RDw, xop_wb, x, tvlB[0][0x3d]},
{OP_vst3_lane_32, 0xf4800a4d, "vst3.32", M12, RAw, LX3Dd_q, i1_7, RAw, no, x, tvlB[0][0x3e]},
{OP_vst3_lane_32, 0xf4800a4f, "vst3.32", M12, xx, LX3Dd_q, i1_7, xx, no, x, tvlB[0][0x3f]},
{OP_vst3_lane_32, 0xf4800a40, "vst3.32", M12, RAw, LX3Dd_q, i1_7, RDw, xop_wb, x, tvlB[0][0x40]},
{OP_vst4_lane_32, 0xf4800b0d, "vst4.32", Mdq, RAw, LX4d_q, i1_7, i2_4, xop_wb, x, END_LIST},
{OP_vst4_lane_32, 0xf4800b0f, "vst4.32", Mdq, xx, LX4d_q, i1_7, i2_4, no, x, tvlB[0][0x42]},
{OP_vst4_lane_32, 0xf4800b00, "vst4.32", Mdq, RAw, LX4d_q, i1_7, i2_4, xop_wb2, x, tvlB[0][0x43]},
{OP_vst4_lane_32, 0xf4800b4d, "vst4.32", Mdq, RAw, LX4Dd_q, i1_7, i2_4, xop_wb, x, tvlB[0][0x44]},
{OP_vst4_lane_32, 0xf4800b4f, "vst4.32", Mdq, xx, LX4Dd_q, i1_7, i2_4, no, x, tvlB[0][0x45]},
{OP_vst4_lane_32, 0xf4800b40, "vst4.32", Mdq, RAw, LX4Dd_q, i1_7, i2_4, xop_wb2, x, tvlB[0][0x46]},
{INVALID, 0xf4800c0d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800c0f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800c00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800c4d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800c4f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800c40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800d0d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800d0f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800d00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800d4d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800d4f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800d40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800e0d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800e0f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800e00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800e4d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800e4f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800e40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800f0d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800f0f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800f00, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800f4d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800f4f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4800f40, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 1 */
{OP_vld1_lane_8, 0xf4a0000d, "vld1.8", VBb_q, RAw, Mb, i3_5, RAw, no, x, END_LIST},
{OP_vld1_lane_8, 0xf4a0000f, "vld1.8", VBb_q, xx, Mb, i3_5, xx, no, x, tvlB[1][0x00]},/*XXX: combine align into memop?*/
{OP_vld1_lane_8, 0xf4a00000, "vld1.8", VBb_q, RAw, Mb, i3_5, RDw, xop_wb, x, tvlB[1][0x01]},
{OP_vld1_lane_8, 0xf4a0002d, "vld1.8", VBb_q, RAw, Mb, i3_5, RAw, no, x, DUP_ENTRY},
{OP_vld1_lane_8, 0xf4a0002f, "vld1.8", VBb_q, xx, Mb, i3_5, xx, no, x, DUP_ENTRY},/*XXX: combine align into memop?*/
{OP_vld1_lane_8, 0xf4a00020, "vld1.8", VBb_q, RAw, Mb, i3_5, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld2_lane_8, 0xf4a0010d, "vld2.8", LX2b_q, RAw, Mh, i3_5, RAw, no, x, END_LIST},
{OP_vld2_lane_8, 0xf4a0010f, "vld2.8", LX2b_q, xx, Mh, i3_5, xx, no, x, tvlB[1][0x06]},
{OP_vld2_lane_8, 0xf4a00100, "vld2.8", LX2b_q, RAw, Mh, i3_5, RDw, xop_wb, x, tvlB[1][0x07]},
{OP_vld2_lane_8, 0xf4a0012d, "vld2.8", LX2b_q, RAw, Mh, i3_5, RAw, no, x, DUP_ENTRY},
{OP_vld2_lane_8, 0xf4a0012f, "vld2.8", LX2b_q, xx, Mh, i3_5, xx, no, x, DUP_ENTRY},
{OP_vld2_lane_8, 0xf4a00120, "vld2.8", LX2b_q, RAw, Mh, i3_5, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld3_lane_8, 0xf4a0020d, "vld3.8", LX3b_q, RAw, M3, i3_5, RAw, no, x, END_LIST},
{OP_vld3_lane_8, 0xf4a0020f, "vld3.8", LX3b_q, xx, M3, i3_5, xx, no, x, tvlB[1][0x0c]},
{OP_vld3_lane_8, 0xf4a00200, "vld3.8", LX3b_q, RAw, M3, i3_5, RDw, xop_wb, x, tvlB[1][0x0d]},
{OP_vld3_lane_8, 0xf4a0022d, "vld3.8", LX3b_q, RAw, M3, i3_5, RAw, no, x, DUP_ENTRY},
{OP_vld3_lane_8, 0xf4a0022f, "vld3.8", LX3b_q, xx, M3, i3_5, xx, no, x, DUP_ENTRY},
{OP_vld3_lane_8, 0xf4a00220, "vld3.8", LX3b_q, RAw, M3, i3_5, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld4_lane_8, 0xf4a0030d, "vld4.8", LX4b_q, RAw, Md, i3_5, RAw, no, x, END_LIST},
{OP_vld4_lane_8, 0xf4a0030f, "vld4.8", LX4b_q, xx, Md, i3_5, xx, no, x, tvlB[1][0x12]},
{OP_vld4_lane_8, 0xf4a00300, "vld4.8", LX4b_q, RAw, Md, i3_5, RDw, xop_wb, x, tvlB[1][0x13]},
{OP_vld4_lane_8, 0xf4a0032d, "vld4.8", LX4b_q, RAw, Md, i3_5, RAw, no, x, DUP_ENTRY},
{OP_vld4_lane_8, 0xf4a0032f, "vld4.8", LX4b_q, xx, Md, i3_5, xx, no, x, DUP_ENTRY},
{OP_vld4_lane_8, 0xf4a00320, "vld4.8", LX4b_q, RAw, Md, i3_5, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld1_lane_16, 0xf4a0040d, "vld1.16", VBh_q, RAw, Mh, i2_6, i1_4, xop_wb, x, END_LIST},
{OP_vld1_lane_16, 0xf4a0040f, "vld1.16", VBh_q, xx, Mh, i2_6, i1_4, no, x, tvlB[1][0x18]},
{OP_vld1_lane_16, 0xf4a00400, "vld1.16", VBh_q, RAw, Mh, i2_6, i1_4, xop_wb2, x, tvlB[1][0x19]},
{OP_vld1_lane_16, 0xf4a0042d, "vld1.16", VBh_q, RAw, Mh, i2_6, i1_4, xop_wb, x, DUP_ENTRY},
{OP_vld1_lane_16, 0xf4a0042f, "vld1.16", VBh_q, xx, Mh, i2_6, i1_4, no, x, DUP_ENTRY},
{OP_vld1_lane_16, 0xf4a00420, "vld1.16", VBh_q, RAw, Mh, i2_6, i1_4, xop_wb2, x, DUP_ENTRY},
{OP_vld2_lane_16, 0xf4a0050d, "vld2.16", LX2h_q, RAw, Md, i2_6, i1_4, xop_wb, x, END_LIST},
{OP_vld2_lane_16, 0xf4a0050f, "vld2.16", LX2h_q, xx, Md, i2_6, i1_4, no, x, tvlB[1][0x1e]},
{OP_vld2_lane_16, 0xf4a00500, "vld2.16", LX2h_q, RAw, Md, i2_6, i1_4, xop_wb2, x, tvlB[1][0x1f]},
{OP_vld2_lane_16, 0xf4a0052d, "vld2.16", LX2Dh_q, RAw, Md, i2_6, i1_4, xop_wb, x, tvlB[1][0x20]},
{OP_vld2_lane_16, 0xf4a0052f, "vld2.16", LX2Dh_q, xx, Md, i2_6, i1_4, no, x, tvlB[1][0x21]},
{OP_vld2_lane_16, 0xf4a00520, "vld2.16", LX2Dh_q, RAw, Md, i2_6, i1_4, xop_wb2, x, tvlB[1][0x22]},
{OP_vld3_lane_16, 0xf4a0060d, "vld3.16", LX3h_q, RAw, M6, i2_6, i1_4, xop_wb, x, END_LIST},
{OP_vld3_lane_16, 0xf4a0060f, "vld3.16", LX3h_q, xx, M6, i2_6, i1_4, no, x, tvlB[1][0x24]},
{OP_vld3_lane_16, 0xf4a00600, "vld3.16", LX3h_q, RAw, M6, i2_6, i1_4, xop_wb2, x, tvlB[1][0x25]},
{OP_vld3_lane_16, 0xf4a0062d, "vld3.16", LX3Dh_q, RAw, M6, i2_6, i1_4, xop_wb, x, tvlB[1][0x26]},
{OP_vld3_lane_16, 0xf4a0062f, "vld3.16", LX3Dh_q, xx, M6, i2_6, i1_4, no, x, tvlB[1][0x27]},
{OP_vld3_lane_16, 0xf4a00620, "vld3.16", LX3Dh_q, RAw, M6, i2_6, i1_4, xop_wb2, x, tvlB[1][0x28]},
{OP_vld4_lane_16, 0xf4a0070d, "vld4.16", LX4h_q, RAw, Mq, i2_6, i1_4, xop_wb, x, END_LIST},
{OP_vld4_lane_16, 0xf4a0070f, "vld4.16", LX4h_q, xx, Mq, i2_6, i1_4, no, x, tvlB[1][0x2a]},
{OP_vld4_lane_16, 0xf4a00700, "vld4.16", LX4h_q, RAw, Mq, i2_6, i1_4, xop_wb2, x, tvlB[1][0x2b]},
{OP_vld4_lane_16, 0xf4a0072d, "vld4.16", LX4Dh_q, RAw, Mq, i2_6, i1_4, xop_wb, x, tvlB[1][0x2c]},
{OP_vld4_lane_16, 0xf4a0072f, "vld4.16", LX4Dh_q, xx, Mq, i2_6, i1_4, no, x, tvlB[1][0x2d]},
{OP_vld4_lane_16, 0xf4a00720, "vld4.16", LX4Dh_q, RAw, Mq, i2_6, i1_4, xop_wb2, x, tvlB[1][0x2e]},
{OP_vld1_lane_32, 0xf4a0080d, "vld1.32", VBd_q, RAw, Md, i1_7, i2_4, xop_wb, x, END_LIST},
{OP_vld1_lane_32, 0xf4a0080f, "vld1.32", VBd_q, xx, Md, i1_7, i2_4, no, x, tvlB[1][0x30]},
{OP_vld1_lane_32, 0xf4a00800, "vld1.32", VBd_q, RAw, Md, i1_7, i2_4, xop_wb2, x, tvlB[1][0x31]},
{OP_vld1_lane_32, 0xf4a0082d, "vld1.32", VBd_q, RAw, Md, i1_7, i2_4, xop_wb, x, DUP_ENTRY},
{OP_vld1_lane_32, 0xf4a0082f, "vld1.32", VBd_q, xx, Md, i1_7, i2_4, no, x, DUP_ENTRY},
{OP_vld1_lane_32, 0xf4a00820, "vld1.32", VBd_q, RAw, Md, i1_7, i2_4, xop_wb2, x, DUP_ENTRY},
{OP_vld2_lane_32, 0xf4a0090d, "vld2.32", LX2d_q, RAw, Mq, i1_7, i2_4, xop_wb, x, END_LIST},
{OP_vld2_lane_32, 0xf4a0090f, "vld2.32", LX2d_q, xx, Mq, i1_7, i2_4, no, x, tvlB[1][0x36]},
{OP_vld2_lane_32, 0xf4a00900, "vld2.32", LX2d_q, RAw, Mq, i1_7, i2_4, xop_wb2, x, tvlB[1][0x37]},
{OP_vld2_lane_32, 0xf4a0094d, "vld2.32", LX2Dd_q, RAw, Mq, i1_7, i2_4, xop_wb, x, tvlB[1][0x38]},
{OP_vld2_lane_32, 0xf4a0094f, "vld2.32", LX2Dd_q, xx, Mq, i1_7, i2_4, no, x, tvlB[1][0x39]},
{OP_vld2_lane_32, 0xf4a00940, "vld2.32", LX2Dd_q, RAw, Mq, i1_7, i2_4, xop_wb2, x, tvlB[1][0x3a]},
{OP_vld3_lane_32, 0xf4a00a0d, "vld3.32", LX3d_q, RAw, M12, i1_7, i2_4, xop_wb, x, END_LIST},
{OP_vld3_lane_32, 0xf4a00a0f, "vld3.32", LX3d_q, xx, M12, i1_7, i2_4, no, x, tvlB[1][0x3c]},
{OP_vld3_lane_32, 0xf4a00a00, "vld3.32", LX3d_q, RAw, M12, i1_7, i2_4, xop_wb2, x, tvlB[1][0x3d]},
{OP_vld3_lane_32, 0xf4a00a4d, "vld3.32", LX3Dd_q, RAw, M12, i1_7, i2_4, xop_wb, x, tvlB[1][0x3e]},
{OP_vld3_lane_32, 0xf4a00a4f, "vld3.32", LX3Dd_q, xx, M12, i1_7, i2_4, no, x, tvlB[1][0x3f]},
{OP_vld3_lane_32, 0xf4a00a40, "vld3.32", LX3Dd_q, RAw, M12, i1_7, i2_4, xop_wb2, x, tvlB[1][0x40]},
{OP_vld4_lane_32, 0xf4a00b0d, "vld4.32", LX4d_q, RAw, Mdq, i1_7, i2_4, xop_wb, x, END_LIST},
{OP_vld4_lane_32, 0xf4a00b0f, "vld4.32", LX4d_q, xx, Mdq, i1_7, i2_4, no, x, tvlB[1][0x42]},
{OP_vld4_lane_32, 0xf4a00b00, "vld4.32", LX4d_q, RAw, Mdq, i1_7, i2_4, xop_wb2, x, tvlB[1][0x43]},
{OP_vld4_lane_32, 0xf4a00b4d, "vld4.32", LX4Dd_q, RAw, Mdq, i1_7, i2_4, xop_wb, x, tvlB[1][0x44]},
{OP_vld4_lane_32, 0xf4a00b4f, "vld4.32", LX4Dd_q, xx, Mdq, i1_7, i2_4, no, x, tvlB[1][0x45]},
{OP_vld4_lane_32, 0xf4a00b40, "vld4.32", LX4Dd_q, RAw, Mdq, i1_7, i2_4, xop_wb2, x, tvlB[1][0x46]},
{EXT_VLDD, 0xf4a00c0d, "(ext vldD 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDD, 0xf4a00c0f, "(ext vldD 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDD, 0xf4a00c00, "(ext vldD 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDD, 0xf4a00c4d, "(ext vldD 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDD, 0xf4a00c4f, "(ext vldD 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDD, 0xf4a00c40, "(ext vldD 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDC, 0xf4a00d0d, "(ext vldC 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDC, 0xf4a00d0f, "(ext vldC 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDC, 0xf4a00d00, "(ext vldC 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDC, 0xf4a00d4d, "(ext vldC 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDC, 0xf4a00d4f, "(ext vldC 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_VLDC, 0xf4a00d40, "(ext vldC 0)", xx, xx, xx, xx, xx, no, x, 0},
{EXT_BIT4, 0xf4a00e0d, "(ext bit4 21)", xx, xx, xx, xx, xx, no, x, 21},
{EXT_BIT4, 0xf4a00e0f, "(ext bit4 21)", xx, xx, xx, xx, xx, no, x, 21},
{EXT_BIT4, 0xf4a00e00, "(ext bit4 21)", xx, xx, xx, xx, xx, no, x, 21},
{EXT_BIT4, 0xf4a00e4d, "(ext bit4 21)", xx, xx, xx, xx, xx, no, x, 21},
{EXT_BIT4, 0xf4a00e4f, "(ext bit4 21)", xx, xx, xx, xx, xx, no, x, 21},
{EXT_BIT4, 0xf4a00e40, "(ext bit4 21)", xx, xx, xx, xx, xx, no, x, 21},
{EXT_VLDD, 0xf4a00f0d, "(ext vldD 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_VLDD, 0xf4a00f0f, "(ext vldD 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_VLDD, 0xf4a00f00, "(ext vldD 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_VLDD, 0xf4a00f4d, "(ext vldD 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_VLDD, 0xf4a00f4f, "(ext vldD 1)", xx, xx, xx, xx, xx, no, x, 1},
{EXT_VLDD, 0xf4a00f40, "(ext vldD 1)", xx, xx, xx, xx, xx, no, x, 1},
},
};
/* Indexed by bits (7:5)*3+X where X is based on the value of 3:0:
* + 0xd => 0
* + 0xf => 1
* + else => 2
*/
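/* Illustrative sketch (assumed form): with x computed from bits 3:0 as
 * above,
 *   idx = ((opc >> 5) & 0x7) * 3 + x;
 * for the 24 entries per group.
 */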
const instr_info_t A32_ext_vldC[][24] = {
{ /* 0 */
{OP_vld2_dup_8, 0xf4a00d0d, "vld2.8", LX2q, RAw, Mh, RAw, xx, no, x, END_LIST},
{OP_vld2_dup_8, 0xf4a00d0f, "vld2.8", LX2q, xx, Mh, xx, xx, no, x, tvlC[0][0x00]},
{OP_vld2_dup_8, 0xf4a00d00, "vld2.8", LX2q, RAw, Mh, RDw, RAw, no, x, tvlC[0][0x01]},
{OP_vld2_dup_8, 0xf4a00d2d, "vld2.8", LX2Dq, RAw, Mh, RAw, xx, no, x, tvlC[0][0x02]},
{OP_vld2_dup_8, 0xf4a00d2f, "vld2.8", LX2Dq, xx, Mh, xx, xx, no, x, tvlC[0][0x03]},
{OP_vld2_dup_8, 0xf4a00d20, "vld2.8", LX2Dq, RAw, Mh, RDw, RAw, no, x, tvlC[0][0x04]},
{OP_vld2_dup_16, 0xf4a00d4d, "vld2.16", LX2q, RAw, Md, i1_4, RAw, no, x, END_LIST},
{OP_vld2_dup_16, 0xf4a00d4f, "vld2.16", LX2q, xx, Md, i1_4, xx, no, x, tvlC[0][0x06]},
{OP_vld2_dup_16, 0xf4a00d40, "vld2.16", LX2q, RAw, Md, i1_4, RDw, xop_wb, x, tvlC[0][0x07]},
{OP_vld2_dup_16, 0xf4a00d6d, "vld2.16", LX2Dq, RAw, Md, i1_4, RAw, no, x, tvlC[0][0x08]},
{OP_vld2_dup_16, 0xf4a00d6f, "vld2.16", LX2Dq, xx, Md, i1_4, xx, no, x, tvlC[0][0x09]},
{OP_vld2_dup_16, 0xf4a00d60, "vld2.16", LX2Dq, RAw, Md, i1_4, RDw, xop_wb, x, tvlC[0][0x0a]},
{OP_vld2_dup_32, 0xf4a00d8d, "vld2.32", LX2q, RAw, Mq, i1_4, RAw, no, x, END_LIST},
{OP_vld2_dup_32, 0xf4a00d8f, "vld2.32", LX2q, xx, Mq, i1_4, xx, no, x, tvlC[0][0x0c]},
{OP_vld2_dup_32, 0xf4a00d80, "vld2.32", LX2q, RAw, Mq, i1_4, RDw, xop_wb, x, tvlC[0][0x0d]},
{OP_vld2_dup_32, 0xf4a00dad, "vld2.32", LX2Dq, RAw, Mq, i1_4, RAw, no, x, tvlC[0][0x0e]},
{OP_vld2_dup_32, 0xf4a00daf, "vld2.32", LX2Dq, xx, Mq, i1_4, xx, no, x, tvlC[0][0x0f]},
{OP_vld2_dup_32, 0xf4a00da0, "vld2.32", LX2Dq, RAw, Mq, i1_4, RDw, xop_wb, x, tvlC[0][0x10]},
{INVALID, 0xf4a00dcd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00dcf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00dc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00ded, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00def, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00de0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 1 */
/* We've already ruled out bit4==1 as invalid */
{OP_vld3_dup_8, 0xf4a00e0d, "vld3.8", LX3q, RAw, M3, RAw, xx, no, x, END_LIST},
{OP_vld3_dup_8, 0xf4a00e0f, "vld3.8", LX3q, xx, M3, xx, xx, no, x, tvlC[1][0x00]},
{OP_vld3_dup_8, 0xf4a00e00, "vld3.8", LX3q, RAw, M3, RDw, RAw, no, x, tvlC[1][0x01]},
{OP_vld3_dup_8, 0xf4a00e2d, "vld3.8", LX3Dq, RAw, M3, RAw, xx, no, x, tvlC[1][0x02]},
{OP_vld3_dup_8, 0xf4a00e2f, "vld3.8", LX3Dq, xx, M3, xx, xx, no, x, tvlC[1][0x03]},
{OP_vld3_dup_8, 0xf4a00e20, "vld3.8", LX3Dq, RAw, M3, RDw, RAw, no, x, tvlC[1][0x04]},
{OP_vld3_dup_16, 0xf4a00e4d, "vld3.16", LX3q, RAw, M6, i1_4, RAw, no, x, END_LIST},
{OP_vld3_dup_16, 0xf4a00e4f, "vld3.16", LX3q, xx, M6, i1_4, xx, no, x, tvlC[1][0x06]},
{OP_vld3_dup_16, 0xf4a00e40, "vld3.16", LX3q, RAw, M6, i1_4, RDw, xop_wb, x, tvlC[1][0x07]},
{OP_vld3_dup_16, 0xf4a00e6d, "vld3.16", LX3Dq, RAw, M6, i1_4, RAw, no, x, tvlC[1][0x08]},
{OP_vld3_dup_16, 0xf4a00e6f, "vld3.16", LX3Dq, xx, M6, i1_4, xx, no, x, tvlC[1][0x09]},
{OP_vld3_dup_16, 0xf4a00e60, "vld3.16", LX3Dq, RAw, M6, i1_4, RDw, xop_wb, x, tvlC[1][0x0a]},
{OP_vld3_dup_32, 0xf4a00e8d, "vld3.32", LX3q, RAw, M12, i1_4, RAw, no, x, END_LIST},
{OP_vld3_dup_32, 0xf4a00e8f, "vld3.32", LX3q, xx, M12, i1_4, xx, no, x, tvlC[1][0x0c]},
{OP_vld3_dup_32, 0xf4a00e80, "vld3.32", LX3q, RAw, M12, i1_4, RDw, xop_wb, x, tvlC[1][0x0d]},
{OP_vld3_dup_32, 0xf4a00ead, "vld3.32", LX3Dq, RAw, M12, i1_4, RAw, no, x, tvlC[1][0x0e]},
{OP_vld3_dup_32, 0xf4a00eaf, "vld3.32", LX3Dq, xx, M12, i1_4, xx, no, x, tvlC[1][0x0f]},
{OP_vld3_dup_32, 0xf4a00ea0, "vld3.32", LX3Dq, RAw, M12, i1_4, RDw, xop_wb, x, tvlC[1][0x10]},
{INVALID, 0xf4a00ecd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00ecf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00ec0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00eed, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00eef, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00ee0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
},
};
/* Indexed by bits (7:4)*3+X where X is based on the value of 3:0:
* + 0xd => 0
* + 0xf => 1
* + else => 2
*/
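/* Illustrative sketch (assumed form): with x computed from bits 3:0 as
 * above,
 *   idx = ((opc >> 4) & 0xf) * 3 + x;
 * for the 48 entries per group.
 */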
const instr_info_t A32_ext_vldD[][48] = {
{ /* 0 */
{OP_vld1_dup_8, 0xf4a00c0d, "vld1.8", VBq, RAw, Mb, RAw, xx, no, x, tvlD[0][0x08]},
{OP_vld1_dup_8, 0xf4a00c0f, "vld1.8", VBq, xx, Mb, xx, xx, no, x, tvlD[0][0x00]},
{OP_vld1_dup_8, 0xf4a00c00, "vld1.8", VBq, RAw, Mb, RDw, RAw, no, x, tvlD[0][0x01]},
{INVALID, 0xf4a00c1d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00c1f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00c10, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld1_dup_8, 0xf4a00c2d, "vld1.8", LX2q, RAw, Mb, RAw, xx, no, x, END_LIST},
{OP_vld1_dup_8, 0xf4a00c2f, "vld1.8", LX2q, xx, Mb, xx, xx, no, x, tvlD[0][0x06]},
{OP_vld1_dup_8, 0xf4a00c20, "vld1.8", LX2q, RAw, Mb, RDw, RAw, no, x, tvlD[0][0x07]},
{INVALID, 0xf4a00c3d, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00c3f, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00c30, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld1_dup_16, 0xf4a00c4d, "vld1.16", VBq, RAw, Mh, i1_4, RAw, no, x, tvlD[0][0x14]},
{OP_vld1_dup_16, 0xf4a00c4f, "vld1.16", VBq, xx, Mh, i1_4, xx, no, x, tvlD[0][0x0c]},
{OP_vld1_dup_16, 0xf4a00c40, "vld1.16", VBq, RAw, Mh, i1_4, RDw, xop_wb, x, tvlD[0][0x0d]},
{OP_vld1_dup_16, 0xf4a00c5d, "vld1.16", VBq, RAw, Mh, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld1_dup_16, 0xf4a00c5f, "vld1.16", VBq, xx, Mh, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld1_dup_16, 0xf4a00c50, "vld1.16", VBq, RAw, Mh, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld1_dup_16, 0xf4a00c6d, "vld1.16", LX2q, RAw, Mh, i1_4, RAw, no, x, END_LIST},
{OP_vld1_dup_16, 0xf4a00c6f, "vld1.16", LX2q, xx, Mh, i1_4, xx, no, x, tvlD[0][0x12]},
{OP_vld1_dup_16, 0xf4a00c60, "vld1.16", LX2q, RAw, Mh, i1_4, RDw, xop_wb, x, tvlD[0][0x13]},
{OP_vld1_dup_16, 0xf4a00c7d, "vld1.16", LX2q, RAw, Mh, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld1_dup_16, 0xf4a00c7f, "vld1.16", LX2q, xx, Mh, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld1_dup_16, 0xf4a00c70, "vld1.16", LX2q, RAw, Mh, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld1_dup_32, 0xf4a00c8d, "vld1.32", VBq, RAw, Md, i1_4, RAw, no, x, tvlD[0][0x20]},
{OP_vld1_dup_32, 0xf4a00c8f, "vld1.32", VBq, xx, Md, i1_4, xx, no, x, tvlD[0][0x18]},
{OP_vld1_dup_32, 0xf4a00c80, "vld1.32", VBq, RAw, Md, i1_4, RDw, xop_wb, x, tvlD[0][0x19]},
{OP_vld1_dup_32, 0xf4a00c9d, "vld1.32", VBq, RAw, Md, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld1_dup_32, 0xf4a00c9f, "vld1.32", VBq, xx, Md, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld1_dup_32, 0xf4a00c90, "vld1.32", VBq, RAw, Md, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld1_dup_32, 0xf4a00cad, "vld1.32", LX2q, RAw, Md, i1_4, RAw, no, x, END_LIST},
{OP_vld1_dup_32, 0xf4a00caf, "vld1.32", LX2q, xx, Md, i1_4, xx, no, x, tvlD[0][0x1e]},
{OP_vld1_dup_32, 0xf4a00ca0, "vld1.32", LX2q, RAw, Md, i1_4, RDw, xop_wb, x, tvlD[0][0x1f]},
{OP_vld1_dup_32, 0xf4a00cbd, "vld1.32", LX2q, RAw, Md, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld1_dup_32, 0xf4a00cbf, "vld1.32", LX2q, xx, Md, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld1_dup_32, 0xf4a00cb0, "vld1.32", LX2q, RAw, Md, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{INVALID, 0xf4a00ccd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00ccf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00cc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00cdd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00cdf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00cd0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00ced, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00cef, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00ce0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00cfd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00cff, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00cf0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
}, { /* 1 */
{OP_vld4_dup_8, 0xf4a00f0d, "vld4.8", LX4q, RAw, Md, RAw, xx, no, x, END_LIST},
{OP_vld4_dup_8, 0xf4a00f0f, "vld4.8", LX4q, xx, Md, xx, xx, no, x, tvlD[1][0x00]},
{OP_vld4_dup_8, 0xf4a00f00, "vld4.8", LX4q, RAw, Md, RDw, RAw, no, x, tvlD[1][0x01]},
{OP_vld4_dup_8, 0xf4a00f1d, "vld4.8", LX4q, RAw, Md, RAw, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_8, 0xf4a00f1f, "vld4.8", LX4q, xx, Md, xx, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_8, 0xf4a00f10, "vld4.8", LX4q, RAw, Md, RDw, RAw, no, x, DUP_ENTRY},
{OP_vld4_dup_8, 0xf4a00f2d, "vld4.8", LX4Dq, RAw, Md, RAw, xx, no, x, tvlD[1][0x02]},
{OP_vld4_dup_8, 0xf4a00f2f, "vld4.8", LX4Dq, xx, Md, xx, xx, no, x, tvlD[1][0x06]},
{OP_vld4_dup_8, 0xf4a00f20, "vld4.8", LX4Dq, RAw, Md, RDw, RAw, no, x, tvlD[1][0x07]},
{OP_vld4_dup_8, 0xf4a00f3d, "vld4.8", LX4Dq, RAw, Md, RAw, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_8, 0xf4a00f3f, "vld4.8", LX4Dq, xx, Md, xx, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_8, 0xf4a00f30, "vld4.8", LX4Dq, RAw, Md, RDw, RAw, no, x, DUP_ENTRY},
{OP_vld4_dup_16, 0xf4a00f4d, "vld4.16", LX4q, RAw, Mq, i1_4, RAw, no, x, END_LIST},
{OP_vld4_dup_16, 0xf4a00f4f, "vld4.16", LX4q, xx, Mq, i1_4, xx, no, x, tvlD[1][0x0c]},
{OP_vld4_dup_16, 0xf4a00f40, "vld4.16", LX4q, RAw, Mq, i1_4, RDw, xop_wb, x, tvlD[1][0x0d]},
{OP_vld4_dup_16, 0xf4a00f5d, "vld4.16", LX4q, RAw, Mq, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld4_dup_16, 0xf4a00f5f, "vld4.16", LX4q, xx, Mq, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_16, 0xf4a00f50, "vld4.16", LX4q, RAw, Mq, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld4_dup_16, 0xf4a00f6d, "vld4.16", LX4Dq, RAw, Mq, i1_4, RAw, no, x, tvlD[1][0x0e]},
{OP_vld4_dup_16, 0xf4a00f6f, "vld4.16", LX4Dq, xx, Mq, i1_4, xx, no, x, tvlD[1][0x12]},
{OP_vld4_dup_16, 0xf4a00f60, "vld4.16", LX4Dq, RAw, Mq, i1_4, RDw, xop_wb, x, tvlD[1][0x13]},
{OP_vld4_dup_16, 0xf4a00f7d, "vld4.16", LX4Dq, RAw, Mq, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld4_dup_16, 0xf4a00f7f, "vld4.16", LX4Dq, xx, Mq, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_16, 0xf4a00f70, "vld4.16", LX4Dq, RAw, Mq, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00f8d, "vld4.32", LX4q, RAw, Mdq, i1_4, RAw, no, x, END_LIST},
{OP_vld4_dup_32, 0xf4a00f8f, "vld4.32", LX4q, xx, Mdq, i1_4, xx, no, x, tvlD[1][0x18]},
{OP_vld4_dup_32, 0xf4a00f80, "vld4.32", LX4q, RAw, Mdq, i1_4, RDw, xop_wb, x, tvlD[1][0x19]},
{OP_vld4_dup_32, 0xf4a00f9d, "vld4.32", LX4q, RAw, Mdq, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00f9f, "vld4.32", LX4q, xx, Mdq, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00f90, "vld4.32", LX4q, RAw, Mdq, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00fad, "vld4.32", LX4Dq, RAw, Mdq, i1_4, RAw, no, x, tvlD[1][0x1a]},
{OP_vld4_dup_32, 0xf4a00faf, "vld4.32", LX4Dq, xx, Mdq, i1_4, xx, no, x, tvlD[1][0x1e]},
{OP_vld4_dup_32, 0xf4a00fa0, "vld4.32", LX4Dq, RAw, Mdq, i1_4, RDw, xop_wb, x, tvlD[1][0x1f]},
{OP_vld4_dup_32, 0xf4a00fbd, "vld4.32", LX4Dq, RAw, Mdq, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00fbf, "vld4.32", LX4Dq, xx, Mdq, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00fb0, "vld4.32", LX4Dq, RAw, Mdq, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{INVALID, 0xf4a00fcd, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00fcf, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00fc0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld4_dup_32, 0xf4a00fdd, "vld4.32", LX4Dq, RAw, Mdq, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00fdf, "vld4.32", LX4Dq, xx, Mdq, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00fd0, "vld4.32", LX4Dq, RAw, Mdq, i1_4, RDw, xop_wb, x, DUP_ENTRY},
{INVALID, 0xf4a00fed, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00fef, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{INVALID, 0xf4a00fe0, "(bad)", xx, xx, xx, xx, xx, no, x, NA},
{OP_vld4_dup_32, 0xf4a00fdd, "vld4.32", LX4Dq, RAw, Mdq, i1_4, RAw, no, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00fdf, "vld4.32", LX4Dq, xx, Mdq, i1_4, xx, no, x, DUP_ENTRY},
{OP_vld4_dup_32, 0xf4a00fd0, "vld4.32", LX4Dq, RAw, Mdq, i1_4, RDw, xop_wb, x, DUP_ENTRY},
},
};
/* Indexed by:
* + if 11:10 != 2, then index 0;
* + else, 9:8,6
* XXX: this is to handle OP_vtb{l,x} only and it adds an extra step
* for a lot of other opcodes -- can we do better?
*/
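/* Illustrative sketch (assumed form):
 *   idx = (((opc >> 10) & 3) != 2) ? 0
 *       : 1 + ((((opc >> 8) & 3) << 1) | ((opc >> 6) & 1));
 * i.e. one shared slot plus eight slots from bits 9:8,6, for the 9
 * entries below.
 */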
const instr_info_t A32_ext_vtb[][9] = {
{ /* 0 */
{EXT_BITS16, 0xf3b00000, "(ext bits16 9)", xx, xx, xx, xx, xx, no, x, 9},
{OP_vtbl_8, 0xf3b00800, "vtbl.8", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vtbx_8, 0xf3b00840, "vtbx.8", VBq, xx, VAq, VCq, xx, no, x, END_LIST},
{OP_vtbl_8, 0xf3b00900, "vtbl.8", VBq, xx, LXA2q, VCq, xx, no, x, tvtb[0][0x01]},
{OP_vtbx_8, 0xf3b00940, "vtbx.8", VBq, xx, LXA2q, VCq, xx, no, x, tvtb[0][0x02]},
{OP_vtbl_8, 0xf3b00a00, "vtbl.8", VBq, xx, LXA3q, VCq, xx, no, x, tvtb[0][0x03]},
{OP_vtbx_8, 0xf3b00a40, "vtbx.8", VBq, xx, LXA3q, VCq, xx, no, x, tvtb[0][0x04]},
{OP_vtbl_8, 0xf3b00b00, "vtbl.8", VBq, xx, LXA4q, VCq, xx, no, x, tvtb[0][0x05]},
{OP_vtbx_8, 0xf3b00b40, "vtbx.8", VBq, xx, LXA4q, VCq, xx, no, x, tvtb[0][0x06]},
},
};
| 1 | 11,883 | OK, it looks like all children have bit 4 set, but it doesn't really matter at this split point: just informative, nothing reads it. | DynamoRIO-dynamorio | c |
@@ -32,6 +32,12 @@ type DNSZoneSpec struct {
// Zone is the DNS zone to host
Zone string `json:"zone"`
+ // LinkToParentDomain specifies whether DNS records should
+ // be automatically created to link this DNSZone with a
+ // parent domain.
+ // +optional
+ LinkToParentDomain bool `json:"linkToParentDomain,omitempty"`
+
// AWS specifies AWS-specific cloud configuration
// +optional
AWS *AWSDNSZoneSpec `json:"aws,omitempty"` | 1 | /*
Copyright 2018 The Kubernetes Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package v1alpha1
import (
corev1 "k8s.io/api/core/v1"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
)
const (
// FinalizerDNSZone is used on DNSZones to ensure we successfully deprovision
// the cloud objects before cleaning up the API object.
FinalizerDNSZone string = "hive.openshift.io/dnszone"
)
// DNSZoneSpec defines the desired state of DNSZone
type DNSZoneSpec struct {
// Zone is the DNS zone to host
Zone string `json:"zone"`
// AWS specifies AWS-specific cloud configuration
// +optional
AWS *AWSDNSZoneSpec `json:"aws,omitempty"`
}
// AWSDNSZoneSpec contains AWS-specific DNSZone specifications
type AWSDNSZoneSpec struct {
// AccountSecret contains a reference to a secret that contains AWS credentials
// for CRUD operations
AccountSecret corev1.LocalObjectReference `json:"accountSecret"`
// Region specifies the region-specific API endpoint to use
Region string `json:"region"`
// AdditionalTags is a set of additional tags to set on the DNS hosted zone. In addition
// to these tags, the DNS Zone controller will set a hive.openshift.io/hostedzone tag
// identifying the HostedZone record that it belongs to.
AdditionalTags []AWSResourceTag `json:"additionalTags,omitempty"`
}
// AWSResourceTag represents a tag that is applied to an AWS cloud resource
type AWSResourceTag struct {
// Key is the key for the tag
Key string `json:"key"`
// Value is the value for the tag
Value string `json:"value"`
}
// DNSZoneStatus defines the observed state of DNSZone
type DNSZoneStatus struct {
// LastSyncTimestamp is the time that the zone was last sync'd.
// +optional
LastSyncTimestamp *metav1.Time `json:"lastSyncTimestamp,omitempty"`
// LastSyncGeneration is the generation of the zone resource that was last sync'd. This is used to know
// if the Object has changed and we should sync immediately.
LastSyncGeneration int64 `json:"lastSyncGeneration"`
// NameServers is a list of nameservers for this DNS zone
// +optional
NameServers []string `json:"nameServers,omitempty"`
// AWSDNSZoneStatus contains status information specific to AWS
// +optional
AWS *AWSDNSZoneStatus `json:"aws,omitempty"`
}
// AWSDNSZoneStatus contains status information specific to AWS DNS zones
type AWSDNSZoneStatus struct {
// ZoneID is the ID of the zone in AWS
// +optional
ZoneID *string `json:"zoneID,omitempty"`
}
// +genclient
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DNSZone is the Schema for the dnszones API
// +k8s:openapi-gen=true
// +kubebuilder:subresource:status
type DNSZone struct {
metav1.TypeMeta `json:",inline"`
metav1.ObjectMeta `json:"metadata,omitempty"`
Spec DNSZoneSpec `json:"spec,omitempty"`
Status DNSZoneStatus `json:"status,omitempty"`
}
// +k8s:deepcopy-gen:interfaces=k8s.io/apimachinery/pkg/runtime.Object
// DNSZoneList contains a list of DNSZone
type DNSZoneList struct {
metav1.TypeMeta `json:",inline"`
metav1.ListMeta `json:"metadata,omitempty"`
Items []DNSZone `json:"items"`
}
func init() {
SchemeBuilder.Register(&DNSZone{}, &DNSZoneList{})
}
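// Illustrative sketch only (not part of these API types): what a DNSZone
// using the linkToParentDomain field added in the patch above might look
// like when constructed in Go. All values are placeholders.
//
//	zone := &DNSZone{
//		Spec: DNSZoneSpec{
//			Zone:               "apps.example.com",
//			LinkToParentDomain: true,
//			AWS: &AWSDNSZoneSpec{
//				AccountSecret: corev1.LocalObjectReference{Name: "aws-creds"},
//				Region:        "us-east-1",
//			},
//		},
//	}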
| 1 | 5,751 | Sorry for the very basic question but why do we still have a DNSZone CRD in light of the DNSEndpoint, I don't fully understand the differences. | openshift-hive | go |
@@ -0,0 +1,17 @@
+package session
+
+import (
+ "testing"
+ "github.com/stretchr/testify/assert"
+)
+
+func TestManagerHasUniqueSessionsStored(t *testing.T) {
+ manager := Manager{}
+ length := 10
+
+ for i := 0; i < length; i++ {
+ manager.Create()
+ }
+
+ assert.Len(t, manager.Sessions, length)
+} | 1 | 1 | 9,520 | Looks like loop with overhead. You can: - create SID - test private function `sessionIsUnique()` | mysteriumnetwork-node | go |
|
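A minimal sketch of what the reviewer's suggestion could look like, assuming a hypothetical private helper sessionIsUnique on Manager and that Create() returns the new session; neither assumption is confirmed by the diff above:

package session

import (
	"testing"

	"github.com/stretchr/testify/assert"
)

// Sketch: create a single session, then assert uniqueness through the
// (hypothetical) private helper instead of looping ten times.
func TestManagerCreatesUniqueSession(t *testing.T) {
	manager := Manager{}

	session := manager.Create()

	// sessionIsUnique is the helper the reviewer proposes; it is assumed
	// to report whether the given SID occurs exactly once in Sessions.
	assert.True(t, manager.sessionIsUnique(session.ID))
}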
@@ -54,6 +54,7 @@ public class UserPreferences {
private static final String PREF_SHOW_DOWNLOAD_REPORT = "prefShowDownloadReport";
public static final String PREF_BACK_BUTTON_BEHAVIOR = "prefBackButtonBehavior";
private static final String PREF_BACK_BUTTON_GO_TO_PAGE = "prefBackButtonGoToPage";
+ public static final String PREF_QUEUE_SORT_ORDER = "prefQueueSortOrder";
// Queue
private static final String PREF_QUEUE_ADD_TO_FRONT = "prefQueueAddToFront"; | 1 | package de.danoeh.antennapod.core.preferences;
import android.content.Context;
import android.content.SharedPreferences;
import android.preference.PreferenceManager;
import android.support.annotation.IntRange;
import android.support.annotation.NonNull;
import android.support.v4.app.NotificationCompat;
import android.text.TextUtils;
import android.util.Log;
import org.json.JSONArray;
import org.json.JSONException;
import java.io.File;
import java.io.IOException;
import java.net.Proxy;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.List;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.service.download.ProxyConfig;
import de.danoeh.antennapod.core.storage.APCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APNullCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.APQueueCleanupAlgorithm;
import de.danoeh.antennapod.core.storage.EpisodeCleanupAlgorithm;
import de.danoeh.antennapod.core.util.Converter;
import de.danoeh.antennapod.core.util.download.AutoUpdateManager;
/**
* Provides access to preferences set by the user in the settings screen. This
* class must first be initialized via init(); otherwise every public method
* will throw an Exception when called.
*/
public class UserPreferences {
private UserPreferences(){}
private static final String IMPORT_DIR = "import/";
private static final String TAG = "UserPreferences";
// User Interface
public static final String PREF_THEME = "prefTheme";
public static final String PREF_HIDDEN_DRAWER_ITEMS = "prefHiddenDrawerItems";
private static final String PREF_DRAWER_FEED_ORDER = "prefDrawerFeedOrder";
private static final String PREF_DRAWER_FEED_COUNTER = "prefDrawerFeedIndicator";
public static final String PREF_EXPANDED_NOTIFICATION = "prefExpandNotify";
private static final String PREF_PERSISTENT_NOTIFICATION = "prefPersistNotify";
public static final String PREF_COMPACT_NOTIFICATION_BUTTONS = "prefCompactNotificationButtons";
public static final String PREF_LOCKSCREEN_BACKGROUND = "prefLockscreenBackground";
private static final String PREF_SHOW_DOWNLOAD_REPORT = "prefShowDownloadReport";
public static final String PREF_BACK_BUTTON_BEHAVIOR = "prefBackButtonBehavior";
private static final String PREF_BACK_BUTTON_GO_TO_PAGE = "prefBackButtonGoToPage";
// Queue
private static final String PREF_QUEUE_ADD_TO_FRONT = "prefQueueAddToFront";
// Playback
public static final String PREF_PAUSE_ON_HEADSET_DISCONNECT = "prefPauseOnHeadsetDisconnect";
public static final String PREF_UNPAUSE_ON_HEADSET_RECONNECT = "prefUnpauseOnHeadsetReconnect";
private static final String PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT = "prefUnpauseOnBluetoothReconnect";
private static final String PREF_HARDWARE_FOWARD_BUTTON_SKIPS = "prefHardwareForwardButtonSkips";
private static final String PREF_HARDWARE_PREVIOUS_BUTTON_RESTARTS = "prefHardwarePreviousButtonRestarts";
public static final String PREF_FOLLOW_QUEUE = "prefFollowQueue";
private static final String PREF_SKIP_KEEPS_EPISODE = "prefSkipKeepsEpisode";
private static final String PREF_FAVORITE_KEEPS_EPISODE = "prefFavoriteKeepsEpisode";
private static final String PREF_AUTO_DELETE = "prefAutoDelete";
public static final String PREF_SMART_MARK_AS_PLAYED_SECS = "prefSmartMarkAsPlayedSecs";
private static final String PREF_PLAYBACK_SPEED_ARRAY = "prefPlaybackSpeedArray";
private static final String PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS = "prefPauseForFocusLoss";
private static final String PREF_RESUME_AFTER_CALL = "prefResumeAfterCall";
public static final String PREF_VIDEO_BEHAVIOR = "prefVideoBehavior";
private static final String PREF_TIME_RESPECTS_SPEED = "prefPlaybackTimeRespectsSpeed";
// Network
private static final String PREF_ENQUEUE_DOWNLOADED = "prefEnqueueDownloaded";
public static final String PREF_UPDATE_INTERVAL = "prefAutoUpdateIntervall";
public static final String PREF_MOBILE_UPDATE = "prefMobileUpdateAllowed";
public static final String PREF_EPISODE_CLEANUP = "prefEpisodeCleanup";
public static final String PREF_PARALLEL_DOWNLOADS = "prefParallelDownloads";
public static final String PREF_EPISODE_CACHE_SIZE = "prefEpisodeCacheSize";
public static final String PREF_ENABLE_AUTODL = "prefEnableAutoDl";
public static final String PREF_ENABLE_AUTODL_ON_BATTERY = "prefEnableAutoDownloadOnBattery";
public static final String PREF_ENABLE_AUTODL_WIFI_FILTER = "prefEnableAutoDownloadWifiFilter";
public static final String PREF_ENABLE_AUTODL_ON_MOBILE = "prefEnableAutoDownloadOnMobile";
private static final String PREF_AUTODL_SELECTED_NETWORKS = "prefAutodownloadSelectedNetworks";
private static final String PREF_PROXY_TYPE = "prefProxyType";
private static final String PREF_PROXY_HOST = "prefProxyHost";
private static final String PREF_PROXY_PORT = "prefProxyPort";
private static final String PREF_PROXY_USER = "prefProxyUser";
private static final String PREF_PROXY_PASSWORD = "prefProxyPassword";
// Services
private static final String PREF_AUTO_FLATTR = "pref_auto_flattr";
private static final String PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD = "prefAutoFlattrPlayedDurationThreshold";
private static final String PREF_GPODNET_NOTIFICATIONS = "pref_gpodnet_notifications";
// Other
private static final String PREF_DATA_FOLDER = "prefDataFolder";
public static final String PREF_IMAGE_CACHE_SIZE = "prefImageCacheSize";
public static final String PREF_DELETE_REMOVES_FROM_QUEUE = "prefDeleteRemovesFromQueue";
// Mediaplayer
public static final String PREF_MEDIA_PLAYER = "prefMediaPlayer";
public static final String PREF_MEDIA_PLAYER_EXOPLAYER = "exoplayer";
private static final String PREF_PLAYBACK_SPEED = "prefPlaybackSpeed";
public static final String PREF_PLAYBACK_SKIP_SILENCE = "prefSkipSilence";
private static final String PREF_FAST_FORWARD_SECS = "prefFastForwardSecs";
private static final String PREF_REWIND_SECS = "prefRewindSecs";
private static final String PREF_QUEUE_LOCKED = "prefQueueLocked";
private static final String IMAGE_CACHE_DEFAULT_VALUE = "100";
private static final int IMAGE_CACHE_SIZE_MINIMUM = 20;
private static final String PREF_LEFT_VOLUME = "prefLeftVolume";
private static final String PREF_RIGHT_VOLUME = "prefRightVolume";
// Experimental
private static final String PREF_STEREO_TO_MONO = "PrefStereoToMono";
public static final String PREF_CAST_ENABLED = "prefCast"; //Used for enabling Chromecast support
public static final int EPISODE_CLEANUP_QUEUE = -1;
public static final int EPISODE_CLEANUP_NULL = -2;
public static final int EPISODE_CLEANUP_DEFAULT = 0;
// Constants
private static final int NOTIFICATION_BUTTON_REWIND = 0;
private static final int NOTIFICATION_BUTTON_FAST_FORWARD = 1;
private static final int NOTIFICATION_BUTTON_SKIP = 2;
private static final int EPISODE_CACHE_SIZE_UNLIMITED = -1;
public static final int FEED_ORDER_COUNTER = 0;
public static final int FEED_ORDER_ALPHABETICAL = 1;
public static final int FEED_ORDER_MOST_PLAYED = 3;
public static final int FEED_COUNTER_SHOW_NEW_UNPLAYED_SUM = 0;
public static final int FEED_COUNTER_SHOW_NEW = 1;
public static final int FEED_COUNTER_SHOW_UNPLAYED = 2;
public static final int FEED_COUNTER_SHOW_NONE = 3;
public static final int FEED_COUNTER_SHOW_DOWNLOADED = 4;
private static Context context;
private static SharedPreferences prefs;
/**
* Sets up the UserPreferences class.
*
* @throws IllegalArgumentException if context is null
*/
public static void init(@NonNull Context context) {
Log.d(TAG, "Creating new instance of UserPreferences");
UserPreferences.context = context.getApplicationContext();
UserPreferences.prefs = PreferenceManager.getDefaultSharedPreferences(context);
createImportDirectory();
createNoMediaFile();
}
/**
* Returns theme as R.style value
*
* @return R.style.Theme_AntennaPod_Light, R.style.Theme_AntennaPod_Dark or R.style.Theme_AntennaPod_TrueBlack
*/
public static int getTheme() {
return readThemeValue(prefs.getString(PREF_THEME, "0"));
}
public static int getNoTitleTheme() {
int theme = getTheme();
if (theme == R.style.Theme_AntennaPod_Dark) {
return R.style.Theme_AntennaPod_Dark_NoTitle;
} else if (theme == R.style.Theme_AntennaPod_TrueBlack) {
return R.style.Theme_AntennaPod_TrueBlack_NoTitle;
} else {
return R.style.Theme_AntennaPod_Light_NoTitle;
}
}
public static List<String> getHiddenDrawerItems() {
String hiddenItems = prefs.getString(PREF_HIDDEN_DRAWER_ITEMS, "");
return new ArrayList<>(Arrays.asList(TextUtils.split(hiddenItems, ",")));
}
public static List<Integer> getCompactNotificationButtons() {
String[] buttons = TextUtils.split(
prefs.getString(PREF_COMPACT_NOTIFICATION_BUTTONS,
String.valueOf(NOTIFICATION_BUTTON_SKIP)),
",");
List<Integer> notificationButtons = new ArrayList<>();
for (String button : buttons) {
notificationButtons.add(Integer.parseInt(button));
}
return notificationButtons;
}
/**
* Helper function to return whether the specified button should be shown on compact
* notifications.
*
* @param buttonId Either NOTIFICATION_BUTTON_REWIND, NOTIFICATION_BUTTON_FAST_FORWARD or
* NOTIFICATION_BUTTON_SKIP.
* @return {@code true} if button should be shown, {@code false} otherwise
*/
private static boolean showButtonOnCompactNotification(int buttonId) {
return getCompactNotificationButtons().contains(buttonId);
}
public static boolean showRewindOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_REWIND);
}
public static boolean showFastForwardOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_FAST_FORWARD);
}
public static boolean showSkipOnCompactNotification() {
return showButtonOnCompactNotification(NOTIFICATION_BUTTON_SKIP);
}
public static int getFeedOrder() {
String value = prefs.getString(PREF_DRAWER_FEED_ORDER, "0");
return Integer.parseInt(value);
}
public static int getFeedCounterSetting() {
String value = prefs.getString(PREF_DRAWER_FEED_COUNTER, "0");
return Integer.parseInt(value);
}
/**
* Returns notification priority.
*
* @return NotificationCompat.PRIORITY_MAX or NotificationCompat.PRIORITY_DEFAULT
*/
public static int getNotifyPriority() {
if (prefs.getBoolean(PREF_EXPANDED_NOTIFICATION, false)) {
return NotificationCompat.PRIORITY_MAX;
} else {
return NotificationCompat.PRIORITY_DEFAULT;
}
}
/**
* Returns true if notifications are persistent
*
* @return {@code true} if notifications are persistent, {@code false} otherwise
*/
public static boolean isPersistNotify() {
return prefs.getBoolean(PREF_PERSISTENT_NOTIFICATION, true);
}
/**
* Returns true if the lockscreen background should be set to the current episode's image
*
* @return {@code true} if the lockscreen background should be set, {@code false} otherwise
*/
public static boolean setLockscreenBackground() {
return prefs.getBoolean(PREF_LOCKSCREEN_BACKGROUND, true);
}
/**
* Returns true if download reports are shown
*
* @return {@code true} if download reports are shown, {@code false} otherwise
*/
public static boolean showDownloadReport() {
return prefs.getBoolean(PREF_SHOW_DOWNLOAD_REPORT, true);
}
public static boolean enqueueDownloadedEpisodes() {
return prefs.getBoolean(PREF_ENQUEUE_DOWNLOADED, true);
}
public static boolean enqueueAtFront() {
return prefs.getBoolean(PREF_QUEUE_ADD_TO_FRONT, false);
}
public static boolean isPauseOnHeadsetDisconnect() {
return prefs.getBoolean(PREF_PAUSE_ON_HEADSET_DISCONNECT, true);
}
public static boolean isUnpauseOnHeadsetReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_HEADSET_RECONNECT, true);
}
public static boolean isUnpauseOnBluetoothReconnect() {
return prefs.getBoolean(PREF_UNPAUSE_ON_BLUETOOTH_RECONNECT, false);
}
public static boolean shouldHardwareButtonSkip() {
return prefs.getBoolean(PREF_HARDWARE_FOWARD_BUTTON_SKIPS, false);
}
public static boolean shouldHardwarePreviousButtonRestart() {
return prefs.getBoolean(PREF_HARDWARE_PREVIOUS_BUTTON_RESTARTS, false);
}
public static boolean isFollowQueue() {
return prefs.getBoolean(PREF_FOLLOW_QUEUE, true);
}
public static boolean shouldSkipKeepEpisode() {
return prefs.getBoolean(PREF_SKIP_KEEPS_EPISODE, true);
}
public static boolean shouldFavoriteKeepEpisode() {
return prefs.getBoolean(PREF_FAVORITE_KEEPS_EPISODE, true);
}
public static boolean isAutoDelete() {
return prefs.getBoolean(PREF_AUTO_DELETE, false);
}
public static int getSmartMarkAsPlayedSecs() {
return Integer.parseInt(prefs.getString(PREF_SMART_MARK_AS_PLAYED_SECS, "30"));
}
public static boolean shouldDeleteRemoveFromQueue() {
return prefs.getBoolean(PREF_DELETE_REMOVES_FROM_QUEUE, false);
}
public static boolean isAutoFlattr() {
return prefs.getBoolean(PREF_AUTO_FLATTR, false);
}
public static String getPlaybackSpeed() {
return prefs.getString(PREF_PLAYBACK_SPEED, "1.00");
}
public static boolean isSkipSilence() {
return prefs.getBoolean(PREF_PLAYBACK_SKIP_SILENCE, false);
}
public static String[] getPlaybackSpeedArray() {
return readPlaybackSpeedArray(prefs.getString(PREF_PLAYBACK_SPEED_ARRAY, null));
}
public static float getLeftVolume() {
int volume = prefs.getInt(PREF_LEFT_VOLUME, 100);
return Converter.getVolumeFromPercentage(volume);
}
public static float getRightVolume() {
int volume = prefs.getInt(PREF_RIGHT_VOLUME, 100);
return Converter.getVolumeFromPercentage(volume);
}
public static int getLeftVolumePercentage() {
return prefs.getInt(PREF_LEFT_VOLUME, 100);
}
public static int getRightVolumePercentage() {
return prefs.getInt(PREF_RIGHT_VOLUME, 100);
}
public static boolean shouldPauseForFocusLoss() {
return prefs.getBoolean(PREF_PAUSE_PLAYBACK_FOR_FOCUS_LOSS, false);
}
/*
* Returns update interval in milliseconds; value 0 means that auto update is disabled
* or feeds are updated at a certain time of day
*/
public static long getUpdateInterval() {
String updateInterval = prefs.getString(PREF_UPDATE_INTERVAL, "0");
if(!updateInterval.contains(":")) {
return readUpdateInterval(updateInterval);
} else {
return 0;
}
}
public static int[] getUpdateTimeOfDay() {
String datetime = prefs.getString(PREF_UPDATE_INTERVAL, "");
if(datetime.length() >= 3 && datetime.contains(":")) {
String[] parts = datetime.split(":");
int hourOfDay = Integer.parseInt(parts[0]);
int minute = Integer.parseInt(parts[1]);
return new int[] { hourOfDay, minute };
} else {
return new int[0];
}
}
public static boolean isAutoUpdateDisabled() {
return prefs.getString(PREF_UPDATE_INTERVAL, "").equals("0");
}
public static String getMobileUpdatesEnabled() {
return prefs.getString(PREF_MOBILE_UPDATE, "images");
}
public static boolean isAllowMobileUpdate() {
return getMobileUpdatesEnabled().equals("everything");
}
public static boolean isAllowMobileImages() {
return isAllowMobileUpdate() || getMobileUpdatesEnabled().equals("images");
}
public static int getParallelDownloads() {
return Integer.parseInt(prefs.getString(PREF_PARALLEL_DOWNLOADS, "4"));
}
public static int getEpisodeCacheSizeUnlimited() {
return context.getResources().getInteger(R.integer.episode_cache_size_unlimited);
}
/**
* Returns the capacity of the episode cache. This method will return the
* negative integer EPISODE_CACHE_SIZE_UNLIMITED if the cache size is set to
* 'unlimited'.
*/
public static int getEpisodeCacheSize() {
return readEpisodeCacheSizeInternal(prefs.getString(PREF_EPISODE_CACHE_SIZE, "20"));
}
public static boolean isEnableAutodownload() {
return prefs.getBoolean(PREF_ENABLE_AUTODL, false);
}
public static boolean isEnableAutodownloadOnBattery() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_BATTERY, true);
}
public static boolean isEnableAutodownloadWifiFilter() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_WIFI_FILTER, false);
}
public static boolean isEnableAutodownloadOnMobile() {
return prefs.getBoolean(PREF_ENABLE_AUTODL_ON_MOBILE, false);
}
public static int getImageCacheSize() {
String cacheSizeString = prefs.getString(PREF_IMAGE_CACHE_SIZE, IMAGE_CACHE_DEFAULT_VALUE);
int cacheSizeInt = Integer.parseInt(cacheSizeString);
// if the cache size is too small the user won't get any images at all
// that's bad, force it back to the default.
if (cacheSizeInt < IMAGE_CACHE_SIZE_MINIMUM) {
prefs.edit().putString(PREF_IMAGE_CACHE_SIZE, IMAGE_CACHE_DEFAULT_VALUE).apply();
cacheSizeInt = Integer.parseInt(IMAGE_CACHE_DEFAULT_VALUE);
}
// the preference value is in megabytes; convert to bytes
int cacheSizeBytes = cacheSizeInt * 1024 * 1024;
return cacheSizeBytes;
}
public static int getFastForwardSecs() {
return prefs.getInt(PREF_FAST_FORWARD_SECS, 30);
}
public static int getRewindSecs() {
return prefs.getInt(PREF_REWIND_SECS, 30);
}
/**
* Returns the time after which an episode should be auto-flattr'd in percent of the episode's
* duration.
*/
public static float getAutoFlattrPlayedDurationThreshold() {
return prefs.getFloat(PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD, 0.8f);
}
public static String[] getAutodownloadSelectedNetworks() {
String selectedNetWorks = prefs.getString(PREF_AUTODL_SELECTED_NETWORKS, "");
return TextUtils.split(selectedNetWorks, ",");
}
public static void setProxyConfig(ProxyConfig config) {
SharedPreferences.Editor editor = prefs.edit();
editor.putString(PREF_PROXY_TYPE, config.type.name());
if(TextUtils.isEmpty(config.host)) {
editor.remove(PREF_PROXY_HOST);
} else {
editor.putString(PREF_PROXY_HOST, config.host);
}
if(config.port <= 0 || config.port > 65535) {
editor.remove(PREF_PROXY_PORT);
} else {
editor.putInt(PREF_PROXY_PORT, config.port);
}
if(TextUtils.isEmpty(config.username)) {
editor.remove(PREF_PROXY_USER);
} else {
editor.putString(PREF_PROXY_USER, config.username);
}
if(TextUtils.isEmpty(config.password)) {
editor.remove(PREF_PROXY_PASSWORD);
} else {
editor.putString(PREF_PROXY_PASSWORD, config.password);
}
editor.apply();
}
public static ProxyConfig getProxyConfig() {
Proxy.Type type = Proxy.Type.valueOf(prefs.getString(PREF_PROXY_TYPE, Proxy.Type.DIRECT.name()));
String host = prefs.getString(PREF_PROXY_HOST, null);
int port = prefs.getInt(PREF_PROXY_PORT, 0);
String username = prefs.getString(PREF_PROXY_USER, null);
String password = prefs.getString(PREF_PROXY_PASSWORD, null);
return new ProxyConfig(type, host, port, username, password);
}
public static boolean shouldResumeAfterCall() {
return prefs.getBoolean(PREF_RESUME_AFTER_CALL, true);
}
public static boolean isQueueLocked() {
return prefs.getBoolean(PREF_QUEUE_LOCKED, false);
}
public static void setFastForwardSecs(int secs) {
prefs.edit()
.putInt(PREF_FAST_FORWARD_SECS, secs)
.apply();
}
public static void setRewindSecs(int secs) {
prefs.edit()
.putInt(PREF_REWIND_SECS, secs)
.apply();
}
public static void setPlaybackSpeed(String speed) {
prefs.edit()
.putString(PREF_PLAYBACK_SPEED, speed)
.apply();
}
public static void setSkipSilence(boolean skipSilence) {
prefs.edit()
.putBoolean(PREF_PLAYBACK_SKIP_SILENCE, skipSilence)
.apply();
}
public static void setPlaybackSpeedArray(String[] speeds) {
JSONArray jsonArray = new JSONArray();
for (String speed : speeds) {
jsonArray.put(speed);
}
prefs.edit()
.putString(PREF_PLAYBACK_SPEED_ARRAY, jsonArray.toString())
.apply();
}
public static void setVolume(@IntRange(from = 0, to = 100) int leftVolume,
@IntRange(from = 0, to = 100) int rightVolume) {
prefs.edit()
.putInt(PREF_LEFT_VOLUME, leftVolume)
.putInt(PREF_RIGHT_VOLUME, rightVolume)
.apply();
}
public static void setAutodownloadSelectedNetworks(String[] value) {
prefs.edit()
.putString(PREF_AUTODL_SELECTED_NETWORKS, TextUtils.join(",", value))
.apply();
}
/**
* Sets the update interval value.
*/
public static void setUpdateInterval(long hours) {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, String.valueOf(hours))
.apply();
// when updating with an interval, we assume the user wants
// to update *now* and then every 'hours' interval thereafter.
restartUpdateAlarm();
}
/**
* Sets the update interval value.
*/
public static void setUpdateTimeOfDay(int hourOfDay, int minute) {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, hourOfDay + ":" + minute)
.apply();
restartUpdateAlarm();
}
public static void disableAutoUpdate() {
prefs.edit()
.putString(PREF_UPDATE_INTERVAL, "0")
.apply();
AutoUpdateManager.disableAutoUpdate();
}
/**
* Change the auto-flattr settings
*
* @param enabled Whether automatic flattring should be enabled at all
* @param autoFlattrThreshold The percentage of playback time after which an episode should be
* flattr'd. Must be a value between 0 and 1 (inclusive)
*/
public static void setAutoFlattrSettings(boolean enabled, float autoFlattrThreshold) {
if(autoFlattrThreshold < 0.0 || autoFlattrThreshold > 1.0) {
throw new IllegalArgumentException("Flattr threshold must be in range [0.0, 1.0]");
}
prefs.edit()
.putBoolean(PREF_AUTO_FLATTR, enabled)
.putFloat(PREF_AUTO_FLATTR_PLAYED_DURATION_THRESHOLD, autoFlattrThreshold)
.apply();
}
public static boolean gpodnetNotificationsEnabled() {
return prefs.getBoolean(PREF_GPODNET_NOTIFICATIONS, true);
}
public static void setGpodnetNotificationsEnabled() {
prefs.edit()
.putBoolean(PREF_GPODNET_NOTIFICATIONS, true)
.apply();
}
public static void setHiddenDrawerItems(List<String> items) {
String str = TextUtils.join(",", items);
prefs.edit()
.putString(PREF_HIDDEN_DRAWER_ITEMS, str)
.apply();
}
public static void setCompactNotificationButtons(List<Integer> items) {
String str = TextUtils.join(",", items);
prefs.edit()
.putString(PREF_COMPACT_NOTIFICATION_BUTTONS, str)
.apply();
}
public static void setQueueLocked(boolean locked) {
prefs.edit()
.putBoolean(PREF_QUEUE_LOCKED, locked)
.apply();
}
private static int readThemeValue(String valueFromPrefs) {
switch (Integer.parseInt(valueFromPrefs)) {
case 0:
return R.style.Theme_AntennaPod_Light;
case 1:
return R.style.Theme_AntennaPod_Dark;
case 2:
return R.style.Theme_AntennaPod_TrueBlack;
default:
return R.style.Theme_AntennaPod_Light;
}
}
private static long readUpdateInterval(String valueFromPrefs) {
int hours = Integer.parseInt(valueFromPrefs);
return TimeUnit.HOURS.toMillis(hours);
}
private static int readEpisodeCacheSizeInternal(String valueFromPrefs) {
if (valueFromPrefs.equals(context.getString(R.string.pref_episode_cache_unlimited))) {
return EPISODE_CACHE_SIZE_UNLIMITED;
} else {
return Integer.parseInt(valueFromPrefs);
}
}
private static String[] readPlaybackSpeedArray(String valueFromPrefs) {
String[] selectedSpeeds = null;
// If this preference hasn't been set yet, return the default options
if (valueFromPrefs == null) {
selectedSpeeds = new String[] { "1.00", "1.25", "1.50", "1.75", "2.00" };
} else {
try {
JSONArray jsonArray = new JSONArray(valueFromPrefs);
selectedSpeeds = new String[jsonArray.length()];
for (int i = 0; i < jsonArray.length(); i++) {
selectedSpeeds[i] = jsonArray.getString(i);
}
} catch (JSONException e) {
Log.e(TAG, "Got JSON error when trying to get speeds from JSONArray");
e.printStackTrace();
}
}
return selectedSpeeds;
}
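// Example: if PREF_PLAYBACK_SPEED_ARRAY holds the JSON string
// '["1.00","1.50","2.00"]', this returns {"1.00", "1.50", "2.00"};
// if the preference has never been set, the defaults above are returned.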
public static boolean useSonic() {
return prefs.getString(PREF_MEDIA_PLAYER, "sonic").equals("sonic");
}
public static boolean useExoplayer() {
return prefs.getString(PREF_MEDIA_PLAYER, "sonic").equals(PREF_MEDIA_PLAYER_EXOPLAYER);
}
public static void enableSonic() {
prefs.edit().putString(PREF_MEDIA_PLAYER, "sonic").apply();
}
public static boolean stereoToMono() {
return prefs.getBoolean(PREF_STEREO_TO_MONO, false);
}
public static void stereoToMono(boolean enable) {
prefs.edit()
.putBoolean(PREF_STEREO_TO_MONO, enable)
.apply();
}
public static VideoBackgroundBehavior getVideoBackgroundBehavior() {
switch (prefs.getString(PREF_VIDEO_BEHAVIOR, "stop")) {
case "stop": return VideoBackgroundBehavior.STOP;
case "pip": return VideoBackgroundBehavior.PICTURE_IN_PICTURE;
case "continue": return VideoBackgroundBehavior.CONTINUE_PLAYING;
default: return VideoBackgroundBehavior.STOP;
}
}
public static EpisodeCleanupAlgorithm getEpisodeCleanupAlgorithm() {
int cleanupValue = getEpisodeCleanupValue();
if (cleanupValue == EPISODE_CLEANUP_QUEUE) {
return new APQueueCleanupAlgorithm();
} else if (cleanupValue == EPISODE_CLEANUP_NULL) {
return new APNullCleanupAlgorithm();
} else {
return new APCleanupAlgorithm(cleanupValue);
}
}
public static int getEpisodeCleanupValue() {
return Integer.parseInt(prefs.getString(PREF_EPISODE_CLEANUP, "-1"));
}
public static void setEpisodeCleanupValue(int episodeCleanupValue) {
prefs.edit()
.putString(PREF_EPISODE_CLEANUP, Integer.toString(episodeCleanupValue))
.apply();
}
/**
* Return the folder where the app stores all of its data. This method will
* return the standard data folder if none has been set by the user.
*
* @param type The name of the folder inside the data folder. May be null
* when accessing the root of the data folder.
* @return The data folder that has been requested or null if the folder
* could not be created.
*/
public static File getDataFolder(String type) {
String strDir = prefs.getString(PREF_DATA_FOLDER, null);
if (strDir == null) {
Log.d(TAG, "Using default data folder");
return context.getExternalFilesDir(type);
} else {
File dataDir = new File(strDir);
if (!dataDir.exists()) {
if (!dataDir.mkdir()) {
Log.w(TAG, "Could not create data folder");
return null;
}
}
if (type == null) {
return dataDir;
} else {
// handle path separators
String[] dirs = type.split("/");
for (int i = 0; i < dirs.length; i++) {
if (dirs.length > 0) {
if (i < dirs.length - 1) {
dataDir = getDataFolder(dirs[i]);
if (dataDir == null) {
return null;
}
}
type = dirs[i];
}
}
File typeDir = new File(dataDir, type);
if (!typeDir.exists()) {
if (dataDir.canWrite()) {
if (!typeDir.mkdir()) {
Log.e(TAG, "Could not create data folder named " + type);
return null;
}
}
}
return typeDir;
}
}
}
public static void setDataFolder(String dir) {
Log.d(TAG, "setDataFolder(dir: " + dir + ")");
prefs.edit()
.putString(PREF_DATA_FOLDER, dir)
.apply();
createImportDirectory();
}
/**
* Create a .nomedia file to prevent scanning by the media scanner.
*/
private static void createNoMediaFile() {
File f = new File(context.getExternalFilesDir(null), ".nomedia");
if (!f.exists()) {
try {
f.createNewFile();
} catch (IOException e) {
Log.e(TAG, "Could not create .nomedia file");
e.printStackTrace();
}
Log.d(TAG, ".nomedia file created");
}
}
/**
* Creates the import directory if it doesn't exist and if storage is
* available
*/
private static void createImportDirectory() {
File importDir = getDataFolder(IMPORT_DIR);
if (importDir != null) {
if (importDir.exists()) {
Log.d(TAG, "Import directory already exists");
} else {
Log.d(TAG, "Creating import directory");
importDir.mkdir();
}
} else {
Log.d(TAG, "Could not access external storage.");
}
}
/**
* @return true if auto update is set to a specific time of day,
* false if auto update is set to an interval
*/
public static boolean isAutoUpdateTimeOfDay() {
return getUpdateTimeOfDay().length == 2;
}
public static void restartUpdateAlarm() {
if (isAutoUpdateDisabled()) {
AutoUpdateManager.disableAutoUpdate();
} else if (isAutoUpdateTimeOfDay()) {
int[] timeOfDay = getUpdateTimeOfDay();
Log.d(TAG, "timeOfDay: " + Arrays.toString(timeOfDay));
AutoUpdateManager.restartUpdateTimeOfDayAlarm(timeOfDay[0], timeOfDay[1]);
} else {
long milliseconds = getUpdateInterval();
AutoUpdateManager.restartUpdateIntervalAlarm(milliseconds);
}
}
/**
* Reads episode cache size as it is saved in the episode_cache_size_values array.
*/
public static int readEpisodeCacheSize(String valueFromPrefs) {
return readEpisodeCacheSizeInternal(valueFromPrefs);
}
/**
* Evaluates whether Cast support (Chromecast, Audio Cast, etc) is enabled on the preferences.
*/
public static boolean isCastEnabled() {
return prefs.getBoolean(PREF_CAST_ENABLED, false);
}
public enum VideoBackgroundBehavior {
STOP, PICTURE_IN_PICTURE, CONTINUE_PLAYING
}
public enum BackButtonBehavior {
DEFAULT, OPEN_DRAWER, DOUBLE_TAP, SHOW_PROMPT, GO_TO_PAGE
}
public static BackButtonBehavior getBackButtonBehavior() {
switch (prefs.getString(PREF_BACK_BUTTON_BEHAVIOR, "default")) {
case "default": return BackButtonBehavior.DEFAULT;
case "drawer": return BackButtonBehavior.OPEN_DRAWER;
case "doubletap": return BackButtonBehavior.DOUBLE_TAP;
case "prompt": return BackButtonBehavior.SHOW_PROMPT;
case "page": return BackButtonBehavior.GO_TO_PAGE;
default: return BackButtonBehavior.DEFAULT;
}
}
public static String getBackButtonGoToPage() {
return prefs.getString(PREF_BACK_BUTTON_GO_TO_PAGE, "QueueFragment");
}
public static void setBackButtonGoToPage(String tag) {
prefs.edit()
.putString(PREF_BACK_BUTTON_GO_TO_PAGE, tag)
.apply();
}
public static boolean timeRespectsSpeed() {
return prefs.getBoolean(PREF_TIME_RESPECTS_SPEED, false);
}
}
| 1 | 14,943 | In my opinion, it would be better to have two separate settings. One that stores if sorting is enabled and one that stores the order. That allows to toggle automatic sort without having to re-select the order. | AntennaPod-AntennaPod | java |
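A minimal sketch of the two-setting split suggested above, written in the style of this class. The preference keys, method names and default values are invented for illustration, not taken from AntennaPod:

public static final String PREF_QUEUE_KEEP_SORTED = "prefQueueKeepSorted";
public static final String PREF_QUEUE_KEEP_SORTED_ORDER = "prefQueueKeepSortedOrder";

// Toggle: is automatic queue sorting enabled at all?
public static boolean isQueueKeepSorted() {
    return prefs.getBoolean(PREF_QUEUE_KEEP_SORTED, false);
}

// The remembered order, kept even while the toggle is off so that
// re-enabling sorting restores the previous choice.
public static String getQueueKeepSortedOrder() {
    return prefs.getString(PREF_QUEUE_KEEP_SORTED_ORDER, "date-desc");
}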
@@ -1,7 +1,10 @@
package main
-import "github.com/spiffe/sri/control_plane/cli"
-import "os"
+import (
+ "os"
+
+ "github.com/spiffe/sri/control_plane/cli"
+)
func main() {
control_plane_cli.Run(os.Args) | 1 | package main
import "github.com/spiffe/sri/control_plane/cli"
import "os"
func main() {
control_plane_cli.Run(os.Args)
}
| 1 | 8,204 | /me can't wait to rename this repo :) | spiffe-spire | go |
@@ -18,10 +18,14 @@ const handleCallback = require('../utils').handleCallback;
const indexInformationDb = require('./db_ops').indexInformation;
const isObject = require('../utils').isObject;
const Long = require('mongodb-core').BSON.Long;
+const makeLazyLoader = require('../utils').makeLazyLoader;
const MongoError = require('mongodb-core').MongoError;
const ReadPreference = require('mongodb-core').ReadPreference;
const toError = require('../utils').toError;
+const collLoader = makeLazyLoader(`${__dirname}/../collection`);
+const dbLoader = makeLazyLoader(`${__dirname}/../db`);
+
/**
* Group function helper
* @ignore | 1 | 'use strict';
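// Editorial note: `makeLazyLoader` (required in the patch above) is defined
// in ../utils, which is not shown here. A plausible shape -- deferring the
// require() until first use so that collection.js, db.js and this module can
// depend on each other without a circular-import cycle -- is sketched below
// as a guess, not as the actual implementation:
//
//   function makeLazyLoader(modulePath) {
//     let mod;
//     return function loader() {
//       if (mod == null) mod = require(modulePath);
//       return mod;
//     };
//   }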
const applyWriteConcern = require('../utils').applyWriteConcern;
const applyRetryableWrites = require('../utils').applyRetryableWrites;
const checkCollectionName = require('../utils').checkCollectionName;
const Code = require('mongodb-core').BSON.Code;
const createIndexDb = require('./db_ops').createIndex;
const decorateCommand = require('../utils').decorateCommand;
const decorateWithCollation = require('../utils').decorateWithCollation;
const decorateWithReadConcern = require('../utils').decorateWithReadConcern;
const ensureIndexDb = require('./db_ops').ensureIndex;
const evaluate = require('./db_ops').evaluate;
const executeCommand = require('./db_ops').executeCommand;
const executeDbAdminCommand = require('./db_ops').executeDbAdminCommand;
const formattedOrderClause = require('../utils').formattedOrderClause;
const resolveReadPreference = require('../utils').resolveReadPreference;
const handleCallback = require('../utils').handleCallback;
const indexInformationDb = require('./db_ops').indexInformation;
const isObject = require('../utils').isObject;
const Long = require('mongodb-core').BSON.Long;
const MongoError = require('mongodb-core').MongoError;
const ReadPreference = require('mongodb-core').ReadPreference;
const toError = require('../utils').toError;
/**
* Group function helper
* @ignore
*/
// var groupFunction = function () {
// var c = db[ns].find(condition);
// var map = new Map();
// var reduce_function = reduce;
//
// while (c.hasNext()) {
// var obj = c.next();
// var key = {};
//
// for (var i = 0, len = keys.length; i < len; ++i) {
// var k = keys[i];
// key[k] = obj[k];
// }
//
// var aggObj = map.get(key);
//
// if (aggObj == null) {
// var newObj = Object.extend({}, key);
// aggObj = Object.extend(newObj, initial);
// map.put(key, aggObj);
// }
//
// reduce_function(obj, aggObj);
// }
//
// return { "result": map.values() };
// }.toString();
const groupFunction =
'function () {\nvar c = db[ns].find(condition);\nvar map = new Map();\nvar reduce_function = reduce;\n\nwhile (c.hasNext()) {\nvar obj = c.next();\nvar key = {};\n\nfor (var i = 0, len = keys.length; i < len; ++i) {\nvar k = keys[i];\nkey[k] = obj[k];\n}\n\nvar aggObj = map.get(key);\n\nif (aggObj == null) {\nvar newObj = Object.extend({}, key);\naggObj = Object.extend(newObj, initial);\nmap.put(key, aggObj);\n}\n\nreduce_function(obj, aggObj);\n}\n\nreturn { "result": map.values() };\n}';
/**
* Perform a bulkWrite operation. See Collection.prototype.bulkWrite for more information.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object[]} operations Bulk operations to perform.
* @param {object} [options] Optional settings. See Collection.prototype.bulkWrite for a list of options.
* @param {Collection~bulkWriteOpCallback} [callback] The command result callback
*/
function bulkWrite(coll, operations, options, callback) {
// Add ignoreUndefined
if (coll.s.options.ignoreUndefined) {
options = Object.assign({}, options);
options.ignoreUndefined = coll.s.options.ignoreUndefined;
}
// Create the bulk operation
const bulk =
options.ordered === true || options.ordered == null
? coll.initializeOrderedBulkOp(options)
: coll.initializeUnorderedBulkOp(options);
// Do we have a collation
let collation = false;
// for each op go through and add to the bulk
try {
for (let i = 0; i < operations.length; i++) {
// Get the operation type
const key = Object.keys(operations[i])[0];
// Check if we have a collation
if (operations[i][key].collation) {
collation = true;
}
// Pass to the raw bulk
bulk.raw(operations[i]);
}
} catch (err) {
return callback(err, null);
}
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
const writeCon = finalOptions.writeConcern ? finalOptions.writeConcern : {};
const capabilities = coll.s.topology.capabilities();
// Did the user pass in a collation, check if our write server supports it
if (collation && capabilities && !capabilities.commandsTakeCollation) {
return callback(new MongoError('server/primary/mongos does not support collation'));
}
// Execute the bulk
bulk.execute(writeCon, finalOptions, (err, r) => {
// We have connection level error
if (!r && err) {
return callback(err, null);
}
r.insertedCount = r.nInserted;
r.matchedCount = r.nMatched;
r.modifiedCount = r.nModified || 0;
r.deletedCount = r.nRemoved;
r.upsertedCount = r.getUpsertedIds().length;
r.upsertedIds = {};
r.insertedIds = {};
// Update the n
r.n = r.insertedCount;
// Inserted documents
const inserted = r.getInsertedIds();
// Map inserted ids
for (let i = 0; i < inserted.length; i++) {
r.insertedIds[inserted[i].index] = inserted[i]._id;
}
// Upserted documents
const upserted = r.getUpsertedIds();
// Map upserted ids
for (let i = 0; i < upserted.length; i++) {
r.upsertedIds[upserted[i].index] = upserted[i]._id;
}
// Return the results
callback(null, r);
});
}
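// For reference, the remapped result delivered to the callback above has
// roughly this shape (values illustrative):
//   {
//     insertedCount: 2, matchedCount: 1, modifiedCount: 1,
//     deletedCount: 0, upsertedCount: 1, n: 2,
//     insertedIds: { 0: ObjectId('...'), 1: ObjectId('...') },
//     upsertedIds: { 2: ObjectId('...') }
//   }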
// Check the update operation to ensure it has atomic operators.
function checkForAtomicOperators(update) {
const keys = Object.keys(update);
// same errors as the server would give for update doc lacking atomic operators
if (keys.length === 0) {
return toError('The update operation document must contain at least one atomic operator.');
}
if (keys[0][0] !== '$') {
return toError('the update operation document must contain atomic operators.');
}
}
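// Examples of what checkForAtomicOperators returns (note that it returns an
// error rather than throwing):
//   checkForAtomicOperators({ $set: { a: 1 } }) // undefined -- valid update
//   checkForAtomicOperators({ a: 1 })           // Error: must contain atomic operators
//   checkForAtomicOperators({})                 // Error: at least one atomic operator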
/**
* Count the number of documents in the collection that match the query.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} query The query for the count.
* @param {object} [options] Optional settings. See Collection.prototype.count for a list of options.
* @param {Collection~countCallback} [callback] The command result callback
*/
function count(coll, query, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = Object.assign({}, options);
options.collectionName = coll.s.name;
options.readPreference = resolveReadPreference(options, {
db: coll.s.db,
collection: coll
});
let cmd;
try {
cmd = buildCountCommand(coll, query, options);
} catch (err) {
return callback(err);
}
executeCommand(coll.s.db, cmd, options, (err, result) => {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, result.n);
});
}
function countDocuments(coll, query, options, callback) {
const skip = options.skip;
const limit = options.limit;
options = Object.assign({}, options);
const pipeline = [{ $match: query }];
// Add skip and limit if defined
if (typeof skip === 'number') {
pipeline.push({ $skip: skip });
}
if (typeof limit === 'number') {
pipeline.push({ $limit: limit });
}
pipeline.push({ $group: { _id: null, n: { $sum: 1 } } });
delete options.limit;
delete options.skip;
coll.aggregate(pipeline, options).toArray((err, docs) => {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, docs.length ? docs[0].n : 0);
});
}
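// e.g. countDocuments(coll, { status: 'A' }, { skip: 5, limit: 10 }, cb)
// builds and runs the aggregation pipeline:
//   [ { $match: { status: 'A' } },
//     { $skip: 5 },
//     { $limit: 10 },
//     { $group: { _id: null, n: { $sum: 1 } } } ]
// and calls back with docs[0].n, or 0 when nothing matched.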
/**
* Build the count command.
*
* @method
 * @param {Collection|Cursor} collectionOrCursor An instance of a collection or cursor.
* @param {object} query The query for the count.
* @param {object} [options] Optional settings. See Collection.prototype.count and Cursor.prototype.count for a list of options.
*/
function buildCountCommand(collectionOrCursor, query, options) {
const skip = options.skip;
const limit = options.limit;
let hint = options.hint;
const maxTimeMS = options.maxTimeMS;
query = query || {};
// Final query
const cmd = {
count: options.collectionName,
query: query
};
// check if collectionOrCursor is a cursor by using cursor.s.numberOfRetries
if (collectionOrCursor.s.numberOfRetries) {
if (collectionOrCursor.s.options.hint) {
hint = collectionOrCursor.s.options.hint;
} else if (collectionOrCursor.s.cmd.hint) {
hint = collectionOrCursor.s.cmd.hint;
}
decorateWithCollation(cmd, collectionOrCursor, collectionOrCursor.s.cmd);
} else {
decorateWithCollation(cmd, collectionOrCursor, options);
}
// Add limit, skip and maxTimeMS if defined
if (typeof skip === 'number') cmd.skip = skip;
if (typeof limit === 'number') cmd.limit = limit;
if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS;
if (hint) cmd.hint = hint;
// Do we have a readConcern specified
decorateWithReadConcern(cmd, collectionOrCursor);
return cmd;
}
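// e.g. for a collection named 'users', buildCountCommand(coll, { status: 'A' },
// { collectionName: 'users', skip: 5, limit: 10 }) returns roughly:
//   { count: 'users', query: { status: 'A' }, skip: 5, limit: 10 }
// plus any hint / maxTimeMS / collation / readConcern decorations that apply.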
/**
* Create an index on the db and collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {(string|object)} fieldOrSpec Defines the index.
* @param {object} [options] Optional settings. See Collection.prototype.createIndex for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function createIndex(coll, fieldOrSpec, options, callback) {
createIndexDb(coll.s.db, coll.s.name, fieldOrSpec, options, callback);
}
/**
* Create multiple indexes in the collection. This method is only supported for
* MongoDB 2.6 or higher. Earlier version of MongoDB will throw a command not supported
* error. Index specifications are defined at http://docs.mongodb.org/manual/reference/command/createIndexes/.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {array} indexSpecs An array of index specifications to be created
* @param {Object} [options] Optional settings. See Collection.prototype.createIndexes for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function createIndexes(coll, indexSpecs, options, callback) {
const capabilities = coll.s.topology.capabilities();
// Ensure we generate the correct name if the parameter is not set
for (let i = 0; i < indexSpecs.length; i++) {
if (indexSpecs[i].name == null) {
const keys = [];
// Did the user pass in a collation, check if our write server supports it
if (indexSpecs[i].collation && capabilities && !capabilities.commandsTakeCollation) {
return callback(new MongoError('server/primary/mongos does not support collation'));
}
for (let name in indexSpecs[i].key) {
keys.push(`${name}_${indexSpecs[i].key[name]}`);
}
// Set the name
indexSpecs[i].name = keys.join('_');
}
}
options = Object.assign({}, options, { readPreference: ReadPreference.PRIMARY });
// Execute the index
executeCommand(
coll.s.db,
{
createIndexes: coll.s.name,
indexes: indexSpecs
},
options,
callback
);
}
function deleteCallback(err, r, callback) {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.deletedCount = r.result.n;
if (callback) callback(null, r);
}
/**
* Delete multiple documents from the collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} filter The Filter used to select the documents to remove
* @param {object} [options] Optional settings. See Collection.prototype.deleteMany for a list of options.
* @param {Collection~deleteWriteOpCallback} [callback] The command result callback
*/
function deleteMany(coll, filter, options, callback) {
options.single = false;
removeDocuments(coll, filter, options, (err, r) => deleteCallback(err, r, callback));
}
/**
* Delete a single document from the collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {object} filter The Filter used to select the document to remove
* @param {object} [options] Optional settings. See Collection.prototype.deleteOne for a list of options.
* @param {Collection~deleteWriteOpCallback} [callback] The command result callback
*/
function deleteOne(coll, filter, options, callback) {
options.single = true;
removeDocuments(coll, filter, options, (err, r) => deleteCallback(err, r, callback));
}
/**
* Return a list of distinct values for the given key across a collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {string} key Field of the document to find distinct values for.
* @param {object} query The query for filtering the set of documents to which we apply the distinct filter.
* @param {object} [options] Optional settings. See Collection.prototype.distinct for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function distinct(coll, key, query, options, callback) {
// maxTimeMS option
const maxTimeMS = options.maxTimeMS;
// Distinct command
const cmd = {
distinct: coll.s.name,
key: key,
query: query
};
options = Object.assign({}, options);
// Ensure we have the right read preference inheritance
options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll });
// Add maxTimeMS if defined
if (typeof maxTimeMS === 'number') cmd.maxTimeMS = maxTimeMS;
// Do we have a readConcern specified
decorateWithReadConcern(cmd, coll, options);
// Have we specified collation
try {
decorateWithCollation(cmd, coll, options);
} catch (err) {
return callback(err, null);
}
// Execute the command
executeCommand(coll.s.db, cmd, options, (err, result) => {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, result.values);
});
}
/**
* Drop an index from this collection.
*
* @method
* @param {Collection} a Collection instance.
* @param {string} indexName Name of the index to drop.
* @param {object} [options] Optional settings. See Collection.prototype.dropIndex for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function dropIndex(coll, indexName, options, callback) {
// Delete index command
const cmd = { dropIndexes: coll.s.name, index: indexName };
// Decorate command with writeConcern if supported
applyWriteConcern(cmd, { db: coll.s.db, collection: coll }, options);
// Execute command
executeCommand(coll.s.db, cmd, options, (err, result) => {
if (typeof callback !== 'function') return;
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result);
});
}
/**
* Drop all indexes from this collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.dropIndexes for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function dropIndexes(coll, options, callback) {
dropIndex(coll, '*', options, err => {
if (err) return handleCallback(callback, err, false);
handleCallback(callback, null, true);
});
}
/**
* Ensure that an index exists. If the index does not exist, this function creates it.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {(string|object)} fieldOrSpec Defines the index.
* @param {object} [options] Optional settings. See Collection.prototype.ensureIndex for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function ensureIndex(coll, fieldOrSpec, options, callback) {
ensureIndexDb(coll.s.db, coll.s.name, fieldOrSpec, options, callback);
}
/**
* Find and update a document.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} query Query object to locate the object to modify.
* @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate.
* @param {object} doc The fields/vals to be updated.
* @param {object} [options] Optional settings. See Collection.prototype.findAndModify for a list of options.
* @param {Collection~findAndModifyCallback} [callback] The command result callback
* @deprecated use findOneAndUpdate, findOneAndReplace or findOneAndDelete instead
*/
function findAndModify(coll, query, sort, doc, options, callback) {
// Create findAndModify command object
const queryObject = {
findAndModify: coll.s.name,
query: query
};
sort = formattedOrderClause(sort);
if (sort) {
queryObject.sort = sort;
}
queryObject.new = options.new ? true : false;
queryObject.remove = options.remove ? true : false;
queryObject.upsert = options.upsert ? true : false;
const projection = options.projection || options.fields;
if (projection) {
queryObject.fields = projection;
}
if (options.arrayFilters) {
queryObject.arrayFilters = options.arrayFilters;
delete options.arrayFilters;
}
if (doc && !options.remove) {
queryObject.update = doc;
}
if (options.maxTimeMS) queryObject.maxTimeMS = options.maxTimeMS;
// Use the serializeFunctions override on this call if provided, otherwise
// fall back to the collection-level default
options.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions;
// No check on the documents
options.checkKeys = false;
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
// Decorate the findAndModify command with the write Concern
if (finalOptions.writeConcern) {
queryObject.writeConcern = finalOptions.writeConcern;
}
// Have we specified bypassDocumentValidation
if (finalOptions.bypassDocumentValidation === true) {
queryObject.bypassDocumentValidation = finalOptions.bypassDocumentValidation;
}
finalOptions.readPreference = ReadPreference.primary;
// Have we specified collation
try {
decorateWithCollation(queryObject, coll, finalOptions);
} catch (err) {
return callback(err, null);
}
// Execute the command
executeCommand(coll.s.db, queryObject, finalOptions, (err, result) => {
if (err) return handleCallback(callback, err, null);
return handleCallback(callback, null, result);
});
}
/**
* Find and remove a document.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} query Query object to locate the object to modify.
* @param {array} sort If multiple docs match, choose the first one in the specified sort order as the object to manipulate.
* @param {object} [options] Optional settings. See Collection.prototype.findAndRemove for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
* @deprecated use findOneAndDelete instead
*/
function findAndRemove(coll, query, sort, options, callback) {
// Add the remove option
options.remove = true;
// Execute the callback
findAndModify(coll, query, sort, null, options, callback);
}
/**
* Fetch the first document that matches the query.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} query Query for find Operation
* @param {object} [options] Optional settings. See Collection.prototype.findOne for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function findOne(coll, query, options, callback) {
const cursor = coll
.find(query, options)
.limit(-1)
.batchSize(1);
// Return the item
cursor.next((err, item) => {
if (err != null) return handleCallback(callback, toError(err), null);
handleCallback(callback, null, item);
});
}
/**
* Find a document and delete it in one atomic operation. This requires a write lock for the duration of the operation.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} filter Document selection filter.
* @param {object} [options] Optional settings. See Collection.prototype.findOneAndDelete for a list of options.
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
*/
function findOneAndDelete(coll, filter, options, callback) {
// Final options
const finalOptions = Object.assign({}, options);
finalOptions.fields = options.projection;
finalOptions.remove = true;
// Execute find and Modify
findAndModify(coll, filter, options.sort, null, finalOptions, callback);
}
/**
* Find a document and replace it in one atomic operation. This requires a write lock for the duration of the operation.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} filter Document selection filter.
* @param {object} replacement Document replacing the matching document.
* @param {object} [options] Optional settings. See Collection.prototype.findOneAndReplace for a list of options.
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
*/
function findOneAndReplace(coll, filter, replacement, options, callback) {
// Final options
const finalOptions = Object.assign({}, options);
finalOptions.fields = options.projection;
finalOptions.update = true;
finalOptions.new = options.returnOriginal !== void 0 ? !options.returnOriginal : false;
finalOptions.upsert = options.upsert !== void 0 ? !!options.upsert : false;
// Execute findAndModify
findAndModify(coll, filter, options.sort, replacement, finalOptions, callback);
}
/**
* Find a document and update it in one atomic operation. This requires a write lock for the duration of the operation.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} filter Document selection filter.
* @param {object} update Update operations to be performed on the document
* @param {object} [options] Optional settings. See Collection.prototype.findOneAndUpdate for a list of options.
* @param {Collection~findAndModifyCallback} [callback] The collection result callback
*/
function findOneAndUpdate(coll, filter, update, options, callback) {
// Final options
const finalOptions = Object.assign({}, options);
finalOptions.fields = options.projection;
finalOptions.update = true;
finalOptions.new = typeof options.returnOriginal === 'boolean' ? !options.returnOriginal : false;
finalOptions.upsert = typeof options.upsert === 'boolean' ? options.upsert : false;
// Execute findAndModify
findAndModify(coll, filter, options.sort, update, finalOptions, callback);
}
/**
* Execute a geo search using a geo haystack index on a collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {number} x Point to search on the x axis, ensure the indexes are ordered in the same order.
* @param {number} y Point to search on the y axis, ensure the indexes are ordered in the same order.
* @param {object} [options] Optional settings. See Collection.prototype.geoHaystackSearch for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function geoHaystackSearch(coll, x, y, options, callback) {
// Build command object
let commandObject = {
geoSearch: coll.s.name,
near: [x, y]
};
// Remove read preference from hash if it exists
commandObject = decorateCommand(commandObject, options, ['readPreference', 'session']);
options = Object.assign({}, options);
// Ensure we have the right read preference inheritance
options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll });
// Do we have a readConcern specified
decorateWithReadConcern(commandObject, coll, options);
// Execute the command
executeCommand(coll.s.db, commandObject, options, (err, res) => {
if (err) return handleCallback(callback, err);
if (res.err || res.errmsg) handleCallback(callback, toError(res));
// should we only be returning res.results here? Not sure if the user
// should see the other return information
handleCallback(callback, null, res);
});
}
/**
* Run a group command across a collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {(object|array|function|code)} keys An object, array or function expressing the keys to group by.
* @param {object} condition An optional condition that must be true for a row to be considered.
* @param {object} initial Initial value of the aggregation counter object.
* @param {(function|Code)} reduce The reduce function aggregates (reduces) the objects iterated
* @param {(function|Code)} finalize An optional function to be run on each item in the result set just before the item is returned.
* @param {boolean} command Specify if you wish to run using the internal group command or using eval, default is true.
* @param {object} [options] Optional settings. See Collection.prototype.group for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
* @deprecated MongoDB 3.6 or higher will no longer support the group command. We recommend rewriting using the aggregation framework.
*/
function group(coll, keys, condition, initial, reduce, finalize, command, options, callback) {
// Execute using the command
if (command) {
const reduceFunction = reduce && reduce._bsontype === 'Code' ? reduce : new Code(reduce);
const selector = {
group: {
ns: coll.s.name,
$reduce: reduceFunction,
cond: condition,
initial: initial,
out: 'inline'
}
};
// if finalize is defined
if (finalize != null) selector.group['finalize'] = finalize;
// Set up group selector
if ('function' === typeof keys || (keys && keys._bsontype === 'Code')) {
selector.group.$keyf = keys && keys._bsontype === 'Code' ? keys : new Code(keys);
} else {
const hash = {};
keys.forEach(key => {
hash[key] = 1;
});
selector.group.key = hash;
}
options = Object.assign({}, options);
// Ensure we have the right read preference inheritance
options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll });
// Do we have a readConcern specified
decorateWithReadConcern(selector, coll, options);
// Have we specified collation
try {
decorateWithCollation(selector, coll, options);
} catch (err) {
return callback(err, null);
}
// Execute command
executeCommand(coll.s.db, selector, options, (err, result) => {
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result.retval);
});
} else {
// Create execution scope
const scope = reduce != null && reduce._bsontype === 'Code' ? reduce.scope : {};
scope.ns = coll.s.name;
scope.keys = keys;
scope.condition = condition;
scope.initial = initial;
// Pass in the function text to execute within mongodb.
const groupfn = groupFunction.replace(/ reduce;/, reduce.toString() + ';');
evaluate(coll.s.db, new Code(groupfn, scope), null, options, (err, results) => {
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, results.result || results);
});
}
}
/**
* Retrieve all the indexes on the collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.indexes for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function indexes(coll, options, callback) {
options = Object.assign({}, { full: true }, options);
indexInformationDb(coll.s.db, coll.s.name, options, callback);
}
/**
* Check if one or more indexes exist on the collection. This fails on the first index that doesn't exist.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {(string|array)} indexes One or more index names to check.
* @param {Object} [options] Optional settings. See Collection.prototype.indexExists for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function indexExists(coll, indexes, options, callback) {
indexInformation(coll, options, (err, indexInformation) => {
// If we have an error return
if (err != null) return handleCallback(callback, err, null);
// Let's check for the index names
if (!Array.isArray(indexes))
return handleCallback(callback, null, indexInformation[indexes] != null);
// Check in list of indexes
for (let i = 0; i < indexes.length; i++) {
if (indexInformation[indexes[i]] == null) {
return handleCallback(callback, null, false);
}
}
// All keys found return true
return handleCallback(callback, null, true);
});
}
/**
* Retrieve this collection's index info.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} [options] Optional settings. See Collection.prototype.indexInformation for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function indexInformation(coll, options, callback) {
indexInformationDb(coll.s.db, coll.s.name, options, callback);
}
function insertDocuments(coll, docs, options, callback) {
if (typeof options === 'function') (callback = options), (options = {});
options = options || {};
// Ensure we are operating on an array of docs
docs = Array.isArray(docs) ? docs : [docs];
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
// If keep going set unordered
if (finalOptions.keepGoing === true) finalOptions.ordered = false;
finalOptions.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions;
docs = prepareDocs(coll, docs, options);
// Execute the inserts
coll.s.topology.insert(coll.s.namespace, docs, finalOptions, (err, result) => {
if (callback == null) return;
if (err) return handleCallback(callback, err);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Add docs to the list
result.ops = docs;
// Return the results
handleCallback(callback, null, result);
});
}
/**
* Insert a single document into the collection. See Collection.prototype.insertOne for more information.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} doc Document to insert.
* @param {object} [options] Optional settings. See Collection.prototype.insertOne for a list of options.
* @param {Collection~insertOneWriteOpCallback} [callback] The command result callback
*/
function insertOne(coll, doc, options, callback) {
if (Array.isArray(doc)) {
return callback(
MongoError.create({ message: 'doc parameter must be an object', driver: true })
);
}
insertDocuments(coll, [doc], options, (err, r) => {
if (callback == null) return;
if (err && callback) return callback(err);
// Workaround for pre 2.6 servers
if (r == null) return callback(null, { result: { ok: 1 } });
// Add values to top level to ensure crud spec compatibility
r.insertedCount = r.result.n;
r.insertedId = doc._id;
if (callback) callback(null, r);
});
}
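// Illustrative usage only (not part of the original module): callers see the
// CRUD-spec fields added above, e.g.
//   insertOne(coll, { name: 'ada' }, {}, (err, r) => {
//     if (err) throw err;
//     console.log(r.insertedCount, r.insertedId); // 1 and the generated _id
//   });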
/**
* Determine whether the collection is a capped collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.isCapped for a list of options.
* @param {Collection~resultCallback} [callback] The results callback
*/
function isCapped(coll, options, callback) {
optionsOp(coll, options, (err, document) => {
if (err) return handleCallback(callback, err);
handleCallback(callback, null, !!(document && document.capped));
});
}
/**
 * Run Map Reduce across a collection. Be aware that the inline option for out will return an array of results, not a collection.
 *
 * @method
 * @param {Collection} coll A Collection instance.
* @param {(function|string)} map The mapping function.
* @param {(function|string)} reduce The reduce function.
* @param {object} [options] Optional settings. See Collection.prototype.mapReduce for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function mapReduce(coll, map, reduce, options, callback) {
const mapCommandHash = {
mapreduce: coll.s.name,
map: map,
reduce: reduce
};
// Exclusion list
const exclusionList = ['readPreference', 'session', 'bypassDocumentValidation'];
// Add any other options passed in
for (let n in options) {
if ('scope' === n) {
mapCommandHash[n] = processScope(options[n]);
} else {
// Only include if not in exclusion list
if (exclusionList.indexOf(n) === -1) {
mapCommandHash[n] = options[n];
}
}
}
options = Object.assign({}, options);
// Ensure we have the right read preference inheritance
options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll });
// If we have a non-primary read preference and the output is not inline, force primary
if (
options.readPreference !== false &&
options.readPreference !== 'primary' &&
options['out'] &&
(options['out'].inline !== 1 && options['out'] !== 'inline')
) {
// Force readPreference to primary
options.readPreference = 'primary';
// Decorate command with writeConcern if supported
applyWriteConcern(mapCommandHash, { db: coll.s.db, collection: coll }, options);
} else {
decorateWithReadConcern(mapCommandHash, coll, options);
}
// Is bypassDocumentValidation specified
if (options.bypassDocumentValidation === true) {
mapCommandHash.bypassDocumentValidation = options.bypassDocumentValidation;
}
// Have we specified collation
try {
decorateWithCollation(mapCommandHash, coll, options);
} catch (err) {
return callback(err, null);
}
// Execute command
executeCommand(coll.s.db, mapCommandHash, options, (err, result) => {
if (err) return handleCallback(callback, err);
// Check if we have an error
if (1 !== result.ok || result.err || result.errmsg) {
return handleCallback(callback, toError(result));
}
// Create statistics value
const stats = {};
if (result.timeMillis) stats['processtime'] = result.timeMillis;
if (result.counts) stats['counts'] = result.counts;
if (result.timing) stats['timing'] = result.timing;
// invoked with inline?
if (result.results) {
// If we wish for no verbosity
if (options['verbose'] == null || !options['verbose']) {
return handleCallback(callback, null, result.results);
}
return handleCallback(callback, null, { results: result.results, stats: stats });
}
// The returned collection
let collection = null;
// If we have an object it's a different db
if (result.result != null && typeof result.result === 'object') {
const doc = result.result;
// Return a collection from another db
const Db = require('../db');
collection = new Db(doc.db, coll.s.db.s.topology, coll.s.db.s.options).collection(
doc.collection
);
} else {
// Create a collection object that wraps the result collection
collection = coll.s.db.collection(result.result);
}
// If we wish for no verbosity
if (options['verbose'] == null || !options['verbose']) {
return handleCallback(callback, err, collection);
}
// Return stats as third set of values
handleCallback(callback, err, { collection: collection, stats: stats });
});
}
/**
* Return the options of the collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.options for a list of options.
* @param {Collection~resultCallback} [callback] The results callback
*/
function optionsOp(coll, opts, callback) {
coll.s.db.listCollections({ name: coll.s.name }, opts).toArray((err, collections) => {
if (err) return handleCallback(callback, err);
if (collections.length === 0) {
return handleCallback(
callback,
MongoError.create({ message: `collection ${coll.s.namespace} not found`, driver: true })
);
}
handleCallback(callback, err, collections[0].options || null);
});
}
/**
* Return N parallel cursors for a collection to allow parallel reading of the entire collection. There are
* no ordering guarantees for returned results.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} [options] Optional settings. See Collection.prototype.parallelCollectionScan for a list of options.
* @param {Collection~parallelCollectionScanCallback} [callback] The command result callback
*/
function parallelCollectionScan(coll, options, callback) {
// Create command object
const commandObject = {
parallelCollectionScan: coll.s.name,
numCursors: options.numCursors
};
// Do we have a readConcern specified
decorateWithReadConcern(commandObject, coll, options);
// Store the raw value
const raw = options.raw;
delete options['raw'];
// Execute the command
executeCommand(coll.s.db, commandObject, options, (err, result) => {
if (err) return handleCallback(callback, err, null);
if (result == null)
return handleCallback(
callback,
new Error('no result returned for parallelCollectionScan'),
null
);
options = Object.assign({ explicitlyIgnoreSession: true }, options);
const cursors = [];
// Add the raw back to the option
if (raw) options.raw = raw;
// Create command cursors for each item
for (let i = 0; i < result.cursors.length; i++) {
const rawId = result.cursors[i].cursor.id;
// Convert cursorId to Long if needed
const cursorId = typeof rawId === 'number' ? Long.fromNumber(rawId) : rawId;
// Add a command cursor
cursors.push(coll.s.topology.cursor(coll.s.namespace, cursorId, options));
}
handleCallback(callback, null, cursors);
});
}
// Modifies documents before they are inserted or updated
function prepareDocs(coll, docs, options) {
const forceServerObjectId =
typeof options.forceServerObjectId === 'boolean'
? options.forceServerObjectId
: coll.s.db.options.forceServerObjectId;
// no need to modify the docs if server sets the ObjectId
if (forceServerObjectId === true) {
return docs;
}
return docs.map(doc => {
if (forceServerObjectId !== true && doc._id == null) {
doc._id = coll.s.pkFactory.createPk();
}
return doc;
});
}
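// Illustrative example only (not part of the original module): unless
// forceServerObjectId is set, a doc lacking an _id gets one from the
// collection's pkFactory, so prepareDocs(coll, [{ a: 1 }], {}) returns
// [{ a: 1, _id: <pk from coll.s.pkFactory.createPk()> }].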
/**
* Functions that are passed as scope args must
* be converted to Code instances.
* @ignore
*/
function processScope(scope) {
if (!isObject(scope) || scope._bsontype === 'ObjectID') {
return scope;
}
const keys = Object.keys(scope);
let key;
const new_scope = {};
for (let i = keys.length - 1; i >= 0; i--) {
key = keys[i];
if ('function' === typeof scope[key]) {
new_scope[key] = new Code(String(scope[key]));
} else {
new_scope[key] = processScope(scope[key]);
}
}
return new_scope;
}
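// Illustrative example only (not part of the original module): function
// values anywhere in the scope are wrapped in Code while other values are
// recursed into unchanged, e.g.
//   processScope({ inc: function(x) { return x + 1; }, opts: { n: 1 } })
//   // => { inc: new Code('function(x) { return x + 1; }'), opts: { n: 1 } }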
/**
* Reindex all indexes on the collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {Object} [options] Optional settings. See Collection.prototype.reIndex for a list of options.
* @param {Collection~resultCallback} [callback] The command result callback
*/
function reIndex(coll, options, callback) {
// Reindex
const cmd = { reIndex: coll.s.name };
// Execute the command
executeCommand(coll.s.db, cmd, options, (err, result) => {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result.ok ? true : false);
});
}
function removeDocuments(coll, selector, options, callback) {
if (typeof options === 'function') {
(callback = options), (options = {});
} else if (typeof selector === 'function') {
callback = selector;
options = {};
selector = {};
}
// Create an empty options object if the provided one is null
options = options || {};
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
// If selector is null set empty
if (selector == null) selector = {};
// Build the op
const op = { q: selector, limit: 0 };
if (options.single) {
op.limit = 1;
} else if (finalOptions.retryWrites) {
finalOptions.retryWrites = false;
}
// Have we specified collation
try {
decorateWithCollation(finalOptions, coll, options);
} catch (err) {
return callback(err, null);
}
// Execute the remove
coll.s.topology.remove(coll.s.namespace, [op], finalOptions, (err, result) => {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Return the results
handleCallback(callback, null, result);
});
}
/**
* Rename the collection.
*
* @method
 * @param {Collection} coll A Collection instance.
 * @param {string} newName New name of the collection.
* @param {object} [options] Optional settings. See Collection.prototype.rename for a list of options.
* @param {Collection~collectionResultCallback} [callback] The results callback
*/
function rename(coll, newName, options, callback) {
const Collection = require('../collection');
// Check the collection name
checkCollectionName(newName);
// Build the command
const renameCollection = `${coll.s.dbName}.${coll.s.name}`;
const toCollection = `${coll.s.dbName}.${newName}`;
const dropTarget = typeof options.dropTarget === 'boolean' ? options.dropTarget : false;
const cmd = { renameCollection: renameCollection, to: toCollection, dropTarget: dropTarget };
// Decorate command with writeConcern if supported
applyWriteConcern(cmd, { db: coll.s.db, collection: coll }, options);
// Execute against admin
executeDbAdminCommand(coll.s.db.admin().s.db, cmd, options, (err, doc) => {
if (err) return handleCallback(callback, err, null);
// We have an error
if (doc.errmsg) return handleCallback(callback, toError(doc), null);
try {
return handleCallback(
callback,
null,
new Collection(
coll.s.db,
coll.s.topology,
coll.s.dbName,
newName,
coll.s.pkFactory,
coll.s.options
)
);
} catch (err) {
return handleCallback(callback, toError(err), null);
}
});
}
/**
* Replace a document in the collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} filter The Filter used to select the document to update
* @param {object} doc The Document that replaces the matching document
* @param {object} [options] Optional settings. See Collection.prototype.replaceOne for a list of options.
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
*/
function replaceOne(coll, filter, doc, options, callback) {
// Set single document update
options.multi = false;
// Execute update
updateDocuments(coll, filter, doc, options, (err, r) => {
if (callback == null) return;
if (err && callback) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n;
r.upsertedId =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0
? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id`
: null;
r.upsertedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0;
r.matchedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n;
r.ops = [doc];
if (callback) callback(null, r);
});
}
/**
* Save a document.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} doc Document to save
* @param {object} [options] Optional settings. See Collection.prototype.save for a list of options.
* @param {Collection~writeOpCallback} [callback] The command result callback
* @deprecated use insertOne, insertMany, updateOne or updateMany
*/
function save(coll, doc, options, callback) {
// Get the write concern options
const finalOptions = applyWriteConcern(
Object.assign({}, options),
{ db: coll.s.db, collection: coll },
options
);
// Establish if we need to perform an insert or update
if (doc._id != null) {
finalOptions.upsert = true;
return updateDocuments(coll, { _id: doc._id }, doc, finalOptions, callback);
}
// Insert the document
insertDocuments(coll, [doc], finalOptions, (err, result) => {
if (callback == null) return;
if (doc == null) return handleCallback(callback, null, null);
if (err) return handleCallback(callback, err, null);
handleCallback(callback, null, result);
});
}
/**
* Get all the collection statistics.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} [options] Optional settings. See Collection.prototype.stats for a list of options.
* @param {Collection~resultCallback} [callback] The collection result callback
*/
function stats(coll, options, callback) {
// Build command object
const commandObject = {
collStats: coll.s.name
};
// Check if we have the scale value
if (options['scale'] != null) commandObject['scale'] = options['scale'];
options = Object.assign({}, options);
// Ensure we have the right read preference inheritance
options.readPreference = resolveReadPreference(options, { db: coll.s.db, collection: coll });
// Execute the command
executeCommand(coll.s.db, commandObject, options, callback);
}
function updateCallback(err, r, callback) {
if (callback == null) return;
if (err) return callback(err);
if (r == null) return callback(null, { result: { ok: 1 } });
r.modifiedCount = r.result.nModified != null ? r.result.nModified : r.result.n;
r.upsertedId =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0
? r.result.upserted[0] // FIXME(major): should be `r.result.upserted[0]._id`
: null;
r.upsertedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length ? r.result.upserted.length : 0;
r.matchedCount =
Array.isArray(r.result.upserted) && r.result.upserted.length > 0 ? 0 : r.result.n;
callback(null, r);
}
function updateDocuments(coll, selector, document, options, callback) {
if ('function' === typeof options) (callback = options), (options = null);
if (options == null) options = {};
if (!('function' === typeof callback)) callback = null;
// If we are not providing a selector or document throw
if (selector == null || typeof selector !== 'object')
return callback(toError('selector must be a valid JavaScript object'));
if (document == null || typeof document !== 'object')
return callback(toError('document must be a valid JavaScript object'));
// Final options for retryable writes and write concern
let finalOptions = Object.assign({}, options);
finalOptions = applyRetryableWrites(finalOptions, coll.s.db);
finalOptions = applyWriteConcern(finalOptions, { db: coll.s.db, collection: coll }, options);
// Resolve serializeFunctions: use the per-call override if provided,
// otherwise fall back to the collection-level default
finalOptions.serializeFunctions = options.serializeFunctions || coll.s.serializeFunctions;
// Execute the operation
const op = { q: selector, u: document };
op.upsert = options.upsert !== void 0 ? !!options.upsert : false;
op.multi = options.multi !== void 0 ? !!options.multi : false;
if (finalOptions.arrayFilters) {
op.arrayFilters = finalOptions.arrayFilters;
delete finalOptions.arrayFilters;
}
if (finalOptions.retryWrites && op.multi) {
finalOptions.retryWrites = false;
}
// Have we specified collation
try {
decorateWithCollation(finalOptions, coll, options);
} catch (err) {
return callback(err, null);
}
// Update options
coll.s.topology.update(coll.s.namespace, [op], finalOptions, (err, result) => {
if (callback == null) return;
if (err) return handleCallback(callback, err, null);
if (result == null) return handleCallback(callback, null, null);
if (result.result.code) return handleCallback(callback, toError(result.result));
if (result.result.writeErrors)
return handleCallback(callback, toError(result.result.writeErrors[0]));
// Return the results
handleCallback(callback, null, result);
});
}
/**
* Update multiple documents in the collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} filter The Filter used to select the documents to update
* @param {object} update The update operations to be applied to the document
* @param {object} [options] Optional settings. See Collection.prototype.updateMany for a list of options.
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
*/
function updateMany(coll, filter, update, options, callback) {
// Set multi document update
options.multi = true;
// Execute update
updateDocuments(coll, filter, update, options, (err, r) => updateCallback(err, r, callback));
}
/**
* Update a single document in the collection.
*
* @method
 * @param {Collection} coll A Collection instance.
* @param {object} filter The Filter used to select the document to update
* @param {object} update The update operations to be applied to the document
* @param {object} [options] Optional settings. See Collection.prototype.updateOne for a list of options.
* @param {Collection~updateWriteOpCallback} [callback] The command result callback
*/
function updateOne(coll, filter, update, options, callback) {
// Set single document update
options.multi = false;
// Execute update
updateDocuments(coll, filter, update, options, (err, r) => updateCallback(err, r, callback));
}
module.exports = {
bulkWrite,
checkForAtomicOperators,
count,
countDocuments,
buildCountCommand,
createIndex,
createIndexes,
deleteMany,
deleteOne,
distinct,
dropIndex,
dropIndexes,
ensureIndex,
findAndModify,
findAndRemove,
findOne,
findOneAndDelete,
findOneAndReplace,
findOneAndUpdate,
geoHaystackSearch,
group,
indexes,
indexExists,
indexInformation,
insertOne,
isCapped,
mapReduce,
optionsOp,
parallelCollectionScan,
prepareDocs,
reIndex,
removeDocuments,
rename,
replaceOne,
save,
stats,
updateDocuments,
updateMany,
updateOne
};
| 1 | 15,111 | nit: maybe call these `loadCollection()/loadDb()` | mongodb-node-mongodb-native | js |
@@ -562,6 +562,19 @@ class Task(object):
Default behavior is to send an None value"""
pass
+ def on_external_failure(self):
+ """
+ Override for custom error handling for failed external tasks.
+
+ This method gets called if a task is an :py:class:`luigi.task.ExternalTask`, or if a
+ :py:class:`luigi.task.Task`'s :py:meth:`run` method is not implemented.
+
+ The returned value is json encoded and sent to the scheduler as the `expl` argument.
+
+ Default behavior is to send a None value
+ """
+ pass
+
@contextmanager
def no_unpicklable_properties(self):
""" | 1 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
The abstract :py:class:`Task` class.
It is a central concept of Luigi and represents the state of the workflow.
See :doc:`/tasks` for an overview.
"""
try:
from itertools import imap as map
except ImportError:
pass
from contextlib import contextmanager
import logging
import traceback
import warnings
import json
import hashlib
import re
from luigi import six
from luigi import parameter
from luigi.task_register import Register
Parameter = parameter.Parameter
logger = logging.getLogger('luigi-interface')
TASK_ID_INCLUDE_PARAMS = 3
TASK_ID_TRUNCATE_PARAMS = 16
TASK_ID_TRUNCATE_HASH = 10
TASK_ID_INVALID_CHAR_REGEX = re.compile(r'[^A-Za-z0-9_]')
def namespace(namespace=None):
"""
Call to set namespace of tasks declared after the call.
If called without arguments or with ``None`` as the namespace, the namespace
is reset, which is recommended to do at the end of any file where the
namespace is set to avoid unintentionally setting namespace on tasks outside
of the scope of the current file.
The namespace of a Task can also be changed by specifying the property
``task_namespace``. This solution has the advantage that the namespace
doesn't have to be restored.
.. code-block:: python
class Task2(luigi.Task):
task_namespace = 'namespace2'
"""
Register._default_namespace = namespace
def task_id_str(task_family, params):
"""
Returns a canonical string used to identify a particular task
:param task_family: The task family (class name) of the task
:param params: a dict mapping parameter names to their serialized values
:return: A unique, shortened identifier corresponding to the family and params
"""
# task_id is a concatenation of the task family, the (truncated) values of the
# first 3 parameters sorted by parameter name, and an md5 hash of the
# parameters as canonicalised JSON.
param_str = json.dumps(params, separators=(',', ':'), sort_keys=True)
param_hash = hashlib.md5(param_str.encode('utf-8')).hexdigest()
param_summary = '_'.join(p[:TASK_ID_TRUNCATE_PARAMS]
for p in (params[p] for p in sorted(params)[:TASK_ID_INCLUDE_PARAMS]))
param_summary = TASK_ID_INVALID_CHAR_REGEX.sub('_', param_summary)
return '{}_{}_{}'.format(task_family, param_summary, param_hash[:TASK_ID_TRUNCATE_HASH])
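# Illustrative example only (hash value elided; not part of the original
# source): for task_id_str('MyTask', {'a': '1', 'b': '2', 'c': '3', 'd': '4'})
# the summary uses the first TASK_ID_INCLUDE_PARAMS (3) values in name order,
# giving something like 'MyTask_1_2_3_<10-char md5 prefix>'.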
class BulkCompleteNotImplementedError(NotImplementedError):
"""This is here to trick pylint.
pylint thinks anything raising NotImplementedError needs to be implemented
in any subclass. bulk_complete isn't like that. This tricks pylint into
thinking that the default implementation is a valid implementation and not
an abstract method."""
pass
@six.add_metaclass(Register)
class Task(object):
"""
This is the base class of all Luigi Tasks, the base unit of work in Luigi.
A Luigi Task describes a unit of work.
The key methods of a Task, which must be implemented in a subclass are:
* :py:meth:`run` - the computation done by this task.
* :py:meth:`requires` - the list of Tasks that this Task depends on.
* :py:meth:`output` - the output :py:class:`Target` that this Task creates.
Each :py:class:`~luigi.Parameter` of the Task should be declared as members:
.. code:: python
class MyTask(luigi.Task):
count = luigi.IntParameter()
second_param = luigi.Parameter()
In addition to any declared properties and methods, there are a few
non-declared properties, which are created by the :py:class:`Register`
metaclass:
``Task.task_namespace``
optional string which is prepended to the task name for the sake of
scheduling. If it isn't overridden in a Task, whatever was last declared
using `luigi.namespace` will be used.
"""
_event_callbacks = {}
#: Priority of the task: the scheduler should favor available
#: tasks with higher priority values first.
#: See :ref:`Task.priority`
priority = 0
disabled = False
#: Resources used by the task. Should be formatted like {"scp": 1} to indicate that the
#: task requires 1 unit of the scp resource.
resources = {}
#: Number of seconds after which to time out the run function.
#: No timeout if set to 0.
#: Defaults to 0 or worker-timeout value in config file
#: Only works when using multiple workers.
worker_timeout = None
#: Maximum number of tasks to run together as a batch. Infinite by default
max_batch_size = float('inf')
@property
def batchable(self):
"""
True if this instance can be run as part of a batch. By default, True
if it has any batched parameters
"""
return bool(self.batch_param_names())
@property
def retry_count(self):
"""
Override this positive integer to have different ``retry_count`` at task level
Check :ref:`scheduler-config`
"""
return None
@property
def disable_hard_timeout(self):
"""
Override this positive integer to have different ``disable_hard_timeout`` at task level.
Check :ref:`scheduler-config`
"""
return None
@property
def disable_window_seconds(self):
"""
Override this positive integer to have different ``disable_window_seconds`` at task level.
Check :ref:`scheduler-config`
"""
return None
@property
def owner_email(self):
'''
Override this to send out additional error emails to task owner, in addition to the one
defined in the global configuration. This should return a string or a list of strings. e.g.
'[email protected]' or ['[email protected]', '[email protected]']
'''
return None
def _owner_list(self):
"""
Turns the owner_email property into a list. This should not be overridden.
"""
owner_email = self.owner_email
if owner_email is None:
return []
elif isinstance(owner_email, six.string_types):
return owner_email.split(',')
else:
return owner_email
@property
def use_cmdline_section(self):
''' Property used by core config such as `--workers` etc.
These will be exposed without the class as prefix.'''
return True
@classmethod
def event_handler(cls, event):
"""
Decorator for adding event handlers.
"""
def wrapped(callback):
cls._event_callbacks.setdefault(cls, {}).setdefault(event, set()).add(callback)
return callback
return wrapped
def trigger_event(self, event, *args, **kwargs):
"""
Trigger that calls all of the specified events associated with this class.
"""
for event_class, event_callbacks in six.iteritems(self._event_callbacks):
if not isinstance(self, event_class):
continue
for callback in event_callbacks.get(event, []):
try:
# callbacks are protected
callback(*args, **kwargs)
except KeyboardInterrupt:
return
except BaseException:
logger.exception("Error in event callback for %r", event)
@property
def task_module(self):
''' Returns what Python module to import to get access to this class. '''
# TODO(erikbern): we should think about a language-agnostic mechanism
return self.__class__.__module__
@property
def task_family(self):
"""
Convenience method since a property on the metaclass isn't directly accessible through the class instances.
"""
return self.__class__.task_family
@classmethod
def get_params(cls):
"""
Returns all of the Parameters for this Task.
"""
# We want to do this here and not at class instantiation, or else there is no room to extend classes dynamically
params = []
for param_name in dir(cls):
param_obj = getattr(cls, param_name)
if not isinstance(param_obj, Parameter):
continue
params.append((param_name, param_obj))
# The order the parameters are created matters. See Parameter class
params.sort(key=lambda t: t[1]._counter)
return params
@classmethod
def batch_param_names(cls):
return [name for name, p in cls.get_params() if p._is_batchable()]
@classmethod
def get_param_names(cls, include_significant=False):
return [name for name, p in cls.get_params() if include_significant or p.significant]
@classmethod
def get_param_values(cls, params, args, kwargs):
"""
Get the values of the parameters from the args and kwargs.
:param params: list of (param_name, Parameter).
:param args: positional arguments
:param kwargs: keyword arguments.
:returns: list of `(name, value)` tuples, one for each parameter.
"""
result = {}
params_dict = dict(params)
task_name = cls.task_family
# In case any exceptions are thrown, create a helpful description of how the Task was invoked
# TODO: should we detect non-reprable arguments? These will lead to mysterious errors
exc_desc = '%s[args=%s, kwargs=%s]' % (task_name, args, kwargs)
# Fill in the positional arguments
positional_params = [(n, p) for n, p in params if p.positional]
for i, arg in enumerate(args):
if i >= len(positional_params):
raise parameter.UnknownParameterException('%s: takes at most %d parameters (%d given)' % (exc_desc, len(positional_params), len(args)))
param_name, param_obj = positional_params[i]
result[param_name] = param_obj.normalize(arg)
# Then the keyword arguments
for param_name, arg in six.iteritems(kwargs):
if param_name in result:
raise parameter.DuplicateParameterException('%s: parameter %s was already set as a positional parameter' % (exc_desc, param_name))
if param_name not in params_dict:
raise parameter.UnknownParameterException('%s: unknown parameter %s' % (exc_desc, param_name))
result[param_name] = params_dict[param_name].normalize(arg)
# Then use the defaults for anything not filled in
for param_name, param_obj in params:
if param_name not in result:
if not param_obj.has_task_value(task_name, param_name):
raise parameter.MissingParameterException("%s: requires the '%s' parameter to be set" % (exc_desc, param_name))
result[param_name] = param_obj.task_value(task_name, param_name)
def list_to_tuple(x):
""" Make tuples out of lists and sets to allow hashing """
if isinstance(x, list) or isinstance(x, set):
return tuple(x)
else:
return x
# Sort it by the correct order and make a list
return [(param_name, list_to_tuple(result[param_name])) for param_name, param_obj in params]
def __init__(self, *args, **kwargs):
params = self.get_params()
param_values = self.get_param_values(params, args, kwargs)
# Set all values on class instance
for key, value in param_values:
setattr(self, key, value)
# Register args and kwargs as an attribute on the class. Might be useful
self.param_args = tuple(value for key, value in param_values)
self.param_kwargs = dict(param_values)
self.task_id = task_id_str(self.task_family, self.to_str_params(only_significant=True))
self.__hash = hash(self.task_id)
self.set_tracking_url = None
self.set_status_message = None
def initialized(self):
"""
Returns ``True`` if the Task is initialized and ``False`` otherwise.
"""
return hasattr(self, 'task_id')
@classmethod
def from_str_params(cls, params_str):
"""
Creates an instance from a str->str hash.
:param params_str: dict of param name -> value as string.
"""
kwargs = {}
for param_name, param in cls.get_params():
if param_name in params_str:
param_str = params_str[param_name]
if isinstance(param_str, list):
kwargs[param_name] = param._parse_list(param_str)
else:
kwargs[param_name] = param.parse(param_str)
return cls(**kwargs)
def to_str_params(self, only_significant=False):
"""
Convert all parameters to a str->str hash.
"""
params_str = {}
params = dict(self.get_params())
for param_name, param_value in six.iteritems(self.param_kwargs):
if (not only_significant) or params[param_name].significant:
params_str[param_name] = params[param_name].serialize(param_value)
return params_str
def clone(self, cls=None, **kwargs):
"""
Creates a new instance from an existing instance where some of the args have changed.
There's at least two scenarios where this is useful (see test/clone_test.py):
* remove a lot of boiler plate when you have recursive dependencies and lots of args
* there's task inheritance and some logic is on the base class
:param cls:
:param kwargs:
:return:
"""
if cls is None:
cls = self.__class__
new_k = {}
for param_name, param_class in cls.get_params():
if param_name in kwargs:
new_k[param_name] = kwargs[param_name]
elif hasattr(self, param_name):
new_k[param_name] = getattr(self, param_name)
return cls(**new_k)
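# Illustrative usage only (not part of the original source): inside a task
# holding a `date` parameter, a dependency sharing that value can be built as
#   self.clone(OtherTask)            # OtherTask is hypothetical
#   self.clone(OtherTask, extra=1)   # same, with an extra/overridden param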
def __hash__(self):
return self.__hash
def __repr__(self):
"""
Build a task representation like `MyTask(param1=1.5, param2='5')`
"""
params = self.get_params()
param_values = self.get_param_values(params, [], self.param_kwargs)
# Build up task id
repr_parts = []
param_objs = dict(params)
for param_name, param_value in param_values:
if param_objs[param_name].significant:
repr_parts.append('%s=%s' % (param_name, param_objs[param_name].serialize(param_value)))
task_str = '{}({})'.format(self.task_family, ', '.join(repr_parts))
return task_str
def __eq__(self, other):
return self.__class__ == other.__class__ and self.param_args == other.param_args
def complete(self):
"""
If the task has any outputs, return ``True`` if all outputs exist.
Otherwise, return ``False``.
However, you may freely override this method with custom logic.
"""
outputs = flatten(self.output())
if len(outputs) == 0:
warnings.warn(
"Task %r without outputs has no custom complete() method" % self,
stacklevel=2
)
return False
return all(map(lambda output: output.exists(), outputs))
@classmethod
def bulk_complete(cls, parameter_tuples):
"""
Returns those of parameter_tuples for which this Task is complete.
Override this with an efficient implementation to speed up scheduling
with range tools. Keep the logic consistent with that of complete().
"""
raise BulkCompleteNotImplementedError()
def output(self):
"""
The output that this Task produces.
The output of the Task determines if the Task needs to be run--the task
is considered finished iff the outputs all exist. Subclasses should
override this method to return a single :py:class:`Target` or a list of
:py:class:`Target` instances.
Implementation note
If running multiple workers, the output must be a resource that is accessible
by all workers, such as a DFS or database. Otherwise, workers might compute
the same output since they don't see the work done by other workers.
See :ref:`Task.output`
"""
return [] # default impl
def requires(self):
"""
The Tasks that this Task depends on.
A Task will only run if all of the Tasks that it requires are completed.
If your Task does not require any other Tasks, then you don't need to
override this method. Otherwise, a subclass can override this method
to return a single Task, a list of Task instances, or a dict whose
values are Task instances.
See :ref:`Task.requires`
"""
return [] # default impl
def _requires(self):
"""
Override in "template" tasks which themselves are supposed to be
subclassed and thus have their requires() overridden (name preserved to
provide consistent end-user experience), yet need to introduce
(non-input) dependencies.
Must return an iterable which among others contains the _requires() of
the superclass.
"""
return flatten(self.requires()) # base impl
def process_resources(self):
"""
Override in "template" tasks which provide common resource functionality
but allow subclasses to specify additional resources while preserving
the name for consistent end-user experience.
"""
return self.resources # default impl
def input(self):
"""
Returns the outputs of the Tasks returned by :py:meth:`requires`
See :ref:`Task.input`
:return: a list of :py:class:`Target` objects which are specified as
outputs of all required Tasks.
"""
return getpaths(self.requires())
def deps(self):
"""
Internal method used by the scheduler.
Returns the flattened list of requires.
"""
# used by scheduler
return flatten(self._requires())
def run(self):
"""
The task run method, to be overridden in a subclass.
See :ref:`Task.run`
"""
pass # default impl
def on_failure(self, exception):
"""
Override for custom error handling.
This method gets called if an exception is raised in :py:meth:`run`.
The returned value of this method is json encoded and sent to the scheduler
as the `expl` argument. Its string representation will be used as the
body of the error email sent out if any.
Default behavior is to return a string representation of the stack trace.
"""
traceback_string = traceback.format_exc()
return "Runtime error:\n%s" % traceback_string
def on_success(self):
"""
Override for doing custom completion handling for a larger class of tasks
This method gets called when :py:meth:`run` completes without raising any exceptions.
The returned value is json encoded and sent to the scheduler as the `expl` argument.
Default behavior is to send an None value"""
pass
@contextmanager
def no_unpicklable_properties(self):
"""
Remove unpicklable properties before dump task and resume them after.
This method can be called in a subclass's dump method, to ensure that
unpicklable properties won't break the dump.
This method is a context-manager which can be called as below:
.. code-block:: python
class DummyTask(luigi.Task):
def _dump(self):
with self.no_unpicklable_properties():
pickle.dumps(self)
"""
unpicklable_properties = ('set_tracking_url', 'set_status_message')
reserved_properties = {}
for property_name in unpicklable_properties:
if hasattr(self, property_name):
reserved_properties[property_name] = getattr(self, property_name)
setattr(self, property_name, 'placeholder_during_pickling')
yield
for property_name, value in six.iteritems(reserved_properties):
setattr(self, property_name, value)
class MixinNaiveBulkComplete(object):
"""
Enables a Task to be efficiently scheduled with e.g. range tools, by providing a bulk_complete implementation which checks completeness in a loop.
Applicable to tasks whose completeness checking is cheap.
This doesn't exploit output-location-specific APIs for a speed advantage, but it still removes redundant scheduler roundtrips.
"""
@classmethod
def bulk_complete(cls, parameter_tuples):
generated_tuples = []
for parameter_tuple in parameter_tuples:
if isinstance(parameter_tuple, (list, tuple)):
if cls(*parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
elif isinstance(parameter_tuple, dict):
if cls(**parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
else:
if cls(parameter_tuple).complete():
generated_tuples.append(parameter_tuple)
return generated_tuples
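# Illustrative usage only (not part of the original source): a task whose
# complete() is cheap opts in via inheritance order, e.g.
#   class CheapTask(MixinNaiveBulkComplete, Task):   # CheapTask is hypothetical
#       ...
# after which range tools can call CheapTask.bulk_complete(parameter_tuples).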
def externalize(task):
"""
Returns an externalized version of the Task.
See :py:class:`ExternalTask`.
"""
task.run = None
return task
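# Illustrative usage only (not part of the original source): externalize()
# lets a pipeline treat a normally-runnable task as externally-produced data:
#   def requires(self):
#       return externalize(UpstreamTask())   # UpstreamTask is hypothetical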
class ExternalTask(Task):
"""
Subclass for references to external dependencies.
An ExternalTask does not have a `run` implementation, which signifies to
the framework that this Task's :py:meth:`output` is generated outside of
Luigi.
"""
run = None
class WrapperTask(Task):
"""
Use for tasks that only wrap other tasks and that by definition are done if all their requirements exist.
"""
def complete(self):
return all(r.complete() for r in flatten(self.requires()))
class Config(Task):
"""
Class for configuration. See :ref:`ConfigClasses`.
"""
# TODO: let's refactor Task & Config so that it inherits from a common
# ParamContainer base class
pass
def getpaths(struct):
"""
Maps all Tasks in a structured data object to their .output().
"""
if isinstance(struct, Task):
return struct.output()
elif isinstance(struct, dict):
r = {}
for k, v in six.iteritems(struct):
r[k] = getpaths(v)
return r
else:
# Remaining case: assume r is iterable...
try:
s = list(struct)
except TypeError:
raise Exception('Cannot map %s to Task/dict/list' % str(struct))
return [getpaths(r) for r in s]
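# Illustrative example only (not part of the original source): getpaths()
# mirrors the shape of the structure it is given, so
#   getpaths({'a': TaskA(), 'bs': [TaskB(n=1), TaskB(n=2)]})   # hypothetical tasks
# returns {'a': TaskA().output(), 'bs': [TaskB(n=1).output(), TaskB(n=2).output()]}.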
def flatten(struct):
"""
Creates a flat list of all items in structured output (dicts, lists, items):
.. code-block:: python
>>> sorted(flatten({'a': 'foo', 'b': 'bar'}))
['bar', 'foo']
>>> sorted(flatten(['foo', ['bar', 'troll']]))
['bar', 'foo', 'troll']
>>> flatten('foo')
['foo']
>>> flatten(42)
[42]
"""
if struct is None:
return []
flat = []
if isinstance(struct, dict):
for _, result in six.iteritems(struct):
flat += flatten(result)
return flat
if isinstance(struct, six.string_types):
return [struct]
try:
# if iterable
iterator = iter(struct)
except TypeError:
return [struct]
for result in iterator:
flat += flatten(result)
return flat
def flatten_output(task):
"""
Lists all output targets by recursively walking output-less (wrapper) tasks.
FIXME order consistently.
"""
r = flatten(task.output())
if not r:
for dep in flatten(task.requires()):
r += flatten_output(dep)
return r
| 1 | 16,673 | Did you consider just do `return Task is an external data dependency and data does not exist (yet?).'`? Less `None`'s and `ifs`. | spotify-luigi | py |
@@ -2,13 +2,10 @@
<li class="cite">
<%= link_to t('blacklight.tools.cite'), citation_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @bookmarks.collect{|doc| doc.document_id}), {:id => 'citeLink', :name => 'citation', :class => 'btn btn-default', :data => {:ajax_modal => "trigger"}} %>
</li>
- <li class="refworks">
- <%= render :partial => 'catalog/refworks_form', :locals => {:documents=>@document_list} %>
- </li>
-
- <li class="endnote">
- <%= link_to t('blacklight.tools.endnote'), endnote_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @bookmarks.collect {|doc| doc.document_id}, :format => 'endnote'), {:class => 'btn btn-default'}%>
- </li>
+ <% if defined? BlacklightMarc %>
+ <%= render partial: 'bookmarks/refworks' %>
+ <%= render partial: 'bookmarks/endnote' %>
+ <% end %>
<li class="email">
<%= link_to t('blacklight.tools.email'), email_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @bookmarks.collect {|doc| doc.document_id}), :class=>"btn btn-default", :id => "emailLink", :data => {:ajax_modal => "trigger"} %>
</li> | 1 | <ul class="bookmarkTools nav nav-pills">
<li class="cite">
<%= link_to t('blacklight.tools.cite'), citation_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @bookmarks.collect{|doc| doc.document_id}), {:id => 'citeLink', :name => 'citation', :class => 'btn btn-default', :data => {:ajax_modal => "trigger"}} %>
</li>
<li class="refworks">
<%= render :partial => 'catalog/refworks_form', :locals => {:documents=>@document_list} %>
</li>
<li class="endnote">
<%= link_to t('blacklight.tools.endnote'), endnote_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @bookmarks.collect {|doc| doc.document_id}, :format => 'endnote'), {:class => 'btn btn-default'}%>
</li>
<li class="email">
<%= link_to t('blacklight.tools.email'), email_catalog_path(:sort=>params[:sort], :per_page=>params[:per_page], :id => @bookmarks.collect {|doc| doc.document_id}), :class=>"btn btn-default", :id => "emailLink", :data => {:ajax_modal => "trigger"} %>
</li>
</ul>
| 1 | 4,953 | Some day, we really need to make these toolbars pluggable.. | projectblacklight-blacklight | rb |
@@ -100,7 +100,11 @@ class WebEngineAction(browsertab.AbstractAction):
"""Save the current page."""
self._widget.triggerPageAction(QWebEnginePage.SavePage)
- def show_source(self):
+ def show_source(self, pygment):
+ if pygment:
+ self._show_source_pygment()
+ return
+
try:
self._widget.triggerPageAction(QWebEnginePage.ViewSource)
except AttributeError: | 1 | # vim: ft=python fileencoding=utf-8 sts=4 sw=4 et:
# Copyright 2016-2018 Florian Bruhin (The Compiler) <[email protected]>
#
# This file is part of qutebrowser.
#
# qutebrowser is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# qutebrowser is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with qutebrowser. If not, see <http://www.gnu.org/licenses/>.
"""Wrapper over a QWebEngineView."""
import math
import functools
import sys
import re
import html as html_utils
import sip
from PyQt5.QtCore import (pyqtSignal, pyqtSlot, Qt, QEvent, QPoint, QPointF,
QUrl, QTimer)
from PyQt5.QtGui import QKeyEvent
from PyQt5.QtNetwork import QAuthenticator
from PyQt5.QtWidgets import QApplication
from PyQt5.QtWebEngineWidgets import QWebEnginePage, QWebEngineScript
from qutebrowser.browser import browsertab, mouse, shared
from qutebrowser.browser.webengine import (webview, webengineelem, tabhistory,
interceptor, webenginequtescheme,
webenginedownloads,
webenginesettings)
from qutebrowser.misc import miscwidgets
from qutebrowser.utils import (usertypes, qtutils, log, javascript, utils,
message, objreg, jinja, debug)
_qute_scheme_handler = None
def init():
"""Initialize QtWebEngine-specific modules."""
# For some reason we need to keep a reference, otherwise the scheme handler
# won't work...
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-September/038075.html
global _qute_scheme_handler
app = QApplication.instance()
log.init.debug("Initializing qute://* handler...")
_qute_scheme_handler = webenginequtescheme.QuteSchemeHandler(parent=app)
_qute_scheme_handler.install(webenginesettings.default_profile)
_qute_scheme_handler.install(webenginesettings.private_profile)
log.init.debug("Initializing request interceptor...")
host_blocker = objreg.get('host-blocker')
req_interceptor = interceptor.RequestInterceptor(
host_blocker, parent=app)
req_interceptor.install(webenginesettings.default_profile)
req_interceptor.install(webenginesettings.private_profile)
log.init.debug("Initializing QtWebEngine downloads...")
download_manager = webenginedownloads.DownloadManager(parent=app)
download_manager.install(webenginesettings.default_profile)
download_manager.install(webenginesettings.private_profile)
objreg.register('webengine-download-manager', download_manager)
greasemonkey = objreg.get('greasemonkey')
greasemonkey.scripts_reloaded.connect(webenginesettings.inject_userscripts)
webenginesettings.inject_userscripts()
# Mapping worlds from usertypes.JsWorld to QWebEngineScript world IDs.
_JS_WORLD_MAP = {
usertypes.JsWorld.main: QWebEngineScript.MainWorld,
usertypes.JsWorld.application: QWebEngineScript.ApplicationWorld,
usertypes.JsWorld.user: QWebEngineScript.UserWorld,
usertypes.JsWorld.jseval: QWebEngineScript.UserWorld + 1,
}
class WebEngineAction(browsertab.AbstractAction):
"""QtWebEngine implementations related to web actions."""
action_class = QWebEnginePage
action_base = QWebEnginePage.WebAction
def exit_fullscreen(self):
self._widget.triggerPageAction(QWebEnginePage.ExitFullScreen)
def save_page(self):
"""Save the current page."""
self._widget.triggerPageAction(QWebEnginePage.SavePage)
def show_source(self):
try:
self._widget.triggerPageAction(QWebEnginePage.ViewSource)
except AttributeError:
# Qt < 5.8
tb = objreg.get('tabbed-browser', scope='window',
window=self._tab.win_id)
urlstr = self._tab.url().toString(QUrl.RemoveUserInfo)
# The original URL becomes the path of a view-source: URL
# (without a host), but query/fragment should stay.
url = QUrl('view-source:' + urlstr)
tb.tabopen(url, background=False, related=True)
class WebEnginePrinting(browsertab.AbstractPrinting):
"""QtWebEngine implementations related to printing."""
def check_pdf_support(self):
return True
def check_printer_support(self):
if not hasattr(self._widget.page(), 'print'):
raise browsertab.WebTabError(
"Printing is unsupported with QtWebEngine on Qt < 5.8")
def check_preview_support(self):
raise browsertab.WebTabError(
"Print previews are unsupported with QtWebEngine")
def to_pdf(self, filename):
self._widget.page().printToPdf(filename)
def to_printer(self, printer, callback=None):
if callback is None:
callback = lambda _ok: None
self._widget.page().print(printer, callback)
class WebEngineSearch(browsertab.AbstractSearch):
"""QtWebEngine implementations related to searching on the page.
Attributes:
_flags: The QWebEnginePage.FindFlags of the last search.
_pending_searches: How many searches have been started but not called
back yet.
"""
def __init__(self, parent=None):
super().__init__(parent)
self._flags = QWebEnginePage.FindFlags(0)
self._pending_searches = 0
def _find(self, text, flags, callback, caller):
"""Call findText on the widget."""
self.search_displayed = True
self._pending_searches += 1
def wrapped_callback(found):
"""Wrap the callback to do debug logging."""
self._pending_searches -= 1
if self._pending_searches > 0:
# See https://github.com/qutebrowser/qutebrowser/issues/2442
# and https://github.com/qt/qtwebengine/blob/5.10/src/core/web_contents_adapter.cpp#L924-L934
log.webview.debug("Ignoring cancelled search callback with "
"{} pending searches".format(
self._pending_searches))
return
found_text = 'found' if found else "didn't find"
if flags:
flag_text = 'with flags {}'.format(debug.qflags_key(
QWebEnginePage, flags, klass=QWebEnginePage.FindFlag))
else:
flag_text = ''
log.webview.debug(' '.join([caller, found_text, text, flag_text])
.strip())
if callback is not None:
callback(found)
self._widget.findText(text, flags, wrapped_callback)
def search(self, text, *, ignore_case='never', reverse=False,
result_cb=None):
# Don't go to next entry on duplicate search
if self.text == text and self.search_displayed:
log.webview.debug("Ignoring duplicate search request"
" for {}".format(text))
return
self.text = text
self._flags = QWebEnginePage.FindFlags(0)
if self._is_case_sensitive(ignore_case):
self._flags |= QWebEnginePage.FindCaseSensitively
if reverse:
self._flags |= QWebEnginePage.FindBackward
self._find(text, self._flags, result_cb, 'search')
def clear(self):
self.search_displayed = False
self._widget.findText('')
def prev_result(self, *, result_cb=None):
# The int() here makes sure we get a copy of the flags.
flags = QWebEnginePage.FindFlags(int(self._flags))
if flags & QWebEnginePage.FindBackward:
flags &= ~QWebEnginePage.FindBackward
else:
flags |= QWebEnginePage.FindBackward
self._find(self.text, flags, result_cb, 'prev_result')
def next_result(self, *, result_cb=None):
self._find(self.text, self._flags, result_cb, 'next_result')
class WebEngineCaret(browsertab.AbstractCaret):
"""QtWebEngine implementations related to moving the cursor/selection."""
@pyqtSlot(usertypes.KeyMode)
def _on_mode_entered(self, mode):
if mode != usertypes.KeyMode.caret:
return
if self._tab.search.search_displayed:
# We are currently in search mode.
# convert the search to a blue selection so we can operate on it
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
self._tab.run_js_async(
javascript.assemble('caret', 'setPlatform', sys.platform))
self._js_call('setInitialCursor')
@pyqtSlot(usertypes.KeyMode)
def _on_mode_left(self, mode):
if mode != usertypes.KeyMode.caret:
return
self.drop_selection()
self._js_call('disableCaret')
def move_to_next_line(self, count=1):
for _ in range(count):
self._js_call('moveDown')
def move_to_prev_line(self, count=1):
for _ in range(count):
self._js_call('moveUp')
def move_to_next_char(self, count=1):
for _ in range(count):
self._js_call('moveRight')
def move_to_prev_char(self, count=1):
for _ in range(count):
self._js_call('moveLeft')
def move_to_end_of_word(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfWord')
def move_to_next_word(self, count=1):
for _ in range(count):
self._js_call('moveToNextWord')
def move_to_prev_word(self, count=1):
for _ in range(count):
self._js_call('moveToPreviousWord')
def move_to_start_of_line(self):
self._js_call('moveToStartOfLine')
def move_to_end_of_line(self):
self._js_call('moveToEndOfLine')
def move_to_start_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfNextBlock')
def move_to_start_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToStartOfPrevBlock')
def move_to_end_of_next_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfNextBlock')
def move_to_end_of_prev_block(self, count=1):
for _ in range(count):
self._js_call('moveToEndOfPrevBlock')
def move_to_start_of_document(self):
self._js_call('moveToStartOfDocument')
def move_to_end_of_document(self):
self._js_call('moveToEndOfDocument')
def toggle_selection(self):
self._js_call('toggleSelection')
def drop_selection(self):
self._js_call('dropSelection')
def selection(self, callback):
# Not using selectedText() as WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-53134
# Even on Qt 5.10 selectedText() seems to work poorly, see
# https://github.com/qutebrowser/qutebrowser/issues/3523
self._tab.run_js_async(javascript.assemble('caret', 'getSelection'),
callback)
def _follow_selected_cb(self, js_elem, tab=False):
"""Callback for javascript which clicks the selected element.
Args:
js_elem: The element serialized from javascript.
tab: Open in a new tab.
"""
if js_elem is None:
return
assert isinstance(js_elem, dict), js_elem
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
if tab:
click_type = usertypes.ClickTarget.tab
else:
click_type = usertypes.ClickTarget.normal
# Only click if we see a link
if elem.is_link():
log.webview.debug("Found link in selection, clicking. ClickTarget "
"{}, elem {}".format(click_type, elem))
elem.click(click_type)
def follow_selected(self, *, tab=False):
if self._tab.search.search_displayed:
# We are currently in search mode.
# let's click the link via a fake-click
# https://bugreports.qt.io/browse/QTBUG-60673
self._tab.search.clear()
log.webview.debug("Clicking a searched link via fake key press.")
# send a fake enter, clicking the orange selection box
if tab:
self._tab.key_press(Qt.Key_Enter, modifier=Qt.ControlModifier)
else:
self._tab.key_press(Qt.Key_Enter)
else:
# click an existing blue selection
js_code = javascript.assemble('webelem', 'find_selected_link')
self._tab.run_js_async(js_code, lambda jsret:
self._follow_selected_cb(jsret, tab))
def _js_call(self, command):
self._tab.run_js_async(
javascript.assemble('caret', command))
class WebEngineScroller(browsertab.AbstractScroller):
"""QtWebEngine implementations related to scrolling."""
def __init__(self, tab, parent=None):
super().__init__(tab, parent)
self._args = objreg.get('args')
self._pos_perc = (0, 0)
self._pos_px = QPoint()
self._at_bottom = False
def _init_widget(self, widget):
super()._init_widget(widget)
page = widget.page()
page.scrollPositionChanged.connect(self._update_pos)
def _repeated_key_press(self, key, count=1, modifier=Qt.NoModifier):
"""Send count fake key presses to this scroller's WebEngineTab."""
for _ in range(min(count, 5000)):
self._tab.key_press(key, modifier)
@pyqtSlot(QPointF)
def _update_pos(self, pos):
"""Update the scroll position attributes when it changed."""
self._pos_px = pos.toPoint()
contents_size = self._widget.page().contentsSize()
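# The scrollable range is the content size minus the viewport size;
# zero means the page fits and that axis cannot scroll.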
scrollable_x = contents_size.width() - self._widget.width()
if scrollable_x == 0:
perc_x = 0
else:
try:
perc_x = min(100, round(100 / scrollable_x * pos.x()))
except ValueError:
# https://github.com/qutebrowser/qutebrowser/issues/3219
log.misc.debug("Got ValueError!")
log.misc.debug("contents_size.width(): {}".format(
contents_size.width()))
log.misc.debug("self._widget.width(): {}".format(
self._widget.width()))
log.misc.debug("scrollable_x: {}".format(scrollable_x))
log.misc.debug("pos.x(): {}".format(pos.x()))
raise
scrollable_y = contents_size.height() - self._widget.height()
if scrollable_y == 0:
perc_y = 0
else:
perc_y = min(100, round(100 / scrollable_y * pos.y()))
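# pos.y() may be fractional, so round up before comparing against the
# maximum scroll offset to decide whether we are at the bottom.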
self._at_bottom = math.ceil(pos.y()) >= scrollable_y
if (self._pos_perc != (perc_x, perc_y) or
'no-scroll-filtering' in self._args.debug_flags):
self._pos_perc = perc_x, perc_y
self.perc_changed.emit(*self._pos_perc)
def pos_px(self):
return self._pos_px
def pos_perc(self):
return self._pos_perc
def to_perc(self, x=None, y=None):
js_code = javascript.assemble('scroll', 'to_perc', x, y)
self._tab.run_js_async(js_code)
def to_point(self, point):
js_code = javascript.assemble('window', 'scroll', point.x(), point.y())
self._tab.run_js_async(js_code)
def delta(self, x=0, y=0):
self._tab.run_js_async(javascript.assemble('window', 'scrollBy', x, y))
def delta_page(self, x=0, y=0):
js_code = javascript.assemble('scroll', 'delta_page', x, y)
self._tab.run_js_async(js_code)
def up(self, count=1):
self._repeated_key_press(Qt.Key_Up, count)
def down(self, count=1):
self._repeated_key_press(Qt.Key_Down, count)
def left(self, count=1):
self._repeated_key_press(Qt.Key_Left, count)
def right(self, count=1):
self._repeated_key_press(Qt.Key_Right, count)
def top(self):
self._tab.key_press(Qt.Key_Home)
def bottom(self):
self._tab.key_press(Qt.Key_End)
def page_up(self, count=1):
self._repeated_key_press(Qt.Key_PageUp, count)
def page_down(self, count=1):
self._repeated_key_press(Qt.Key_PageDown, count)
def at_top(self):
return self.pos_px().y() == 0
def at_bottom(self):
return self._at_bottom
class WebEngineHistory(browsertab.AbstractHistory):
"""QtWebEngine implementations related to page history."""
def current_idx(self):
return self._history.currentItemIndex()
def can_go_back(self):
return self._history.canGoBack()
def can_go_forward(self):
return self._history.canGoForward()
def _item_at(self, i):
return self._history.itemAt(i)
def _go_to_item(self, item):
self._tab.predicted_navigation.emit(item.url())
self._history.goToItem(item)
def serialize(self):
if not qtutils.version_check('5.9', compiled=False):
# WORKAROUND for
# https://github.com/qutebrowser/qutebrowser/issues/2289
# Don't use the history's currentItem here, because of
# https://bugreports.qt.io/browse/QTBUG-59599 and because it doesn't
# contain view-source.
scheme = self._tab.url().scheme()
if scheme in ['view-source', 'chrome']:
raise browsertab.WebTabError("Can't serialize special URL!")
return qtutils.serialize(self._history)
def deserialize(self, data):
return qtutils.deserialize(data, self._history)
def load_items(self, items):
stream, _data, cur_data = tabhistory.serialize(items)
qtutils.deserialize_stream(stream, self._history)
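# The scroll position can only be applied once the page has finished
# loading, so it is restored from a one-shot slot below that
# disconnects itself after running.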
@pyqtSlot()
def _on_load_finished():
self._tab.scroller.to_point(cur_data['scroll-pos'])
self._tab.load_finished.disconnect(_on_load_finished)
if cur_data is not None:
if 'zoom' in cur_data:
self._tab.zoom.set_factor(cur_data['zoom'])
if ('scroll-pos' in cur_data and
self._tab.scroller.pos_px() == QPoint(0, 0)):
self._tab.load_finished.connect(_on_load_finished)
class WebEngineZoom(browsertab.AbstractZoom):
"""QtWebEngine implementations related to zooming."""
def _set_factor_internal(self, factor):
self._widget.setZoomFactor(factor)
class WebEngineElements(browsertab.AbstractElements):
"""QtWebEngine implemementations related to elements on the page."""
def _js_cb_multiple(self, callback, js_elems):
"""Handle found elements coming from JS and call the real callback.
Args:
callback: The callback to call with the found elements.
Called with None if there was an error.
js_elems: The elements serialized from javascript.
"""
if js_elems is None:
callback(None)
return
elems = []
for js_elem in js_elems:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
elems.append(elem)
callback(elems)
def _js_cb_single(self, callback, js_elem):
"""Handle a found focus elem coming from JS and call the real callback.
Args:
callback: The callback to call with the found element.
Called with a WebEngineElement or None.
js_elem: The element serialized from javascript.
"""
debug_str = ('None' if js_elem is None
else utils.elide(repr(js_elem), 1000))
log.webview.debug("Got element from JS: {}".format(debug_str))
if js_elem is None:
callback(None)
else:
elem = webengineelem.WebEngineElement(js_elem, tab=self._tab)
callback(elem)
def find_css(self, selector, callback, *, only_visible=False):
js_code = javascript.assemble('webelem', 'find_css', selector,
only_visible)
js_cb = functools.partial(self._js_cb_multiple, callback)
self._tab.run_js_async(js_code, js_cb)
def find_id(self, elem_id, callback):
js_code = javascript.assemble('webelem', 'find_id', elem_id)
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_focused(self, callback):
js_code = javascript.assemble('webelem', 'find_focused')
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
def find_at_pos(self, pos, callback):
assert pos.x() >= 0
assert pos.y() >= 0
pos /= self._tab.zoom.factor()
js_code = javascript.assemble('webelem', 'find_at_pos',
pos.x(), pos.y())
js_cb = functools.partial(self._js_cb_single, callback)
self._tab.run_js_async(js_code, js_cb)
class WebEngineTab(browsertab.AbstractTab):
"""A QtWebEngine tab in the browser.
Signals:
_load_finished_fake:
Used in place of unreliable loadFinished
"""
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
_load_finished_fake = pyqtSignal(bool)
def __init__(self, *, win_id, mode_manager, private, parent=None):
super().__init__(win_id=win_id, mode_manager=mode_manager,
private=private, parent=parent)
widget = webview.WebEngineView(tabdata=self.data, win_id=win_id,
private=private)
self.history = WebEngineHistory(self)
self.scroller = WebEngineScroller(self, parent=self)
self.caret = WebEngineCaret(mode_manager=mode_manager,
tab=self, parent=self)
self.zoom = WebEngineZoom(tab=self, parent=self)
self.search = WebEngineSearch(parent=self)
self.printing = WebEnginePrinting()
self.elements = WebEngineElements(tab=self)
self.action = WebEngineAction(tab=self)
# We're assigning settings in _set_widget
self.settings = webenginesettings.WebEngineSettings(settings=None)
self._set_widget(widget)
self._connect_signals()
self.backend = usertypes.Backend.QtWebEngine
self._init_js()
self._child_event_filter = None
self._saved_zoom = None
self._reload_url = None
def _init_js(self):
js_code = '\n'.join([
'"use strict";',
'window._qutebrowser = window._qutebrowser || {};',
utils.read_file('javascript/scroll.js'),
utils.read_file('javascript/webelem.js'),
utils.read_file('javascript/caret.js'),
])
script = QWebEngineScript()
# We can't use DocumentCreation here as WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-66011
script.setInjectionPoint(QWebEngineScript.DocumentReady)
script.setSourceCode(js_code)
page = self._widget.page()
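# Run our scripts in the application world, presumably to keep them
# isolated from JavaScript belonging to the page itself.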
script.setWorldId(QWebEngineScript.ApplicationWorld)
# FIXME:qtwebengine what about runsOnSubFrames?
page.scripts().insert(script)
def _install_event_filter(self):
self._widget.focusProxy().installEventFilter(self._mouse_event_filter)
self._child_event_filter = mouse.ChildEventFilter(
eventfilter=self._mouse_event_filter, widget=self._widget,
parent=self)
self._widget.installEventFilter(self._child_event_filter)
@pyqtSlot()
def _restore_zoom(self):
if sip.isdeleted(self._widget):
# https://github.com/qutebrowser/qutebrowser/issues/3498
return
if self._saved_zoom is None:
return
self.zoom.set_factor(self._saved_zoom)
self._saved_zoom = None
def openurl(self, url):
self._saved_zoom = self.zoom.factor()
self._openurl_prepare(url)
self._widget.load(url)
def url(self, requested=False):
page = self._widget.page()
if requested:
return page.requestedUrl()
else:
return page.url()
def dump_async(self, callback, *, plain=False):
if plain:
self._widget.page().toPlainText(callback)
else:
self._widget.page().toHtml(callback)
def run_js_async(self, code, callback=None, *, world=None):
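# Resolve the requested JS world to a QtWebEngine world ID: None maps
# to the application world, ints pass through unchanged, and anything
# else is looked up in _JS_WORLD_MAP.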
if world is None:
world_id = QWebEngineScript.ApplicationWorld
elif isinstance(world, int):
world_id = world
else:
world_id = _JS_WORLD_MAP[world]
if callback is None:
self._widget.page().runJavaScript(code, world_id)
else:
self._widget.page().runJavaScript(code, world_id, callback)
def shutdown(self):
self.shutting_down.emit()
self.action.exit_fullscreen()
if qtutils.version_check('5.8', exact=True, compiled=False):
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-58563
self.search.clear()
self._widget.shutdown()
def reload(self, *, force=False):
if force:
action = QWebEnginePage.ReloadAndBypassCache
else:
action = QWebEnginePage.Reload
self._widget.triggerPageAction(action)
def stop(self):
self._widget.stop()
def title(self):
return self._widget.title()
def icon(self):
return self._widget.icon()
def set_html(self, html, base_url=QUrl()):
# FIXME:qtwebengine
# check this and raise an exception if too big:
# Warning: The content will be percent encoded before being sent to the
# renderer via IPC. This may increase its size. The maximum size of the
# percent encoded content is 2 megabytes minus 30 bytes.
self._widget.setHtml(html, base_url)
def networkaccessmanager(self):
return None
def user_agent(self):
return None
def clear_ssl_errors(self):
raise browsertab.UnsupportedOperationError
def key_press(self, key, modifier=Qt.NoModifier):
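# Send a synthetic press/release pair so the page sees a complete
# keystroke rather than a key being held down.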
press_evt = QKeyEvent(QEvent.KeyPress, key, modifier, 0, 0, 0)
release_evt = QKeyEvent(QEvent.KeyRelease, key, modifier,
0, 0, 0)
self.send_event(press_evt)
self.send_event(release_evt)
def _show_error_page(self, url, error):
"""Show an error page in the tab."""
log.misc.debug("Showing error page for {}".format(error))
url_string = url.toDisplayString()
error_page = jinja.render(
'error.html',
title="Error loading page: {}".format(url_string),
url=url_string, error=error)
self.set_html(error_page)
@pyqtSlot()
def _on_history_trigger(self):
try:
self._widget.page()
except RuntimeError:
# Looks like this slot can be triggered on destroyed tabs:
# https://crashes.qutebrowser.org/view/3abffbed (Qt 5.9.1)
# wrapped C/C++ object of type WebEngineView has been deleted
log.misc.debug("Ignoring history trigger for destroyed tab")
return
url = self.url()
requested_url = self.url(requested=True)
# Don't save the title if it's generated from the URL
title = self.title()
title_url = QUrl(url)
title_url.setScheme('')
if title == title_url.toDisplayString(QUrl.RemoveScheme).strip('/'):
title = ""
# Don't add history entry if the URL is invalid anyways
if not url.isValid():
log.misc.debug("Ignoring invalid URL being added to history")
return
self.add_history_item.emit(url, requested_url, title)
@pyqtSlot(QUrl, 'QAuthenticator*', 'QString')
def _on_proxy_authentication_required(self, url, authenticator,
proxy_host):
"""Called when a proxy needs authentication."""
msg = "<b>{}</b> requires a username and password.".format(
html_utils.escape(proxy_host))
urlstr = url.toString(QUrl.RemovePassword | QUrl.FullyEncoded)
answer = message.ask(
title="Proxy authentication required", text=msg,
mode=usertypes.PromptMode.user_pwd,
abort_on=[self.shutting_down, self.load_started], url=urlstr)
if answer is not None:
authenticator.setUser(answer.user)
authenticator.setPassword(answer.password)
else:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
self._show_error_page(url, "Proxy authentication required")
@pyqtSlot(QUrl, 'QAuthenticator*')
def _on_authentication_required(self, url, authenticator):
netrc_success = False
if not self.data.netrc_used:
self.data.netrc_used = True
netrc_success = shared.netrc_authentication(url, authenticator)
if not netrc_success:
abort_on = [self.shutting_down, self.load_started]
answer = shared.authentication_required(url, authenticator,
abort_on)
if not netrc_success and answer is None:
try:
# pylint: disable=no-member, useless-suppression
sip.assign(authenticator, QAuthenticator())
# pylint: enable=no-member, useless-suppression
except AttributeError:
# WORKAROUND for
# https://www.riverbankcomputing.com/pipermail/pyqt/2016-December/038400.html
self._show_error_page(url, "Authentication required")
@pyqtSlot('QWebEngineFullScreenRequest')
def _on_fullscreen_requested(self, request):
request.accept()
on = request.toggleOn()
self.data.fullscreen = on
self.fullscreen_requested.emit(on)
if on:
notification = miscwidgets.FullscreenNotification(self)
notification.show()
notification.set_timeout(3000)
@pyqtSlot()
def _on_load_started(self):
"""Clear search when a new load is started if needed."""
if (qtutils.version_check('5.9', compiled=False) and
not qtutils.version_check('5.9.2', compiled=False)):
# WORKAROUND for
# https://bugreports.qt.io/browse/QTBUG-61506
self.search.clear()
super()._on_load_started()
self.data.netrc_used = False
@pyqtSlot(QWebEnginePage.RenderProcessTerminationStatus, int)
def _on_render_process_terminated(self, status, exitcode):
"""Show an error when the renderer process terminated."""
if (status == QWebEnginePage.AbnormalTerminationStatus and
exitcode == 256):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-58697
status = QWebEnginePage.CrashedTerminationStatus
status_map = {
QWebEnginePage.NormalTerminationStatus:
browsertab.TerminationStatus.normal,
QWebEnginePage.AbnormalTerminationStatus:
browsertab.TerminationStatus.abnormal,
QWebEnginePage.CrashedTerminationStatus:
browsertab.TerminationStatus.crashed,
QWebEnginePage.KilledTerminationStatus:
browsertab.TerminationStatus.killed,
-1:
browsertab.TerminationStatus.unknown,
}
self.renderer_process_terminated.emit(status_map[status], exitcode)
@pyqtSlot(int)
def _on_load_progress_workaround(self, perc):
"""Use loadProgress(100) to emit loadFinished(True).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if perc == 100 and self.load_status() != usertypes.LoadStatus.error:
self._load_finished_fake.emit(True)
@pyqtSlot(bool)
def _on_load_finished_workaround(self, ok):
"""Use only loadFinished(False).
See https://bugreports.qt.io/browse/QTBUG-65223
"""
if not ok:
self._load_finished_fake.emit(False)
def _error_page_workaround(self, html):
"""Check if we're displaying a Chromium error page.
This gets only called if we got loadFinished(False) without JavaScript,
so we can display at least some error page.
WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66643
Needs to check the page content as a WORKAROUND for
https://bugreports.qt.io/browse/QTBUG-66661
"""
match = re.search(r'"errorCode":"([^"]*)"', html)
if match is None:
return
self._show_error_page(self.url(), error=match.group(1))
@pyqtSlot(bool)
def _on_load_finished(self, ok):
"""Display a static error page if JavaScript is disabled."""
super()._on_load_finished(ok)
js_enabled = self.settings.test_attribute('content.javascript.enabled')
if not ok and not js_enabled:
self.dump_async(self._error_page_workaround)
if ok and self._reload_url is not None:
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
log.config.debug(
"Reloading {} because of config change".format(
self._reload_url.toDisplayString()))
QTimer.singleShot(100, lambda url=self._reload_url:
self.openurl(url))
self._reload_url = None
@pyqtSlot(QUrl)
def _on_predicted_navigation(self, url):
"""If we know we're going to visit an URL soon, change the settings."""
self.settings.update_for_url(url)
@pyqtSlot(usertypes.NavigationRequest)
def _on_navigation_request(self, navigation):
super()._on_navigation_request(navigation)
if navigation.accepted and navigation.is_main_frame:
changed = self.settings.update_for_url(navigation.url)
needs_reload = {'content.plugins', 'content.javascript.enabled'}
if (changed & needs_reload and navigation.navigation_type !=
navigation.Type.link_clicked):
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-66656
self._reload_url = navigation.url
def _connect_signals(self):
view = self._widget
page = view.page()
page.windowCloseRequested.connect(self.window_close_requested)
page.linkHovered.connect(self.link_hovered)
page.loadProgress.connect(self._on_load_progress)
page.loadStarted.connect(self._on_load_started)
page.certificate_error.connect(self._on_ssl_errors)
page.authenticationRequired.connect(self._on_authentication_required)
page.proxyAuthenticationRequired.connect(
self._on_proxy_authentication_required)
page.fullScreenRequested.connect(self._on_fullscreen_requested)
page.contentsSizeChanged.connect(self.contents_size_changed)
page.navigation_request.connect(self._on_navigation_request)
view.titleChanged.connect(self.title_changed)
view.urlChanged.connect(self._on_url_changed)
view.renderProcessTerminated.connect(
self._on_render_process_terminated)
view.iconChanged.connect(self.icon_changed)
# WORKAROUND for https://bugreports.qt.io/browse/QTBUG-65223
if qtutils.version_check('5.10', compiled=False):
page.loadProgress.connect(self._on_load_progress_workaround)
self._load_finished_fake.connect(self._on_history_trigger)
self._load_finished_fake.connect(self._restore_zoom)
self._load_finished_fake.connect(self._on_load_finished)
page.loadFinished.connect(self._on_load_finished_workaround)
else:
# for older Qt versions which break with the above
page.loadProgress.connect(self._on_load_progress)
page.loadFinished.connect(self._on_history_trigger)
page.loadFinished.connect(self._restore_zoom)
page.loadFinished.connect(self._on_load_finished)
self.predicted_navigation.connect(self._on_predicted_navigation)
def event_target(self):
return self._widget.focusProxy()
| 1 | 20,845 | This should be `pygment=False` like in `browsertab.py` | qutebrowser-qutebrowser | py |
@@ -766,6 +766,11 @@ func (s *Server) createClient(conn net.Conn) *client {
return c
}
+ // If there is a max subscriptions specified, add to the client.
+ if opts.MaxSubs > 0 {
+ c.msubs = opts.MaxSubs
+ }
+
// If there is a max connections specified, check that adding
// this new client would not push us over the max
if opts.MaxConn > 0 && len(s.clients) >= opts.MaxConn { | 1 | // Copyright 2012-2018 The NATS Authors
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package server
import (
"bytes"
"crypto/tls"
"encoding/json"
"flag"
"fmt"
"io/ioutil"
"net"
"net/http"
"os"
"runtime"
"strconv"
"strings"
"sync"
"sync/atomic"
"time"
// Allow dynamic profiling.
_ "net/http/pprof"
"github.com/nats-io/gnatsd/util"
)
// Info is the information sent to clients to help them understand information
// about this server.
type Info struct {
ID string `json:"server_id"`
Version string `json:"version"`
Proto int `json:"proto"`
GitCommit string `json:"git_commit,omitempty"`
GoVersion string `json:"go"`
Host string `json:"host"`
Port int `json:"port"`
AuthRequired bool `json:"auth_required,omitempty"`
TLSRequired bool `json:"tls_required,omitempty"`
TLSVerify bool `json:"tls_verify,omitempty"`
MaxPayload int `json:"max_payload"`
IP string `json:"ip,omitempty"`
CID uint64 `json:"client_id,omitempty"`
ClientConnectURLs []string `json:"connect_urls,omitempty"` // Contains URLs a client can connect to.
}
// Server is our main struct.
type Server struct {
gcid uint64
stats
mu sync.Mutex
info Info
sl *Sublist
configFile string
optsMu sync.RWMutex
opts *Options
running bool
shutdown bool
listener net.Listener
clients map[uint64]*client
routes map[uint64]*client
remotes map[string]*client
users map[string]*User
totalClients uint64
closed *closedRingBuffer
done chan bool
start time.Time
http net.Listener
httpHandler http.Handler
profiler net.Listener
httpReqStats map[string]uint64
routeListener net.Listener
routeInfo Info
routeInfoJSON []byte
quitCh chan struct{}
// Tracking for remote QRSID tags.
rqsMu sync.RWMutex
rqsubs map[string]rqsub
rqsubsTimer *time.Timer
// Tracking Go routines
grMu sync.Mutex
grTmpClients map[uint64]*client
grRunning bool
grWG sync.WaitGroup // to wait on various go routines
cproto int64 // number of clients supporting async INFO
configTime time.Time // last time config was loaded
logging struct {
sync.RWMutex
logger Logger
trace int32
debug int32
}
clientConnectURLs []string
// Used internally for quick look-ups.
clientConnectURLsMap map[string]struct{}
lastCURLsUpdate int64
// These store the real client/cluster listen ports. They are
// required during config reload to reset the Options (after
// reload) to the actual listen port values.
clientActualPort int
clusterActualPort int
// Used by tests to check that http.Servers do
// not set any timeout.
monitoringServer *http.Server
profilingServer *http.Server
}
// Make sure all are 64bits for atomic use
type stats struct {
inMsgs int64
outMsgs int64
inBytes int64
outBytes int64
slowConsumers int64
}
// New will setup a new server struct after parsing the options.
func New(opts *Options) *Server {
processOptions(opts)
// Process TLS options, including whether we require client certificates.
tlsReq := opts.TLSConfig != nil
verify := (tlsReq && opts.TLSConfig.ClientAuth == tls.RequireAndVerifyClientCert)
info := Info{
ID: genID(),
Version: VERSION,
Proto: PROTO,
GitCommit: gitCommit,
GoVersion: runtime.Version(),
Host: opts.Host,
Port: opts.Port,
AuthRequired: false,
TLSRequired: tlsReq,
TLSVerify: verify,
MaxPayload: opts.MaxPayload,
}
now := time.Now()
s := &Server{
configFile: opts.ConfigFile,
info: info,
sl: NewSublist(),
opts: opts,
done: make(chan bool, 1),
start: now,
configTime: now,
}
s.mu.Lock()
defer s.mu.Unlock()
// This is normally done in the AcceptLoop, once the
// listener has been created (possibly with random port),
// but since some tests may expect the INFO to be properly
// set after New(), let's do it now.
s.setInfoHostPortAndGenerateJSON()
// Used internally for quick look-ups.
s.clientConnectURLsMap = make(map[string]struct{})
// For tracking clients
s.clients = make(map[uint64]*client)
// For tracking closed clients.
s.closed = newClosedRingBuffer(opts.MaxClosedClients)
// For tracking connections that are not yet registered
// in s.routes, but for which readLoop has started.
s.grTmpClients = make(map[uint64]*client)
// For tracking routes and their remote ids
s.routes = make(map[uint64]*client)
s.remotes = make(map[string]*client)
// Used to kick out all go routines possibly waiting on server
// to shutdown.
s.quitCh = make(chan struct{})
// Used to setup Authorization.
s.configureAuthorization()
// Start signal handler
s.handleSignals()
return s
}
func (s *Server) getOpts() *Options {
s.optsMu.RLock()
opts := s.opts
s.optsMu.RUnlock()
return opts
}
func (s *Server) setOpts(opts *Options) {
s.optsMu.Lock()
s.opts = opts
s.optsMu.Unlock()
}
func (s *Server) generateRouteInfoJSON() {
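	// Pre-render the INFO protocol line sent to new route connections so
	// it does not have to be re-marshalled each time (assumed intent).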
b, _ := json.Marshal(s.routeInfo)
pcs := [][]byte{[]byte("INFO"), b, []byte(CR_LF)}
s.routeInfoJSON = bytes.Join(pcs, []byte(" "))
}
// PrintAndDie is exported for access in other packages.
func PrintAndDie(msg string) {
fmt.Fprintf(os.Stderr, "%s\n", msg)
os.Exit(1)
}
// PrintServerAndExit will print our version and exit.
func PrintServerAndExit() {
fmt.Printf("nats-server version %s\n", VERSION)
os.Exit(0)
}
// ProcessCommandLineArgs takes the command line arguments
// validating and setting flags for handling in case any
// sub command was present.
func ProcessCommandLineArgs(cmd *flag.FlagSet) (showVersion bool, showHelp bool, err error) {
if len(cmd.Args()) > 0 {
arg := cmd.Args()[0]
switch strings.ToLower(arg) {
case "version":
return true, false, nil
case "help":
return false, true, nil
default:
return false, false, fmt.Errorf("unrecognized command: %q", arg)
}
}
return false, false, nil
}
// Protected check on running state
func (s *Server) isRunning() bool {
s.mu.Lock()
defer s.mu.Unlock()
return s.running
}
func (s *Server) logPid() error {
pidStr := strconv.Itoa(os.Getpid())
return ioutil.WriteFile(s.getOpts().PidFile, []byte(pidStr), 0660)
}
// Start up the server, this will block.
// Start via a Go routine if needed.
func (s *Server) Start() {
s.Noticef("Starting nats-server version %s", VERSION)
s.Debugf("Go build version %s", s.info.GoVersion)
gc := gitCommit
if gc == "" {
gc = "not set"
}
s.Noticef("Git commit [%s]", gc)
// Avoid RACE between Start() and Shutdown()
s.mu.Lock()
s.running = true
s.mu.Unlock()
s.grMu.Lock()
s.grRunning = true
s.grMu.Unlock()
// Snapshot server options.
opts := s.getOpts()
// Log the pid to a file
if opts.PidFile != _EMPTY_ {
if err := s.logPid(); err != nil {
PrintAndDie(fmt.Sprintf("Could not write pidfile: %v\n", err))
}
}
// Start monitoring if needed
if err := s.StartMonitoring(); err != nil {
s.Fatalf("Can't start monitoring: %v", err)
return
}
// The Routing routine needs to wait for the client listen
// port to be opened and potential ephemeral port selected.
clientListenReady := make(chan struct{})
// Start up routing as well if needed.
if opts.Cluster.Port != 0 {
s.startGoRoutine(func() {
s.StartRouting(clientListenReady)
})
}
// Pprof http endpoint for the profiler.
if opts.ProfPort != 0 {
s.StartProfiler()
}
// Wait for clients.
s.AcceptLoop(clientListenReady)
}
// Shutdown will shutdown the server instance by kicking out the AcceptLoop
// and closing all associated clients.
func (s *Server) Shutdown() {
s.mu.Lock()
// Prevent issues with multiple calls.
if s.shutdown {
s.mu.Unlock()
return
}
s.shutdown = true
s.running = false
s.grMu.Lock()
s.grRunning = false
s.grMu.Unlock()
conns := make(map[uint64]*client)
// Copy off the clients
for i, c := range s.clients {
conns[i] = c
}
// Copy off the connections that are not yet registered
// in s.routes, but for which the readLoop has started
s.grMu.Lock()
for i, c := range s.grTmpClients {
conns[i] = c
}
s.grMu.Unlock()
// Copy off the routes
for i, r := range s.routes {
r.setRouteNoReconnectOnClose()
conns[i] = r
}
// Number of done channel responses we expect.
doneExpected := 0
// Kick client AcceptLoop()
if s.listener != nil {
doneExpected++
s.listener.Close()
s.listener = nil
}
// Kick route AcceptLoop()
if s.routeListener != nil {
doneExpected++
s.routeListener.Close()
s.routeListener = nil
}
// Kick HTTP monitoring if its running
if s.http != nil {
doneExpected++
s.http.Close()
s.http = nil
}
// Kick Profiling if its running
if s.profiler != nil {
doneExpected++
s.profiler.Close()
}
// Clear any remote qsub mappings
s.clearRemoteQSubs()
s.mu.Unlock()
// Release go routines that wait on that channel
close(s.quitCh)
// Close client and route connections
for _, c := range conns {
c.closeConnection(ServerShutdown)
}
// Block until the accept loops exit
for doneExpected > 0 {
<-s.done
doneExpected--
}
// Wait for go routines to be done.
s.grWG.Wait()
}
// AcceptLoop is exported for easier testing.
func (s *Server) AcceptLoop(clr chan struct{}) {
// If we were to exit before the listener is setup properly,
// make sure we close the channel.
defer func() {
if clr != nil {
close(clr)
}
}()
// Snapshot server options.
opts := s.getOpts()
hp := net.JoinHostPort(opts.Host, strconv.Itoa(opts.Port))
l, e := net.Listen("tcp", hp)
if e != nil {
s.Fatalf("Error listening on port: %s, %q", hp, e)
return
}
s.Noticef("Listening for client connections on %s",
net.JoinHostPort(opts.Host, strconv.Itoa(l.Addr().(*net.TCPAddr).Port)))
// Alert of TLS enabled.
if opts.TLSConfig != nil {
s.Noticef("TLS required for client connections")
}
s.Debugf("Server id is %s", s.info.ID)
s.Noticef("Server is ready")
// Setup state that can enable shutdown
s.mu.Lock()
s.listener = l
// If server was started with RANDOM_PORT (-1), opts.Port would be equal
// to 0 at the beginning of this function. So we need to get the actual port
if opts.Port == 0 {
// Write resolved port back to options.
opts.Port = l.Addr().(*net.TCPAddr).Port
}
// Keep track of actual listen port. This will be needed in case of
// config reload.
s.clientActualPort = opts.Port
// Now that port has been set (if it was set to RANDOM), set the
// server's info Host/Port with either values from Options or
// ClientAdvertise. Also generate the JSON byte array.
if err := s.setInfoHostPortAndGenerateJSON(); err != nil {
s.Fatalf("Error setting server INFO with ClientAdvertise value of %s, err=%v", s.opts.ClientAdvertise, err)
s.mu.Unlock()
return
}
// Keep track of client connect URLs. We may need them later.
s.clientConnectURLs = s.getClientConnectURLs()
s.mu.Unlock()
// Let the caller know that we are ready
close(clr)
clr = nil
tmpDelay := ACCEPT_MIN_SLEEP
for s.isRunning() {
conn, err := l.Accept()
if err != nil {
if ne, ok := err.(net.Error); ok && ne.Temporary() {
s.Errorf("Temporary Client Accept Error (%v), sleeping %dms",
ne, tmpDelay/time.Millisecond)
time.Sleep(tmpDelay)
tmpDelay *= 2
if tmpDelay > ACCEPT_MAX_SLEEP {
tmpDelay = ACCEPT_MAX_SLEEP
}
} else if s.isRunning() {
s.Errorf("Client Accept Error: %v", err)
}
continue
}
tmpDelay = ACCEPT_MIN_SLEEP
s.startGoRoutine(func() {
s.createClient(conn)
s.grWG.Done()
})
}
s.Noticef("Server Exiting..")
s.done <- true
}
// This function sets the server's info Host/Port based on server Options.
// Note that this function may be called during config reload, this is why
// Host/Port may be reset to original Options if the ClientAdvertise option
// is not set (since it may have previously been).
// The function then generates the server infoJSON.
func (s *Server) setInfoHostPortAndGenerateJSON() error {
// When this function is called, opts.Port is set to the actual listen
// port (if option was originally set to RANDOM), even during a config
// reload. So use of s.opts.Port is safe.
if s.opts.ClientAdvertise != "" {
h, p, err := parseHostPort(s.opts.ClientAdvertise, s.opts.Port)
if err != nil {
return err
}
s.info.Host = h
s.info.Port = p
} else {
s.info.Host = s.opts.Host
s.info.Port = s.opts.Port
}
return nil
}
// StartProfiler is called to enable dynamic profiling.
func (s *Server) StartProfiler() {
// Snapshot server options.
opts := s.getOpts()
port := opts.ProfPort
// Check for Random Port
if port == -1 {
port = 0
}
hp := net.JoinHostPort(opts.Host, strconv.Itoa(port))
l, err := net.Listen("tcp", hp)
s.Noticef("profiling port: %d", l.Addr().(*net.TCPAddr).Port)
if err != nil {
s.Fatalf("error starting profiler: %s", err)
}
srv := &http.Server{
Addr: hp,
Handler: http.DefaultServeMux,
MaxHeaderBytes: 1 << 20,
}
s.mu.Lock()
s.profiler = l
s.profilingServer = srv
s.mu.Unlock()
go func() {
// if this errors out, it's probably because the server is being shutdown
err := srv.Serve(l)
if err != nil {
s.mu.Lock()
shutdown := s.shutdown
s.mu.Unlock()
if !shutdown {
s.Fatalf("error starting profiler: %s", err)
}
}
s.done <- true
}()
}
// StartHTTPMonitoring will enable the HTTP monitoring port.
// DEPRECATED: Should use StartMonitoring.
func (s *Server) StartHTTPMonitoring() {
s.startMonitoring(false)
}
// StartHTTPSMonitoring will enable the HTTPS monitoring port.
// DEPRECATED: Should use StartMonitoring.
func (s *Server) StartHTTPSMonitoring() {
s.startMonitoring(true)
}
// StartMonitoring starts the HTTP or HTTPs server if needed.
func (s *Server) StartMonitoring() error {
// Snapshot server options.
opts := s.getOpts()
// Specifying both HTTP and HTTPS ports is a misconfiguration
if opts.HTTPPort != 0 && opts.HTTPSPort != 0 {
return fmt.Errorf("can't specify both HTTP (%v) and HTTPs (%v) ports", opts.HTTPPort, opts.HTTPSPort)
}
var err error
if opts.HTTPPort != 0 {
err = s.startMonitoring(false)
} else if opts.HTTPSPort != 0 {
if opts.TLSConfig == nil {
return fmt.Errorf("TLS cert and key required for HTTPS")
}
err = s.startMonitoring(true)
}
return err
}
// HTTP endpoints
const (
RootPath = "/"
VarzPath = "/varz"
ConnzPath = "/connz"
RoutezPath = "/routez"
SubszPath = "/subsz"
StackszPath = "/stacksz"
)
// Start the monitoring server
func (s *Server) startMonitoring(secure bool) error {
// Snapshot server options.
opts := s.getOpts()
// Used to track HTTP requests
s.httpReqStats = map[string]uint64{
RootPath: 0,
VarzPath: 0,
ConnzPath: 0,
RoutezPath: 0,
SubszPath: 0,
}
var (
hp string
err error
httpListener net.Listener
port int
)
monitorProtocol := "http"
if secure {
monitorProtocol += "s"
port = opts.HTTPSPort
if port == -1 {
port = 0
}
hp = net.JoinHostPort(opts.HTTPHost, strconv.Itoa(port))
config := util.CloneTLSConfig(opts.TLSConfig)
config.ClientAuth = tls.NoClientCert
httpListener, err = tls.Listen("tcp", hp, config)
} else {
port = opts.HTTPPort
if port == -1 {
port = 0
}
hp = net.JoinHostPort(opts.HTTPHost, strconv.Itoa(port))
httpListener, err = net.Listen("tcp", hp)
}
if err != nil {
return fmt.Errorf("can't listen to the monitor port: %v", err)
}
s.Noticef("Starting %s monitor on %s", monitorProtocol,
net.JoinHostPort(opts.HTTPHost, strconv.Itoa(httpListener.Addr().(*net.TCPAddr).Port)))
mux := http.NewServeMux()
// Root
mux.HandleFunc(RootPath, s.HandleRoot)
// Varz
mux.HandleFunc(VarzPath, s.HandleVarz)
// Connz
mux.HandleFunc(ConnzPath, s.HandleConnz)
// Routez
mux.HandleFunc(RoutezPath, s.HandleRoutez)
// Subz
mux.HandleFunc(SubszPath, s.HandleSubsz)
// Subz alias for backwards compatibility
mux.HandleFunc("/subscriptionsz", s.HandleSubsz)
// Stacksz
mux.HandleFunc(StackszPath, s.HandleStacksz)
// Do not set a WriteTimeout because it could cause cURL/browser
// to return empty response or unable to display page if the
// server needs more time to build the response.
srv := &http.Server{
Addr: hp,
Handler: mux,
MaxHeaderBytes: 1 << 20,
}
s.mu.Lock()
s.http = httpListener
s.httpHandler = mux
s.monitoringServer = srv
s.mu.Unlock()
go func() {
srv.Serve(httpListener)
srv.Handler = nil
s.mu.Lock()
s.httpHandler = nil
s.mu.Unlock()
s.done <- true
}()
return nil
}
// HTTPHandler returns the http.Handler object used to handle monitoring
// endpoints. It will return nil if the server is not configured for
// monitoring, or if the server has not been started yet (Server.Start()).
func (s *Server) HTTPHandler() http.Handler {
s.mu.Lock()
defer s.mu.Unlock()
return s.httpHandler
}
// Perform a conditional deep copy due to reference nature of ClientConnectURLs.
// If updates are made to Info, this function should be consulted and updated.
// Assume lock is held.
func (s *Server) copyInfo() Info {
info := s.info
if info.ClientConnectURLs != nil {
info.ClientConnectURLs = make([]string, len(s.info.ClientConnectURLs))
copy(info.ClientConnectURLs, s.info.ClientConnectURLs)
}
return info
}
func (s *Server) createClient(conn net.Conn) *client {
// Snapshot server options.
opts := s.getOpts()
max_pay := int64(opts.MaxPayload)
now := time.Now()
c := &client{srv: s, nc: conn, opts: defaultOpts, mpay: max_pay, start: now, last: now}
// Grab JSON info string
s.mu.Lock()
info := s.copyInfo()
s.totalClients++
s.mu.Unlock()
// Grab lock
c.mu.Lock()
// Initialize
c.initClient()
c.Debugf("Client connection created")
// Send our information.
c.sendInfo(c.generateClientInfoJSON(info))
// Unlock to register
c.mu.Unlock()
// Register with the server.
s.mu.Lock()
// If server is not running, Shutdown() may have already gathered the
// list of connections to close. It won't contain this one, so we need
// to bail out now, otherwise the readLoop started down there would not
// be interrupted.
if !s.running {
s.mu.Unlock()
return c
}
// If there is a max connections specified, check that adding
// this new client would not push us over the max
if opts.MaxConn > 0 && len(s.clients) >= opts.MaxConn {
s.mu.Unlock()
c.maxConnExceeded()
return nil
}
s.clients[c.cid] = c
s.mu.Unlock()
// Re-Grab lock
c.mu.Lock()
// Check for TLS
if info.TLSRequired {
c.Debugf("Starting TLS client connection handshake")
c.nc = tls.Server(c.nc, opts.TLSConfig)
conn := c.nc.(*tls.Conn)
// Setup the timeout
ttl := secondsToDuration(opts.TLSTimeout)
time.AfterFunc(ttl, func() { tlsTimeout(c, conn) })
conn.SetReadDeadline(time.Now().Add(ttl))
// Force handshake
c.mu.Unlock()
if err := conn.Handshake(); err != nil {
c.Errorf("TLS handshake error: %v", err)
c.closeConnection(TLSHandshakeError)
return nil
}
// Reset the read deadline
conn.SetReadDeadline(time.Time{})
// Re-Grab lock
c.mu.Lock()
// Indicate that handshake is complete (used in monitoring)
c.flags.set(handshakeComplete)
}
// The connection may have been closed
if c.nc == nil {
c.mu.Unlock()
return c
}
// Check for Auth. We schedule this timer after the TLS handshake to avoid
// the race where the timer fires during the handshake and causes the
// server to write bad data to the socket. See issue #432.
if info.AuthRequired {
c.setAuthTimer(secondsToDuration(opts.AuthTimeout))
}
// Do final client initialization
// Set the Ping timer
c.setPingTimer()
// Spin up the read loop.
s.startGoRoutine(c.readLoop)
// Spin up the write loop.
s.startGoRoutine(c.writeLoop)
if info.TLSRequired {
c.Debugf("TLS handshake complete")
cs := c.nc.(*tls.Conn).ConnectionState()
c.Debugf("TLS version %s, cipher suite %s", tlsVersion(cs.Version), tlsCipher(cs.CipherSuite))
}
c.mu.Unlock()
return c
}
// This will save off a closed client in a ring buffer such that
// /connz can inspect. Useful for debugging, etc.
func (s *Server) saveClosedClient(c *client, nc net.Conn, reason ClosedState) {
now := time.Now()
c.mu.Lock()
cc := &closedClient{}
cc.fill(c, nc, now)
cc.Stop = &now
cc.Reason = reason.String()
// Do subs, do not place by default in main ConnInfo
if len(c.subs) > 0 {
cc.subs = make([]string, 0, len(c.subs))
for _, sub := range c.subs {
cc.subs = append(cc.subs, string(sub.subject))
}
}
// Hold user as well.
cc.user = c.opts.Username
c.mu.Unlock()
// Place in the ring buffer
s.mu.Lock()
s.closed.append(cc)
s.mu.Unlock()
}
// Adds the given array of urls to the server's INFO.ClientConnectURLs
// array. The server INFO JSON is regenerated.
// Note that a check is made to ensure that given URLs are not
// already present. So the INFO JSON is regenerated only if new URLs
// were added.
// If there was a change, an INFO protocol is sent to registered clients
// that support async INFO protocols.
func (s *Server) addClientConnectURLsAndSendINFOToClients(urls []string) {
s.updateServerINFOAndSendINFOToClients(urls, true)
}
// Removes the given array of urls from the server's INFO.ClientConnectURLs
// array. The server INFO JSON is regenerated if needed.
// If there was a change, an INFO protocol is sent to registered clients
// that support async INFO protocols.
func (s *Server) removeClientConnectURLsAndSendINFOToClients(urls []string) {
s.updateServerINFOAndSendINFOToClients(urls, false)
}
// Updates the server's Info object with the given array of URLs and re-generate
// the infoJSON byte array, then send an (async) INFO protocol to clients that
// support it.
func (s *Server) updateServerINFOAndSendINFOToClients(urls []string, add bool) {
s.mu.Lock()
defer s.mu.Unlock()
// Will be set to true if we alter the server's Info object.
wasUpdated := false
remove := !add
for _, url := range urls {
_, present := s.clientConnectURLsMap[url]
if add && !present {
s.clientConnectURLsMap[url] = struct{}{}
wasUpdated = true
} else if remove && present {
delete(s.clientConnectURLsMap, url)
wasUpdated = true
}
}
if wasUpdated {
// Recreate the info.ClientConnectURL array from the map
s.info.ClientConnectURLs = s.info.ClientConnectURLs[:0]
// Add this server's client connect URLs first...
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, s.clientConnectURLs...)
for url := range s.clientConnectURLsMap {
s.info.ClientConnectURLs = append(s.info.ClientConnectURLs, url)
}
// Update the time of this update
s.lastCURLsUpdate = time.Now().UnixNano()
// Send to all registered clients that support async INFO protocols.
s.sendAsyncInfoToClients()
}
}
// Handle closing down a connection when the handshake has timed out.
func tlsTimeout(c *client, conn *tls.Conn) {
c.mu.Lock()
nc := c.nc
c.mu.Unlock()
// Check if already closed
if nc == nil {
return
}
cs := conn.ConnectionState()
if !cs.HandshakeComplete {
c.Errorf("TLS handshake timeout")
c.sendErr("Secure Connection - TLS Required")
c.closeConnection(TLSHandshakeError)
}
}
// Seems silly we have to write these
func tlsVersion(ver uint16) string {
switch ver {
case tls.VersionTLS10:
return "1.0"
case tls.VersionTLS11:
return "1.1"
case tls.VersionTLS12:
return "1.2"
}
return fmt.Sprintf("Unknown [%x]", ver)
}
// We use hex here so we don't need multiple versions
func tlsCipher(cs uint16) string {
name, present := cipherMapByID[cs]
if present {
return name
}
return fmt.Sprintf("Unknown [%x]", cs)
}
// Remove a client or route from our internal accounting.
func (s *Server) removeClient(c *client) {
var rID string
c.mu.Lock()
cid := c.cid
typ := c.typ
r := c.route
if r != nil {
rID = r.remoteID
}
updateProtoInfoCount := false
if typ == CLIENT && c.opts.Protocol >= ClientProtoInfo {
updateProtoInfoCount = true
}
c.mu.Unlock()
s.mu.Lock()
switch typ {
case CLIENT:
delete(s.clients, cid)
if updateProtoInfoCount {
s.cproto--
}
case ROUTER:
delete(s.routes, cid)
if r != nil {
rc, ok := s.remotes[rID]
// Only delete it if it is us.
if ok && c == rc {
delete(s.remotes, rID)
}
}
// Remove from temporary map in case it is there.
s.grMu.Lock()
delete(s.grTmpClients, cid)
s.grMu.Unlock()
}
s.mu.Unlock()
}
/////////////////////////////////////////////////////////////////
// These are some helpers for accounting in functional tests.
/////////////////////////////////////////////////////////////////
// NumRoutes will report the number of registered routes.
func (s *Server) NumRoutes() int {
s.mu.Lock()
nr := len(s.routes)
s.mu.Unlock()
return nr
}
// NumRemotes will report number of registered remotes.
func (s *Server) NumRemotes() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.remotes)
}
// NumClients will report the number of registered clients.
func (s *Server) NumClients() int {
s.mu.Lock()
defer s.mu.Unlock()
return len(s.clients)
}
// getClient will return the client associated with cid.
func (s *Server) getClient(cid uint64) *client {
s.mu.Lock()
defer s.mu.Unlock()
return s.clients[cid]
}
// NumSubscriptions will report how many subscriptions are active.
func (s *Server) NumSubscriptions() uint32 {
s.mu.Lock()
subs := s.sl.Count()
s.mu.Unlock()
return subs
}
// NumSlowConsumers will report the number of slow consumers.
func (s *Server) NumSlowConsumers() int64 {
return atomic.LoadInt64(&s.slowConsumers)
}
// ConfigTime will report the last time the server configuration was loaded.
func (s *Server) ConfigTime() time.Time {
s.mu.Lock()
defer s.mu.Unlock()
return s.configTime
}
// Addr will return the net.Addr object for the current listener.
func (s *Server) Addr() net.Addr {
s.mu.Lock()
defer s.mu.Unlock()
if s.listener == nil {
return nil
}
return s.listener.Addr()
}
// MonitorAddr will return the net.Addr object for the monitoring listener.
func (s *Server) MonitorAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.http == nil {
return nil
}
return s.http.Addr().(*net.TCPAddr)
}
// ClusterAddr returns the net.Addr object for the route listener.
func (s *Server) ClusterAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.routeListener == nil {
return nil
}
return s.routeListener.Addr().(*net.TCPAddr)
}
// ProfilerAddr returns the net.Addr object for the route listener.
func (s *Server) ProfilerAddr() *net.TCPAddr {
s.mu.Lock()
defer s.mu.Unlock()
if s.profiler == nil {
return nil
}
return s.profiler.Addr().(*net.TCPAddr)
}
// ReadyForConnections returns `true` if the server is ready to accept client
// and, if routing is enabled, route connections. If after the duration
// `dur` the server is still not ready, returns `false`.
func (s *Server) ReadyForConnections(dur time.Duration) bool {
// Snapshot server options.
opts := s.getOpts()
end := time.Now().Add(dur)
for time.Now().Before(end) {
s.mu.Lock()
ok := s.listener != nil && (opts.Cluster.Port == 0 || s.routeListener != nil)
s.mu.Unlock()
if ok {
return true
}
time.Sleep(25 * time.Millisecond)
}
return false
}
// ID returns the server's ID
func (s *Server) ID() string {
s.mu.Lock()
defer s.mu.Unlock()
return s.info.ID
}
func (s *Server) startGoRoutine(f func()) {
s.grMu.Lock()
if s.grRunning {
s.grWG.Add(1)
go f()
}
s.grMu.Unlock()
}
func (s *Server) numClosedConns() int {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.len()
}
func (s *Server) totalClosedConns() uint64 {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.totalConns()
}
func (s *Server) closedClients() []*closedClient {
s.mu.Lock()
defer s.mu.Unlock()
return s.closed.closedClients()
}
// getClientConnectURLs returns suitable URLs for clients to connect to the listen
// port based on the server options' Host and Port. If the Host corresponds to
// "any" interfaces, this call returns the list of resolved IP addresses.
// If ClientAdvertise is set, returns the client advertise host and port.
// The server lock is assumed held on entry.
func (s *Server) getClientConnectURLs() []string {
// Snapshot server options.
opts := s.getOpts()
urls := make([]string, 0, 1)
// short circuit if client advertise is set
if opts.ClientAdvertise != "" {
// just use the info host/port. This is updated in s.New()
urls = append(urls, net.JoinHostPort(s.info.Host, strconv.Itoa(s.info.Port)))
} else {
sPort := strconv.Itoa(opts.Port)
ipAddr, err := net.ResolveIPAddr("ip", opts.Host)
// If the host is "any" (0.0.0.0 or ::), get specific IPs from available
// interfaces.
if err == nil && ipAddr.IP.IsUnspecified() {
var ip net.IP
ifaces, _ := net.Interfaces()
for _, i := range ifaces {
addrs, _ := i.Addrs()
for _, addr := range addrs {
switch v := addr.(type) {
case *net.IPNet:
ip = v.IP
case *net.IPAddr:
ip = v.IP
}
// Skip non global unicast addresses
if !ip.IsGlobalUnicast() || ip.IsUnspecified() {
ip = nil
continue
}
urls = append(urls, net.JoinHostPort(ip.String(), sPort))
}
}
}
if err != nil || len(urls) == 0 {
// We are here if s.opts.Host is not "0.0.0.0" nor "::", or if for some
// reason we could not add any URL in the loop above.
// We had a case where a Windows VM was hosed and would have err == nil
// and not add any address in the array in the loop above, and we
// ended-up returning 0.0.0.0, which is problematic for Windows clients.
// Check for 0.0.0.0 or :: specifically, and ignore if that's the case.
if opts.Host == "0.0.0.0" || opts.Host == "::" {
s.Errorf("Address %q can not be resolved properly", opts.Host)
} else {
urls = append(urls, net.JoinHostPort(opts.Host, sPort))
}
}
}
return urls
}
| 1 | 7,700 | Should this be moved down (or up) to under the client lock instead of server lock? | nats-io-nats-server | go |
@@ -273,8 +273,6 @@ fpga_result __FPGA_API__ xfpga_fpgaGetMetricsByIndex(fpga_handle handle,
result = FPGA_OK;
}
- } else {
- result = FPGA_INVALID_PARAM;
}
out_unlock: | 1 | // Copyright(c) 2018, Intel Corporation
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are met:
//
// * Redistributions of source code must retain the above copyright notice,
// this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above copyright notice,
// this list of conditions and the following disclaimer in the documentation
// and/or other materials provided with the distribution.
// * Neither the name of Intel Corporation nor the names of its contributors
// may be used to endorse or promote products derived from this software
// without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
// AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
// IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
// ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
// LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
// CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
// SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
// INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
// CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
// ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
// POSSIBILITY OF SUCH DAMAGE.
/**
* \file metrics.c
* \brief xfpgs fpga metrics API
*/
#ifdef HAVE_CONFIG_H
#include <config.h>
#endif // HAVE_CONFIG_H
#include "safe_string/safe_string.h"
#include "opae/access.h"
#include "opae/utils.h"
#include "common_int.h"
#include "types_int.h"
#include "opae/metrics.h"
#include "metrics/vector.h"
#include "metrics/metrics_int.h"
// Sentinel array index stored when a metric search string does not match
#define METRIC_ARRAY_INVALID_INDEX 0xFFFFFF
fpga_result __FPGA_API__ xfpga_fpgaGetNumMetrics(fpga_handle handle,
uint64_t *num_metrics)
{
fpga_result result = FPGA_OK;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
int err = 0;
uint64_t num_enum_metrics = 0;
if (_handle == NULL) {
FPGA_ERR("NULL fpga handle");
return FPGA_INVALID_PARAM;
}
result = handle_check_and_lock(_handle);
if (result)
return result;
if (_handle->fddev < 0) {
FPGA_ERR("Invalid handle file descriptor");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
if (num_metrics == NULL) {
FPGA_ERR("Invlaid Input parameters");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
result = enum_fpga_metrics(handle);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Discover Metrics");
result = FPGA_NOT_FOUND;
goto out_unlock;
}
result = fpga_vector_total(&(_handle->fpga_enum_metric_vector), &num_enum_metrics);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get metric total");
goto out_unlock;
}
if (num_enum_metrics == 0)
result = FPGA_NOT_FOUND;
*num_metrics = num_enum_metrics;
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
fpga_result __FPGA_API__ xfpga_fpgaGetMetricsInfo(fpga_handle handle,
fpga_metric_info *metric_info,
uint64_t *num_metrics)
{
fpga_result result = FPGA_OK;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
int err = 0;
uint64_t i = 0;
uint64_t num_enum_metrics = 0;
struct _fpga_enum_metric *fpga_enum_metric = NULL;
if (_handle == NULL) {
FPGA_ERR("NULL fpga handle");
return FPGA_INVALID_PARAM;
}
result = handle_check_and_lock(_handle);
if (result)
return result;
if (_handle->fddev < 0) {
FPGA_ERR("Invalid handle file descriptor");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
if (metric_info == NULL ||
num_metrics == NULL) {
FPGA_ERR("Invlaid Input parameters");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
result = enum_fpga_metrics(handle);
if (result != FPGA_OK) {
FPGA_ERR("Failed to enum Metrics");
result = FPGA_NOT_FOUND;
goto out_unlock;
}
result = fpga_vector_total(&(_handle->fpga_enum_metric_vector), &num_enum_metrics);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get metric total");
goto out_unlock;
}
// Copy metric info into the caller's array. Entries are filled only
// when the requested count does not exceed the number enumerated.
for (i = 0; i < *num_metrics; i++) {
if (*num_metrics <= num_enum_metrics) {
fpga_enum_metric = (struct _fpga_enum_metric *) fpga_vector_get(&(_handle->fpga_enum_metric_vector), i);
result = add_metric_info(fpga_enum_metric, &metric_info[i]);
if (result != FPGA_OK) {
FPGA_MSG("Failed to add metric info");
continue;
}
}
}
out_unlock:
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
fpga_result __FPGA_API__ xfpga_fpgaGetMetricsByIndex(fpga_handle handle,
uint64_t *metric_num,
uint64_t num_metric_indexes,
fpga_metric *metrics)
{
fpga_result result = FPGA_OK;
uint64_t found = 0;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
int err = 0;
uint64_t i = 0;
fpga_objtype objtype;
if (_handle == NULL) {
FPGA_ERR("NULL fpga handle");
return FPGA_INVALID_PARAM;
}
result = handle_check_and_lock(_handle);
if (result)
return result;
if (_handle->fddev < 0) {
FPGA_ERR("Invalid handle file descriptor");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
if (metrics == NULL ||
metric_num == NULL) {
FPGA_ERR("Invlaid Input parameters");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
result = enum_fpga_metrics(handle);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Discover Metrics");
result = FPGA_NOT_FOUND;
goto out_unlock;
}
result = get_fpga_object_type(handle, &objtype);
if (result != FPGA_OK) {
FPGA_ERR("Failed to init vector");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
if (objtype == FPGA_ACCELERATOR) {
// get AFU metrics
for (i = 0; i < num_metric_indexes; i++) {
result = get_afu_metric_value(handle,
&(_handle->fpga_enum_metric_vector),
metric_num[i],
&metrics[i]);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get metric value at Index = %ld", metric_num[i]);
metrics[i].metric_num = metric_num[i];
continue;
} else {
// found metrics num
found++;
}
}
// The API returns FPGA_NOT_FOUND if no metric was found
if (found == 0 || num_metric_indexes == 0) {
result = FPGA_NOT_FOUND;
} else {
result = FPGA_OK;
}
} else if (objtype == FPGA_DEVICE) {
// get FME metrics
for (i = 0; i < num_metric_indexes; i++) {
result = get_fme_metric_value(handle,
&(_handle->fpga_enum_metric_vector),
metric_num[i],
&metrics[i]);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get metric value at Index = %ld", metric_num[i]);
metrics[i].metric_num = metric_num[i];
continue;
} else {
// found metrics num
found++;
}
}
// The API returns FPGA_NOT_FOUND if no metric was found
if (found == 0 || num_metric_indexes == 0) {
result = FPGA_NOT_FOUND;
} else {
result = FPGA_OK;
}
} else {
result = FPGA_INVALID_PARAM;
}
out_unlock:
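/* Drop any cached metric values so a later call re-reads the hardware
* (assumed intent of clearing on every exit path). */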
clear_cached_values(_handle);
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
}
fpga_result __FPGA_API__ xfpga_fpgaGetMetricsByName(fpga_handle handle,
char **metrics_names,
uint64_t num_metric_names,
fpga_metric *metrics)
{
fpga_result result = FPGA_OK;
uint64_t found = 0;
struct _fpga_handle *_handle = (struct _fpga_handle *)handle;
int err = 0;
uint64_t i = 0;
uint64_t metric_num = 0;
fpga_objtype objtype;
if (_handle == NULL) {
FPGA_ERR("NULL fpga handle");
return FPGA_INVALID_PARAM;
}
result = handle_check_and_lock(_handle);
if (result)
return result;
if (_handle->fddev < 0) {
FPGA_ERR("Invalid handle file descriptor");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
if (metrics_names == NULL ||
metrics == NULL) {
FPGA_ERR("Invlaid Input parameters");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
if (num_metric_names == 0) {
FPGA_ERR("Invlaid Input parameters");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
result = enum_fpga_metrics(handle);
if (result != FPGA_OK) {
FPGA_ERR("Failed to Discover Metrics");
result = FPGA_NOT_FOUND;
goto out_unlock;
}
result = get_fpga_object_type(handle, &objtype);
if (result != FPGA_OK) {
FPGA_ERR("Failed to init vector");
result = FPGA_INVALID_PARAM;
goto out_unlock;
}
if (objtype == FPGA_ACCELERATOR) {
// get AFU metrics
for (i = 0; i < num_metric_names; i++) {
result = parse_metric_num_name(metrics_names[i],
&(_handle->fpga_enum_metric_vector),
&metric_num);
if (result != FPGA_OK) {
FPGA_ERR("Invalid input metrics string= %s", metrics_names[i]);
metrics[i].metric_num = METRIC_ARRAY_INVALID_INDEX;
continue;
}
result = get_afu_metric_value(handle, &(_handle->fpga_enum_metric_vector),
metric_num,
&metrics[i]);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get metric value for metric = %s", metrics_names[i]);
metrics[i].metric_num = METRIC_ARRAY_INVALID_INDEX;
continue;
} else {
			// found a metric; count it
found++;
}
}
	// The API returns FPGA_NOT_FOUND if it did not find any metric
if (found == 0 || num_metric_names == 0) {
result = FPGA_NOT_FOUND;
} else {
result = FPGA_OK;
}
} else if (objtype == FPGA_DEVICE) {
// get FME metrics
for (i = 0; i < num_metric_names; i++) {
result = parse_metric_num_name(metrics_names[i],
&(_handle->fpga_enum_metric_vector),
&metric_num);
if (result != FPGA_OK) {
FPGA_ERR("Invalid input metrics string= %s", metrics_names[i]);
metrics[i].metric_num = METRIC_ARRAY_INVALID_INDEX;
continue;
}
result = get_fme_metric_value(handle,
&(_handle->fpga_enum_metric_vector),
metric_num,
&metrics[i]);
if (result != FPGA_OK) {
FPGA_ERR("Failed to get metric value for metric = %s \n", metrics_names[i]);
metrics[i].metric_num = METRIC_ARRAY_INVALID_INDEX;
continue;
} else {
			// found a metric; count it
found++;
}
}
	// The API returns FPGA_NOT_FOUND if it did not find any metric
if (found == 0 || num_metric_names == 0) {
result = FPGA_NOT_FOUND;
} else {
result = FPGA_OK;
}
} else {
result = FPGA_INVALID_PARAM;
}
out_unlock:
clear_cached_values(_handle);
err = pthread_mutex_unlock(&_handle->lock);
if (err) {
FPGA_ERR("pthread_mutex_unlock() failed: %s", strerror(err));
}
return result;
} | 1 | 17,879 | Why are we removing this? | OPAE-opae-sdk | c |
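For context, a caller-side sketch of the two-call pattern these metrics entry points implement: query the count, size the array, then fetch. The public wrapper names fpgaGetNumMetrics()/fpgaGetMetricsInfo() and the <opae/fpga.h> header are assumptions based on the usual OPAE API shape, not a verbatim excerpt from this repository.

#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <opae/fpga.h> /* assumed public OPAE header */

/* Illustrative only: enumerate metrics via the assumed public wrappers. */
static fpga_result print_metrics_info(fpga_handle handle)
{
	uint64_t num_metrics = 0;
	fpga_result res = fpgaGetNumMetrics(handle, &num_metrics); /* assumed API */

	if (res != FPGA_OK)
		return res;

	fpga_metric_info *info = calloc(num_metrics, sizeof(*info));
	if (!info)
		return FPGA_NO_MEMORY;

	res = fpgaGetMetricsInfo(handle, info, &num_metrics);
	if (res == FPGA_OK) {
		for (uint64_t i = 0; i < num_metrics; i++)
			printf("metric index %llu enumerated\n",
			       (unsigned long long)i);
	}
	free(info);
	return res;
}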
@@ -76,7 +76,7 @@ module Gsa18f
# @todo - this is pretty ugly
def public_identifier
- self.cart.id
+ "##{self.cart.id}"
end
def total_price | 1 | module Gsa18f
# Make sure all table names use 'gsa18f_XXX'
def self.table_name_prefix
'gsa18f_'
end
DATA = YAML.load_file("#{Rails.root}/config/data/18f.yaml")
class Procurement < ActiveRecord::Base
URGENCY = DATA['URGENCY']
OFFICES = DATA['OFFICES']
RECURRENCE = DATA['RECURRENCE']
# TODO include ProposalDelegate
has_one :proposal, as: :client_data
# TODO remove the dependency
has_one :cart, through: :proposal
validates :cost_per_unit, numericality: {
greater_than_or_equal_to: 0,
less_than_or_equal_to: 3000
}
validates :quantity, numericality: {
greater_than_or_equal_to: 1
}
validates :product_name_and_description, presence: true
def init_and_save_cart(requester)
cart = Cart.create(
proposal_attributes: {flow: 'linear', client_data: self}
)
cart.set_requester(requester)
self.add_approvals
Dispatcher.deliver_new_proposal_emails(proposal)
cart
end
def update_cart(cart)
cart.proposal.approvals.destroy_all
self.add_approvals
cart.restart!
cart
end
def add_approvals
self.cart.add_approver(Gsa18f::Procurement.approver_email)
end
# Ignore values in certain fields if they aren't relevant. May want to
# split these into different models
def self.relevant_fields(recurring)
fields = [:office, :justification, :link_to_product, :quantity,
:date_requested, :urgency, :additional_info, :cost_per_unit,
:product_name_and_description, :recurring]
if recurring
fields += [:recurring_interval, :recurring_length]
end
fields
end
def relevant_fields
Gsa18f::Procurement.relevant_fields(self.recurring)
end
def fields_for_display
attributes = self.relevant_fields
attributes.map! {|key| [Procurement.human_attribute_name(key), self[key]]}
attributes.push(["Total Price", total_price])
end
def client
"gsa18f"
end
# @todo - this is pretty ugly
def public_identifier
self.cart.id
end
def total_price
(self.cost_per_unit * self.quantity) || 0.0
end
# may be replaced with paper-trail or similar at some point
def version
self.updated_at.to_i
end
def name
self.product_name_and_description
end
def self.approver_email
ENV['GSA18F_APPROVER_EMAIL'] || '[email protected]'
end
end
end
| 1 | 13,039 | What do you think about using `self.id` ? | 18F-C2 | rb |
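A sketch of the reviewer's suggestion: since Procurement is itself the persisted record, the public identifier could be built from its own id instead of reaching through the cart. This is a hypothetical simplification, not the merged change:

# Hypothetical alternative to the version in the diff above.
def public_identifier
  "##{self.id}"
end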
@@ -442,7 +442,17 @@ configRetry:
log.Infof("Starting the Typha connection")
err := typhaConnection.Start(context.Background())
if err != nil {
- log.WithError(err).Fatal("Failed to connect to Typha")
+ retry := 0
+ for err != nil && retry < 10 {
+ // Set Ready and Live to false
+ healthAggregator.Report(healthName, &health.HealthReport{Live: false, Ready: false})
+ err = typhaConnection.Start(context.Background())
+ log.WithError(err).Warn("Retrying to start Typha")
+ retry++
+ }
+ if err != nil && retry > 10 {
+ log.WithError(err).Fatal("Failed to connect to Typha")
+ }
}
go func() {
typhaConnection.Finished.Wait() | 1 | // Copyright (c) 2017-2019 Tigera, Inc. All rights reserved.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package daemon
import (
"context"
"errors"
"fmt"
"math/rand"
"net/http"
"os"
"os/exec"
"os/signal"
"runtime"
"runtime/debug"
"syscall"
"time"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
log "github.com/sirupsen/logrus"
"k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/client-go/kubernetes"
"k8s.io/client-go/rest"
"github.com/projectcalico/felix/buildinfo"
"github.com/projectcalico/felix/calc"
"github.com/projectcalico/felix/config"
_ "github.com/projectcalico/felix/config"
dp "github.com/projectcalico/felix/dataplane"
"github.com/projectcalico/felix/logutils"
"github.com/projectcalico/felix/policysync"
"github.com/projectcalico/felix/proto"
"github.com/projectcalico/felix/statusrep"
"github.com/projectcalico/felix/usagerep"
apiv3 "github.com/projectcalico/libcalico-go/lib/apis/v3"
"github.com/projectcalico/libcalico-go/lib/backend"
bapi "github.com/projectcalico/libcalico-go/lib/backend/api"
"github.com/projectcalico/libcalico-go/lib/backend/model"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/felixsyncer"
"github.com/projectcalico/libcalico-go/lib/backend/syncersv1/updateprocessors"
"github.com/projectcalico/libcalico-go/lib/backend/watchersyncer"
cerrors "github.com/projectcalico/libcalico-go/lib/errors"
"github.com/projectcalico/libcalico-go/lib/health"
lclogutils "github.com/projectcalico/libcalico-go/lib/logutils"
"github.com/projectcalico/libcalico-go/lib/set"
"github.com/projectcalico/pod2daemon/binder"
"github.com/projectcalico/typha/pkg/syncclient"
)
const usage = `Felix, the Calico per-host daemon.
Usage:
calico-felix [options]
Options:
-c --config-file=<filename> Config file to load [default: /etc/calico/felix.cfg].
--version Print the version and exit.
`
const (
// Our default value for GOGC if it is not set. This is the percentage that heap usage must
// grow by to trigger a garbage collection. Go's default is 100, meaning that 50% of the
// heap can be lost to garbage. We reduce it to this value to trade increased CPU usage for
// lower occupancy.
defaultGCPercent = 20
// String sent on the failure report channel to indicate we're shutting down for config
// change.
reasonConfigChanged = "config changed"
// Process return code used to report a config change. This is the same as the code used
// by SIGHUP, which means that the wrapper script also restarts Felix on a SIGHUP.
configChangedRC = 129
)
// Run is the entry point to run a Felix instance.
//
// Its main role is to sequence Felix's startup by:
//
// Initialising early logging config (log format and early debug settings).
//
// Parsing command line parameters.
//
// Loading datastore configuration from the environment or config file.
//
// Loading more configuration from the datastore (this is retried until success).
//
// Starting the configured internal (golang) or external dataplane driver.
//
// Starting the background processing goroutines, which load and keep in sync with the
// state from the datastore, the "calculation graph".
//
// Starting the usage reporting and prometheus metrics endpoint threads (if configured).
//
// Then, it defers to monitorAndManageShutdown(), which blocks until one of the components
// fails, then attempts a graceful shutdown. At that point, all the processing is in
// background goroutines.
//
// To avoid having to maintain rarely-used code paths, Felix handles updates to its
// main config parameters by exiting and allowing itself to be restarted by the init
// daemon.
func Run(configFile string) {
// Go's RNG is not seeded by default. Do that now.
rand.Seed(time.Now().UTC().UnixNano())
// Special-case handling for environment variable-configured logging:
// Initialise early so we can trace out config parsing.
logutils.ConfigureEarlyLogging()
ctx := context.Background()
if os.Getenv("GOGC") == "" {
// Tune the GC to trade off a little extra CPU usage for significantly lower
// occupancy at high scale. This is worthwhile because Felix runs per-host so
// any occupancy improvement is multiplied by the number of hosts.
log.Debugf("No GOGC value set, defaulting to %d%%.", defaultGCPercent)
debug.SetGCPercent(defaultGCPercent)
}
buildInfoLogCxt := log.WithFields(log.Fields{
"version": buildinfo.GitVersion,
"buildDate": buildinfo.BuildDate,
"gitCommit": buildinfo.GitRevision,
"GOMAXPROCS": runtime.GOMAXPROCS(0),
})
buildInfoLogCxt.Info("Felix starting up")
// Health monitoring, for liveness and readiness endpoints. The following loop can take a
// while before the datastore reports itself as ready - for example when there is data that
// needs to be migrated from a previous version - and we still want Felix to report
// itself as live (but not ready) while we are waiting for that. So we create the
// aggregator upfront and will start serving health status over HTTP as soon as we see _any_
// config that indicates that.
healthAggregator := health.NewHealthAggregator()
const healthName = "felix-startup"
// Register this function as a reporter of liveness and readiness, with no timeout.
healthAggregator.RegisterReporter(healthName, &health.HealthReport{Live: true, Ready: true}, 0)
// Load the configuration from all the different sources including the
// datastore and merge. Keep retrying on failure. We'll sit in this
// loop until the datastore is ready.
log.Info("Loading configuration...")
var backendClient bapi.Client
var configParams *config.Config
var typhaAddr string
var numClientsCreated int
configRetry:
for {
if numClientsCreated > 60 {
// If we're in a restart loop, periodically exit (so we can be restarted) since
// - it may solve the problem if there's something wrong with our process
// - it prevents us from leaking connections to the datastore.
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections")
}
// Make an initial report that says we're live but not yet ready.
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false})
// Load locally-defined config, including the datastore connection
// parameters. First the environment variables.
configParams = config.New()
envConfig := config.LoadConfigFromEnvironment(os.Environ())
// Then, the config file.
log.Infof("Loading config file: %v", configFile)
fileConfig, err := config.LoadConfigFile(configFile)
if err != nil {
log.WithError(err).WithField("configFile", configFile).Error(
"Failed to load configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Parse and merge the local config.
configParams.UpdateFrom(envConfig, config.EnvironmentVariable)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration environment variable")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(fileConfig, config.ConfigFile)
if configParams.Err != nil {
log.WithError(configParams.Err).WithField("configFile", configFile).Error(
"Failed to parse configuration file")
time.Sleep(1 * time.Second)
continue configRetry
}
// Each time round this loop, check that we're serving health reports if we should
// be, or cancel any existing server if we should not be serving any more.
healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort)
// We should now have enough config to connect to the datastore
// so we can load the remainder of the config.
datastoreConfig := configParams.DatastoreConfig()
// Can't dump the whole config because it may have sensitive information...
log.WithField("datastore", datastoreConfig.Spec.DatastoreType).Info("Connecting to datastore")
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to create datastore client")
time.Sleep(1 * time.Second)
continue configRetry
}
log.Info("Created datastore client")
numClientsCreated++
for {
globalConfig, hostConfig, err := loadConfigFromDatastore(
ctx, backendClient, configParams.FelixHostname)
if err == ErrNotReady {
log.Warn("Waiting for datastore to be initialized (or migrated)")
time.Sleep(1 * time.Second)
			healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: false})
continue
} else if err != nil {
log.WithError(err).Error("Failed to get config from datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
configParams.UpdateFrom(globalConfig, config.DatastoreGlobal)
configParams.UpdateFrom(hostConfig, config.DatastorePerHost)
break
}
configParams.Validate()
if configParams.Err != nil {
log.WithError(configParams.Err).Error(
"Failed to parse/validate configuration from datastore.")
time.Sleep(1 * time.Second)
continue configRetry
}
// We now have some config flags that affect how we configure the syncer.
// After loading the config from the datastore, reconnect, possibly with new
// config. We don't need to re-load the configuration _again_ because the
// calculation graph will spot if the config has changed since we were initialised.
datastoreConfig = configParams.DatastoreConfig()
backendClient, err = backend.NewClient(datastoreConfig)
if err != nil {
log.WithError(err).Error("Failed to (re)connect to datastore")
time.Sleep(1 * time.Second)
continue configRetry
}
numClientsCreated++
// If we're configured to discover Typha, do that now so we can retry if we fail.
typhaAddr, err = discoverTyphaAddr(configParams)
if err != nil {
log.WithError(err).Error("Typha discovery enabled but discovery failed.")
time.Sleep(1 * time.Second)
continue configRetry
}
break configRetry
}
if numClientsCreated > 2 {
// We don't have a way to close datastore connection so, if we reconnected after
// a failure to load config, restart felix to avoid leaking connections.
exitWithCustomRC(configChangedRC, "Restarting to avoid leaking datastore connections")
}
// We're now both live and ready.
healthAggregator.Report(healthName, &health.HealthReport{Live: true, Ready: true})
// Enable or disable the health HTTP server according to coalesced config.
healthAggregator.ServeHTTP(configParams.HealthEnabled, configParams.HealthHost, configParams.HealthPort)
// If we get here, we've loaded the configuration successfully.
// Update log levels before we do anything else.
logutils.ConfigureLogging(configParams)
// Since we may have enabled more logging, log with the build context
// again.
buildInfoLogCxt.WithField("config", configParams).Info(
"Successfully loaded configuration.")
// Start up the dataplane driver. This may be the internal go-based driver or an external
// one.
var dpDriver dp.DataplaneDriver
var dpDriverCmd *exec.Cmd
failureReportChan := make(chan string)
configChangedRestartCallback := func() { failureReportChan <- reasonConfigChanged }
dpDriver, dpDriverCmd = dp.StartDataplaneDriver(configParams, healthAggregator, configChangedRestartCallback)
// Initialise the glue logic that connects the calculation graph to/from the dataplane driver.
log.Info("Connect to the dataplane driver.")
var connToUsageRepUpdChan chan map[string]string
if configParams.UsageReportingEnabled {
// Make a channel for the connector to use to send updates to the usage reporter.
// (Otherwise, we pass in a nil channel, which disables such updates.)
connToUsageRepUpdChan = make(chan map[string]string, 1)
}
dpConnector := newConnector(configParams, connToUsageRepUpdChan, backendClient, dpDriver, failureReportChan)
// If enabled, create a server for the policy sync API. This allows clients to connect to
// Felix over a socket and receive policy updates.
var policySyncServer *policysync.Server
var policySyncProcessor *policysync.Processor
var policySyncAPIBinder binder.Binder
calcGraphClientChannels := []chan<- interface{}{dpConnector.ToDataplane}
if configParams.PolicySyncPathPrefix != "" {
log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info(
"Policy sync API enabled. Creating the policy sync server.")
toPolicySync := make(chan interface{})
policySyncUIDAllocator := policysync.NewUIDAllocator()
policySyncProcessor = policysync.NewProcessor(toPolicySync)
policySyncServer = policysync.NewServer(
policySyncProcessor.JoinUpdates,
policySyncUIDAllocator.NextUID,
)
policySyncAPIBinder = binder.NewBinder(configParams.PolicySyncPathPrefix)
policySyncServer.RegisterGrpc(policySyncAPIBinder.Server())
calcGraphClientChannels = append(calcGraphClientChannels, toPolicySync)
}
// Now create the calculation graph, which receives updates from the
// datastore and outputs dataplane updates for the dataplane driver.
//
// The Syncer has its own thread and we use an extra thread for the
// Validator, just to pipeline that part of the calculation then the
// main calculation graph runs in a single thread for simplicity.
// The output of the calculation graph arrives at the dataplane
// connection via channel.
//
// Syncer -chan-> Validator -chan-> Calc graph -chan-> dataplane
// KVPair KVPair protobufs
// Get a Syncer from the datastore, or a connection to our remote sync daemon, Typha,
// which will feed the calculation graph with updates, bringing Felix into sync.
var syncer Startable
var typhaConnection *syncclient.SyncerClient
syncerToValidator := calc.NewSyncerCallbacksDecoupler()
if typhaAddr != "" {
// Use a remote Syncer, via the Typha server.
log.WithField("addr", typhaAddr).Info("Connecting to Typha.")
typhaConnection = syncclient.New(
typhaAddr,
buildinfo.GitVersion,
configParams.FelixHostname,
fmt.Sprintf("Revision: %s; Build date: %s",
buildinfo.GitRevision, buildinfo.BuildDate),
syncerToValidator,
&syncclient.Options{
ReadTimeout: configParams.TyphaReadTimeout,
WriteTimeout: configParams.TyphaWriteTimeout,
KeyFile: configParams.TyphaKeyFile,
CertFile: configParams.TyphaCertFile,
CAFile: configParams.TyphaCAFile,
ServerCN: configParams.TyphaCN,
ServerURISAN: configParams.TyphaURISAN,
},
)
} else {
// Use the syncer locally.
syncer = felixsyncer.New(backendClient, syncerToValidator)
}
log.WithField("syncer", syncer).Info("Created Syncer")
// Create the ipsets/active policy calculation graph, which will
// do the dynamic calculation of ipset memberships and active policies
// etc.
asyncCalcGraph := calc.NewAsyncCalcGraph(
configParams,
calcGraphClientChannels,
healthAggregator,
)
if configParams.UsageReportingEnabled {
// Usage reporting enabled, add stats collector to graph. When it detects an update
// to the stats, it makes a callback, which we use to send an update on a channel.
// We use a buffered channel here to avoid blocking the calculation graph.
statsChanIn := make(chan calc.StatsUpdate, 1)
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
statsChanIn <- stats
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.CalcGraph)
// Rather than sending the updates directly to the usage reporting thread, we
// decouple with an extra goroutine. This prevents blocking the calculation graph
// goroutine if the usage reporting goroutine is blocked on IO, for example.
// Using a buffered channel wouldn't work here because the usage reporting
// goroutine can block for a long time on IO so we could build up a long queue.
statsChanOut := make(chan calc.StatsUpdate)
go func() {
var statsChanOutOrNil chan calc.StatsUpdate
var stats calc.StatsUpdate
for {
select {
case stats = <-statsChanIn:
// Got a stats update, activate the output channel.
log.WithField("stats", stats).Debug("Buffer: stats update received")
statsChanOutOrNil = statsChanOut
case statsChanOutOrNil <- stats:
// Passed on the update, deactivate the output channel until
// the next update.
log.WithField("stats", stats).Debug("Buffer: stats update sent")
statsChanOutOrNil = nil
}
}
}()
usageRep := usagerep.New(
configParams.UsageReportingInitialDelaySecs,
configParams.UsageReportingIntervalSecs,
statsChanOut,
connToUsageRepUpdChan,
)
go usageRep.PeriodicallyReportUsage(context.Background())
} else {
// Usage reporting disabled, but we still want a stats collector for the
// felix_cluster_* metrics. Register a no-op function as the callback.
statsCollector := calc.NewStatsCollector(func(stats calc.StatsUpdate) error {
return nil
})
statsCollector.RegisterWith(asyncCalcGraph.CalcGraph)
}
// Create the validator, which sits between the syncer and the
// calculation graph.
validator := calc.NewValidationFilter(asyncCalcGraph)
// Start the background processing threads.
if syncer != nil {
log.Infof("Starting the datastore Syncer")
syncer.Start()
} else {
log.Infof("Starting the Typha connection")
err := typhaConnection.Start(context.Background())
if err != nil {
log.WithError(err).Fatal("Failed to connect to Typha")
}
go func() {
typhaConnection.Finished.Wait()
failureReportChan <- "Connection to Typha failed"
}()
}
go syncerToValidator.SendTo(validator)
asyncCalcGraph.Start()
log.Infof("Started the processing graph")
var stopSignalChans []chan<- bool
if configParams.EndpointReportingEnabled {
delay := configParams.EndpointReportingDelaySecs
log.WithField("delay", delay).Info(
"Endpoint status reporting enabled, starting status reporter")
dpConnector.statusReporter = statusrep.NewEndpointStatusReporter(
configParams.FelixHostname,
configParams.OpenstackRegion,
dpConnector.StatusUpdatesFromDataplane,
dpConnector.InSync,
dpConnector.datastore,
delay,
delay*180,
)
dpConnector.statusReporter.Start()
}
// Start communicating with the dataplane driver.
dpConnector.Start()
if policySyncProcessor != nil {
log.WithField("policySyncPathPrefix", configParams.PolicySyncPathPrefix).Info(
"Policy sync API enabled. Starting the policy sync server.")
policySyncProcessor.Start()
sc := make(chan bool)
stopSignalChans = append(stopSignalChans, sc)
go policySyncAPIBinder.SearchAndBind(sc)
}
// Send the opening message to the dataplane driver, giving it its
// config.
dpConnector.ToDataplane <- &proto.ConfigUpdate{
Config: configParams.RawValues(),
}
if configParams.PrometheusMetricsEnabled {
log.Info("Prometheus metrics enabled. Starting server.")
gaugeHost := prometheus.NewGauge(prometheus.GaugeOpts{
Name: "felix_host",
Help: "Configured Felix hostname (as a label), typically used in grouping/aggregating stats; the label defaults to the hostname of the host but can be overridden by configuration. The value of the gauge is always set to 1.",
ConstLabels: prometheus.Labels{"host": configParams.FelixHostname},
})
gaugeHost.Set(1)
prometheus.MustRegister(gaugeHost)
go servePrometheusMetrics(configParams)
}
// Register signal handlers to dump memory/CPU profiles.
logutils.RegisterProfilingSignalHandlers(configParams)
// Now monitor the worker process and our worker threads and shut
// down the process gracefully if they fail.
monitorAndManageShutdown(failureReportChan, dpDriverCmd, stopSignalChans)
}
func servePrometheusMetrics(configParams *config.Config) {
for {
log.WithField("port", configParams.PrometheusMetricsPort).Info("Starting prometheus metrics endpoint")
if configParams.PrometheusGoMetricsEnabled && configParams.PrometheusProcessMetricsEnabled {
log.Info("Including Golang & Process metrics")
} else {
if !configParams.PrometheusGoMetricsEnabled {
log.Info("Discarding Golang metrics")
prometheus.Unregister(prometheus.NewGoCollector())
}
if !configParams.PrometheusProcessMetricsEnabled {
log.Info("Discarding process metrics")
prometheus.Unregister(prometheus.NewProcessCollector(prometheus.ProcessCollectorOpts{}))
}
}
http.Handle("/metrics", promhttp.Handler())
err := http.ListenAndServe(fmt.Sprintf(":%v", configParams.PrometheusMetricsPort), nil)
log.WithError(err).Error(
"Prometheus metrics endpoint failed, trying to restart it...")
time.Sleep(1 * time.Second)
}
}
func monitorAndManageShutdown(failureReportChan <-chan string, driverCmd *exec.Cmd, stopSignalChans []chan<- bool) {
// Ask the runtime to tell us if we get a term/int signal.
signalChan := make(chan os.Signal, 1)
signal.Notify(signalChan, syscall.SIGTERM)
signal.Notify(signalChan, syscall.SIGINT)
signal.Notify(signalChan, syscall.SIGHUP)
// Start a background thread to tell us when the dataplane driver stops.
// If the driver stops unexpectedly, we'll terminate this process.
// If this process needs to stop, we'll kill the driver and then wait
// for the message from the background thread.
driverStoppedC := make(chan bool)
go func() {
if driverCmd == nil {
log.Info("No driver process to monitor")
return
}
err := driverCmd.Wait()
log.WithError(err).Warn("Driver process stopped")
driverStoppedC <- true
}()
// Wait for one of the channels to give us a reason to shut down.
driverAlreadyStopped := driverCmd == nil
receivedFatalSignal := false
var reason string
select {
case <-driverStoppedC:
reason = "Driver stopped"
driverAlreadyStopped = true
case sig := <-signalChan:
if sig == syscall.SIGHUP {
log.Warning("Received a SIGHUP, treating as a request to reload config")
reason = reasonConfigChanged
} else {
reason = fmt.Sprintf("Received OS signal %v", sig)
receivedFatalSignal = true
}
case reason = <-failureReportChan:
}
logCxt := log.WithField("reason", reason)
logCxt.Warn("Felix is shutting down")
// Notify other components to stop.
for _, c := range stopSignalChans {
select {
case c <- true:
default:
}
}
if !driverAlreadyStopped {
// Driver may still be running, just in case the driver is
// unresponsive, start a thread to kill this process if we
// don't manage to kill the driver.
logCxt.Info("Driver still running, trying to shut it down...")
giveUpOnSigTerm := make(chan bool)
go func() {
time.Sleep(4 * time.Second)
giveUpOnSigTerm <- true
time.Sleep(1 * time.Second)
log.Fatal("Failed to wait for driver to exit, giving up.")
}()
// Signal to the driver to exit.
driverCmd.Process.Signal(syscall.SIGTERM)
select {
case <-driverStoppedC:
logCxt.Info("Driver shut down after SIGTERM")
case <-giveUpOnSigTerm:
logCxt.Error("Driver did not respond to SIGTERM, sending SIGKILL")
driverCmd.Process.Kill()
<-driverStoppedC
logCxt.Info("Driver shut down after SIGKILL")
}
}
if !receivedFatalSignal {
// We're exiting due to a failure or a config change, wait
// a couple of seconds to ensure that we don't go into a tight
// restart loop (which would make the init daemon in calico/node give
// up trying to restart us).
logCxt.Info("Sleeping to avoid tight restart loop.")
go func() {
time.Sleep(2 * time.Second)
if reason == reasonConfigChanged {
exitWithCustomRC(configChangedRC, "Exiting for config change")
return
}
logCxt.Fatal("Exiting.")
}()
for {
sig := <-signalChan
if sig == syscall.SIGHUP {
logCxt.Warning("Ignoring SIGHUP because we're already shutting down")
continue
}
logCxt.WithField("signal", sig).Fatal(
"Signal received while shutting down, exiting immediately")
}
}
logCxt.Fatal("Exiting immediately")
}
func exitWithCustomRC(rc int, message string) {
	// Since log writing is done on a background thread, we set the force-flush flag on this log to ensure that
// all the in-flight logs get written before we exit.
log.WithFields(log.Fields{
"rc": rc,
lclogutils.FieldForceFlush: true,
}).Info(message)
os.Exit(rc)
}
var (
ErrNotReady = errors.New("datastore is not ready or has not been initialised")
)
func loadConfigFromDatastore(
ctx context.Context, client bapi.Client, hostname string,
) (globalConfig, hostConfig map[string]string, err error) {
// The configuration is split over 3 different resource types and 4 different resource
// instances in the v3 data model:
// - ClusterInformation (global): name "default"
// - FelixConfiguration (global): name "default"
// - FelixConfiguration (per-host): name "node.<hostname>"
// - Node (per-host): name: <hostname>
// Get the global values and host specific values separately. We re-use the updateprocessor
// logic to convert the single v3 resource to a set of v1 key/values.
hostConfig = make(map[string]string)
globalConfig = make(map[string]string)
var ready bool
err = getAndMergeConfig(
ctx, client, globalConfig,
apiv3.KindClusterInformation, "default",
updateprocessors.NewClusterInfoUpdateProcessor(),
&ready,
)
if err != nil {
return
}
if !ready {
		// The ClusterInformation struct should contain the ready flag; if it is not set, abort.
err = ErrNotReady
return
}
err = getAndMergeConfig(
ctx, client, globalConfig,
apiv3.KindFelixConfiguration, "default",
updateprocessors.NewFelixConfigUpdateProcessor(),
&ready,
)
if err != nil {
return
}
err = getAndMergeConfig(
ctx, client, hostConfig,
apiv3.KindFelixConfiguration, "node."+hostname,
updateprocessors.NewFelixConfigUpdateProcessor(),
&ready,
)
if err != nil {
return
}
err = getAndMergeConfig(
ctx, client, hostConfig,
apiv3.KindNode, hostname,
updateprocessors.NewFelixNodeUpdateProcessor(),
&ready,
)
if err != nil {
return
}
return
}
// getAndMergeConfig gets the v3 resource configuration extracts the separate config values
// (where each configuration value is stored in a field of the v3 resource Spec) and merges into
// the supplied map, as required by our v1-style configuration loader.
func getAndMergeConfig(
ctx context.Context, client bapi.Client, config map[string]string,
kind string, name string,
configConverter watchersyncer.SyncerUpdateProcessor,
ready *bool,
) error {
logCxt := log.WithFields(log.Fields{"kind": kind, "name": name})
cfg, err := client.Get(ctx, model.ResourceKey{
Kind: kind,
Name: name,
Namespace: "",
}, "")
if err != nil {
switch err.(type) {
case cerrors.ErrorResourceDoesNotExist:
logCxt.Info("No config of this type")
return nil
default:
logCxt.WithError(err).Info("Failed to load config from datastore")
return err
}
}
// Re-use the update processor logic implemented for the Syncer. We give it a v3 config
// object in a KVPair and it uses the annotations defined on it to split it into v1-style
// KV pairs. Log any errors - but don't fail completely to avoid cyclic restarts.
v1kvs, err := configConverter.Process(cfg)
if err != nil {
logCxt.WithError(err).Error("Failed to convert configuration")
}
// Loop through the converted values and update our config map with values from either the
// Global or Host configs.
for _, v1KV := range v1kvs {
if _, ok := v1KV.Key.(model.ReadyFlagKey); ok {
logCxt.WithField("ready", v1KV.Value).Info("Loaded ready flag")
if v1KV.Value == true {
*ready = true
}
} else if v1KV.Value != nil {
switch k := v1KV.Key.(type) {
case model.GlobalConfigKey:
config[k.Name] = v1KV.Value.(string)
case model.HostConfigKey:
config[k.Name] = v1KV.Value.(string)
default:
logCxt.WithField("KV", v1KV).Debug("Skipping config - not required for initial loading")
}
}
}
return nil
}
type DataplaneConnector struct {
config *config.Config
configUpdChan chan<- map[string]string
ToDataplane chan interface{}
StatusUpdatesFromDataplane chan interface{}
InSync chan bool
failureReportChan chan<- string
dataplane dp.DataplaneDriver
datastore bapi.Client
statusReporter *statusrep.EndpointStatusReporter
datastoreInSync bool
firstStatusReportSent bool
}
type Startable interface {
Start()
}
func newConnector(configParams *config.Config,
configUpdChan chan<- map[string]string,
datastore bapi.Client,
dataplane dp.DataplaneDriver,
failureReportChan chan<- string,
) *DataplaneConnector {
felixConn := &DataplaneConnector{
config: configParams,
configUpdChan: configUpdChan,
datastore: datastore,
ToDataplane: make(chan interface{}),
StatusUpdatesFromDataplane: make(chan interface{}),
InSync: make(chan bool, 1),
failureReportChan: failureReportChan,
dataplane: dataplane,
}
return felixConn
}
func (fc *DataplaneConnector) readMessagesFromDataplane() {
defer func() {
fc.shutDownProcess("Failed to read messages from dataplane")
}()
log.Info("Reading from dataplane driver pipe...")
ctx := context.Background()
for {
payload, err := fc.dataplane.RecvMessage()
if err != nil {
log.WithError(err).Error("Failed to read from front-end socket")
fc.shutDownProcess("Failed to read from front-end socket")
}
log.WithField("payload", payload).Debug("New message from dataplane")
switch msg := payload.(type) {
case *proto.ProcessStatusUpdate:
fc.handleProcessStatusUpdate(ctx, msg)
case *proto.WorkloadEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.WorkloadEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusUpdate:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
case *proto.HostEndpointStatusRemove:
if fc.statusReporter != nil {
fc.StatusUpdatesFromDataplane <- msg
}
default:
log.WithField("msg", msg).Warning("Unknown message from dataplane")
}
log.Debug("Finished handling message from front-end")
}
}
func (fc *DataplaneConnector) handleProcessStatusUpdate(ctx context.Context, msg *proto.ProcessStatusUpdate) {
log.Debugf("Status update from dataplane driver: %v", *msg)
statusReport := model.StatusReport{
Timestamp: msg.IsoTimestamp,
UptimeSeconds: msg.Uptime,
FirstUpdate: !fc.firstStatusReportSent,
}
kv := model.KVPair{
Key: model.ActiveStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)},
Value: &statusReport,
TTL: fc.config.ReportingTTLSecs,
}
applyCtx, cancel := context.WithTimeout(ctx, 2*time.Second)
_, err := fc.datastore.Apply(applyCtx, &kv)
cancel()
if err != nil {
if _, ok := err.(cerrors.ErrorOperationNotSupported); ok {
log.Debug("Datastore doesn't support status reports.")
return // and it won't support the last status key either.
} else {
log.Warningf("Failed to write status to datastore: %v", err)
}
} else {
fc.firstStatusReportSent = true
}
kv = model.KVPair{
Key: model.LastStatusReportKey{Hostname: fc.config.FelixHostname, RegionString: model.RegionString(fc.config.OpenstackRegion)},
Value: &statusReport,
}
applyCtx, cancel = context.WithTimeout(ctx, 2*time.Second)
_, err = fc.datastore.Apply(applyCtx, &kv)
cancel()
if err != nil {
log.Warningf("Failed to write status to datastore: %v", err)
}
}
var handledConfigChanges = set.From("CalicoVersion", "ClusterGUID", "ClusterType")
func (fc *DataplaneConnector) sendMessagesToDataplaneDriver() {
defer func() {
fc.shutDownProcess("Failed to send messages to dataplane")
}()
var config map[string]string
for {
msg := <-fc.ToDataplane
switch msg := msg.(type) {
case *proto.InSync:
log.Info("Datastore now in sync.")
if !fc.datastoreInSync {
log.Info("Datastore in sync for first time, sending message to status reporter.")
fc.datastoreInSync = true
fc.InSync <- true
}
case *proto.ConfigUpdate:
if config != nil {
log.WithFields(log.Fields{
"old": config,
"new": msg.Config,
}).Info("Config updated, checking whether we need to restart")
restartNeeded := false
for kNew, vNew := range msg.Config {
logCxt := log.WithFields(log.Fields{"key": kNew, "new": vNew})
if vOld, prs := config[kNew]; !prs {
logCxt = logCxt.WithField("updateType", "add")
} else if vNew != vOld {
logCxt = logCxt.WithFields(log.Fields{"old": vOld, "updateType": "update"})
} else {
continue
}
if handledConfigChanges.Contains(kNew) {
logCxt.Info("Config change can be handled without restart")
continue
}
logCxt.Warning("Config change requires restart")
restartNeeded = true
}
for kOld, vOld := range config {
logCxt := log.WithFields(log.Fields{"key": kOld, "old": vOld, "updateType": "delete"})
if _, prs := msg.Config[kOld]; prs {
// Key was present in the message so we've handled above.
continue
}
if handledConfigChanges.Contains(kOld) {
logCxt.Info("Config change can be handled without restart")
continue
}
logCxt.Warning("Config change requires restart")
restartNeeded = true
}
if restartNeeded {
fc.shutDownProcess("config changed")
}
}
// Take a copy of the config to compare against next time.
config = make(map[string]string)
for k, v := range msg.Config {
config[k] = v
}
if fc.configUpdChan != nil {
// Send the config over to the usage reporter.
fc.configUpdChan <- config
}
case *calc.DatastoreNotReady:
log.Warn("Datastore became unready, need to restart.")
fc.shutDownProcess("datastore became unready")
}
if err := fc.dataplane.SendMessage(msg); err != nil {
fc.shutDownProcess("Failed to write to dataplane driver")
}
}
}
func (fc *DataplaneConnector) shutDownProcess(reason string) {
// Send a failure report to the managed shutdown thread then give it
// a few seconds to do the shutdown.
fc.failureReportChan <- reason
time.Sleep(5 * time.Second)
// The graceful shutdown failed, terminate the process.
log.Panic("Managed shutdown failed. Panicking.")
}
func (fc *DataplaneConnector) Start() {
// Start a background thread to write to the dataplane driver.
go fc.sendMessagesToDataplaneDriver()
// Start background thread to read messages from dataplane driver.
go fc.readMessagesFromDataplane()
}
var ErrServiceNotReady = errors.New("Kubernetes service missing IP or port.")
func discoverTyphaAddr(configParams *config.Config) (string, error) {
if configParams.TyphaAddr != "" {
// Explicit address; trumps other sources of config.
return configParams.TyphaAddr, nil
}
if configParams.TyphaK8sServiceName == "" {
// No explicit address, and no service name, not using Typha.
return "", nil
}
// If we get here, we need to look up the Typha service using the k8s API.
// TODO Typha: support Typha lookup without using rest.InClusterConfig().
k8sconf, err := rest.InClusterConfig()
if err != nil {
log.WithError(err).Error("Unable to create Kubernetes config.")
return "", err
}
clientset, err := kubernetes.NewForConfig(k8sconf)
if err != nil {
log.WithError(err).Error("Unable to create Kubernetes client set.")
return "", err
}
svcClient := clientset.CoreV1().Services(configParams.TyphaK8sNamespace)
svc, err := svcClient.Get(configParams.TyphaK8sServiceName, v1.GetOptions{})
if err != nil {
log.WithError(err).Error("Unable to get Typha service from Kubernetes.")
return "", err
}
host := svc.Spec.ClusterIP
log.WithField("clusterIP", host).Info("Found Typha ClusterIP.")
if host == "" {
log.WithError(err).Error("Typha service had no ClusterIP.")
return "", ErrServiceNotReady
}
for _, p := range svc.Spec.Ports {
if p.Name == "calico-typha" {
log.WithField("port", p).Info("Found Typha service port.")
typhaAddr := fmt.Sprintf("%s:%v", host, p.Port)
return typhaAddr, nil
}
}
log.Error("Didn't find Typha service port.")
return "", ErrServiceNotReady
}
| 1 | 16,830 | Please log once here at Error level "Failed to connect to Typha, will retry..." | projectcalico-felix | go |
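One way to fold the review comment into the proposed patch: log once at Error level before retrying, keep reporting not-live/not-ready while disconnected (as the diff does), and only exit fatally once the retry budget is exhausted. A sketch reusing the names and imports from daemon.go; the 10-attempt budget is carried over from the diff and the helper name is made up:

package daemon

import (
	"context"
	"time"

	log "github.com/sirupsen/logrus"

	"github.com/projectcalico/libcalico-go/lib/health"
	"github.com/projectcalico/typha/pkg/syncclient"
)

// startTyphaWithRetry is illustrative only. It logs a single Error when the
// first attempt fails, then retries quietly with the health report flipped.
func startTyphaWithRetry(conn *syncclient.SyncerClient, ha *health.HealthAggregator, healthName string) {
	err := conn.Start(context.Background())
	if err == nil {
		return
	}
	log.WithError(err).Error("Failed to connect to Typha, will retry...")
	for retry := 0; retry < 10; retry++ {
		// Mirror the patch: report not live / not ready while disconnected.
		ha.Report(healthName, &health.HealthReport{Live: false, Ready: false})
		time.Sleep(1 * time.Second)
		if err = conn.Start(context.Background()); err == nil {
			return
		}
	}
	log.WithError(err).Fatal("Failed to connect to Typha after retries")
}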
@@ -1,5 +1,5 @@
# frozen_string_literal: true
module Faker #:nodoc:
- VERSION = '2.18.0'
+ VERSION = '2.18.1'
end | 1 | # frozen_string_literal: true
module Faker #:nodoc:
VERSION = '2.18.0'
end
| 1 | 10,414 | Can you revert this change? The `VERSION` value will be updated after maintainer determines the version at release. | faker-ruby-faker | rb |
@@ -25,13 +25,13 @@ public interface DefinitionConst {
String nameKey = "name";
- String qulifiedServiceNameKey = serviceDescriptionKey + "." + nameKey;
+ String qualifiedServiceNameKey = serviceDescriptionKey + "." + nameKey;
- String qulifiedServiceVersionKey = serviceDescriptionKey + ".version";
+ String qualifiedServiceVersionKey = serviceDescriptionKey + ".version";
- String qulifiedServiceDescKey = serviceDescriptionKey + ".description";
+ String qualifiedServiceDescKey = serviceDescriptionKey + ".description";
- String qulifiedServiceRoleKey = serviceDescriptionKey + ".role";
+ String qualifiedServiceRoleKey = serviceDescriptionKey + ".role";
String allowCrossAppKey = "allowCrossApp";
| 1 | /*
* Copyright 2017 Huawei Technologies Co., Ltd
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package io.servicecomb.serviceregistry.definition;
import static io.servicecomb.foundation.common.base.ServiceCombConstants.CONFIG_SERVICE;
public interface DefinitionConst {
String appIdKey = "APPLICATION_ID";
String serviceDescriptionKey = CONFIG_SERVICE;
String nameKey = "name";
String qulifiedServiceNameKey = serviceDescriptionKey + "." + nameKey;
String qulifiedServiceVersionKey = serviceDescriptionKey + ".version";
String qulifiedServiceDescKey = serviceDescriptionKey + ".description";
String qulifiedServiceRoleKey = serviceDescriptionKey + ".role";
String allowCrossAppKey = "allowCrossApp";
String defaultAppId = "default";
String defaultMicroserviceName = "defaultMicroservice";
String defaultVersion = "1.0.0";
String defaultStage = "prod";
String serviceEnvironmentKey = "instance_description.environment";
String defaultEnvironment = "production";
}
| 1 | 6,881 | It could be better if we use the up case letter to specify the constant string name. Such as QUALIFIED_SERVICE_NAME_KEY can let the user know it's a constant string. | apache-servicecomb-java-chassis | java |
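A sketch of the convention the reviewer is asking for: upper-case names make it obvious at a glance that these interface fields are constants. The literal values below are assumptions for illustration; only the naming is the point:

public interface DefinitionConstExample {
  // Hypothetical base values; in the real code they come from ServiceCombConstants.
  String SERVICE_DESCRIPTION_KEY = "service_description";
  String NAME_KEY = "name";

  String QUALIFIED_SERVICE_NAME_KEY = SERVICE_DESCRIPTION_KEY + "." + NAME_KEY;
  String QUALIFIED_SERVICE_VERSION_KEY = SERVICE_DESCRIPTION_KEY + ".version";
  String QUALIFIED_SERVICE_DESC_KEY = SERVICE_DESCRIPTION_KEY + ".description";
  String QUALIFIED_SERVICE_ROLE_KEY = SERVICE_DESCRIPTION_KEY + ".role";
}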
@@ -108,8 +108,12 @@ module RSpec
# or the configured failure exit code (1 by default) if specs
# failed.
def run_specs(example_groups)
- @configuration.reporter.report(@world.example_count(example_groups)) do |reporter|
+ examples_count = @world.example_count(example_groups)
+ @configuration.reporter.report(examples_count) do |reporter|
@configuration.with_suite_hooks do
+ if examples_count == 0 && @configuration.fail_if_no_examples
+ return @configuration.failure_exit_code
+ end
example_groups.map { |g| g.run(reporter) }.all? ? 0 : @configuration.failure_exit_code
end
end | 1 | module RSpec
module Core
# Provides the main entry point to run a suite of RSpec examples.
class Runner
# @attr_reader
# @private
attr_reader :options, :configuration, :world
# Register an `at_exit` hook that runs the suite when the process exits.
#
# @note This is not generally needed. The `rspec` command takes care
# of running examples for you without involving an `at_exit`
# hook. This is only needed if you are running specs using
# the `ruby` command, and even then, the normal way to invoke
# this is by requiring `rspec/autorun`.
def self.autorun
if autorun_disabled?
RSpec.deprecate("Requiring `rspec/autorun` when running RSpec via the `rspec` command")
return
elsif installed_at_exit? || running_in_drb?
return
end
at_exit { perform_at_exit }
@installed_at_exit = true
end
# @private
def self.perform_at_exit
# Don't bother running any specs and just let the program terminate
# if we got here due to an unrescued exception (anything other than
# SystemExit, which is raised when somebody calls Kernel#exit).
return unless $!.nil? || $!.is_a?(SystemExit)
# We got here because either the end of the program was reached or
# somebody called Kernel#exit. Run the specs and then override any
# existing exit status with RSpec's exit status if any specs failed.
invoke
end
# Runs the suite of specs and exits the process with an appropriate exit
# code.
def self.invoke
disable_autorun!
status = run(ARGV, $stderr, $stdout).to_i
exit(status) if status != 0
end
# Run a suite of RSpec examples. Does not exit.
#
# This is used internally by RSpec to run a suite, but is available
# for use by any other automation tool.
#
# If you want to run this multiple times in the same process, and you
# want files like `spec_helper.rb` to be reloaded, be sure to use `load`
# instead of `require`.
#
# @param args [Array] command-line-supported arguments
# @param err [IO] error stream
# @param out [IO] output stream
# @return [Fixnum] exit status code. 0 if all specs passed,
# or the configured failure exit code (1 by default) if specs
# failed.
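# @example Running a suite from another tool (illustrative)
#   status = RSpec::Core::Runner.run(['spec'], $stderr, $stdout)
#   abort 'specs failed' unless status == 0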
def self.run(args, err=$stderr, out=$stdout)
trap_interrupt
options = ConfigurationOptions.new(args)
if options.options[:runner]
options.options[:runner].call(options, err, out)
else
new(options).run(err, out)
end
end
def initialize(options, configuration=RSpec.configuration, world=RSpec.world)
@options = options
@configuration = configuration
@world = world
end
# Configures and runs a spec suite.
#
# @param err [IO] error stream
# @param out [IO] output stream
def run(err, out)
setup(err, out)
run_specs(@world.ordered_example_groups).tap do
persist_example_statuses
end
end
# Wires together the various configuration objects and state holders.
#
# @param err [IO] error stream
# @param out [IO] output stream
def setup(err, out)
@configuration.error_stream = err
@configuration.output_stream = out if @configuration.output_stream == $stdout
@options.configure(@configuration)
@configuration.load_spec_files
@world.announce_filters
end
# Runs the provided example groups.
#
# @param example_groups [Array<RSpec::Core::ExampleGroup>] groups to run
# @return [Fixnum] exit status code. 0 if all specs passed,
# or the configured failure exit code (1 by default) if specs
# failed.
def run_specs(example_groups)
@configuration.reporter.report(@world.example_count(example_groups)) do |reporter|
@configuration.with_suite_hooks do
example_groups.map { |g| g.run(reporter) }.all? ? 0 : @configuration.failure_exit_code
end
end
end
private
def persist_example_statuses
return unless (path = @configuration.example_status_persistence_file_path)
ExampleStatusPersister.persist(@world.all_examples, path)
rescue SystemCallError => e
RSpec.warning "Could not write example statuses to #{path} (configured as " \
"`config.example_status_persistence_file_path`) due to a " \
"system error: #{e.inspect}. Please check that the config " \
"option is set to an accessible, valid file path", :call_site => nil
end
# @private
def self.disable_autorun!
@autorun_disabled = true
end
# @private
def self.autorun_disabled?
@autorun_disabled ||= false
end
# @private
def self.installed_at_exit?
@installed_at_exit ||= false
end
# @private
def self.running_in_drb?
return false unless defined?(DRb)
server = begin
DRb.current_server
rescue DRb::DRbServerNotFound
return false
end
return false unless server && server.alive?
require 'socket'
require 'uri'
local_ipv4 = begin
IPSocket.getaddress(Socket.gethostname)
rescue SocketError
return false
end
["127.0.0.1", "localhost", local_ipv4].any? { |addr| addr == URI(DRb.current_server.uri).host }
end
# @private
def self.trap_interrupt
trap('INT') { handle_interrupt }
end
# @private
def self.handle_interrupt
if RSpec.world.wants_to_quit
exit!(1)
else
RSpec.world.wants_to_quit = true
$stderr.puts "\nRSpec is shutting down and will print the summary report... Interrupt again to force quit."
end
end
end
end
end
| 1 | 16,298 | This should be `@configuration.failure_exit_code`, we don't want to hard code 1. | rspec-rspec-core | rb |
@@ -359,11 +359,10 @@ Blockly.FieldVariable.prototype.onItemSelected = function(menu, menuItem) {
};
/**
- * Whether this field references any Blockly variables. If true it may need to
- * be handled differently during serialization and deserialization. Subclasses
- * may override this.
- * @return {boolean} True if this field has any variable references.
+ * Overrides referencesVariables(), indicating this field refers to a variable.
+ * @return {boolean} True.
* @package
+ * @override
*/
Blockly.FieldVariable.prototype.referencesVariables = function() {
return true; | 1 | /**
* @license
* Visual Blocks Editor
*
* Copyright 2012 Google Inc.
* https://developers.google.com/blockly/
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
/**
* @fileoverview Variable input field.
* @author [email protected] (Neil Fraser)
*/
'use strict';
goog.provide('Blockly.FieldVariable');
goog.require('Blockly.FieldDropdown');
goog.require('Blockly.Msg');
goog.require('Blockly.VariableModel');
goog.require('Blockly.Variables');
goog.require('goog.asserts');
goog.require('goog.string');
/**
* Class for a variable's dropdown field.
* @param {?string} varname The default name for the variable. If null,
* a unique variable name will be generated.
* @param {Function=} opt_validator A function that is executed when a new
* option is selected. Its sole argument is the new option value.
* @param {Array.<string>} opt_variableTypes A list of the types of variables to
* include in the dropdown.
* @extends {Blockly.FieldDropdown}
* @constructor
*/
Blockly.FieldVariable = function(varname, opt_validator, opt_variableTypes) {
// The FieldDropdown constructor would call setValue, which might create a
// spurious variable. Just do the relevant parts of the constructor.
this.menuGenerator_ = Blockly.FieldVariable.dropdownCreate;
this.size_ = new goog.math.Size(Blockly.BlockSvg.FIELD_WIDTH,
Blockly.BlockSvg.FIELD_HEIGHT);
this.setValidator(opt_validator);
// TODO (blockly #1499): Add opt_default_type to match default value.
// If not set, ''.
this.defaultVariableName = (varname || '');
var hasSingleVarType = opt_variableTypes && (opt_variableTypes.length == 1);
this.defaultType_ = hasSingleVarType ? opt_variableTypes[0] : '';
this.variableTypes = opt_variableTypes;
this.value_ = null;
};
goog.inherits(Blockly.FieldVariable, Blockly.FieldDropdown);
/**
* Construct a FieldVariable from a JSON arg object,
* dereferencing any string table references.
* @param {!Object} options A JSON object with options (variable,
* variableTypes, and defaultType).
* @returns {!Blockly.FieldVariable} The new field instance.
* @package
* @nocollapse
*/
Blockly.FieldVariable.fromJson = function(options) {
var varname = Blockly.utils.replaceMessageReferences(options['variable']);
var variableTypes = options['variableTypes'];
return new Blockly.FieldVariable(varname, null, variableTypes);
};
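// Illustrative only (not part of the original file): a JSON block definition
// that instantiates this field could look like the commented example below.
// The block type, message and option values are made up; 'variable' and
// 'variableTypes' are the keys fromJson reads above.
//
// Blockly.defineBlocksWithJsonArray([{
//   "type": "example_get_var",
//   "message0": "get %1",
//   "args0": [{
//     "type": "field_variable",
//     "name": "VAR",
//     "variable": "item"
//   }]
// }]);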
/**
* Initialize everything needed to render this field. This includes making sure
* that the field's value is valid.
* @public
*/
Blockly.FieldVariable.prototype.init = function() {
if (this.fieldGroup_) {
// Dropdown has already been initialized once.
return;
}
Blockly.FieldVariable.superClass_.init.call(this);
// TODO (blockly #1010): Change from init/initModel to initView/initModel
this.initModel();
};
/**
* Initialize the model for this field if it has not already been initialized.
* If the value has not been set to a variable by the first render, we make up a
* variable rather than let the value be invalid.
* @package
*/
Blockly.FieldVariable.prototype.initModel = function() {
if (this.variable_) {
return; // Initialization already happened.
}
this.workspace_ = this.sourceBlock_.workspace;
// Initialize this field if it's in a broadcast block in the flyout
var variable = this.initFlyoutBroadcast_(this.workspace_);
if (!variable) {
    variable = Blockly.Variables.getOrCreateVariablePackage(
this.workspace_, null, this.defaultVariableName, this.defaultType_);
}
// Don't fire a change event for this setValue. It would have null as the
// old value, which is not valid.
Blockly.Events.disable();
try {
this.setValue(variable.getId());
} finally {
Blockly.Events.enable();
}
};
/**
* Initialize broadcast blocks in the flyout.
* Implicit deletion of broadcast messages from the scratch vm may cause
* broadcast blocks in the flyout to change which variable they display as the
* selected option when the workspace is refreshed.
* Re-sort the broadcast messages by name, and set the field value to the id
* of the variable that comes first in sorted order.
* @param {!Blockly.Workspace} workspace The flyout workspace containing the
* broadcast block.
 * @return {Blockly.VariableModel} The variable of type 'broadcast_msg' that
 *     comes first in sorted order, or undefined if this field is not a
 *     broadcast field in a flyout.
*/
Blockly.FieldVariable.prototype.initFlyoutBroadcast_ = function(workspace) {
// Using shorter name for this constant
var broadcastMsgType = Blockly.BROADCAST_MESSAGE_VARIABLE_TYPE;
var broadcastVars = workspace.getVariablesOfType(broadcastMsgType);
  if (workspace.isFlyout && this.defaultType_ == broadcastMsgType &&
broadcastVars.length != 0) {
broadcastVars.sort(Blockly.VariableModel.compareByName);
return broadcastVars[0];
}
};
/**
* Dispose of this field.
* @public
*/
Blockly.FieldVariable.prototype.dispose = function() {
Blockly.FieldVariable.superClass_.dispose.call(this);
this.workspace_ = null;
this.variableMap_ = null;
};
/**
* Attach this field to a block.
* @param {!Blockly.Block} block The block containing this field.
*/
Blockly.FieldVariable.prototype.setSourceBlock = function(block) {
goog.asserts.assert(!block.isShadow(),
'Variable fields are not allowed to exist on shadow blocks.');
Blockly.FieldVariable.superClass_.setSourceBlock.call(this, block);
};
/**
* Get the variable's ID.
* @return {string} Current variable's ID.
*/
Blockly.FieldVariable.prototype.getValue = function() {
return this.variable_ ? this.variable_.getId() : null;
};
/**
* Get the text from this field, which is the selected variable's name.
* @return {string} The selected variable's name, or the empty string if no
* variable is selected.
*/
Blockly.FieldVariable.prototype.getText = function() {
return this.variable_ ? this.variable_.name : '';
};
/**
* Get the variable model for the selected variable.
* Not guaranteed to be in the variable map on the workspace (e.g. if accessed
* after the variable has been deleted).
* @return {?Blockly.VariableModel} the selected variable, or null if none was
* selected.
* @package
*/
Blockly.FieldVariable.prototype.getVariable = function() {
return this.variable_;
};
/**
* Set the variable ID.
* @param {string} id New variable ID, which must reference an existing
* variable.
*/
Blockly.FieldVariable.prototype.setValue = function(id) {
var workspace = this.sourceBlock_.workspace;
var variable = Blockly.Variables.getVariable(workspace, id);
if (!variable) {
throw new Error('Variable id doesn\'t point to a real variable! ID was ' +
id);
}
// Type checks!
var type = variable.type;
if (!this.typeIsAllowed_(type)) {
throw new Error('Variable type doesn\'t match this field! Type was ' +
type);
}
if (this.sourceBlock_ && Blockly.Events.isEnabled()) {
var oldValue = this.variable_ ? this.variable_.getId() : null;
Blockly.Events.fire(new Blockly.Events.BlockChange(
this.sourceBlock_, 'field', this.name, oldValue, id));
}
this.variable_ = variable;
this.value_ = id;
this.setText(variable.name);
};
/**
* Check whether the given variable type is allowed on this field.
* @param {string} type The type to check.
* @return {boolean} True if the type is in the list of allowed types.
* @private
*/
Blockly.FieldVariable.prototype.typeIsAllowed_ = function(type) {
var typeList = this.getVariableTypes_();
if (!typeList) {
return true; // If it's null, all types are valid.
}
for (var i = 0; i < typeList.length; i++) {
if (type == typeList[i]) {
return true;
}
}
return false;
};
/**
* Return a list of variable types to include in the dropdown.
* @return {!Array.<string>} Array of variable types.
* @throws {Error} if variableTypes is an empty array.
* @private
*/
Blockly.FieldVariable.prototype.getVariableTypes_ = function() {
// TODO (#1513): Try to avoid calling this every time the field is edited.
var variableTypes = this.variableTypes;
if (variableTypes === null) {
// If variableTypes is null, return all variable types.
if (this.sourceBlock_) {
var workspace = this.sourceBlock_.workspace;
return workspace.getVariableTypes();
}
}
variableTypes = variableTypes || [''];
if (variableTypes.length == 0) {
// Throw an error if variableTypes is an empty list.
var name = this.getText();
throw new Error('\'variableTypes\' of field variable ' +
name + ' was an empty list');
}
return variableTypes;
};
/**
* Return a sorted list of variable names for variable dropdown menus.
* Include a special option at the end for creating a new variable name.
* @return {!Array.<string>} Array of variable names.
* @this {Blockly.FieldVariable}
*/
Blockly.FieldVariable.dropdownCreate = function() {
if (!this.variable_) {
throw new Error('Tried to call dropdownCreate on a variable field with no' +
' variable selected.');
}
var variableModelList = [];
var name = this.getText();
var workspace = null;
if (this.sourceBlock_) {
workspace = this.sourceBlock_.workspace;
}
if (workspace) {
var variableTypes = this.getVariableTypes_();
// Get a copy of the list, so that adding rename and new variable options
// doesn't modify the workspace's list.
for (var i = 0; i < variableTypes.length; i++) {
var variableType = variableTypes[i];
var variables = workspace.getVariablesOfType(variableType);
variableModelList = variableModelList.concat(variables);
var potentialVarMap = workspace.getPotentialVariableMap();
if (potentialVarMap) {
var potentialVars = potentialVarMap.getVariablesOfType(variableType);
variableModelList = variableModelList.concat(potentialVars);
}
}
}
variableModelList.sort(Blockly.VariableModel.compareByName);
var options = [];
for (var i = 0; i < variableModelList.length; i++) {
// Set the uuid as the internal representation of the variable.
options[i] = [variableModelList[i].name, variableModelList[i].getId()];
}
if (this.defaultType_ == Blockly.BROADCAST_MESSAGE_VARIABLE_TYPE) {
options.push([Blockly.Msg.NEW_BROADCAST_MESSAGE, Blockly.NEW_BROADCAST_MESSAGE_ID]);
} else {
options.push([Blockly.Msg.RENAME_VARIABLE, Blockly.RENAME_VARIABLE_ID]);
if (Blockly.Msg.DELETE_VARIABLE) {
options.push([Blockly.Msg.DELETE_VARIABLE.replace('%1', name),
Blockly.DELETE_VARIABLE_ID]);
}
}
return options;
};
/**
* Handle the selection of an item in the variable dropdown menu.
* Special case the 'Rename variable...', 'Delete variable...',
* and 'New message...' options.
* In the rename case, prompt the user for a new name.
* @param {!goog.ui.Menu} menu The Menu component clicked.
* @param {!goog.ui.MenuItem} menuItem The MenuItem selected within menu.
*/
Blockly.FieldVariable.prototype.onItemSelected = function(menu, menuItem) {
var id = menuItem.getValue();
if (this.sourceBlock_ && this.sourceBlock_.workspace) {
var workspace = this.sourceBlock_.workspace;
if (id == Blockly.RENAME_VARIABLE_ID) {
// Rename variable.
Blockly.Variables.renameVariable(workspace, this.variable_);
return;
} else if (id == Blockly.DELETE_VARIABLE_ID) {
// Delete variable.
workspace.deleteVariableById(this.variable_.getId());
return;
} else if (id == Blockly.NEW_BROADCAST_MESSAGE_ID) {
var thisField = this;
var updateField = function(varId) {
if (varId) {
thisField.setValue(varId);
}
};
Blockly.Variables.createVariable(workspace, updateField,
Blockly.BROADCAST_MESSAGE_VARIABLE_TYPE);
return;
}
// TODO (blockly #1529): Call any validation function, and allow it to override.
}
this.setValue(id);
};
/**
* Whether this field references any Blockly variables. If true it may need to
* be handled differently during serialization and deserialization. Subclasses
* may override this.
* @return {boolean} True if this field has any variable references.
* @package
*/
Blockly.FieldVariable.prototype.referencesVariables = function() {
return true;
};
Blockly.Field.register('field_variable', Blockly.FieldVariable);
| 1 | 9,274 | Changed per review in Blockly. | LLK-scratch-blocks | js |
@@ -968,12 +968,13 @@ func (c *client) bridgeAndUplinkFlows(uplinkOfport uint32, bridgeLocalPort uint3
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Forward the packet to conntrackTable if it enters the OVS pipeline from the bridge interface and is sent to
- // local Pods.
+		// local Pods. Mark the packet for MAC rewrite, so that the dst MAC will be rewritten with the real MAC
+		// in the L3Forwarding table, and the packet can be forwarded to the correct OVS interface.
c.pipeline[ClassifierTable].BuildFlow(priorityHigh).
MatchProtocol(binding.ProtocolIP).
MatchInPort(bridgeLocalPort).
MatchDstIPNet(localSubnet).
- Action().SetDstMAC(globalVirtualMAC).
+ Action().LoadRegRange(int(marksReg), macRewriteMark, macRewriteMarkRange).
Action().GotoTable(conntrackTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(), | 1 | // Copyright 2019 Antrea Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package openflow
import (
"encoding/binary"
"fmt"
"net"
"strconv"
"strings"
"sync"
"k8s.io/client-go/tools/cache"
"github.com/vmware-tanzu/antrea/pkg/agent/config"
"github.com/vmware-tanzu/antrea/pkg/agent/openflow/cookie"
"github.com/vmware-tanzu/antrea/pkg/agent/types"
"github.com/vmware-tanzu/antrea/pkg/features"
binding "github.com/vmware-tanzu/antrea/pkg/ovs/openflow"
"github.com/vmware-tanzu/antrea/third_party/proxy"
)
const (
// Flow table id index
ClassifierTable binding.TableIDType = 0
spoofGuardTable binding.TableIDType = 10
arpResponderTable binding.TableIDType = 20
serviceHairpinTable binding.TableIDType = 29
conntrackTable binding.TableIDType = 30
conntrackStateTable binding.TableIDType = 31
sessionAffinityTable binding.TableIDType = 40
dnatTable binding.TableIDType = 40
serviceLBTable binding.TableIDType = 41
endpointDNATTable binding.TableIDType = 42
cnpEgressRuleTable binding.TableIDType = 45
EgressRuleTable binding.TableIDType = 50
EgressDefaultTable binding.TableIDType = 60
l3ForwardingTable binding.TableIDType = 70
l2ForwardingCalcTable binding.TableIDType = 80
cnpIngressRuleTable binding.TableIDType = 85
IngressRuleTable binding.TableIDType = 90
IngressDefaultTable binding.TableIDType = 100
conntrackCommitTable binding.TableIDType = 105
hairpinSNATTable binding.TableIDType = 106
L2ForwardingOutTable binding.TableIDType = 110
// Flow priority level
priorityHigh = uint16(210)
priorityNormal = uint16(200)
priorityLow = uint16(190)
prioritySNAT = uint16(180)
priorityMiss = uint16(0)
priorityTopCNP = uint16(64990)
// Index for priority cache
priorityIndex = "priority"
// Traffic marks
markTrafficFromTunnel = 0
markTrafficFromGateway = 1
markTrafficFromLocal = 2
markTrafficFromUplink = 4
)
var (
FlowTables = []struct {
Number binding.TableIDType
Name string
}{
{ClassifierTable, "Classification"},
{spoofGuardTable, "SpoofGuard"},
{arpResponderTable, "ARPResponder"},
{serviceHairpinTable, "ServiceHairpin"},
{conntrackTable, "ConntrackZone"},
{conntrackStateTable, "ConntrackState"},
{dnatTable, "DNAT(SessionAffinity)"},
{sessionAffinityTable, "SessionAffinity"},
{serviceLBTable, "ServiceLB"},
{endpointDNATTable, "EndpointDNAT"},
{cnpEgressRuleTable, "CNPEgressRule"},
{EgressRuleTable, "EgressRule"},
{EgressDefaultTable, "EgressDefaultRule"},
{l3ForwardingTable, "l3Forwarding"},
{l2ForwardingCalcTable, "L2Forwarding"},
{cnpIngressRuleTable, "CNPIngressRule"},
{IngressRuleTable, "IngressRule"},
{IngressDefaultTable, "IngressDefaultRule"},
{conntrackCommitTable, "ConntrackCommit"},
{hairpinSNATTable, "HairpinSNATTable"},
{L2ForwardingOutTable, "Output"},
}
)
// GetFlowTableName returns the flow table name given the table number. An empty
// string is returned if the table cannot be found.
func GetFlowTableName(tableNumber binding.TableIDType) string {
for _, t := range FlowTables {
if t.Number == tableNumber {
return t.Name
}
}
return ""
}
// GetFlowTableNumber does a case insensitive lookup of the table name, and
// returns the flow table number if the table is found; otherwise TableIDAll
// is returned.
func GetFlowTableNumber(tableName string) binding.TableIDType {
for _, t := range FlowTables {
if strings.EqualFold(t.Name, tableName) {
return t.Number
}
}
return binding.TableIDAll
}
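// regType identifies an OVS register by its number. The methods below format it as a plain
// number, an NXM field name ("NXM_NX_REGn"), or a match field name ("regn").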
type regType uint
func (rt regType) number() string {
return fmt.Sprint(rt)
}
func (rt regType) nxm() string {
return fmt.Sprintf("NXM_NX_REG%d", rt)
}
func (rt regType) reg() string {
return fmt.Sprintf("reg%d", rt)
}
const (
// marksReg stores traffic-source mark and pod-found mark.
// traffic-source resides in [0..15], pod-found resides in [16].
marksReg regType = 0
portCacheReg regType = 1
swapReg regType = 2
endpointIPReg regType = 3 // Use reg3 to store endpoint IP
endpointPortReg regType = 4 // Use reg4[0..15] to store endpoint port
serviceLearnReg = endpointPortReg // Use reg4[16..18] to store endpoint selection states.
EgressReg regType = 5
IngressReg regType = 6
TraceflowReg regType = 9 // Use reg9[28..31] to store traceflow dataplaneTag.
	// marksRegServiceNeedLB indicates that a packet needs Service Endpoint selection.
	marksRegServiceNeedLB uint32 = 0b001
	// marksRegServiceSelected indicates that a packet has done Service Endpoint selection.
	marksRegServiceSelected uint32 = 0b010
	// marksRegServiceNeedLearn indicates that a packet has done Service Endpoint
	// selection and that the selection result needs to be cached.
	marksRegServiceNeedLearn uint32 = 0b011
CtZone = 0xfff0
portFoundMark = 0b1
snatRequiredMark = 0b1
hairpinMark = 0b1
macRewriteMark = 0b1
gatewayCTMark = 0x20
snatCTMark = 0x40
serviceCTMark = 0x21
)
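// Layout of marksReg (reg0): bits [0..15] carry the traffic-source mark, bit [16] the
// ofPort-found mark, bit [17] the SNAT-required mark, bit [18] the hairpin mark, and
// bit [19] the MAC-rewrite mark; see the Range definitions below.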
var (
// ofPortMarkRange takes the 16th bit of register marksReg to indicate if the ofPort number of an interface
// is found or not. Its value is 0x1 if yes.
ofPortMarkRange = binding.Range{16, 16}
	// OfTraceflowMarkRange stores the dataplaneTag at range [28..31] in TraceflowReg.
OfTraceflowMarkRange = binding.Range{28, 31}
// ofPortRegRange takes a 32-bit range of register portCacheReg to cache the ofPort number of the interface.
ofPortRegRange = binding.Range{0, 31}
// snatMarkRange takes the 17th bit of register marksReg to indicate if the packet needs to be SNATed with Node's IP
// or not. Its value is 0x1 if yes.
snatMarkRange = binding.Range{17, 17}
// hairpinMarkRange takes the 18th bit of register marksReg to indicate
// if the packet needs DNAT to virtual IP or not. Its value is 0x1 if yes.
hairpinMarkRange = binding.Range{18, 18}
// macRewriteMarkRange takes the 19th bit of register marksReg to indicate
// if the packet's MAC addresses need to be rewritten. Its value is 0x1 if yes.
macRewriteMarkRange = binding.Range{19, 19}
// endpointIPRegRange takes a 32-bit range of register endpointIPReg to store
// the selected Service Endpoint IP.
endpointIPRegRange = binding.Range{0, 31}
// endpointPortRegRange takes a 16-bit range of register endpointPortReg to store
// the selected Service Endpoint port.
endpointPortRegRange = binding.Range{0, 15}
// serviceLearnRegRange takes a 3-bit range of register serviceLearnReg to
// indicate if the packet accessing a Service has already selected the Service
// Endpoint, still needs to select an Endpoint, or if an Endpoint has already
// been selected and the selection decision needs to be learned.
serviceLearnRegRange = binding.Range{16, 18}
globalVirtualMAC, _ = net.ParseMAC("aa:bb:cc:dd:ee:ff")
ReentranceMAC, _ = net.ParseMAC("de:ad:be:ef:de:ad")
hairpinIP = net.ParseIP("169.254.169.252").To4()
)
type OFEntryOperations interface {
Add(flow binding.Flow) error
Modify(flow binding.Flow) error
Delete(flow binding.Flow) error
AddAll(flows []binding.Flow) error
DeleteAll(flows []binding.Flow) error
AddOFEntries(ofEntries []binding.OFEntry) error
DeleteOFEntries(ofEntries []binding.OFEntry) error
}
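// flowCache caches the installed flows for a single object (e.g. a Node, Pod or Service),
// keyed by a per-flow key string.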
type flowCache map[string]binding.Flow
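// flowCategoryCache maps an object key to its flowCache; it embeds sync.Map so it is safe
// for concurrent use.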
type flowCategoryCache struct {
sync.Map
}
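// client programs the OVS pipeline flows for the Antrea agent and caches them for later
// modification, deletion and replay.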
type client struct {
enableProxy bool
roundInfo types.RoundInfo
cookieAllocator cookie.Allocator
bridge binding.Bridge
pipeline map[binding.TableIDType]binding.Table
nodeFlowCache, podFlowCache, serviceFlowCache *flowCategoryCache // cache for corresponding deletions
// "fixed" flows installed by the agent after initialization and which do not change during
// the lifetime of the client.
gatewayFlows, defaultServiceFlows, defaultTunnelFlows, hostNetworkingFlows []binding.Flow
// ofEntryOperations is a wrapper interface for OpenFlow entry Add / Modify / Delete operations. It
// enables convenient mocking in unit tests.
ofEntryOperations OFEntryOperations
// policyCache is a storage that supports listing policyRuleConjunction with different indexers.
// It's guaranteed that one policyRuleConjunction is processed by at most one goroutine at any given time.
policyCache cache.Indexer
conjMatchFlowLock sync.Mutex // Lock for access globalConjMatchFlowCache
groupCache sync.Map
// globalConjMatchFlowCache is a global map for conjMatchFlowContext. The key is a string generated from the
// conjMatchFlowContext.
globalConjMatchFlowCache map[string]*conjMatchFlowContext
// replayMutex provides exclusive access to the OFSwitch to the ReplayFlows method.
replayMutex sync.RWMutex
nodeConfig *config.NodeConfig
encapMode config.TrafficEncapModeType
gatewayPort uint32 // OVSOFPort number
// packetInHandlers stores handler to process PacketIn event
packetInHandlers map[string]PacketInHandler
}
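// GetTunnelVirtualMAC returns the global virtual MAC that is used as the destination MAC for
// traffic sent to remote Nodes over the tunnel.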
func (c *client) GetTunnelVirtualMAC() net.HardwareAddr {
return globalVirtualMAC
}
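// The OFEntryOperations methods below apply flow and group changes through OpenFlow bundles,
// so each call takes effect transactionally on the bridge.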
func (c *client) Add(flow binding.Flow) error {
return c.bridge.AddFlowsInBundle([]binding.Flow{flow}, nil, nil)
}
func (c *client) Modify(flow binding.Flow) error {
return c.bridge.AddFlowsInBundle(nil, []binding.Flow{flow}, nil)
}
func (c *client) Delete(flow binding.Flow) error {
return c.bridge.AddFlowsInBundle(nil, nil, []binding.Flow{flow})
}
func (c *client) AddAll(flows []binding.Flow) error {
return c.bridge.AddFlowsInBundle(flows, nil, nil)
}
func (c *client) DeleteAll(flows []binding.Flow) error {
return c.bridge.AddFlowsInBundle(nil, nil, flows)
}
func (c *client) AddOFEntries(ofEntries []binding.OFEntry) error {
return c.bridge.AddOFEntriesInBundle(ofEntries, nil, nil)
}
func (c *client) DeleteOFEntries(ofEntries []binding.OFEntry) error {
return c.bridge.AddOFEntriesInBundle(nil, nil, ofEntries)
}
// defaultFlows generates the default flows of all tables.
func (c *client) defaultFlows() (flows []binding.Flow) {
for _, table := range c.pipeline {
flowBuilder := table.BuildFlow(priorityMiss)
switch table.GetMissAction() {
case binding.TableMissActionNext:
flowBuilder = flowBuilder.Action().GotoTable(table.GetNext())
case binding.TableMissActionNormal:
flowBuilder = flowBuilder.Action().Normal()
case binding.TableMissActionDrop:
flowBuilder = flowBuilder.Action().Drop()
case binding.TableMissActionNone:
fallthrough
default:
continue
}
flows = append(flows, flowBuilder.Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).Done())
}
return flows
}
// tunnelClassifierFlow generates the flow to mark traffic coming from the tunnelOFPort.
func (c *client) tunnelClassifierFlow(tunnelOFPort uint32, category cookie.Category) binding.Flow {
flowBuilder := c.pipeline[ClassifierTable].BuildFlow(priorityNormal).
MatchInPort(tunnelOFPort)
if features.DefaultFeatureGate.Enabled(features.Traceflow) {
regName := fmt.Sprintf("%s%d", binding.NxmFieldReg, TraceflowReg)
tunMetadataName := fmt.Sprintf("%s%d", binding.NxmFieldTunMetadata, 0)
flowBuilder = flowBuilder.Action().MoveRange(tunMetadataName, regName, OfTraceflowMarkRange, OfTraceflowMarkRange)
}
return flowBuilder.Action().LoadRegRange(int(marksReg), markTrafficFromTunnel, binding.Range{0, 15}).
Action().LoadRegRange(int(marksReg), macRewriteMark, macRewriteMarkRange).
Action().GotoTable(conntrackTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// gatewayClassifierFlow generates the flow to mark traffic coming from the gatewayOFPort.
func (c *client) gatewayClassifierFlow(gatewayOFPort uint32, category cookie.Category) binding.Flow {
classifierTable := c.pipeline[ClassifierTable]
return classifierTable.BuildFlow(priorityNormal).
MatchInPort(gatewayOFPort).
Action().LoadRegRange(int(marksReg), markTrafficFromGateway, binding.Range{0, 15}).
Action().GotoTable(classifierTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// podClassifierFlow generates the flow to mark traffic coming from the podOFPort.
func (c *client) podClassifierFlow(podOFPort uint32, category cookie.Category) binding.Flow {
classifierTable := c.pipeline[ClassifierTable]
return classifierTable.BuildFlow(priorityLow).
MatchInPort(podOFPort).
Action().LoadRegRange(int(marksReg), markTrafficFromLocal, binding.Range{0, 15}).
Action().GotoTable(classifierTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// hostBridgeUplinkFlows generates the flows that forward traffic between the bridge local port and the uplink port,
// so that the host can communicate with the outside network. These flows are only needed on the Windows platform.
func (c *client) hostBridgeUplinkFlows(uplinkPort uint32, bridgeLocalPort uint32, category cookie.Category) (flows []binding.Flow) {
classifierTable := c.pipeline[ClassifierTable]
flows = []binding.Flow{
classifierTable.BuildFlow(priorityLow).MatchInPort(uplinkPort).
Action().Output(int(bridgeLocalPort)).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
classifierTable.BuildFlow(priorityNormal).MatchInPort(bridgeLocalPort).
Action().Output(int(uplinkPort)).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
}
return flows
}
// connectionTrackFlows generates flows that redirect traffic to ct_zone and handle traffic according to ct_state:
// 1) commit new connections to ct_zone(0xfff0) in the conntrackCommitTable.
// 2) Add ct_mark on the packet if it is sent to the switch from the host gateway.
// 3) Allow traffic if it hits ct_mark and is sent from the host gateway.
// 4) Drop all invalid traffic.
// 5) Let other traffic go to the sessionAffinityTable first and then the serviceLBTable.
// The sessionAffinityTable is a side-effect table which means traffic will not
// be resubmitted to any table. serviceLB does Endpoint selection for traffic
// to a Service.
func (c *client) connectionTrackFlows(category cookie.Category) []binding.Flow {
connectionTrackTable := c.pipeline[conntrackTable]
connectionTrackStateTable := c.pipeline[conntrackStateTable]
connectionTrackCommitTable := c.pipeline[conntrackCommitTable]
var flows []binding.Flow
if c.enableProxy {
flows = append(flows,
// Replace the default flow with multiple resubmits actions.
connectionTrackStateTable.BuildFlow(priorityMiss).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().ResubmitToTable(sessionAffinityTable).
Action().ResubmitToTable(serviceLBTable).
Done(),
// Enable NAT.
connectionTrackTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
Action().CT(false, connectionTrackTable.GetNext(), CtZone).NAT().CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
connectionTrackCommitTable.BuildFlow(priorityLow).MatchProtocol(binding.ProtocolIP).
MatchCTStateTrk(true).
MatchCTMark(serviceCTMark).
MatchRegRange(int(serviceLearnReg), marksRegServiceSelected, serviceLearnRegRange).
Cookie(c.cookieAllocator.Request(category).Raw()).
Action().GotoTable(connectionTrackCommitTable.GetNext()).
Done(),
)
} else {
flows = append(flows,
connectionTrackTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
Action().CT(false, connectionTrackTable.GetNext(), CtZone).CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
)
}
return append(flows,
connectionTrackStateTable.BuildFlow(priorityHigh).MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), markTrafficFromGateway, binding.Range{0, 15}).
MatchCTMark(gatewayCTMark).
MatchCTStateNew(false).MatchCTStateTrk(true).
Action().GotoTable(connectionTrackStateTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
connectionTrackStateTable.BuildFlow(priorityLow).MatchProtocol(binding.ProtocolIP).
MatchCTStateInv(true).MatchCTStateTrk(true).
Action().Drop().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
connectionTrackCommitTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), markTrafficFromGateway, binding.Range{0, 15}).
MatchCTStateNew(true).MatchCTStateTrk(true).
Action().CT(true, connectionTrackCommitTable.GetNext(), CtZone).LoadToMark(gatewayCTMark).CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
connectionTrackCommitTable.BuildFlow(priorityLow).MatchProtocol(binding.ProtocolIP).
MatchCTStateNew(true).MatchCTStateTrk(true).
Action().CT(true, connectionTrackCommitTable.GetNext(), CtZone).CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
)
}
// TODO: Use DuplicateToBuilder or integrate this function into original one to avoid unexpected difference.
// traceflowConnectionTrackFlows generates a Traceflow-specific flow that bypasses the drop flow in
// connectionTrackFlows, to avoid unexpected packet drops in Traceflow.
func (c *client) traceflowConnectionTrackFlows(dataplaneTag uint8, category cookie.Category) binding.Flow {
connectionTrackStateTable := c.pipeline[conntrackStateTable]
return connectionTrackStateTable.BuildFlow(priorityNormal+2).
MatchRegRange(int(TraceflowReg), uint32(dataplaneTag), OfTraceflowMarkRange).
SetHardTimeout(300).
Action().ResubmitToTable(connectionTrackStateTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// reEntranceBypassCTFlow generates a flow that bypasses CT for traffic re-entering the host network space.
// In the host network space, we disable conntrack for re-entrance traffic so as not to confuse conntrack
// in the host namespace. However, this also has an adverse effect on conntrack in the Antrea conntrack
// zone: all subsequent re-entrance traffic becomes invalid.
func (c *client) reEntranceBypassCTFlow(gwPort, reentPort uint32, category cookie.Category) binding.Flow {
conntrackCommitTable := c.pipeline[conntrackCommitTable]
return conntrackCommitTable.BuildFlow(priorityHigh).MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), portFoundMark, ofPortMarkRange).
MatchInPort(gwPort).MatchReg(int(portCacheReg), reentPort).
Action().GotoTable(conntrackCommitTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// ctRewriteDstMACFlow rewrites the destination MAC with the local host gateway MAC if the packet has the ct_mark set but was not sent from the host gateway.
func (c *client) ctRewriteDstMACFlow(gatewayMAC net.HardwareAddr, category cookie.Category) binding.Flow {
connectionTrackStateTable := c.pipeline[conntrackStateTable]
macData, _ := strconv.ParseUint(strings.Replace(gatewayMAC.String(), ":", "", -1), 16, 64)
return connectionTrackStateTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchCTMark(gatewayCTMark).
MatchCTStateNew(false).MatchCTStateTrk(true).
Action().LoadRange(binding.NxmFieldDstMAC, macData, binding.Range{0, 47}).
Action().GotoTable(connectionTrackStateTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// serviceLBBypassFlow makes packets that belong to a tracked connection bypass
// service LB tables and enter egressRuleTable directly.
func (c *client) serviceLBBypassFlow() binding.Flow {
connectionTrackStateTable := c.pipeline[conntrackStateTable]
return connectionTrackStateTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchCTMark(serviceCTMark).
MatchCTStateNew(false).MatchCTStateTrk(true).
Action().LoadRegRange(int(marksReg), macRewriteMark, macRewriteMarkRange).
Action().GotoTable(EgressRuleTable).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// l2ForwardCalcFlow generates the flow that matches dst MAC and loads ofPort to reg.
func (c *client) l2ForwardCalcFlow(dstMAC net.HardwareAddr, ofPort uint32, category cookie.Category) binding.Flow {
l2FwdCalcTable := c.pipeline[l2ForwardingCalcTable]
return l2FwdCalcTable.BuildFlow(priorityNormal).
MatchDstMAC(dstMAC).
Action().LoadRegRange(int(portCacheReg), ofPort, ofPortRegRange).
Action().LoadRegRange(int(marksReg), portFoundMark, ofPortMarkRange).
Action().GotoTable(l2FwdCalcTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l2ForwardOutputFlow generates the flow that outputs packets to OVS port after L2 forwarding calculation.
func (c *client) l2ForwardOutputFlow(category cookie.Category) binding.Flow {
return c.pipeline[L2ForwardingOutTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), portFoundMark, ofPortMarkRange).
Action().OutputRegRange(int(portCacheReg), ofPortRegRange).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// traceflowL2ForwardOutputFlow generates a Traceflow-specific flow that outputs Traceflow packets to the OVS
// port and the Antrea Agent after L2 forwarding calculation.
func (c *client) traceflowL2ForwardOutputFlow(dataplaneTag uint8, category cookie.Category) binding.Flow {
regName := fmt.Sprintf("%s%d", binding.NxmFieldReg, TraceflowReg)
tunMetadataName := fmt.Sprintf("%s%d", binding.NxmFieldTunMetadata, 0)
return c.pipeline[L2ForwardingOutTable].BuildFlow(priorityNormal+2).
MatchRegRange(int(TraceflowReg), uint32(dataplaneTag), OfTraceflowMarkRange).
SetHardTimeout(300).
MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), portFoundMark, ofPortMarkRange).
Action().MoveRange(regName, tunMetadataName, OfTraceflowMarkRange, OfTraceflowMarkRange).
Action().OutputRegRange(int(portCacheReg), ofPortRegRange).
Action().SendToController(1).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l2ForwardOutputReentInPortFlow generates the flow that forwards re-entrance peer Node traffic via antrea-gw0.
// This flow supersedes the default output flow because OVS by default skips packets whose output port equals
// the input port.
func (c *client) l2ForwardOutputReentInPortFlow(gwPort uint32, category cookie.Category) binding.Flow {
return c.pipeline[L2ForwardingOutTable].BuildFlow(priorityHigh).MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), portFoundMark, ofPortMarkRange).
MatchInPort(gwPort).MatchReg(int(portCacheReg), gwPort).
Action().SetSrcMAC(ReentranceMAC).
Action().OutputInPort().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l2ForwardOutputServiceHairpinFlow uses the in_port action for Service
// hairpin packets, to avoid the packets being dropped by OVS.
func (c *client) l2ForwardOutputServiceHairpinFlow() binding.Flow {
return c.pipeline[L2ForwardingOutTable].BuildFlow(priorityHigh).MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), hairpinMark, hairpinMarkRange).
Action().OutputInPort().
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// l3BypassMACRewriteFlow bypasses the remaining l3Forwarding flows if the MAC was already set via
// ctRewriteDstMACFlow in the conntrackState stage.
func (c *client) l3BypassMACRewriteFlow(gatewayMAC net.HardwareAddr, category cookie.Category) binding.Flow {
l3FwdTable := c.pipeline[l3ForwardingTable]
return l3FwdTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchCTMark(gatewayCTMark).
MatchDstMAC(gatewayMAC).
Action().GotoTable(l3FwdTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l3FlowsToPod generates the flow to rewrite MAC if the packet is received from tunnel port and destined for local Pods.
func (c *client) l3FlowsToPod(localGatewayMAC net.HardwareAddr, podInterfaceIP net.IP, podInterfaceMAC net.HardwareAddr, category cookie.Category) binding.Flow {
l3FwdTable := c.pipeline[l3ForwardingTable]
flowBuilder := l3FwdTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP)
if c.enableProxy {
flowBuilder = flowBuilder.MatchRegRange(int(marksReg), macRewriteMark, macRewriteMarkRange)
} else {
flowBuilder = flowBuilder.MatchDstMAC(globalVirtualMAC)
}
// Rewrite src MAC to local gateway MAC, and rewrite dst MAC to pod MAC
return flowBuilder.
MatchDstIP(podInterfaceIP).
Action().SetSrcMAC(localGatewayMAC).
Action().SetDstMAC(podInterfaceMAC).
Action().DecTTL().
Action().GotoTable(l3FwdTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l3ToPodFlow generates the flow to rewrite the destination MAC if the packet IP matches a local Pod IP.
// This flow is used in policy-only traffic mode.
func (c *client) l3ToPodFlow(podInterfaceIP net.IP, podInterfaceMAC net.HardwareAddr, category cookie.Category) binding.Flow {
l3FwdTable := c.pipeline[l3ForwardingTable]
return l3FwdTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchDstIP(podInterfaceIP).
Action().SetDstMAC(podInterfaceMAC).
Action().DecTTL().
Action().GotoTable(l3FwdTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l3ToGWFlow generates the flow to rewrite the destination MAC to the gateway MAC if the received packet is
// not matched by any local Pod flow. This flow is used in policy-only traffic mode.
func (c *client) l3ToGWFlow(gwMAC net.HardwareAddr, category cookie.Category) binding.Flow {
l3FwdTable := c.pipeline[l3ForwardingTable]
return l3FwdTable.BuildFlow(priorityLow).MatchProtocol(binding.ProtocolIP).
Action().SetDstMAC(gwMAC).
Action().DecTTL().
Action().GotoTable(l3FwdTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l3ToGatewayFlow generates the flow that rewrites the MAC of a packet received from the tunnel port and destined to the local gateway.
func (c *client) l3ToGatewayFlow(localGatewayIP net.IP, localGatewayMAC net.HardwareAddr, category cookie.Category) binding.Flow {
l3FwdTable := c.pipeline[l3ForwardingTable]
return l3FwdTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchDstMAC(globalVirtualMAC).
MatchDstIP(localGatewayIP).
Action().SetDstMAC(localGatewayMAC).
Action().GotoTable(l3FwdTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l3FwdFlowToRemote generates the L3 forward flow on the source Node to support traffic to remote Pods/gateways.
func (c *client) l3FwdFlowToRemote(
localGatewayMAC net.HardwareAddr,
peerSubnet net.IPNet,
tunnelPeer net.IP,
tunOFPort uint32,
category cookie.Category) binding.Flow {
return c.pipeline[l3ForwardingTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchDstIPNet(peerSubnet).
Action().DecTTL().
// Rewrite src MAC to local gateway MAC and rewrite dst MAC to virtual MAC.
Action().SetSrcMAC(localGatewayMAC).
Action().SetDstMAC(globalVirtualMAC).
// Load ofport of the tunnel interface.
Action().LoadRegRange(int(portCacheReg), tunOFPort, ofPortRegRange).
// Set MAC-known.
Action().LoadRegRange(int(marksReg), portFoundMark, ofPortMarkRange).
// Flow based tunnel. Set tunnel destination.
Action().SetTunnelDst(tunnelPeer).
// Bypass l2ForwardingCalcTable and tables for ingress rules (which won't
// apply to packets to remote Nodes).
Action().GotoTable(conntrackCommitTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// l3FwdFlowToRemoteViaGW generates the L3 forward flow on the source Node to support traffic to remote Nodes via the gateway.
func (c *client) l3FwdFlowToRemoteViaGW(
localGatewayMAC net.HardwareAddr,
peerSubnet net.IPNet,
category cookie.Category) binding.Flow {
l3FwdTable := c.pipeline[l3ForwardingTable]
return l3FwdTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchDstIPNet(peerSubnet).
Action().DecTTL().
Action().SetDstMAC(localGatewayMAC).
Action().GotoTable(l3FwdTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// arpResponderFlow generates the ARP responder flow entry that replies to ARP requests from the local gateway
// asking for the peer gateway MAC.
func (c *client) arpResponderFlow(peerGatewayIP net.IP, category cookie.Category) binding.Flow {
return c.pipeline[arpResponderTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP).
MatchARPOp(1).
MatchARPTpa(peerGatewayIP).
Action().Move(binding.NxmFieldSrcMAC, binding.NxmFieldDstMAC).
Action().SetSrcMAC(globalVirtualMAC).
Action().LoadARPOperation(2).
Action().Move(binding.NxmFieldARPSha, binding.NxmFieldARPTha).
Action().SetARPSha(globalVirtualMAC).
Action().Move(binding.NxmFieldARPSpa, binding.NxmFieldARPTpa).
Action().SetARPSpa(peerGatewayIP).
Action().OutputInPort().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// arpResponderStaticFlow generates an ARP reply for any ARP request with the same global virtual MAC.
// This flow is used in policy-only mode, where traffic is routed via IP, not MAC.
func (c *client) arpResponderStaticFlow(category cookie.Category) binding.Flow {
return c.pipeline[arpResponderTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP).
MatchARPOp(1).
Action().Move(binding.NxmFieldSrcMAC, binding.NxmFieldDstMAC).
Action().SetSrcMAC(globalVirtualMAC).
Action().LoadARPOperation(2).
Action().Move(binding.NxmFieldARPSha, binding.NxmFieldARPTha).
Action().SetARPSha(globalVirtualMAC).
Action().Move(binding.NxmFieldARPTpa, swapReg.nxm()).
Action().Move(binding.NxmFieldARPSpa, binding.NxmFieldARPTpa).
Action().Move(swapReg.nxm(), binding.NxmFieldARPSpa).
Action().OutputInPort().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// podIPSpoofGuardFlow generates the flow to check IP traffic sent out from a local Pod. Traffic from the host
// gateway interface will not be checked, since it might be Pod-to-Service traffic or host namespace traffic.
func (c *client) podIPSpoofGuardFlow(ifIP net.IP, ifMAC net.HardwareAddr, ifOFPort uint32, category cookie.Category) binding.Flow {
ipPipeline := c.pipeline
ipSpoofGuardTable := ipPipeline[spoofGuardTable]
return ipSpoofGuardTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchInPort(ifOFPort).
MatchSrcMAC(ifMAC).
MatchSrcIP(ifIP).
Action().GotoTable(ipSpoofGuardTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// serviceHairpinResponseDNATFlow generates the flow which transforms destination
// IP of the hairpin packet to the source IP.
func (c *client) serviceHairpinResponseDNATFlow() binding.Flow {
return c.pipeline[serviceHairpinTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchDstIP(hairpinIP).
Action().Move("NXM_OF_IP_SRC", "NXM_OF_IP_DST").
Action().LoadRegRange(int(marksReg), hairpinMark, hairpinMarkRange).
Action().GotoTable(conntrackTable).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// gatewayARPSpoofGuardFlow generates the flow to check ARP traffic sent out from the local gateway interface.
func (c *client) gatewayARPSpoofGuardFlow(gatewayOFPort uint32, gatewayIP net.IP, gatewayMAC net.HardwareAddr, category cookie.Category) binding.Flow {
return c.pipeline[spoofGuardTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP).
MatchInPort(gatewayOFPort).
MatchARPSha(gatewayMAC).
MatchARPSpa(gatewayIP).
Action().GotoTable(arpResponderTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// arpSpoofGuardFlow generates the flow to check ARP traffic sent out from local Pod interfaces.
func (c *client) arpSpoofGuardFlow(ifIP net.IP, ifMAC net.HardwareAddr, ifOFPort uint32, category cookie.Category) binding.Flow {
return c.pipeline[spoofGuardTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolARP).
MatchInPort(ifOFPort).
MatchARPSha(ifMAC).
MatchARPSpa(ifIP).
Action().GotoTable(arpResponderTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// gatewayIPSpoofGuardFlow generates the flow to skip spoof guard checking for traffic sent from gateway interface.
func (c *client) gatewayIPSpoofGuardFlow(gatewayOFPort uint32, category cookie.Category) binding.Flow {
ipPipeline := c.pipeline
ipSpoofGuardTable := ipPipeline[spoofGuardTable]
return ipSpoofGuardTable.BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchInPort(gatewayOFPort).
Action().GotoTable(ipSpoofGuardTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// sessionAffinityReselectFlow generates the flow which resubmits the Service-accessing
// packet back to serviceLBTable if there is no endpointDNAT flow matched. This
// case will occur if an Endpoint is removed while it is the learned Endpoint
// selection of the Service.
func (c *client) sessionAffinityReselectFlow() binding.Flow {
return c.pipeline[endpointDNATTable].BuildFlow(priorityLow).
MatchRegRange(int(serviceLearnReg), marksRegServiceSelected, serviceLearnRegRange).
Action().LoadRegRange(int(serviceLearnReg), marksRegServiceNeedLB, serviceLearnRegRange).
Action().ResubmitToTable(serviceLBTable).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// serviceCIDRDNATFlow generates flows to match dst IP in service CIDR and output to host gateway interface directly.
func (c *client) serviceCIDRDNATFlow(serviceCIDR *net.IPNet, gatewayMAC net.HardwareAddr, gatewayOFPort uint32) binding.Flow {
return c.pipeline[dnatTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
MatchDstIPNet(*serviceCIDR).
Action().SetDstMAC(gatewayMAC).
Action().LoadRegRange(int(portCacheReg), gatewayOFPort, ofPortRegRange).
Action().LoadRegRange(int(marksReg), portFoundMark, ofPortMarkRange).
Action().GotoTable(conntrackCommitTable).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
}
// serviceNeedLBFlow generates the flow to mark packets as needing Service load balancing.
func (c *client) serviceNeedLBFlow() binding.Flow {
return c.pipeline[sessionAffinityTable].BuildFlow(priorityMiss).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Action().LoadRegRange(int(serviceLearnReg), marksRegServiceNeedLB, serviceLearnRegRange).
Done()
}
// arpNormalFlow generates the flow to respond to ARP in the normal way if no flow in arpResponderTable is matched.
func (c *client) arpNormalFlow(category cookie.Category) binding.Flow {
return c.pipeline[arpResponderTable].BuildFlow(priorityLow).MatchProtocol(binding.ProtocolARP).
Action().Normal().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
// conjunctionActionFlow generates the flow to jump to a specific table if policyRuleConjunction ID is matched. The
// flow is created at priorityLow for K8s NetworkPolicies, and at the *priority assigned by PriorityAssigner for CNPs.
func (c *client) conjunctionActionFlow(conjunctionID uint32, tableID binding.TableIDType, nextTable binding.TableIDType, priority *uint16) binding.Flow {
var ofPriority uint16
if priority == nil {
ofPriority = priorityLow
} else {
ofPriority = *priority
}
conjReg := IngressReg
if tableID == EgressRuleTable {
conjReg = EgressReg
}
return c.pipeline[tableID].BuildFlow(ofPriority).MatchProtocol(binding.ProtocolIP).
MatchConjID(conjunctionID).
MatchPriority(ofPriority).
Action().LoadRegRange(int(conjReg), conjunctionID, binding.Range{0, 31}). // Traceflow.
Action().GotoTable(nextTable).
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
// conjunctionActionDropFlow generates the flow to drop traffic if policyRuleConjunction ID is matched.
func (c *client) conjunctionActionDropFlow(conjunctionID uint32, tableID binding.TableIDType, priority *uint16) binding.Flow {
ofPriority := *priority
return c.pipeline[tableID].BuildFlow(ofPriority).MatchProtocol(binding.ProtocolIP).
MatchConjID(conjunctionID).
MatchPriority(ofPriority).
Action().Drop().
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
func (c *client) Disconnect() error {
return c.bridge.Disconnect()
}
func newFlowCategoryCache() *flowCategoryCache {
return &flowCategoryCache{}
}
// establishedConnectionFlows generates flows to ensure established connections skip the NetworkPolicy rules.
func (c *client) establishedConnectionFlows(category cookie.Category) (flows []binding.Flow) {
	// egressDropTable checks the source address of packets, and drops packets sent from the AppliedToGroup but not
	// matching the NetworkPolicy rules. Packets in established connections need not be checked against the
	// egressRuleTable or the egressDropTable.
egressDropTable := c.pipeline[EgressDefaultTable]
egressEstFlow := c.pipeline[EgressRuleTable].BuildFlow(priorityHigh).MatchProtocol(binding.ProtocolIP).
MatchCTStateNew(false).MatchCTStateEst(true).
Action().GotoTable(egressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
cnpEgressEstFlow := c.pipeline[cnpEgressRuleTable].BuildFlow(priorityTopCNP).MatchProtocol(binding.ProtocolIP).
MatchCTStateNew(false).MatchCTStateEst(true).
Action().GotoTable(egressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
	// ingressDropTable checks the destination address of packets, and drops packets sent to the AppliedToGroup but not
	// matching the NetworkPolicy rules. Packets in established connections need not be checked against the
	// ingressRuleTable or ingressDropTable.
ingressDropTable := c.pipeline[IngressDefaultTable]
ingressEstFlow := c.pipeline[IngressRuleTable].BuildFlow(priorityHigh).MatchProtocol(binding.ProtocolIP).
MatchCTStateNew(false).MatchCTStateEst(true).
Action().GotoTable(ingressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
cnpIngressEstFlow := c.pipeline[cnpIngressRuleTable].BuildFlow(priorityTopCNP).MatchProtocol(binding.ProtocolIP).
MatchCTStateNew(false).MatchCTStateEst(true).
Action().GotoTable(ingressDropTable.GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
return []binding.Flow{egressEstFlow, ingressEstFlow, cnpEgressEstFlow, cnpIngressEstFlow}
}
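// addFlowMatch adds the match specified by the matchType/matchValue pair to the FlowBuilder and
// returns the augmented builder.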
func (c *client) addFlowMatch(fb binding.FlowBuilder, matchType int, matchValue interface{}) binding.FlowBuilder {
switch matchType {
case MatchDstIP:
fb = fb.MatchProtocol(binding.ProtocolIP).MatchDstIP(matchValue.(net.IP))
case MatchDstIPNet:
fb = fb.MatchProtocol(binding.ProtocolIP).MatchDstIPNet(matchValue.(net.IPNet))
case MatchSrcIP:
fb = fb.MatchProtocol(binding.ProtocolIP).MatchSrcIP(matchValue.(net.IP))
case MatchSrcIPNet:
fb = fb.MatchProtocol(binding.ProtocolIP).MatchSrcIPNet(matchValue.(net.IPNet))
case MatchDstOFPort:
// ofport number in NXM_NX_REG1 is used in ingress rule to match packets sent to local Pod.
fb = fb.MatchProtocol(binding.ProtocolIP).MatchReg(int(portCacheReg), uint32(matchValue.(int32)))
case MatchSrcOFPort:
fb = fb.MatchProtocol(binding.ProtocolIP).MatchInPort(uint32(matchValue.(int32)))
case MatchTCPDstPort:
fb = fb.MatchProtocol(binding.ProtocolTCP)
portValue := matchValue.(uint16)
if portValue > 0 {
fb = fb.MatchTCPDstPort(portValue)
}
case MatchUDPDstPort:
fb = fb.MatchProtocol(binding.ProtocolUDP)
portValue := matchValue.(uint16)
if portValue > 0 {
fb = fb.MatchUDPDstPort(portValue)
}
case MatchSCTPDstPort:
fb = fb.MatchProtocol(binding.ProtocolSCTP)
portValue := matchValue.(uint16)
if portValue > 0 {
fb = fb.MatchSCTPDstPort(portValue)
}
}
return fb
}
// conjunctionExceptionFlow generates the flow to jump to a specific table if both the policyRuleConjunction ID and
// the except address are matched. Kept for reference as a generic exception flow.
func (c *client) conjunctionExceptionFlow(conjunctionID uint32, tableID binding.TableIDType, nextTable binding.TableIDType, matchKey int, matchValue interface{}) binding.Flow {
conjReg := IngressReg
if tableID == EgressRuleTable {
conjReg = EgressReg
}
fb := c.pipeline[tableID].BuildFlow(priorityNormal).MatchConjID(conjunctionID)
return c.addFlowMatch(fb, matchKey, matchValue).
Action().LoadRegRange(int(conjReg), conjunctionID, binding.Range{0, 31}). // Traceflow.
Action().GotoTable(nextTable).
Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).
Done()
}
// conjunctiveMatchFlow generates the flow to set conjunctive actions if the match condition is matched.
func (c *client) conjunctiveMatchFlow(tableID binding.TableIDType, matchKey int, matchValue interface{}, priority *uint16, actions ...*conjunctiveAction) binding.Flow {
var ofPriority uint16
if priority != nil {
ofPriority = *priority
} else {
ofPriority = priorityNormal
}
fb := c.pipeline[tableID].BuildFlow(ofPriority)
fb = c.addFlowMatch(fb, matchKey, matchValue)
for _, act := range actions {
fb.Action().Conjunction(act.conjID, act.clauseID, act.nClause)
}
return fb.Cookie(c.cookieAllocator.Request(cookie.Policy).Raw()).Done()
}
// defaultDropFlow generates the flow to drop packets if the match condition is matched.
func (c *client) defaultDropFlow(tableID binding.TableIDType, matchKey int, matchValue interface{}) binding.Flow {
fb := c.pipeline[tableID].BuildFlow(priorityNormal)
return c.addFlowMatch(fb, matchKey, matchValue).
Action().Drop().
Cookie(c.cookieAllocator.Request(cookie.Default).Raw()).
Done()
}
// localProbeFlow generates the flow to forward packets to conntrackCommitTable. The packets are sent from the Node to probe the liveness/readiness of local Pods.
func (c *client) localProbeFlow(localGatewayIP net.IP, category cookie.Category) binding.Flow {
return c.pipeline[IngressRuleTable].BuildFlow(priorityHigh).
MatchProtocol(binding.ProtocolIP).
MatchSrcIP(localGatewayIP).
Action().GotoTable(conntrackCommitTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done()
}
func (c *client) bridgeAndUplinkFlows(uplinkOfport uint32, bridgeLocalPort uint32, nodeIP net.IP, localSubnet net.IPNet, category cookie.Category) []binding.Flow {
	snatIPRange := &binding.IPRange{StartIP: nodeIP, EndIP: nodeIP}
vMACInt, _ := strconv.ParseUint(strings.Replace(globalVirtualMAC.String(), ":", "", -1), 16, 64)
ctStateNext := dnatTable
if c.enableProxy {
ctStateNext = endpointDNATTable
}
flows := []binding.Flow{
// Forward the packet to conntrackTable if it enters the OVS pipeline from the uplink interface.
c.pipeline[ClassifierTable].BuildFlow(priorityNormal).
MatchProtocol(binding.ProtocolIP).
MatchInPort(uplinkOfport).
Action().LoadRegRange(int(marksReg), markTrafficFromUplink, binding.Range{0, 15}).
Action().GotoTable(conntrackTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Forward the packet to conntrackTable if it enters the OVS pipeline from the bridge interface and is sent to
// local Pods.
c.pipeline[ClassifierTable].BuildFlow(priorityHigh).
MatchProtocol(binding.ProtocolIP).
MatchInPort(bridgeLocalPort).
MatchDstIPNet(localSubnet).
Action().SetDstMAC(globalVirtualMAC).
Action().GotoTable(conntrackTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Enforce IP packet into the conntrack zone with SNAT. If the connection is SNATed, the reply packet should use
// Pod IP as the destination, and then is forwarded to conntrackStateTable.
c.pipeline[conntrackTable].BuildFlow(priorityNormal).MatchProtocol(binding.ProtocolIP).
Action().CT(false, conntrackStateTable, CtZone).NAT().CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Rewrite dMAC with the global vMAC if the packet is a reply to the Pod from the external address.
c.pipeline[conntrackStateTable].BuildFlow(priorityHigh).
MatchProtocol(binding.ProtocolIP).
MatchCTStateNew(false).MatchCTStateTrk(true).
MatchCTMark(snatCTMark).
MatchRegRange(int(marksReg), markTrafficFromUplink, binding.Range{0, 15}).
Action().LoadRange(binding.NxmFieldDstMAC, vMACInt, binding.Range{0, 47}).
Action().LoadRegRange(int(marksReg), macRewriteMark, macRewriteMarkRange).
Action().GotoTable(ctStateNext).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Forward the packet to dnatTable if it is sent from local Pod to the external IP address.
c.pipeline[conntrackStateTable].BuildFlow(priorityNormal).
MatchProtocol(binding.ProtocolIP).
MatchCTStateNew(false).MatchCTStateTrk(true).
MatchCTMark(snatCTMark).
Action().GotoTable(ctStateNext).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Output the non-SNAT packet to the bridge interface directly if it is received from the uplink interface.
c.pipeline[conntrackStateTable].BuildFlow(priorityNormal).
MatchProtocol(binding.ProtocolIP).
MatchInPort(uplinkOfport).
Action().Output(int(bridgeLocalPort)).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
		// Enforce the packet into the L2ForwardingOutTable after it is SNATed. The "SNAT" packet has these
		// characteristics: 1) the ct_state is "+new+trk"; 2) reg0[17] is set to 1; 3) the Node IP is used as the
		// target source IP in the NAT action; 4) ct_mark is set to 0x40 in the conn_track context.
c.pipeline[conntrackCommitTable].BuildFlow(priorityNormal).
MatchProtocol(binding.ProtocolIP).
MatchCTStateNew(true).MatchCTStateTrk(true).
MatchRegRange(int(marksReg), snatRequiredMark, snatMarkRange).
Action().CT(true, L2ForwardingOutTable, CtZone).
SNAT(snatIPRange, nil).
LoadToMark(snatCTMark).CTDone().
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
}
return flows
}
func (c *client) l3ToExternalFlows(nodeIP net.IP, localSubnet net.IPNet, outputPort int, category cookie.Category) []binding.Flow {
flows := []binding.Flow{
// Forward the packet to L2ForwardingCalc table if it is communicating to a Service.
c.pipeline[l3ForwardingTable].BuildFlow(priorityNormal).
MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), markTrafficFromLocal, binding.Range{0, 15}).
MatchCTMark(gatewayCTMark).
Action().GotoTable(l2ForwardingCalcTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
		// Forward the packet to the L2ForwardingCalc table if it is sent to the Node IP (not to the host gateway).
		// Since the packet is using the host gateway's MAC as dst MAC, it will be sent out from "antrea-gw0". This
		// flow entry is to avoid SNAT on such packets; otherwise the source and destination IPs would be the same.
c.pipeline[l3ForwardingTable].BuildFlow(priorityLow).
MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), markTrafficFromLocal, binding.Range{0, 15}).
MatchDstIP(nodeIP).
Action().GotoTable(l2ForwardingCalcTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Forward the packet to L2ForwardingCalc table if it is a packet sent to a local Pod. This flow entry has a
// low priority to avoid overlapping with those packets received from tunnel port.
c.pipeline[l3ForwardingTable].BuildFlow(priorityLow).
MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), markTrafficFromLocal, binding.Range{0, 15}).
MatchDstIPNet(localSubnet).
Action().GotoTable(c.pipeline[l3ForwardingTable].GetNext()).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Add SNAT mark on the packet that is not filtered by other flow entries in L3Forwarding table. This is the
// table miss if SNAT feature is enabled.
c.pipeline[l3ForwardingTable].BuildFlow(prioritySNAT).
MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), markTrafficFromLocal, binding.Range{0, 15}).
Action().LoadRegRange(int(marksReg), snatRequiredMark, snatMarkRange).
Action().GotoTable(IngressRuleTable).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
// Output the SNATed packet to the specified port.
c.pipeline[L2ForwardingOutTable].BuildFlow(priorityNormal).
MatchProtocol(binding.ProtocolIP).
MatchRegRange(int(marksReg), snatRequiredMark, snatMarkRange).
Action().Output(outputPort).
Cookie(c.cookieAllocator.Request(category).Raw()).
Done(),
}
return flows
}
// serviceLearnFlow generates the flow with the learn action, which adds new flows to
// sessionAffinityTable according to the Endpoint selection decision.
func (c *client) serviceLearnFlow(groupID binding.GroupIDType, svcIP net.IP, svcPort uint16, protocol binding.Protocol, affinityTimeout uint16) binding.Flow {
// Using unique cookie ID here to avoid learned flow cascade deletion.
cookieID := c.cookieAllocator.RequestWithObjectID(cookie.Service, uint32(groupID)).Raw()
learnFlowBuilder := c.pipeline[serviceLBTable].BuildFlow(priorityLow).
MatchRegRange(int(serviceLearnReg), marksRegServiceNeedLearn, serviceLearnRegRange).
MatchDstIP(svcIP).
Cookie(cookieID)
learnFlowBuilderLearnAction := learnFlowBuilder.
Action().Learn(sessionAffinityTable, priorityNormal, affinityTimeout, 0, cookieID).
DeleteLearned()
if protocol == binding.ProtocolTCP {
learnFlowBuilder = learnFlowBuilder.MatchTCPDstPort(svcPort)
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedTCPDstPort()
} else if protocol == binding.ProtocolUDP {
learnFlowBuilder = learnFlowBuilder.MatchUDPDstPort(svcPort)
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedUDPDstPort()
} else if protocol == binding.ProtocolSCTP {
learnFlowBuilder = learnFlowBuilder.MatchSCTPDstPort(svcPort)
learnFlowBuilderLearnAction = learnFlowBuilderLearnAction.MatchLearnedSCTPDstPort()
}
return learnFlowBuilderLearnAction.
MatchLearnedDstIP().
MatchLearnedSrcIP().
LoadRegToReg(int(endpointIPReg), int(endpointIPReg), endpointIPRegRange, endpointIPRegRange).
LoadRegToReg(int(endpointPortReg), int(endpointPortReg), endpointPortRegRange, endpointPortRegRange).
LoadReg(int(serviceLearnReg), marksRegServiceSelected, serviceLearnRegRange).
LoadReg(int(marksReg), macRewriteMark, macRewriteMarkRange).
Done().
Action().LoadRegRange(int(serviceLearnReg), marksRegServiceSelected, serviceLearnRegRange).
Action().GotoTable(endpointDNATTable).
Done()
}
// serviceLBFlow generates the flow which uses the specific group to do Endpoint
// selection.
func (c *client) serviceLBFlow(groupID binding.GroupIDType, svcIP net.IP, svcPort uint16, protocol binding.Protocol) binding.Flow {
lbFlowBuilder := c.pipeline[serviceLBTable].BuildFlow(priorityNormal)
if protocol == binding.ProtocolTCP {
lbFlowBuilder = lbFlowBuilder.MatchTCPDstPort(svcPort)
} else if protocol == binding.ProtocolUDP {
lbFlowBuilder = lbFlowBuilder.MatchUDPDstPort(svcPort)
} else if protocol == binding.ProtocolSCTP {
lbFlowBuilder = lbFlowBuilder.MatchSCTPDstPort(svcPort)
}
lbFlow := lbFlowBuilder.
MatchDstIP(svcIP).
MatchRegRange(int(serviceLearnReg), marksRegServiceNeedLB, serviceLearnRegRange).
Action().Group(groupID).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
Done()
return lbFlow
}
// endpointDNATFlow generates the flow which transforms the Service Cluster IP
// to the Endpoint IP according to the Endpoint selection decision which is stored
// in regs.
func (c *client) endpointDNATFlow(endpointIP net.IP, endpointPort uint16, protocol binding.Protocol) binding.Flow {
ipVal := binary.BigEndian.Uint32(endpointIP)
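	// reg4 carries the Endpoint port in bits [0..15] and the Endpoint selection state in bits [16..18],
	// so a single match over range [0..18] checks both values at once.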
unionVal := (marksRegServiceSelected << endpointPortRegRange.Length()) + uint32(endpointPort)
return c.pipeline[endpointDNATTable].BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
MatchProtocol(protocol).
MatchReg(int(endpointIPReg), ipVal).
MatchRegRange(int(endpointPortReg), unionVal, binding.Range{0, 18}).
Action().CT(true, EgressRuleTable, CtZone).
DNAT(
&binding.IPRange{StartIP: endpointIP, EndIP: endpointIP},
&binding.PortRange{StartPort: endpointPort, EndPort: endpointPort},
).
LoadToMark(serviceCTMark).
CTDone().
Done()
}
// hairpinSNATFlow generates the flow which does SNAT for Service
// hairpin packets and loads the hairpin mark to marksReg.
func (c *client) hairpinSNATFlow(endpointIP net.IP) binding.Flow {
return c.pipeline[hairpinSNATTable].BuildFlow(priorityNormal).
Cookie(c.cookieAllocator.Request(cookie.Service).Raw()).
MatchProtocol(binding.ProtocolIP).
MatchDstIP(endpointIP).
MatchSrcIP(endpointIP).
Action().SetSrcIP(hairpinIP).
Action().LoadRegRange(int(marksReg), hairpinMark, hairpinMarkRange).
Action().GotoTable(L2ForwardingOutTable).
Done()
}
// serviceEndpointGroup creates/modifies the group/buckets of Endpoints. If
// withSessionAffinity is true, the buckets will resubmit packets back to
// serviceLBTable to trigger the learn flow; the learn flow will then send
// packets to endpointDNATTable. Otherwise, the buckets will resubmit packets
// to endpointDNATTable directly.
func (c *client) serviceEndpointGroup(groupID binding.GroupIDType, withSessionAffinity bool, endpoints ...proxy.Endpoint) binding.Group {
group := c.bridge.CreateGroup(groupID).ResetBuckets()
var resubmitTableID binding.TableIDType
var lbResultMark uint32
if withSessionAffinity {
resubmitTableID = serviceLBTable
lbResultMark = marksRegServiceNeedLearn
} else {
resubmitTableID = endpointDNATTable
lbResultMark = marksRegServiceSelected
}
for _, endpoint := range endpoints {
endpointPort, _ := endpoint.Port()
endpointIP := net.ParseIP(endpoint.IP()).To4()
ipVal := binary.BigEndian.Uint32(endpointIP)
portVal := uint16(endpointPort)
group = group.Bucket().Weight(100).
LoadReg(int(endpointIPReg), ipVal).
LoadRegRange(int(endpointPortReg), uint32(portVal), endpointPortRegRange).
LoadRegRange(int(serviceLearnReg), lbResultMark, serviceLearnRegRange).
LoadRegRange(int(marksReg), macRewriteMark, macRewriteMarkRange).
ResubmitToTable(resubmitTableID).
Done()
}
return group
}
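// Illustrative call site (hypothetical; installing the group is out of scope
// for this file):
//   group := c.serviceEndpointGroup(groupID, true, endpoints...)
// With withSessionAffinity=true each bucket resubmits to serviceLBTable so the
// learn flow can record the selected Endpoint; with false, buckets jump
// straight to endpointDNATTable.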
// policyConjKeyFunc knows how to get the key of a *policyRuleConjunction.
func policyConjKeyFunc(obj interface{}) (string, error) {
conj := obj.(*policyRuleConjunction)
return string(conj.id), nil
}
// priorityIndexFunc knows how to get the priorities of actionFlows in a *policyRuleConjunction.
// It's provided to cache.Indexer to build an index of policyRuleConjunction.
func priorityIndexFunc(obj interface{}) ([]string, error) {
conj := obj.(*policyRuleConjunction)
return conj.ActionFlowPriorities(), nil
}
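// generatePipeline builds the table pipeline of the bridge. With enableProxy
// set, the Service tables (serviceHairpinTable, sessionAffinityTable,
// serviceLBTable and endpointDNATTable) are wired in between spoofGuardTable
// and the policy tables; without it, traffic traverses the legacy dnatTable
// instead.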
func generatePipeline(bridge binding.Bridge, enableProxy bool) map[binding.TableIDType]binding.Table {
if enableProxy {
return map[binding.TableIDType]binding.Table{
ClassifierTable: bridge.CreateTable(ClassifierTable, spoofGuardTable, binding.TableMissActionDrop),
spoofGuardTable: bridge.CreateTable(spoofGuardTable, serviceHairpinTable, binding.TableMissActionDrop),
arpResponderTable: bridge.CreateTable(arpResponderTable, binding.LastTableID, binding.TableMissActionDrop),
serviceHairpinTable: bridge.CreateTable(serviceHairpinTable, conntrackTable, binding.TableMissActionNext),
conntrackTable: bridge.CreateTable(conntrackTable, conntrackStateTable, binding.TableMissActionNone),
conntrackStateTable: bridge.CreateTable(conntrackStateTable, endpointDNATTable, binding.TableMissActionNext),
sessionAffinityTable: bridge.CreateTable(sessionAffinityTable, binding.LastTableID, binding.TableMissActionNone),
serviceLBTable: bridge.CreateTable(serviceLBTable, endpointDNATTable, binding.TableMissActionNext),
endpointDNATTable: bridge.CreateTable(endpointDNATTable, cnpEgressRuleTable, binding.TableMissActionNext),
cnpEgressRuleTable: bridge.CreateTable(cnpEgressRuleTable, EgressRuleTable, binding.TableMissActionNext),
EgressRuleTable: bridge.CreateTable(EgressRuleTable, EgressDefaultTable, binding.TableMissActionNext),
EgressDefaultTable: bridge.CreateTable(EgressDefaultTable, l3ForwardingTable, binding.TableMissActionNext),
l3ForwardingTable: bridge.CreateTable(l3ForwardingTable, l2ForwardingCalcTable, binding.TableMissActionNext),
l2ForwardingCalcTable: bridge.CreateTable(l2ForwardingCalcTable, cnpIngressRuleTable, binding.TableMissActionNext),
cnpIngressRuleTable: bridge.CreateTable(cnpIngressRuleTable, IngressRuleTable, binding.TableMissActionNext),
IngressRuleTable: bridge.CreateTable(IngressRuleTable, IngressDefaultTable, binding.TableMissActionNext),
IngressDefaultTable: bridge.CreateTable(IngressDefaultTable, conntrackCommitTable, binding.TableMissActionNext),
conntrackCommitTable: bridge.CreateTable(conntrackCommitTable, hairpinSNATTable, binding.TableMissActionNext),
hairpinSNATTable: bridge.CreateTable(hairpinSNATTable, L2ForwardingOutTable, binding.TableMissActionNext),
L2ForwardingOutTable: bridge.CreateTable(L2ForwardingOutTable, binding.LastTableID, binding.TableMissActionDrop),
}
}
return map[binding.TableIDType]binding.Table{
ClassifierTable: bridge.CreateTable(ClassifierTable, spoofGuardTable, binding.TableMissActionDrop),
spoofGuardTable: bridge.CreateTable(spoofGuardTable, conntrackTable, binding.TableMissActionDrop),
arpResponderTable: bridge.CreateTable(arpResponderTable, binding.LastTableID, binding.TableMissActionDrop),
conntrackTable: bridge.CreateTable(conntrackTable, conntrackStateTable, binding.TableMissActionNone),
conntrackStateTable: bridge.CreateTable(conntrackStateTable, dnatTable, binding.TableMissActionNext),
dnatTable: bridge.CreateTable(dnatTable, cnpEgressRuleTable, binding.TableMissActionNext),
cnpEgressRuleTable: bridge.CreateTable(cnpEgressRuleTable, EgressRuleTable, binding.TableMissActionNext),
EgressRuleTable: bridge.CreateTable(EgressRuleTable, EgressDefaultTable, binding.TableMissActionNext),
EgressDefaultTable: bridge.CreateTable(EgressDefaultTable, l3ForwardingTable, binding.TableMissActionNext),
l3ForwardingTable: bridge.CreateTable(l3ForwardingTable, l2ForwardingCalcTable, binding.TableMissActionNext),
l2ForwardingCalcTable: bridge.CreateTable(l2ForwardingCalcTable, cnpIngressRuleTable, binding.TableMissActionNext),
cnpIngressRuleTable: bridge.CreateTable(cnpIngressRuleTable, IngressRuleTable, binding.TableMissActionNext),
IngressRuleTable: bridge.CreateTable(IngressRuleTable, IngressDefaultTable, binding.TableMissActionNext),
IngressDefaultTable: bridge.CreateTable(IngressDefaultTable, conntrackCommitTable, binding.TableMissActionNext),
conntrackCommitTable: bridge.CreateTable(conntrackCommitTable, L2ForwardingOutTable, binding.TableMissActionNext),
L2ForwardingOutTable: bridge.CreateTable(L2ForwardingOutTable, binding.LastTableID, binding.TableMissActionDrop),
}
}
// NewClient is the constructor of the Client interface.
func NewClient(bridgeName, mgmtAddr string, enableProxy bool) Client {
bridge := binding.NewOFBridge(bridgeName, mgmtAddr)
policyCache := cache.NewIndexer(
policyConjKeyFunc,
cache.Indexers{priorityIndex: priorityIndexFunc},
)
c := &client{
bridge: bridge,
pipeline: generatePipeline(bridge, enableProxy),
nodeFlowCache: newFlowCategoryCache(),
podFlowCache: newFlowCategoryCache(),
serviceFlowCache: newFlowCategoryCache(),
policyCache: policyCache,
groupCache: sync.Map{},
globalConjMatchFlowCache: map[string]*conjMatchFlowContext{},
packetInHandlers: map[string]PacketInHandler{},
}
c.ofEntryOperations = c
c.enableProxy = enableProxy
return c
}
| 1 | 20,979 | Hi @wenyingd . so what's the original dst MAC of the reply packet from kube-proxy? | antrea-io-antrea | go |
@@ -402,6 +402,15 @@ class MisdesignChecker(BaseChecker):
"statement (see R0916).",
},
),
+ (
+ "exclude-too-few-public-methods",
+ {
+ "default": (),
+ "type": "csv",
+ "metavar": "<comma separated list of class names>",
+ "help": "List of qualified class names to ignore when counting public methods (see R0903)",
+ },
+ ),
)
def __init__(self, linter=None): | 1 | # Copyright (c) 2006, 2009-2010, 2012-2015 LOGILAB S.A. (Paris, FRANCE) <[email protected]>
# Copyright (c) 2012, 2014 Google, Inc.
# Copyright (c) 2014-2020 Claudiu Popa <[email protected]>
# Copyright (c) 2014 Arun Persaud <[email protected]>
# Copyright (c) 2015 Ionel Cristian Maries <[email protected]>
# Copyright (c) 2016 Łukasz Rogalski <[email protected]>
# Copyright (c) 2017 ahirnish <[email protected]>
# Copyright (c) 2018 Lucas Cimon <[email protected]>
# Copyright (c) 2018 Mike Frysinger <[email protected]>
# Copyright (c) 2018 Mark Miller <[email protected]>
# Copyright (c) 2018 Ashley Whetter <[email protected]>
# Copyright (c) 2018 Ville Skyttä <[email protected]>
# Copyright (c) 2018 Jakub Wilk <[email protected]>
# Copyright (c) 2019-2021 Pierre Sassoulas <[email protected]>
# Copyright (c) 2019 Michael Scott Cuthbert <[email protected]>
# Copyright (c) 2020 hippo91 <[email protected]>
# Copyright (c) 2020 Anthony Sottile <[email protected]>
# Copyright (c) 2021 Daniël van Noord <[email protected]>
# Copyright (c) 2021 Melvin <[email protected]>
# Copyright (c) 2021 Rebecca Turner <[email protected]>
# Copyright (c) 2021 Marc Mueller <[email protected]>
# Copyright (c) 2021 yushao2 <[email protected]>
# Licensed under the GPL: https://www.gnu.org/licenses/old-licenses/gpl-2.0.html
# For details: https://github.com/PyCQA/pylint/blob/main/LICENSE
"""check for signs of poor design"""
import re
from collections import defaultdict
from typing import FrozenSet, Iterator, List, Set, cast
import astroid
from astroid import nodes
from pylint import utils
from pylint.checkers import BaseChecker
from pylint.checkers.utils import check_messages
from pylint.interfaces import IAstroidChecker
MSGS = { # pylint: disable=consider-using-namedtuple-or-dataclass
"R0901": (
"Too many ancestors (%s/%s)",
"too-many-ancestors",
"Used when class has too many parent classes, try to reduce "
"this to get a simpler (and so easier to use) class.",
),
"R0902": (
"Too many instance attributes (%s/%s)",
"too-many-instance-attributes",
"Used when class has too many instance attributes, try to reduce "
"this to get a simpler (and so easier to use) class.",
),
"R0903": (
"Too few public methods (%s/%s)",
"too-few-public-methods",
"Used when class has too few public methods, so be sure it's "
"really worth it.",
),
"R0904": (
"Too many public methods (%s/%s)",
"too-many-public-methods",
"Used when class has too many public methods, try to reduce "
"this to get a simpler (and so easier to use) class.",
),
"R0911": (
"Too many return statements (%s/%s)",
"too-many-return-statements",
"Used when a function or method has too many return statement, "
"making it hard to follow.",
),
"R0912": (
"Too many branches (%s/%s)",
"too-many-branches",
"Used when a function or method has too many branches, "
"making it hard to follow.",
),
"R0913": (
"Too many arguments (%s/%s)",
"too-many-arguments",
"Used when a function or method takes too many arguments.",
),
"R0914": (
"Too many local variables (%s/%s)",
"too-many-locals",
"Used when a function or method has too many local variables.",
),
"R0915": (
"Too many statements (%s/%s)",
"too-many-statements",
"Used when a function or method has too many statements. You "
"should then split it in smaller functions / methods.",
),
"R0916": (
"Too many boolean expressions in if statement (%s/%s)",
"too-many-boolean-expressions",
"Used when an if statement contains too many boolean expressions.",
),
}
SPECIAL_OBJ = re.compile("^_{2}[a-z]+_{2}$")
DATACLASSES_DECORATORS = frozenset({"dataclass", "attrs"})
DATACLASS_IMPORT = "dataclasses"
TYPING_NAMEDTUPLE = "typing.NamedTuple"
TYPING_TYPEDDICT = "typing.TypedDict"
# Set of stdlib classes to ignore when calculating number of ancestors
STDLIB_CLASSES_IGNORE_ANCESTOR = frozenset(
(
"builtins.object",
"builtins.tuple",
"builtins.dict",
"builtins.list",
"builtins.set",
"bulitins.frozenset",
"collections.ChainMap",
"collections.Counter",
"collections.OrderedDict",
"collections.UserDict",
"collections.UserList",
"collections.UserString",
"collections.defaultdict",
"collections.deque",
"collections.namedtuple",
"_collections_abc.Awaitable",
"_collections_abc.Coroutine",
"_collections_abc.AsyncIterable",
"_collections_abc.AsyncIterator",
"_collections_abc.AsyncGenerator",
"_collections_abc.Hashable",
"_collections_abc.Iterable",
"_collections_abc.Iterator",
"_collections_abc.Generator",
"_collections_abc.Reversible",
"_collections_abc.Sized",
"_collections_abc.Container",
"_collections_abc.Collection",
"_collections_abc.Set",
"_collections_abc.MutableSet",
"_collections_abc.Mapping",
"_collections_abc.MutableMapping",
"_collections_abc.MappingView",
"_collections_abc.KeysView",
"_collections_abc.ItemsView",
"_collections_abc.ValuesView",
"_collections_abc.Sequence",
"_collections_abc.MutableSequence",
"_collections_abc.ByteString",
"typing.Tuple",
"typing.List",
"typing.Dict",
"typing.Set",
"typing.FrozenSet",
"typing.Deque",
"typing.DefaultDict",
"typing.OrderedDict",
"typing.Counter",
"typing.ChainMap",
"typing.Awaitable",
"typing.Coroutine",
"typing.AsyncIterable",
"typing.AsyncIterator",
"typing.AsyncGenerator",
"typing.Iterable",
"typing.Iterator",
"typing.Generator",
"typing.Reversible",
"typing.Container",
"typing.Collection",
"typing.AbstractSet",
"typing.MutableSet",
"typing.Mapping",
"typing.MutableMapping",
"typing.Sequence",
"typing.MutableSequence",
"typing.ByteString",
"typing.MappingView",
"typing.KeysView",
"typing.ItemsView",
"typing.ValuesView",
"typing.ContextManager",
"typing.AsyncContextManager",
"typing.Hashable",
"typing.Sized",
)
)
def _is_exempt_from_public_methods(node: astroid.ClassDef) -> bool:
"""Check if a class is exempt from too-few-public-methods"""
# If it's a typing.NamedTuple, typing.TypedDict or an Enum
for ancestor in node.ancestors():
if ancestor.name == "Enum" and ancestor.root().name == "enum":
return True
if ancestor.qname() in (TYPING_NAMEDTUPLE, TYPING_TYPEDDICT):
return True
# Or if it's a dataclass
if not node.decorators:
return False
root_locals = set(node.root().locals)
for decorator in node.decorators.nodes:
if isinstance(decorator, astroid.Call):
decorator = decorator.func
if not isinstance(decorator, (astroid.Name, astroid.Attribute)):
continue
if isinstance(decorator, astroid.Name):
name = decorator.name
else:
name = decorator.attrname
if name in DATACLASSES_DECORATORS and (
root_locals.intersection(DATACLASSES_DECORATORS)
or DATACLASS_IMPORT in root_locals
):
return True
return False
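# Illustrative example (not part of the original module): assuming
# ``from dataclasses import dataclass`` at module level, a class such as
#
#     @dataclass
#     class Point:
#         x: int
#         y: int
#
# is exempt from too-few-public-methods.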
def _count_boolean_expressions(bool_op):
"""Counts the number of boolean expressions in BoolOp `bool_op` (recursive)
example: a and (b or c or (d and e)) ==> 5 boolean expressions
"""
nb_bool_expr = 0
for bool_expr in bool_op.get_children():
if isinstance(bool_expr, astroid.BoolOp):
nb_bool_expr += _count_boolean_expressions(bool_expr)
else:
nb_bool_expr += 1
return nb_bool_expr
def _count_methods_in_class(node):
all_methods = sum(1 for method in node.methods() if not method.name.startswith("_"))
# Special methods count towards the number of public methods,
# but don't count towards there being too many methods.
for method in node.mymethods():
if SPECIAL_OBJ.search(method.name) and method.name != "__init__":
all_methods += 1
return all_methods
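# Illustrative example (not part of the original module): for
#
#     class C:
#         def get(self): ...
#         def __len__(self): ...
#
# _count_methods_in_class returns 2 -- ``get`` plus the ``__len__`` special
# method (``__init__`` would not have been counted).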
def _get_parents_iter(
node: nodes.ClassDef, ignored_parents: FrozenSet[str]
) -> Iterator[nodes.ClassDef]:
r"""Get parents of ``node``, excluding ancestors of ``ignored_parents``.
If we have the following inheritance diagram:
F
/
D E
\/
B C
\/
A # class A(B, C): ...
And ``ignored_parents`` is ``{"E"}``, then this function will return
``{A, B, C, D}`` -- both ``E`` and its ancestors are excluded.
"""
parents: Set[nodes.ClassDef] = set()
to_explore = cast(List[nodes.ClassDef], list(node.ancestors(recurs=False)))
while to_explore:
parent = to_explore.pop()
if parent.qname() in ignored_parents:
continue
if parent not in parents:
# This guard might appear to be performing the same function as
# adding the resolved parents to a set to eliminate duplicates
# (legitimate due to diamond inheritance patterns), but its
# additional purpose is to prevent cycles (not normally possible,
# but potential due to inference) and thus guarantee termination
# of the while-loop
yield parent
parents.add(parent)
to_explore.extend(parent.ancestors(recurs=False))
def _get_parents(
node: nodes.ClassDef, ignored_parents: FrozenSet[str]
) -> Set[nodes.ClassDef]:
return set(_get_parents_iter(node, ignored_parents))
class MisdesignChecker(BaseChecker):
"""checks for sign of poor/misdesign:
* number of methods, attributes, local variables...
* size, complexity of functions, methods
"""
__implements__ = (IAstroidChecker,)
# configuration section name
name = "design"
# messages
msgs = MSGS
priority = -2
# configuration options
options = (
(
"max-args",
{
"default": 5,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of arguments for function / method.",
},
),
(
"max-locals",
{
"default": 15,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of locals for function / method body.",
},
),
(
"max-returns",
{
"default": 6,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of return / yield for function / "
"method body.",
},
),
(
"max-branches",
{
"default": 12,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of branch for function / method body.",
},
),
(
"max-statements",
{
"default": 50,
"type": "int",
"metavar": "<int>",
"help": "Maximum number of statements in function / method body.",
},
),
(
"max-parents",
{
"default": 7,
"type": "int",
"metavar": "<num>",
"help": "Maximum number of parents for a class (see R0901).",
},
),
(
"ignored-parents",
{
"default": (),
"type": "csv",
"metavar": "<comma separated list of class names>",
"help": "List of qualified class names to ignore when counting class parents (see R0901)",
},
),
(
"max-attributes",
{
"default": 7,
"type": "int",
"metavar": "<num>",
"help": "Maximum number of attributes for a class \
(see R0902).",
},
),
(
"min-public-methods",
{
"default": 2,
"type": "int",
"metavar": "<num>",
"help": "Minimum number of public methods for a class \
(see R0903).",
},
),
(
"max-public-methods",
{
"default": 20,
"type": "int",
"metavar": "<num>",
"help": "Maximum number of public methods for a class \
(see R0904).",
},
),
(
"max-bool-expr",
{
"default": 5,
"type": "int",
"metavar": "<num>",
"help": "Maximum number of boolean expressions in an if "
"statement (see R0916).",
},
),
)
def __init__(self, linter=None):
super().__init__(linter)
self._returns = None
self._branches = None
self._stmts = None
def open(self):
"""initialize visit variables"""
self.linter.stats.reset_node_count()
self._returns = []
self._branches = defaultdict(int)
self._stmts = []
def _inc_all_stmts(self, amount):
for i, _ in enumerate(self._stmts):
self._stmts[i] += amount
@astroid.decorators.cachedproperty
def _ignored_argument_names(self):
return utils.get_global_option(self, "ignored-argument-names", default=None)
@check_messages(
"too-many-ancestors",
"too-many-instance-attributes",
"too-few-public-methods",
"too-many-public-methods",
)
def visit_classdef(self, node: nodes.ClassDef) -> None:
"""check size of inheritance hierarchy and number of instance attributes"""
parents = _get_parents(
node, STDLIB_CLASSES_IGNORE_ANCESTOR.union(self.config.ignored_parents)
)
nb_parents = len(parents)
if nb_parents > self.config.max_parents:
self.add_message(
"too-many-ancestors",
node=node,
args=(nb_parents, self.config.max_parents),
)
if len(node.instance_attrs) > self.config.max_attributes:
self.add_message(
"too-many-instance-attributes",
node=node,
args=(len(node.instance_attrs), self.config.max_attributes),
)
@check_messages("too-few-public-methods", "too-many-public-methods")
def leave_classdef(self, node: nodes.ClassDef) -> None:
"""check number of public methods"""
my_methods = sum(
1 for method in node.mymethods() if not method.name.startswith("_")
)
# Does the class contain more than n public methods?
# This checks only the methods defined in the current class,
# since the user might not have control over the classes
# from the ancestors. It avoids some false positives
# for classes such as unittest.TestCase, which provides
# a lot of assert methods. It doesn't make sense to warn
# when the user subclasses TestCase to add their own tests.
if my_methods > self.config.max_public_methods:
self.add_message(
"too-many-public-methods",
node=node,
args=(my_methods, self.config.max_public_methods),
)
# Stop here for exception, metaclass, interface classes and other
# classes for which we don't need to count the methods.
if node.type != "class" or _is_exempt_from_public_methods(node):
return
# Does the class contain fewer than n public methods?
# This checks all the methods defined by ancestors and
# by the current class.
all_methods = _count_methods_in_class(node)
if all_methods < self.config.min_public_methods:
self.add_message(
"too-few-public-methods",
node=node,
args=(all_methods, self.config.min_public_methods),
)
@check_messages(
"too-many-return-statements",
"too-many-branches",
"too-many-arguments",
"too-many-locals",
"too-many-statements",
"keyword-arg-before-vararg",
)
def visit_functiondef(self, node: nodes.FunctionDef) -> None:
"""check function name, docstring, arguments, redefinition,
variable names, max locals
"""
# init branch and returns counters
self._returns.append(0)
# check number of arguments
args = node.args.args
ignored_argument_names = self._ignored_argument_names
if args is not None:
ignored_args_num = 0
if ignored_argument_names:
ignored_args_num = sum(
1 for arg in args if ignored_argument_names.match(arg.name)
)
argnum = len(args) - ignored_args_num
if argnum > self.config.max_args:
self.add_message(
"too-many-arguments",
node=node,
args=(len(args), self.config.max_args),
)
else:
ignored_args_num = 0
# check number of local variables
locnum = len(node.locals) - ignored_args_num
if locnum > self.config.max_locals:
self.add_message(
"too-many-locals", node=node, args=(locnum, self.config.max_locals)
)
# init new statements counter
self._stmts.append(1)
visit_asyncfunctiondef = visit_functiondef
@check_messages(
"too-many-return-statements",
"too-many-branches",
"too-many-arguments",
"too-many-locals",
"too-many-statements",
)
def leave_functiondef(self, node: nodes.FunctionDef) -> None:
"""most of the work is done here on close:
checks for max returns, branch, return in __init__
"""
returns = self._returns.pop()
if returns > self.config.max_returns:
self.add_message(
"too-many-return-statements",
node=node,
args=(returns, self.config.max_returns),
)
branches = self._branches[node]
if branches > self.config.max_branches:
self.add_message(
"too-many-branches",
node=node,
args=(branches, self.config.max_branches),
)
# check number of statements
stmts = self._stmts.pop()
if stmts > self.config.max_statements:
self.add_message(
"too-many-statements",
node=node,
args=(stmts, self.config.max_statements),
)
leave_asyncfunctiondef = leave_functiondef
def visit_return(self, _: nodes.Return) -> None:
"""count number of returns"""
if not self._returns:
return # return outside function, reported by the base checker
self._returns[-1] += 1
def visit_default(self, node: nodes.NodeNG) -> None:
"""default visit method -> increments the statements counter if
necessary
"""
if node.is_statement:
self._inc_all_stmts(1)
def visit_tryexcept(self, node: nodes.TryExcept) -> None:
"""increments the branches counter"""
branches = len(node.handlers)
if node.orelse:
branches += 1
self._inc_branch(node, branches)
self._inc_all_stmts(branches)
def visit_tryfinally(self, node: nodes.TryFinally) -> None:
"""increments the branches counter"""
self._inc_branch(node, 2)
self._inc_all_stmts(2)
@check_messages("too-many-boolean-expressions")
def visit_if(self, node: nodes.If) -> None:
"""increments the branches counter and checks boolean expressions"""
self._check_boolean_expressions(node)
branches = 1
# don't double count If nodes coming from some 'elif'
if node.orelse and (
len(node.orelse) > 1 or not isinstance(node.orelse[0], astroid.If)
):
branches += 1
self._inc_branch(node, branches)
self._inc_all_stmts(branches)
def _check_boolean_expressions(self, node):
"""Go through "if" node `node` and counts its boolean expressions
if the "if" node test is a BoolOp node
"""
condition = node.test
if not isinstance(condition, astroid.BoolOp):
return
nb_bool_expr = _count_boolean_expressions(condition)
if nb_bool_expr > self.config.max_bool_expr:
self.add_message(
"too-many-boolean-expressions",
node=condition,
args=(nb_bool_expr, self.config.max_bool_expr),
)
def visit_while(self, node: nodes.While) -> None:
"""increments the branches counter"""
branches = 1
if node.orelse:
branches += 1
self._inc_branch(node, branches)
visit_for = visit_while
def _inc_branch(self, node, branchesnum=1):
"""increments the branches counter"""
self._branches[node.scope()] += branchesnum
def register(linter):
"""required method to auto register this checker"""
linter.register_checker(MisdesignChecker(linter))
| 1 | 16,924 | Note: I wasn't sure what the right wording would be for this, so I used existing language from elsewhere in the codebase. | PyCQA-pylint | py |
@@ -113,3 +113,14 @@ class Config(object):
# grab config_file default from molecule if it's not set in the user-supplied ansible options
if 'config_file' not in self.config['ansible']:
self.config['ansible']['config_file'] = self.config['molecule']['config_file']
+
+ def populate_instance_names(self, platform):
+ """
+ Updates instances section of config with an additional key containing the full instance name
+
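+ :param platform: platform to include when formatting the instance name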
+ :return: None
+ """
+ for instance in self.config['vagrant']['instances']:
+ instance['vm_name'] = utilities.format_instance_name(instance['name'], platform,
+ self.config['vagrant']['instances']) | 1 | # Copyright (c) 2015 Cisco Systems
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import os
import sys
import yaml
from colorama import Fore
import molecule.utilities as utilities
class Config(object):
# locations to look for molecule config files
CONFIG_PATHS = [os.environ.get('MOLECULE_CONFIG'), os.path.expanduser('~/.config/molecule/config.yml'),
'/etc/molecule/config.yml']
def load_defaults_file(self, defaults_file=None):
"""
Loads config from a file
:param defaults_file: optional YAML file to open and read defaults from
:return: None
"""
# load defaults from provided file
if defaults_file is None:
defaults_file = os.path.join(os.path.dirname(__file__), 'conf/defaults.yml')
with open(defaults_file, 'r') as stream:
self.config = yaml.load(stream)
def merge_molecule_config_files(self, paths=CONFIG_PATHS):
"""
Looks for a molecule config file in paths and merges it with current config if found
Only the first file that's found will be merged in.
:param paths: list of places to look for config files
:return: Path of file that was merged into config, if found, otherwise None
"""
# merge defaults with a config file if found
for path in paths:
if path and os.path.isfile(path):
with open(path, 'r') as stream:
self.config = utilities.merge_dicts(self.config, yaml.load(stream))
return path
return
def merge_molecule_file(self, molecule_file=None):
"""
Looks for a molecule file in the local path and merges it into our config
:param molecule_file: path and name of molecule file to look for
:return: None
"""
if molecule_file is None:
molecule_file = self.config['molecule']['molecule_file']
if not os.path.isfile(molecule_file):
error = '\n{}Unable to find {}. Exiting.{}'
print(error.format(Fore.RED, self.config['molecule']['molecule_file'], Fore.RESET))
sys.exit(1)
with open(molecule_file, 'r') as env:
try:
molecule_yml = yaml.load(env)
except Exception as e:
error = "\n{}{} isn't properly formatted: {}{}"
print(error.format(Fore.RED, molecule_file, e, Fore.RESET))
sys.exit(1)
interim = utilities.merge_dicts(self.config, molecule_yml)
self.config = interim
def build_easy_paths(self):
"""
Convenience function to build up paths from our config values
:return: None
"""
values_to_update = ['state_file', 'vagrantfile_file', 'rakefile_file', 'config_file', 'inventory_file']
for item in values_to_update:
self.config['molecule'][item] = os.path.join(self.config['molecule']['molecule_dir'],
self.config['molecule'][item])
def update_ansible_defaults(self):
"""
Copies certain default values from molecule to ansible if none are specified in molecule.yml
:return: None
"""
# grab inventory_file default from molecule if it's not set in the user-supplied ansible options
if 'inventory_file' not in self.config['ansible']:
self.config['ansible']['inventory_file'] = self.config['molecule']['inventory_file']
# grab config_file default from molecule if it's not set in the user-supplied ansible options
if 'config_file' not in self.config['ansible']:
self.config['ansible']['config_file'] = self.config['molecule']['config_file']
| 1 | 5,729 | Docstring for `platform`. | ansible-community-molecule | py |
@@ -85,7 +85,9 @@ func Main() {
if err != nil {
fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err)
}
-
+ if err := cleanup(); err != nil {
+ fmt.Fprintf(os.Stderr, "error restoring console to functional state: %v\n", err)
+ }
os.Exit(exitCode)
}
| 1 | // Copyright 2015 Matthew Holt and The Caddy Authors
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
// http://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.
package caddycmd
import (
"bufio"
"bytes"
"flag"
"fmt"
"io"
"log"
"net"
"os"
"path/filepath"
"runtime"
"runtime/debug"
"strconv"
"strings"
"time"
"github.com/caddyserver/caddy/v2"
"github.com/caddyserver/caddy/v2/caddyconfig"
"github.com/caddyserver/certmagic"
"go.uber.org/zap"
)
func init() {
// set a fitting User-Agent for ACME requests
goModule := caddy.GoModule()
cleanModVersion := strings.TrimPrefix(goModule.Version, "v")
certmagic.UserAgent = "Caddy/" + cleanModVersion
// by using Caddy, user indicates agreement to CA terms
// (very important, or ACME account creation will fail!)
certmagic.DefaultACME.Agreed = true
}
// Main implements the main function of the caddy command.
// Call this if Caddy is to be the main() of your program.
func Main() {
switch len(os.Args) {
case 0:
fmt.Printf("[FATAL] no arguments provided by OS; args[0] must be command\n")
os.Exit(caddy.ExitCodeFailedStartup)
case 1:
os.Args = append(os.Args, "help")
}
subcommandName := os.Args[1]
subcommand, ok := commands[subcommandName]
if !ok {
if strings.HasPrefix(os.Args[1], "-") {
// user probably forgot to type the subcommand
fmt.Println("[ERROR] first argument must be a subcommand; see 'caddy help'")
} else {
fmt.Printf("[ERROR] '%s' is not a recognized subcommand; see 'caddy help'\n", os.Args[1])
}
os.Exit(caddy.ExitCodeFailedStartup)
}
fs := subcommand.Flags
if fs == nil {
fs = flag.NewFlagSet(subcommand.Name, flag.ExitOnError)
}
err := fs.Parse(os.Args[2:])
if err != nil {
fmt.Println(err)
os.Exit(caddy.ExitCodeFailedStartup)
}
exitCode, err := subcommand.Func(Flags{fs})
if err != nil {
fmt.Fprintf(os.Stderr, "%s: %v\n", subcommand.Name, err)
}
os.Exit(exitCode)
}
// handlePingbackConn reads from conn and ensures it matches
// the bytes in expect, or returns an error if it doesn't.
func handlePingbackConn(conn net.Conn, expect []byte) error {
defer conn.Close()
confirmationBytes, err := io.ReadAll(io.LimitReader(conn, 32))
if err != nil {
return err
}
if !bytes.Equal(confirmationBytes, expect) {
return fmt.Errorf("wrong confirmation: %x", confirmationBytes)
}
return nil
}
// loadConfig loads the config from configFile and adapts it
// using adapterName. If adapterName is specified, configFile
// must be as well. If no configFile is specified, it tries
// loading a default config file. The lack of a config file is
// not treated as an error; the returned config is simply empty
// if none is available. It prints any warnings to stderr, and
// returns the resulting JSON config bytes along with the name
// of the config file that was loaded, if any.
func loadConfig(configFile, adapterName string) ([]byte, string, error) {
// specifying an adapter without a config file is ambiguous
if adapterName != "" && configFile == "" {
return nil, "", fmt.Errorf("cannot adapt config without config file (use --config)")
}
// load initial config and adapter
var config []byte
var cfgAdapter caddyconfig.Adapter
var err error
if configFile != "" {
if configFile == "-" {
config, err = io.ReadAll(os.Stdin)
} else {
config, err = os.ReadFile(configFile)
}
if err != nil {
return nil, "", fmt.Errorf("reading config file: %v", err)
}
caddy.Log().Info("using provided configuration",
zap.String("config_file", configFile),
zap.String("config_adapter", adapterName))
} else if adapterName == "" {
// as a special case when no config file or adapter
// is specified, see if the Caddyfile adapter is
// plugged in, and if so, try using a default Caddyfile
cfgAdapter = caddyconfig.GetAdapter("caddyfile")
if cfgAdapter != nil {
config, err = os.ReadFile("Caddyfile")
if os.IsNotExist(err) {
// okay, no default Caddyfile; pretend like this never happened
cfgAdapter = nil
} else if err != nil {
// default Caddyfile exists, but error reading it
return nil, "", fmt.Errorf("reading default Caddyfile: %v", err)
} else {
// success reading default Caddyfile
configFile = "Caddyfile"
caddy.Log().Info("using adjacent Caddyfile")
}
}
}
// as a special case, if a config file called "Caddyfile" was
// specified, and no adapter is specified, assume caddyfile adapter
// for convenience
if strings.HasPrefix(filepath.Base(configFile), "Caddyfile") &&
filepath.Ext(configFile) != ".json" &&
adapterName == "" {
adapterName = "caddyfile"
}
// load config adapter
if adapterName != "" {
cfgAdapter = caddyconfig.GetAdapter(adapterName)
if cfgAdapter == nil {
return nil, "", fmt.Errorf("unrecognized config adapter: %s", adapterName)
}
}
// adapt config
if cfgAdapter != nil {
adaptedConfig, warnings, err := cfgAdapter.Adapt(config, map[string]interface{}{
"filename": configFile,
})
if err != nil {
return nil, "", fmt.Errorf("adapting config using %s: %v", adapterName, err)
}
for _, warn := range warnings {
msg := warn.Message
if warn.Directive != "" {
msg = fmt.Sprintf("%s: %s", warn.Directive, warn.Message)
}
caddy.Log().Warn(msg, zap.String("adapter", adapterName), zap.String("file", warn.File), zap.Int("line", warn.Line))
}
config = adaptedConfig
}
return config, configFile, nil
}
// watchConfigFile watches the config file at filename for changes
// and reloads the config if the file was updated. This function
// blocks indefinitely; it only quits if the poller keeps hitting
// errors for long enough. The filename passed in must be the actual
// config file used, not one to be discovered.
func watchConfigFile(filename, adapterName string) {
defer func() {
if err := recover(); err != nil {
log.Printf("[PANIC] watching config file: %v\n%s", err, debug.Stack())
}
}()
// make our logger; since config reloads can change the
// default logger, we need to get it dynamically each time
logger := func() *zap.Logger {
return caddy.Log().
Named("watcher").
With(zap.String("config_file", filename))
}
// get the initial timestamp on the config file
info, err := os.Stat(filename)
if err != nil {
logger().Error("cannot watch config file", zap.Error(err))
return
}
lastModified := info.ModTime()
logger().Info("watching config file for changes")
// if the file disappears or something, we can
// stop polling if the error lasts long enough
var lastErr time.Time
finalError := func(err error) bool {
if lastErr.IsZero() {
lastErr = time.Now()
return false
}
if time.Since(lastErr) > 30*time.Second {
logger().Error("giving up watching config file; too many errors",
zap.Error(err))
return true
}
return false
}
// begin poller
//nolint:staticcheck
for range time.Tick(1 * time.Second) {
// get the file info
info, err := os.Stat(filename)
if err != nil {
if finalError(err) {
return
}
continue
}
lastErr = time.Time{} // no error, so clear any memory of one
// if it hasn't changed, nothing to do
if !info.ModTime().After(lastModified) {
continue
}
logger().Info("config file changed; reloading")
// remember this timestamp
lastModified = info.ModTime()
// load the contents of the file
config, _, err := loadConfig(filename, adapterName)
if err != nil {
logger().Error("unable to load latest config", zap.Error(err))
continue
}
// apply the updated config
err = caddy.Load(config, false)
if err != nil {
logger().Error("applying latest config", zap.Error(err))
continue
}
}
}
// Flags wraps a FlagSet so that typed values
// from flags can be easily retrieved.
type Flags struct {
*flag.FlagSet
}
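// Illustrative use inside a subcommand function (hypothetical flag names):
//
//	func cmdExample(fl Flags) (int, error) {
//		configFile := fl.String("config")
//		watch := fl.Bool("watch")
//		_, _ = configFile, watch
//		return 0, nil
//	}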
// String returns the string representation of the
// flag given by name. It panics if the flag is not
// in the flag set.
func (f Flags) String(name string) string {
return f.FlagSet.Lookup(name).Value.String()
}
// Bool returns the boolean representation of the
// flag given by name. It returns false if the flag
// is not a boolean type. It panics if the flag is
// not in the flag set.
func (f Flags) Bool(name string) bool {
val, _ := strconv.ParseBool(f.String(name))
return val
}
// Int returns the integer representation of the
// flag given by name. It returns 0 if the flag
// is not an integer type. It panics if the flag is
// not in the flag set.
func (f Flags) Int(name string) int {
val, _ := strconv.ParseInt(f.String(name), 0, strconv.IntSize)
return int(val)
}
// Float64 returns the float64 representation of the
// flag given by name. It returns 0 if the flag
// is not a float64 type. It panics if the flag is
// not in the flag set.
func (f Flags) Float64(name string) float64 {
val, _ := strconv.ParseFloat(f.String(name), 64)
return val
}
// Duration returns the duration representation of the
// flag given by name. It returns 0 if the flag
// is not a duration type. It panics if the flag is
// not in the flag set.
func (f Flags) Duration(name string) time.Duration {
val, _ := caddy.ParseDuration(f.String(name))
return val
}
// flagHelp returns the help text for fs.
func flagHelp(fs *flag.FlagSet) string {
if fs == nil {
return ""
}
// temporarily redirect output
out := fs.Output()
defer fs.SetOutput(out)
buf := new(bytes.Buffer)
fs.SetOutput(buf)
fs.PrintDefaults()
return buf.String()
}
func loadEnvFromFile(envFile string) error {
file, err := os.Open(envFile)
if err != nil {
return fmt.Errorf("reading environment file: %v", err)
}
defer file.Close()
envMap, err := parseEnvFile(file)
if err != nil {
return fmt.Errorf("parsing environment file: %v", err)
}
for k, v := range envMap {
if err := os.Setenv(k, v); err != nil {
return fmt.Errorf("setting environment variables: %v", err)
}
}
// Update the storage paths to ensure they have the proper
// value after loading a specified env file.
caddy.ConfigAutosavePath = filepath.Join(caddy.AppConfigDir(), "autosave.json")
caddy.DefaultStorage = &certmagic.FileStorage{Path: caddy.AppDataDir()}
return nil
}
func parseEnvFile(envInput io.Reader) (map[string]string, error) {
envMap := make(map[string]string)
scanner := bufio.NewScanner(envInput)
var line string
lineNumber := 0
for scanner.Scan() {
line = strings.TrimSpace(scanner.Text())
lineNumber++
// skip lines starting with comment
if strings.HasPrefix(line, "#") {
continue
}
// skip empty line
if len(line) == 0 {
continue
}
fields := strings.SplitN(line, "=", 2)
if len(fields) != 2 {
return nil, fmt.Errorf("can't parse line %d; line should be in KEY=VALUE format", lineNumber)
}
if strings.Contains(fields[0], " ") {
return nil, fmt.Errorf("bad key on line %d: contains whitespace", lineNumber)
}
key := fields[0]
val := fields[1]
if key == "" {
return nil, fmt.Errorf("missing or empty key on line %d", lineNumber)
}
envMap[key] = val
}
if err := scanner.Err(); err != nil {
return nil, err
}
return envMap, nil
}
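// Illustrative input for parseEnvFile (hypothetical keys):
//
//	# comments and blank lines are skipped
//	API_KEY=abc123
//	DEBUG=true
//
// which yields map[string]string{"API_KEY": "abc123", "DEBUG": "true"}.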
func printEnvironment() {
fmt.Printf("caddy.HomeDir=%s\n", caddy.HomeDir())
fmt.Printf("caddy.AppDataDir=%s\n", caddy.AppDataDir())
fmt.Printf("caddy.AppConfigDir=%s\n", caddy.AppConfigDir())
fmt.Printf("caddy.ConfigAutosavePath=%s\n", caddy.ConfigAutosavePath)
fmt.Printf("caddy.Version=%s\n", CaddyVersion())
fmt.Printf("runtime.GOOS=%s\n", runtime.GOOS)
fmt.Printf("runtime.GOARCH=%s\n", runtime.GOARCH)
fmt.Printf("runtime.Compiler=%s\n", runtime.Compiler)
fmt.Printf("runtime.NumCPU=%d\n", runtime.NumCPU())
fmt.Printf("runtime.GOMAXPROCS=%d\n", runtime.GOMAXPROCS(0))
fmt.Printf("runtime.Version=%s\n", runtime.Version())
cwd, err := os.Getwd()
if err != nil {
cwd = fmt.Sprintf("<error: %v>", err)
}
fmt.Printf("os.Getwd=%s\n\n", cwd)
for _, v := range os.Environ() {
fmt.Println(v)
}
}
// CaddyVersion returns a detailed version string, if available.
func CaddyVersion() string {
goModule := caddy.GoModule()
ver := goModule.Version
if goModule.Sum != "" {
ver += " " + goModule.Sum
}
if goModule.Replace != nil {
ver += " => " + goModule.Replace.Path
if goModule.Replace.Version != "" {
ver += "@" + goModule.Replace.Version
}
if goModule.Replace.Sum != "" {
ver += " " + goModule.Replace.Sum
}
}
return ver
}
| 1 | 16,960 | This should be called before any `os.Exit` otherwise it could still mess up things. | caddyserver-caddy | go |
@@ -7,16 +7,16 @@ package MyGame.Example;
*/
public final class Color {
private Color() { }
- public static final byte Red = 1;
+ public static final int Red = 1;
/**
* \brief color Green
* Green is bit_flag with value (1u << 1)
*/
- public static final byte Green = 2;
+ public static final int Green = 2;
/**
* \brief color Blue (1u << 3)
*/
- public static final byte Blue = 8;
+ public static final int Blue = 8;
public static final String[] names = { "Red", "Green", "", "", "", "", "", "Blue", };
| 1 | // automatically generated by the FlatBuffers compiler, do not modify
package MyGame.Example;
/**
* Composite components of Monster color.
*/
public final class Color {
private Color() { }
public static final byte Red = 1;
/**
* \brief color Green
* Green is bit_flag with value (1u << 1)
*/
public static final byte Green = 2;
/**
* \brief color Blue (1u << 3)
*/
public static final byte Blue = 8;
public static final String[] names = { "Red", "Green", "", "", "", "", "", "Blue", };
public static String name(int e) { return names[e - Red]; }
}
| 1 | 20,269 | shouldn't this be `short` ? | google-flatbuffers | java |
@@ -63,15 +63,15 @@ func (c *CmdVolumeOptions) RunVolumesList(cmd *cobra.Command) error {
}
out := make([]string, len(cvols.Items)+2)
- out[0] = "Namespace|Name|Status|Type"
- out[1] = "---------|----|------|----"
+ out[0] = "Namespace|Name|Status|Type|Capacity"
+ out[1] = "---------|----|------|----|--------"
for i, items := range cvols.Items {
if len(items.Status.Reason) == 0 {
items.Status.Reason = volumeStatusOK
}
- out[i+2] = fmt.Sprintf("%s|%s|%s|%s", items.ObjectMeta.Namespace,
+ out[i+2] = fmt.Sprintf("%s|%s|%s|%s|%s", items.ObjectMeta.Namespace,
items.ObjectMeta.Name,
- items.Status.Reason, items.Spec.CasType)
+ items.Status.Reason, items.Spec.CasType, items.Spec.Capacity)
}
if len(out) == 2 {
fmt.Println("No Volumes are running") | 1 | /*
Copyright 2017 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package command
import (
"fmt"
"github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
"github.com/openebs/maya/pkg/client/mapiserver"
"github.com/openebs/maya/pkg/util"
"github.com/spf13/cobra"
)
var (
volumesListCommandHelpText = `
This command displays the status of available Volumes.
If no volume ID is given, a list of all known volumes will be displayed.
Usage: mayactl volume list [options]
`
)
// NewCmdVolumesList displays status of OpenEBS Volume(s)
func NewCmdVolumesList() *cobra.Command {
cmd := &cobra.Command{
Use: "list",
Short: "Displays status information about Volume(s)",
Long: volumesListCommandHelpText,
Run: func(cmd *cobra.Command, args []string) {
util.CheckErr(options.RunVolumesList(cmd), util.Fatal)
},
}
cmd.Flags().StringVarP(&options.volName, "volname", "", options.volName,
"unique volume name.")
cmd.MarkPersistentFlagRequired("volname")
return cmd
}
// RunVolumesList fetches the volumes from maya-apiserver
func (c *CmdVolumeOptions) RunVolumesList(cmd *cobra.Command) error {
//fmt.Println("Executing volume list...")
var cvols v1alpha1.CASVolumeList
err := mapiserver.ListVolumes(&cvols)
if err != nil {
return fmt.Errorf("Volume list error: %s", err)
}
out := make([]string, len(cvols.Items)+2)
out[0] = "Namespace|Name|Status|Type"
out[1] = "---------|----|------|----"
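// Illustrative output after util.FormatList (values are hypothetical):
//   Namespace  Name      Status   Type
//   ---------  ----      ------   ----
//   default    demo-vol  Running  jiva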
for i, items := range cvols.Items {
if len(items.Status.Reason) == 0 {
items.Status.Reason = volumeStatusOK
}
out[i+2] = fmt.Sprintf("%s|%s|%s|%s", items.ObjectMeta.Namespace,
items.ObjectMeta.Name,
items.Status.Reason, items.Spec.CasType)
}
if len(out) == 2 {
fmt.Println("No Volumes are running")
return nil
}
fmt.Println(util.FormatList(out))
return nil
}
| 1 | 10,043 | Can you please paste the output of `volume list` command which will show capacity of a openebs volume. | openebs-maya | go |
@@ -83,10 +83,15 @@ public class MetricRegistry implements MetricSet {
* @param metric the metric
* @param <T> the type of the metric
* @return {@code metric}
- * @throws IllegalArgumentException if the name is already registered
+ * @throws IllegalArgumentException if the name is already registered or the metric is null
*/
@SuppressWarnings("unchecked")
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
+
+ if (metric == null) {
+ throw new IllegalArgumentException("metric must not be null");
+ }
+
if (metric instanceof MetricRegistry) {
final MetricRegistry childRegistry = (MetricRegistry)metric;
final String childName = name; | 1 | package com.codahale.metrics;
import java.util.Collections;
import java.util.List;
import java.util.Map;
import java.util.SortedMap;
import java.util.SortedSet;
import java.util.TreeMap;
import java.util.TreeSet;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.ConcurrentMap;
import java.util.concurrent.CopyOnWriteArrayList;
/**
* A registry of metric instances.
*/
public class MetricRegistry implements MetricSet {
/**
* Concatenates elements to form a dotted name, eliding any null values or empty strings.
*
* @param name the first element of the name
* @param names the remaining elements of the name
* @return {@code name} and {@code names} concatenated by periods
*/
public static String name(String name, String... names) {
final StringBuilder builder = new StringBuilder();
append(builder, name);
if (names != null) {
for (String s : names) {
append(builder, s);
}
}
return builder.toString();
}
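// Illustrative (not part of the original class): null values and empty
// strings are elided, so
//   name("com.example", "requests", null, "count") -> "com.example.requests.count"
//   name("", "latency")                            -> "latency"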
/**
* Concatenates a class name and elements to form a dotted name, eliding any null values or
* empty strings.
*
* @param klass the first element of the name
* @param names the remaining elements of the name
* @return {@code klass} and {@code names} concatenated by periods
*/
public static String name(Class<?> klass, String... names) {
return name(klass.getName(), names);
}
private static void append(StringBuilder builder, String part) {
if (part != null && !part.isEmpty()) {
if (builder.length() > 0) {
builder.append('.');
}
builder.append(part);
}
}
private final ConcurrentMap<String, Metric> metrics;
private final List<MetricRegistryListener> listeners;
/**
* Creates a new {@link MetricRegistry}.
*/
public MetricRegistry() {
this.metrics = buildMap();
this.listeners = new CopyOnWriteArrayList<>();
}
/**
* Creates a new {@link ConcurrentMap} implementation for use inside the registry. Override this
* to create a {@link MetricRegistry} with space- or time-bounded metric lifecycles, for
* example.
*
* @return a new {@link ConcurrentMap}
*/
protected ConcurrentMap<String, Metric> buildMap() {
return new ConcurrentHashMap<>();
}
/**
* Given a {@link Metric}, registers it under the given name.
*
* @param name the name of the metric
* @param metric the metric
* @param <T> the type of the metric
* @return {@code metric}
* @throws IllegalArgumentException if the name is already registered
*/
@SuppressWarnings("unchecked")
public <T extends Metric> T register(String name, T metric) throws IllegalArgumentException {
if (metric instanceof MetricRegistry) {
final MetricRegistry childRegistry = (MetricRegistry)metric;
final String childName = name;
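// Propagate metrics added to or removed from the child registry into this
// registry, prefixed with the child's name: a child named "cache" would
// expose its "hits" meter here as "cache.hits" (illustrative names).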
childRegistry.addListener(new MetricRegistryListener() {
@Override
public void onGaugeAdded(String name, Gauge<?> gauge) {
register(name(childName, name), gauge);
}
@Override
public void onGaugeRemoved(String name) {
remove(name(childName, name));
}
@Override
public void onCounterAdded(String name, Counter counter) {
register(name(childName, name), counter);
}
@Override
public void onCounterRemoved(String name) {
remove(name(childName, name));
}
@Override
public void onHistogramAdded(String name, Histogram histogram) {
register(name(childName, name), histogram);
}
@Override
public void onHistogramRemoved(String name) {
remove(name(childName, name));
}
@Override
public void onMeterAdded(String name, Meter meter) {
register(name(childName, name), meter);
}
@Override
public void onMeterRemoved(String name) {
remove(name(childName, name));
}
@Override
public void onTimerAdded(String name, Timer timer) {
register(name(childName, name), timer);
}
@Override
public void onTimerRemoved(String name) {
remove(name(childName, name));
}
});
} else if (metric instanceof MetricSet) {
registerAll(name, (MetricSet) metric);
} else {
final Metric existing = metrics.putIfAbsent(name, metric);
if (existing == null) {
onMetricAdded(name, metric);
} else {
throw new IllegalArgumentException("A metric named " + name + " already exists");
}
}
return metric;
}
/**
* Given a metric set, registers them.
*
* @param metrics a set of metrics
* @throws IllegalArgumentException if any of the names are already registered
*/
public void registerAll(MetricSet metrics) throws IllegalArgumentException {
registerAll(null, metrics);
}
/**
* Return the {@link Counter} registered under this name; or create and register
* a new {@link Counter} if none is registered.
*
* @param name the name of the metric
* @return a new or pre-existing {@link Counter}
*/
public Counter counter(String name) {
return getOrAdd(name, MetricBuilder.COUNTERS);
}
/**
* Return the {@link Counter} registered under this name; or create and register
* a new {@link Counter} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a counter.
* @return a new or pre-existing {@link Counter}
*/
public Counter counter(String name, final MetricSupplier<Counter> supplier) {
return getOrAdd(name, new MetricBuilder<Counter>() {
@Override
public Counter newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Counter.class.isInstance(metric);
}
});
}
/**
* Return the {@link Histogram} registered under this name; or create and register
* a new {@link Histogram} if none is registered.
*
* @param name the name of the metric
* @return a new or pre-existing {@link Histogram}
*/
public Histogram histogram(String name) {
return getOrAdd(name, MetricBuilder.HISTOGRAMS);
}
/**
* Return the {@link Histogram} registered under this name; or create and register
* a new {@link Histogram} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a histogram
* @return a new or pre-existing {@link Histogram}
*/
public Histogram histogram(String name, final MetricSupplier<Histogram> supplier) {
return getOrAdd(name, new MetricBuilder<Histogram>() {
@Override
public Histogram newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Histogram.class.isInstance(metric);
}
});
}
/**
* Return the {@link Meter} registered under this name; or create and register
* a new {@link Meter} if none is registered.
*
* @param name the name of the metric
* @return a new or pre-existing {@link Meter}
*/
public Meter meter(String name) {
return getOrAdd(name, MetricBuilder.METERS);
}
/**
* Return the {@link Meter} registered under this name; or create and register
* a new {@link Meter} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a Meter
* @return a new or pre-existing {@link Meter}
*/
public Meter meter(String name, final MetricSupplier<Meter> supplier) {
return getOrAdd(name, new MetricBuilder<Meter>() {
@Override
public Meter newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Meter.class.isInstance(metric);
}
});
}
/**
* Return the {@link Timer} registered under this name; or create and register
* a new {@link Timer} if none is registered.
*
* @param name the name of the metric
* @return a new or pre-existing {@link Timer}
*/
public Timer timer(String name) {
return getOrAdd(name, MetricBuilder.TIMERS);
}
/**
* Return the {@link Timer} registered under this name; or create and register
* a new {@link Timer} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a Timer
* @return a new or pre-existing {@link Timer}
*/
public Timer timer(String name, final MetricSupplier<Timer> supplier) {
return getOrAdd(name, new MetricBuilder<Timer>() {
@Override
public Timer newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Timer.class.isInstance(metric);
}
});
}
/**
* Return the {@link Gauge} registered under this name; or create and register
* a new {@link Gauge} using the provided MetricSupplier if none is registered.
*
* @param name the name of the metric
* @param supplier a MetricSupplier that can be used to manufacture a Gauge
* @return a new or pre-existing {@link Gauge}
*/
@SuppressWarnings("rawtypes")
public Gauge gauge(String name, final MetricSupplier<Gauge> supplier) {
return getOrAdd(name, new MetricBuilder<Gauge>() {
@Override
public Gauge newMetric() {
return supplier.newMetric();
}
@Override
public boolean isInstance(Metric metric) {
return Gauge.class.isInstance(metric);
}
});
}
/**
* Removes the metric with the given name.
*
* @param name the name of the metric
* @return whether or not the metric was removed
*/
public boolean remove(String name) {
final Metric metric = metrics.remove(name);
if (metric != null) {
onMetricRemoved(name, metric);
return true;
}
return false;
}
/**
* Removes all metrics which match the given filter.
*
* @param filter a filter
*/
public void removeMatching(MetricFilter filter) {
for (Map.Entry<String, Metric> entry : metrics.entrySet()) {
if (filter.matches(entry.getKey(), entry.getValue())) {
remove(entry.getKey());
}
}
}
/**
* Adds a {@link MetricRegistryListener} to a collection of listeners that will be notified on
* metric creation. Listeners will be notified in the order in which they are added.
* <p>
* <b>N.B.:</b> The listener will be notified of all existing metrics when it first registers.
*
* @param listener the listener that will be notified
*/
public void addListener(MetricRegistryListener listener) {
listeners.add(listener);
for (Map.Entry<String, Metric> entry : metrics.entrySet()) {
notifyListenerOfAddedMetric(listener, entry.getValue(), entry.getKey());
}
}
/**
* Removes a {@link MetricRegistryListener} from this registry's collection of listeners.
*
* @param listener the listener that will be removed
*/
public void removeListener(MetricRegistryListener listener) {
listeners.remove(listener);
}
/**
* Returns a set of the names of all the metrics in the registry.
*
* @return the names of all the metrics
*/
public SortedSet<String> getNames() {
return Collections.unmodifiableSortedSet(new TreeSet<>(metrics.keySet()));
}
/**
* Returns a map of all the gauges in the registry and their names.
*
* @return all the gauges in the registry
*/
@SuppressWarnings("rawtypes")
public SortedMap<String, Gauge> getGauges() {
return getGauges(MetricFilter.ALL);
}
/**
* Returns a map of all the gauges in the registry and their names which match the given filter.
*
* @param filter the metric filter to match
* @return all the gauges in the registry
*/
@SuppressWarnings("rawtypes")
public SortedMap<String, Gauge> getGauges(MetricFilter filter) {
return getMetrics(Gauge.class, filter);
}
/**
* Returns a map of all the counters in the registry and their names.
*
* @return all the counters in the registry
*/
public SortedMap<String, Counter> getCounters() {
return getCounters(MetricFilter.ALL);
}
/**
* Returns a map of all the counters in the registry and their names which match the given
* filter.
*
* @param filter the metric filter to match
* @return all the counters in the registry
*/
public SortedMap<String, Counter> getCounters(MetricFilter filter) {
return getMetrics(Counter.class, filter);
}
/**
* Returns a map of all the histograms in the registry and their names.
*
* @return all the histograms in the registry
*/
public SortedMap<String, Histogram> getHistograms() {
return getHistograms(MetricFilter.ALL);
}
/**
* Returns a map of all the histograms in the registry and their names which match the given
* filter.
*
* @param filter the metric filter to match
* @return all the histograms in the registry
*/
public SortedMap<String, Histogram> getHistograms(MetricFilter filter) {
return getMetrics(Histogram.class, filter);
}
/**
* Returns a map of all the meters in the registry and their names.
*
* @return all the meters in the registry
*/
public SortedMap<String, Meter> getMeters() {
return getMeters(MetricFilter.ALL);
}
/**
* Returns a map of all the meters in the registry and their names which match the given filter.
*
* @param filter the metric filter to match
* @return all the meters in the registry
*/
public SortedMap<String, Meter> getMeters(MetricFilter filter) {
return getMetrics(Meter.class, filter);
}
/**
* Returns a map of all the timers in the registry and their names.
*
* @return all the timers in the registry
*/
public SortedMap<String, Timer> getTimers() {
return getTimers(MetricFilter.ALL);
}
/**
* Returns a map of all the timers in the registry and their names which match the given filter.
*
* @param filter the metric filter to match
* @return all the timers in the registry
*/
public SortedMap<String, Timer> getTimers(MetricFilter filter) {
return getMetrics(Timer.class, filter);
}
@SuppressWarnings("unchecked")
private <T extends Metric> T getOrAdd(String name, MetricBuilder<T> builder) {
final Metric metric = metrics.get(name);
if (builder.isInstance(metric)) {
return (T) metric;
} else if (metric == null) {
try {
return register(name, builder.newMetric());
} catch (IllegalArgumentException e) {
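// Another thread may have registered a metric under this name between the
// get() above and register(); if that metric's type matches, return it.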
final Metric added = metrics.get(name);
if (builder.isInstance(added)) {
return (T) added;
}
}
}
throw new IllegalArgumentException(name + " is already used for a different type of metric");
}
@SuppressWarnings("unchecked")
private <T extends Metric> SortedMap<String, T> getMetrics(Class<T> klass, MetricFilter filter) {
final TreeMap<String, T> timers = new TreeMap<>();
for (Map.Entry<String, Metric> entry : metrics.entrySet()) {
if (klass.isInstance(entry.getValue()) && filter.matches(entry.getKey(),
entry.getValue())) {
timers.put(entry.getKey(), (T) entry.getValue());
}
}
return Collections.unmodifiableSortedMap(timers);
}
private void onMetricAdded(String name, Metric metric) {
for (MetricRegistryListener listener : listeners) {
notifyListenerOfAddedMetric(listener, metric, name);
}
}
private void notifyListenerOfAddedMetric(MetricRegistryListener listener, Metric metric, String name) {
if (metric instanceof Gauge) {
listener.onGaugeAdded(name, (Gauge<?>) metric);
} else if (metric instanceof Counter) {
listener.onCounterAdded(name, (Counter) metric);
} else if (metric instanceof Histogram) {
listener.onHistogramAdded(name, (Histogram) metric);
} else if (metric instanceof Meter) {
listener.onMeterAdded(name, (Meter) metric);
} else if (metric instanceof Timer) {
listener.onTimerAdded(name, (Timer) metric);
} else {
throw new IllegalArgumentException("Unknown metric type: " + metric.getClass());
}
}
private void onMetricRemoved(String name, Metric metric) {
for (MetricRegistryListener listener : listeners) {
notifyListenerOfRemovedMetric(name, metric, listener);
}
}
private void notifyListenerOfRemovedMetric(String name, Metric metric, MetricRegistryListener listener) {
if (metric instanceof Gauge) {
listener.onGaugeRemoved(name);
} else if (metric instanceof Counter) {
listener.onCounterRemoved(name);
} else if (metric instanceof Histogram) {
listener.onHistogramRemoved(name);
} else if (metric instanceof Meter) {
listener.onMeterRemoved(name);
} else if (metric instanceof Timer) {
listener.onTimerRemoved(name);
} else {
throw new IllegalArgumentException("Unknown metric type: " + metric.getClass());
}
}
/**
* Given a metric set, registers its metrics with the given prefix prepended to their names.
*
* @param prefix a name prefix
* @param metrics a set of metrics
* @throws IllegalArgumentException if any of the names are already registered
*/
public void registerAll(String prefix, MetricSet metrics) throws IllegalArgumentException {
for (Map.Entry<String, Metric> entry : metrics.getMetrics().entrySet()) {
if (entry.getValue() instanceof MetricSet) {
registerAll(name(prefix, entry.getKey()), (MetricSet) entry.getValue());
} else {
register(name(prefix, entry.getKey()), entry.getValue());
}
}
}
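/*
 * A minimal usage sketch (assumes the metrics-jvm module and its
 * MemoryUsageGaugeSet are available):
 *
 *   registry.registerAll("jvm", new MemoryUsageGaugeSet());
 *   // registers names such as "jvm.heap.used"
 */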
@Override
public Map<String, Metric> getMetrics() {
return Collections.unmodifiableMap(metrics);
}
@FunctionalInterface
public interface MetricSupplier<T extends Metric> {
T newMetric();
}
/**
* A quick and easy way of capturing the notion of default metrics.
*/
private interface MetricBuilder<T extends Metric> {
MetricBuilder<Counter> COUNTERS = new MetricBuilder<Counter>() {
@Override
public Counter newMetric() {
return new Counter();
}
@Override
public boolean isInstance(Metric metric) {
return Counter.class.isInstance(metric);
}
};
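// The default histogram uses an exponentially decaying reservoir, which
// biases the sample toward recent measurements instead of keeping a
// uniform sample of the whole stream.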
MetricBuilder<Histogram> HISTOGRAMS = new MetricBuilder<Histogram>() {
@Override
public Histogram newMetric() {
return new Histogram(new ExponentiallyDecayingReservoir());
}
@Override
public boolean isInstance(Metric metric) {
return Histogram.class.isInstance(metric);
}
};
MetricBuilder<Meter> METERS = new MetricBuilder<Meter>() {
@Override
public Meter newMetric() {
return new Meter();
}
@Override
public boolean isInstance(Metric metric) {
return Meter.class.isInstance(metric);
}
};
MetricBuilder<Timer> TIMERS = new MetricBuilder<Timer>() {
@Override
public Timer newMetric() {
return new Timer();
}
@Override
public boolean isInstance(Metric metric) {
return Timer.class.isInstance(metric);
}
};
T newMetric();
boolean isInstance(Metric metric);
}
}
| 1 | 7,295 | I'd make this a `throw new NullPointerException("metric == null");` instead | dropwizard-metrics | java |
@@ -27,6 +27,7 @@ extern "C" {
#include "ScriptingEnvironment.h"
#include "../typedefs.h"
#include "../Util/OpenMPWrapper.h"
+#include "../Util/LuaUtil.h"
ScriptingEnvironment::ScriptingEnvironment() {}
ScriptingEnvironment::ScriptingEnvironment(const char * fileName) {
| 1 |
/*
open source routing machine
Copyright (C) Dennis Luxen, others 2010
This program is free software; you can redistribute it and/or modify
it under the terms of the GNU AFFERO General Public License as published by
the Free Software Foundation; either version 3 of the License, or
any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
or see http://www.gnu.org/licenses/agpl.txt.
*/
extern "C" {
#include <lua.h>
#include <lauxlib.h>
#include <lualib.h>
}
#include "ScriptingEnvironment.h"
#include "../typedefs.h"
#include "../Util/OpenMPWrapper.h"
ScriptingEnvironment::ScriptingEnvironment() {}
ScriptingEnvironment::ScriptingEnvironment(const char * fileName) {
INFO("Using script " << fileName);
// Create a new lua state
for(int i = 0; i < omp_get_max_threads(); ++i)
luaStateVector.push_back(luaL_newstate());
// Connect LuaBind to this lua state for all threads
#pragma omp parallel
{
lua_State * myLuaState = getLuaStateForThreadID(omp_get_thread_num());
luabind::open(myLuaState);
// open utility libraries, e.g. the string library
luaL_openlibs(myLuaState);
// Add our function to the state's global scope
luabind::module(myLuaState) [
luabind::def("print", LUA_print<std::string>),
luabind::def("parseMaxspeed", parseMaxspeed),
luabind::def("durationIsValid", durationIsValid),
luabind::def("parseDuration", parseDuration)
];
//#pragma omp critical
// {
// if(0 != luaL_dostring(
// myLuaState,
// "print('Initializing LUA engine')\n"
// )) {
// ERR(lua_tostring(myLuaState,-1) << " occurred in scripting block");
// }
// }
luabind::module(myLuaState) [
luabind::class_<HashTable<std::string, std::string> >("keyVals")
.def("Add", &HashTable<std::string, std::string>::Add)
.def("Find", &HashTable<std::string, std::string>::Find)
.def("Holds", &HashTable<std::string, std::string>::Holds)
];
luabind::module(myLuaState) [
luabind::class_<ImportNode>("Node")
.def(luabind::constructor<>())
.def_readwrite("lat", &ImportNode::lat)
.def_readwrite("lon", &ImportNode::lon)
.def_readwrite("id", &ImportNode::id)
.def_readwrite("bollard", &ImportNode::bollard)
.def_readwrite("traffic_light", &ImportNode::trafficLight)
.def_readwrite("tags", &ImportNode::keyVals)
];
luabind::module(myLuaState) [
luabind::class_<_Way>("Way")
.def(luabind::constructor<>())
.def_readwrite("name", &_Way::name)
.def_readwrite("speed", &_Way::speed)
.def_readwrite("type", &_Way::type)
.def_readwrite("access", &_Way::access)
.def_readwrite("roundabout", &_Way::roundabout)
.def_readwrite("is_duration_set", &_Way::isDurationSet)
.def_readwrite("is_access_restricted", &_Way::isAccessRestricted)
.def_readwrite("ignore_in_grid", &_Way::ignoreInGrid)
.def_readwrite("tags", &_Way::keyVals)
.def_readwrite("direction", &_Way::direction)
.enum_("constants")
[
luabind::value("notSure", 0),
luabind::value("oneway", 1),
luabind::value("bidirectional", 2),
luabind::value("opposite", 3)
]
];
// Now call our function in a lua script
//#pragma omp critical
// {
// INFO("Parsing speedprofile from " << fileName );
// }
if(0 != luaL_dofile(myLuaState, fileName) ) {
ERR(lua_tostring(myLuaState,-1) << " occurred in scripting block");
}
}
}
ScriptingEnvironment::~ScriptingEnvironment() {
for(unsigned i = 0; i < luaStateVector.size(); ++i) {
// Release each state created by luaL_newstate(); otherwise they leak.
lua_close(luaStateVector[i]);
}
}
lua_State * ScriptingEnvironment::getLuaStateForThreadID(const int id) {
return luaStateVector[id];
}
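// A minimal usage sketch (profile name and way_function call are hypothetical;
// each OpenMP thread must only use its own lua_State):
//
//   ScriptingEnvironment env("car.lua");
//   #pragma omp parallel
//   {
//       lua_State * state = env.getLuaStateForThreadID(omp_get_thread_num());
//       // e.g. luabind::call_function<int>(state, "way_function", ...);
//   }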
| 1 | 12,295 | Include should be order lexicographically. | Project-OSRM-osrm-backend | cpp |
@@ -538,7 +538,7 @@ public class OAuthWebviewHelper implements KeyChainAliasCallback {
final PasscodeManager passcodeManager = mgr.getPasscodeManager();
passcodeManager.storeMobilePolicyForOrg(account, id.screenLockTimeout * 1000 * 60, id.pinLength);
passcodeManager.setTimeoutMs(id.screenLockTimeout * 1000 * 60);
- passcodeManager.setMinPasscodeLength(id.pinLength);
+ boolean changeRequired = passcodeManager.setMinPasscodeLength((Activity) getContext(), id.pinLength);
/*
* Checks if a passcode already exists. If a passcode has NOT
| 1 |
/*
* Copyright (c) 2011-present, salesforce.com, inc.
* All rights reserved.
* Redistribution and use of this software in source and binary forms, with or
* without modification, are permitted provided that the following conditions
* are met:
* - Redistributions of source code must retain the above copyright notice, this
* list of conditions and the following disclaimer.
* - Redistributions in binary form must reproduce the above copyright notice,
* this list of conditions and the following disclaimer in the documentation
* and/or other materials provided with the distribution.
* - Neither the name of salesforce.com, inc. nor the names of its contributors
* may be used to endorse or promote products derived from this software without
* specific prior written permission of salesforce.com, inc.
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
* AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
* ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
* LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
* CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
* SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
* INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
* CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
* ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
* POSSIBILITY OF SUCH DAMAGE.
*/
package com.salesforce.androidsdk.ui;
import android.annotation.TargetApi;
import android.app.Activity;
import android.content.Context;
import android.content.Intent;
import android.net.Uri;
import android.net.http.SslError;
import android.os.AsyncTask;
import android.os.Build;
import android.os.Bundle;
import android.security.KeyChain;
import android.security.KeyChainAliasCallback;
import android.security.KeyChainException;
import android.text.TextUtils;
import android.util.Log;
import android.webkit.ClientCertRequest;
import android.webkit.SslErrorHandler;
import android.webkit.WebChromeClient;
import android.webkit.WebSettings;
import android.webkit.WebView;
import android.webkit.WebViewClient;
import android.widget.Toast;
import com.salesforce.androidsdk.R;
import com.salesforce.androidsdk.accounts.UserAccount;
import com.salesforce.androidsdk.accounts.UserAccountManager;
import com.salesforce.androidsdk.analytics.EventBuilderHelper;
import com.salesforce.androidsdk.app.SalesforceSDKManager;
import com.salesforce.androidsdk.auth.HttpAccess;
import com.salesforce.androidsdk.auth.OAuth2;
import com.salesforce.androidsdk.auth.OAuth2.IdServiceResponse;
import com.salesforce.androidsdk.auth.OAuth2.TokenEndpointResponse;
import com.salesforce.androidsdk.config.BootConfig;
import com.salesforce.androidsdk.config.LoginServerManager;
import com.salesforce.androidsdk.config.RuntimeConfig;
import com.salesforce.androidsdk.push.PushMessaging;
import com.salesforce.androidsdk.rest.ClientManager;
import com.salesforce.androidsdk.rest.ClientManager.LoginOptions;
import com.salesforce.androidsdk.security.PasscodeManager;
import com.salesforce.androidsdk.util.EventsObservable;
import com.salesforce.androidsdk.util.EventsObservable.EventType;
import com.salesforce.androidsdk.util.MapUtil;
import com.salesforce.androidsdk.util.UriFragmentParser;
import org.json.JSONArray;
import org.json.JSONException;
import org.json.JSONObject;
import java.net.URI;
import java.net.URISyntaxException;
import java.security.PrivateKey;
import java.security.cert.X509Certificate;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.concurrent.ExecutorService;
import java.util.concurrent.Executors;
/**
* Helper class to manage a WebView instance that is going through the OAuth login process.
* Basic flow is
* a) load and show the login page to the user
* b) the user logs in and authorizes the app
* c) we see the navigation to the auth complete URL, and grab the tokens
* d) we call the Id service to obtain additional info about the user
* e) we create a local account, and return an authentication result bundle.
* f) done!
*
*/
public class OAuthWebviewHelper implements KeyChainAliasCallback {
// Set a custom permission on your connected application with that name if you want
// the application to be restricted to managed devices
public static final String MUST_BE_MANAGED_APP_PERM = "must_be_managed_app";
public static final String AUTHENTICATION_FAILED_INTENT = "com.salesforce.auth.intent.AUTHENTICATION_ERROR";
public static final String HTTP_ERROR_RESPONSE_CODE_INTENT = "com.salesforce.auth.intent.HTTP_RESPONSE_CODE";
public static final String RESPONSE_ERROR_INTENT = "com.salesforce.auth.intent.RESPONSE_ERROR";
public static final String RESPONSE_ERROR_DESCRIPTION_INTENT = "com.salesforce.auth.intent.RESPONSE_ERROR_DESCRIPTION";
private static final String TAG = "OAuthWebViewHelper";
private static final String ACCOUNT_OPTIONS = "accountOptions";
// background executor
private final ExecutorService threadPool = Executors.newFixedThreadPool(1);
/**
* The host activity/fragment should pass in an implementation of this
* interface so that the helper can notify it of things it needs to do as
* part of the OAuth process.
*/
public interface OAuthWebviewHelperEvents {
/** we're starting to load this login page into the webview */
void loadingLoginPage(String loginUrl);
/**
* progress update of loading the webview, totalProgress will go from
* 0..10000 (you can pass this directly to the activity progressbar)
*/
void onLoadingProgress(int totalProgress);
/** We're doing something that takes some unknown amount of time */
void onIndeterminateProgress(boolean show);
/** We've completed the auth process and here's the resulting Authentication Result bundle to return to the Authenticator */
void onAccountAuthenticatorResult(Bundle authResult);
/** we're in some end state and requesting that the host activity be finished/closed. */
void finish();
}
/**
* Construct a new OAuthWebviewHelper and perform the initial configuration of the Webview.
*/
public OAuthWebviewHelper(Activity activity, OAuthWebviewHelperEvents callback,
LoginOptions options, WebView webview, Bundle savedInstanceState) {
assert options != null && callback != null && webview != null && activity != null;
this.activity = activity;
this.callback = callback;
this.loginOptions = options;
this.webview = webview;
final WebSettings webSettings = webview.getSettings();
webSettings.setJavaScriptEnabled(true);
webSettings.setUserAgentString(SalesforceSDKManager.getInstance().getUserAgent());
webview.setWebViewClient(makeWebViewClient());
webview.setWebChromeClient(makeWebChromeClient());
/*
* Restores WebView's state if available.
* This ensures the user is not forced to type in credentials again
* once the auth process has been kicked off.
*/
if (savedInstanceState != null) {
webview.restoreState(savedInstanceState);
accountOptions = AccountOptions.fromBundle(savedInstanceState.getBundle(ACCOUNT_OPTIONS));
} else {
clearCookies();
}
}
private final OAuthWebviewHelperEvents callback;
protected final LoginOptions loginOptions;
private final WebView webview;
private AccountOptions accountOptions;
private Activity activity;
private PrivateKey key;
private X509Certificate[] certChain;
public void saveState(Bundle outState) {
webview.saveState(outState);
if (accountOptions != null) {
// we have completed the auth flow but not created the account, because we need to create a pin
outState.putBundle(ACCOUNT_OPTIONS, accountOptions.asBundle());
}
}
public WebView getWebView() {
return webview;
}
public void clearCookies() {
SalesforceSDKManager.getInstance().removeAllCookies();
}
public void clearView() {
webview.loadUrl("about:blank");
}
/**
* Method called by login activity when it resumes after the passcode activity
*
* When the server has a mobile policy requiring a passcode, we start the passcode activity after completing the
* auth flow (see onAuthFlowComplete).
* When the passcode activity completes, the login activity's onActivityResult gets invoked, and it calls this method
* to finalize the account creation.
*/
public void onNewPasscode() {
/*
* Re-encryption of existing accounts with the new passcode is taken
* care of in the 'Confirm Passcode' step in PasscodeActivity.
*/
if (accountOptions != null) {
loginOptions.setPasscodeHash(SalesforceSDKManager.getInstance().getPasscodeHash());
addAccount();
callback.finish();
}
}
/** Factory method for the WebViewClient, you can replace this with something else if you need to */
protected WebViewClient makeWebViewClient() {
return new AuthWebViewClient();
}
/** Factory method for the WebChromeClient, you can replace this with something else if you need to */
protected WebChromeClient makeWebChromeClient() {
return new AuthWebChromeClient();
}
protected Context getContext() {
return webview.getContext();
}
/**
* Called when the user facing part of the auth flow completed with an error.
* We show the user an error and end the activity.
*
* @param error Error.
* @param errorDesc Error description.
* @param e Exception.
*/
protected void onAuthFlowError(String error, String errorDesc, Exception e) {
Log.w(TAG, error + ":" + errorDesc);
// look for deny. kick them back to login, so clear cookies and repoint browser
if ("access_denied".equals(error)
&& "end-user denied authorization".equals(errorDesc)) {
webview.post(new Runnable() {
@Override
public void run() {
clearCookies();
loadLoginPage();
}
});
} else {
Toast t = Toast.makeText(webview.getContext(), error + " : " + errorDesc,
Toast.LENGTH_LONG);
webview.postDelayed(new Runnable() {
@Override
public void run() {
callback.finish();
}
}, t.getDuration());
t.show();
}
final Intent intent = new Intent(AUTHENTICATION_FAILED_INTENT);
if (e instanceof OAuth2.OAuthFailedException) {
final OAuth2.OAuthFailedException exception = (OAuth2.OAuthFailedException) e;
int statusCode = exception.getHttpStatusCode();
intent.putExtra(HTTP_ERROR_RESPONSE_CODE_INTENT, statusCode);
final OAuth2.TokenErrorResponse errorResponse = exception.getTokenErrorResponse();
if (errorResponse != null) {
final String tokenError = errorResponse.error;
final String tokenErrorDesc = errorResponse.errorDescription;
intent.putExtra(RESPONSE_ERROR_INTENT, tokenError);
intent.putExtra(RESPONSE_ERROR_DESCRIPTION_INTENT, tokenErrorDesc);
}
}
SalesforceSDKManager.getInstance().getAppContext().sendBroadcast(intent);
}
protected void showError(Exception exception) {
Toast.makeText(getContext(),
getContext().getString(SalesforceSDKManager.getInstance().getSalesforceR().stringGenericError(), exception.toString()),
Toast.LENGTH_LONG).show();
}
/**
* Tells the webview to load the authorization page.
* We also update the window title, so it's easier to
* see which system you're logging in to.
*/
public void loadLoginPage() {
if (TextUtils.isEmpty(loginOptions.getJwt())) {
loginOptions.setLoginUrl(getLoginUrl());
doLoadPage(false);
} else {
new SwapJWTForAccessTokenTask().execute(loginOptions);
}
}
private void doLoadPage(boolean jwtFlow) {
try {
URI uri = getAuthorizationUrl(jwtFlow);
callback.loadingLoginPage(loginOptions.getLoginUrl());
webview.loadUrl(uri.toString());
} catch (URISyntaxException ex) {
showError(ex);
}
}
protected String getOAuthClientId() {
return loginOptions.getOauthClientId();
}
protected URI getAuthorizationUrl(Boolean jwtFlow) throws URISyntaxException {
if (jwtFlow) {
return OAuth2.getAuthorizationUrl(
new URI(loginOptions.getLoginUrl()),
getOAuthClientId(),
loginOptions.getOauthCallbackUrl(),
loginOptions.getOauthScopes(),
null,
getAuthorizationDisplayType(), loginOptions.getJwt(), loginOptions.getLoginUrl(),loginOptions.getAdditionalParameters());
}
return OAuth2.getAuthorizationUrl(
new URI(loginOptions.getLoginUrl()),
getOAuthClientId(),
loginOptions.getOauthCallbackUrl(),
loginOptions.getOauthScopes(),
null,
getAuthorizationDisplayType(),loginOptions.getAdditionalParameters());
}
protected URI getAuthorizationUrl() throws URISyntaxException {
return getAuthorizationUrl(false);
}
/**
* Override this to replace the default login webview's display param with
* your custom display param. You can override this by either subclassing this class,
* or adding "<string name="sf__oauth_display_type">desiredDisplayParam</string>"
* to your app's resource so that it overrides the default value in the SDK library.
*
* @return the OAuth login display type, e.g. 'mobile', 'touch',
* see the OAuth docs for the complete list of valid values.
*/
protected String getAuthorizationDisplayType() {
return this.getContext().getString(R.string.oauth_display_type);
}
/**
* Override this method to customize the login url.
* @return login url
*/
protected String getLoginUrl() {
return SalesforceSDKManager.getInstance().getLoginServerManager().getSelectedLoginServer().url.trim();
}
/**
* WebViewClient which intercepts the redirect to the oauth callback url.
* That redirect marks the end of the user facing portion of the authentication flow.
*/
protected class AuthWebViewClient extends WebViewClient {
@Override
public void onPageFinished(WebView view, String url) {
EventsObservable.get().notifyEvent(EventType.AuthWebViewPageFinished, url);
super.onPageFinished(view, url);
}
@Override
public boolean shouldOverrideUrlLoading(WebView view, String url) {
boolean isDone = url.replace("///", "/").toLowerCase(Locale.US).startsWith(loginOptions.getOauthCallbackUrl().replace("///", "/").toLowerCase(Locale.US));
if (isDone) {
Uri callbackUri = Uri.parse(url);
Map<String, String> params = UriFragmentParser.parse(callbackUri);
String error = params.get("error");
// Did we fail?
if (error != null) {
String errorDesc = params.get("error_description");
onAuthFlowError(error, errorDesc, null);
}
// Or succeed?
else {
TokenEndpointResponse tr = new TokenEndpointResponse(params);
onAuthFlowComplete(tr);
}
}
return isDone;
}
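/*
 * The "///" normalization above makes, e.g., a configured callback URL of
 * "myapp:///mobilesdk/oauth/done" match a server redirect to
 * "myapp://mobilesdk/oauth/done#access_token=..." (URLs hypothetical).
 */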
@Override
public void onReceivedSslError(WebView view, SslErrorHandler handler, SslError error) {
int primError = error.getPrimaryError();
// Figuring out string resource id
SalesforceR r = SalesforceSDKManager.getInstance().getSalesforceR();
int primErrorStringId = r.stringSSLUnknownError();
switch (primError) {
case SslError.SSL_EXPIRED: primErrorStringId = r.stringSSLExpired(); break;
case SslError.SSL_IDMISMATCH: primErrorStringId = r.stringSSLIdMismatch(); break;
case SslError.SSL_NOTYETVALID: primErrorStringId = r.stringSSLNotYetValid(); break;
case SslError.SSL_UNTRUSTED: primErrorStringId = r.stringSSLUntrusted(); break;
}
// Building text message to show
String text = getContext().getString(r.stringSSLError(), getContext().getString(primErrorStringId));
// Bringing up toast
Toast.makeText(getContext(), text, Toast.LENGTH_LONG).show();
handler.cancel();
}
@TargetApi(Build.VERSION_CODES.LOLLIPOP)
@Override
public void onReceivedClientCertRequest(WebView view, ClientCertRequest request) {
request.proceed(key, certChain);
}
}
/**
* Called when the user facing part of the auth flow completed successfully.
* The last step is to call the identity service to get the username.
*/
protected void onAuthFlowComplete(TokenEndpointResponse tr) {
FinishAuthTask t = new FinishAuthTask();
t.execute(tr);
}
private class SwapJWTForAccessTokenTask extends BaseFinishAuthFlowTask<LoginOptions> {
@Override
protected TokenEndpointResponse performRequest(LoginOptions options) {
try {
return OAuth2.swapJWTForTokens(HttpAccess.DEFAULT, new URI(options.getLoginUrl()), options.getJwt());
} catch (Exception e) {
backgroundException = e;
}
return null;
}
@Override
protected void onPostExecute(TokenEndpointResponse tr) {
if (backgroundException != null) {
handleJWTError();
loginOptions.setJwt(null);
return;
}
if (tr != null && tr.authToken != null) {
loginOptions.setJwt(tr.authToken);
doLoadPage(true);
} else {
doLoadPage(false);
handleJWTError();
}
loginOptions.setJwt(null);
}
private void handleJWTError() {
final SalesforceSDKManager mgr = SalesforceSDKManager.getInstance();
onAuthFlowError(getContext().getString(mgr.getSalesforceR().stringGenericAuthenticationErrorTitle()),
getContext().getString(mgr.getSalesforceR().stringJWTAuthenticationErrorBody()), backgroundException);
}
}
/**
* Base class with common code for the background task that finishes off the auth process.
*/
protected abstract class BaseFinishAuthFlowTask<RequestType> extends AsyncTask<RequestType, Boolean, TokenEndpointResponse> {
protected volatile Exception backgroundException;
protected volatile IdServiceResponse id = null;
public BaseFinishAuthFlowTask() {
}
@SafeVarargs
@Override
protected final TokenEndpointResponse doInBackground(RequestType ... params) {
try {
publishProgress(true);
return performRequest(params[0]);
} catch (Exception ex) {
handleException(ex);
}
return null;
}
protected abstract TokenEndpointResponse performRequest(RequestType param) throws Exception;
@Override
protected void onPostExecute(OAuth2.TokenEndpointResponse tr) {
final SalesforceSDKManager mgr = SalesforceSDKManager.getInstance();
// Failure cases.
if (backgroundException != null) {
Log.w(TAG, backgroundException);
onAuthFlowError(getContext().getString(mgr.getSalesforceR().stringGenericAuthenticationErrorTitle()),
getContext().getString(mgr.getSalesforceR().stringGenericAuthenticationErrorBody()), backgroundException);
callback.finish();
return;
}
if (id.customPermissions != null) {
final boolean mustBeManagedApp = id.customPermissions.optBoolean(MUST_BE_MANAGED_APP_PERM);
if (mustBeManagedApp && !RuntimeConfig.getRuntimeConfig(getContext()).isManagedApp()) {
onAuthFlowError(getContext().getString(mgr.getSalesforceR().stringGenericAuthenticationErrorTitle()),
getContext().getString(mgr.getSalesforceR().stringManagedAppError()), backgroundException);
callback.finish();
return;
}
}
// Putting together all the information needed to create the new account.
accountOptions = new AccountOptions(id.username, tr.refreshToken,
tr.authToken, tr.idUrl, tr.instanceUrl, tr.orgId, tr.userId,
tr.communityId, tr.communityUrl, id.firstName, id.lastName,
id.displayName, id.email, id.pictureUrl, id.thumbnailUrl, tr.additionalOauthValues);
// Sets additional admin prefs, if they exist.
final UserAccount account = new UserAccount(accountOptions.authToken,
accountOptions.refreshToken, loginOptions.getLoginUrl(),
accountOptions.identityUrl, accountOptions.instanceUrl,
accountOptions.orgId, accountOptions.userId,
accountOptions.username, buildAccountName(accountOptions.username,
accountOptions.instanceUrl), loginOptions.getClientSecret(),
accountOptions.communityId, accountOptions.communityUrl,
accountOptions.firstName, accountOptions.lastName, accountOptions.displayName,
accountOptions.email, accountOptions.photoUrl,
accountOptions.thumbnailUrl, accountOptions.additionalOauthValues);
if (id.customAttributes != null) {
mgr.getAdminSettingsManager().setPrefs(id.customAttributes, account);
}
if (id.customPermissions != null) {
mgr.getAdminPermsManager().setPrefs(id.customPermissions, account);
}
// Screen lock required by mobile policy.
if (id.screenLockTimeout > 0) {
// Stores the mobile policy for the org.
final PasscodeManager passcodeManager = mgr.getPasscodeManager();
passcodeManager.storeMobilePolicyForOrg(account, id.screenLockTimeout * 1000 * 60, id.pinLength);
passcodeManager.setTimeoutMs(id.screenLockTimeout * 1000 * 60);
passcodeManager.setMinPasscodeLength(id.pinLength);
/*
* Checks if a passcode already exists. If a passcode has NOT
* been created yet, the user is taken through the passcode
* creation flow, at the end of which account data is encrypted
* with a hash of the passcode. Other existing accounts are
* also re-encrypted behind the scenes at this point. If a
* passcode already exists, the existing hash is used and the
* account is added at this point.
*/
if (!passcodeManager.hasStoredPasscode(mgr.getAppContext())) {
// This will bring up the create passcode screen - we will create the account in onResume
mgr.getPasscodeManager().setEnabled(true);
mgr.getPasscodeManager().lockIfNeeded((Activity) getContext(), true);
} else {
loginOptions.setPasscodeHash(mgr.getPasscodeHash());
addAccount();
callback.finish();
}
}
// No screen lock required or no mobile policy specified
else {
final PasscodeManager passcodeManager = mgr.getPasscodeManager();
passcodeManager.storeMobilePolicyForOrg(account, 0, PasscodeManager.MIN_PASSCODE_LENGTH);
loginOptions.setPasscodeHash(mgr.getPasscodeHash());
addAccount();
callback.finish();
}
}
protected void handleException(Exception ex) {
if (ex.getMessage() != null) {
Log.w(TAG, "handleException", ex);
}
backgroundException = ex;
}
@Override
protected void onProgressUpdate(Boolean... values) {
callback.onIndeterminateProgress(values[0]);
}
}
/**
* This is a background process that will call the identity service to get the info we need from
* the Identity service, and finally wrap up and create account.
*/
private class FinishAuthTask extends BaseFinishAuthFlowTask<TokenEndpointResponse> {
@Override
protected TokenEndpointResponse performRequest(TokenEndpointResponse tr) throws Exception {
try {
id = OAuth2.callIdentityService(
HttpAccess.DEFAULT, tr.idUrlWithInstance, tr.authToken);
} catch(Exception e) {
backgroundException = e;
}
return tr;
}
}
protected void addAccount() {
ClientManager clientManager = new ClientManager(getContext(),
SalesforceSDKManager.getInstance().getAccountType(),
loginOptions, SalesforceSDKManager.getInstance().shouldLogoutWhenTokenRevoked());
// Create account name (shown in Settings -> Accounts & sync)
String accountName = buildAccountName(accountOptions.username,
accountOptions.instanceUrl);
// New account
Bundle extras = clientManager.createNewAccount(accountName,
accountOptions.username,
accountOptions.refreshToken,
accountOptions.authToken,
accountOptions.instanceUrl,
loginOptions.getLoginUrl(),
accountOptions.identityUrl,
getOAuthClientId(),
accountOptions.orgId,
accountOptions.userId,
loginOptions.getPasscodeHash(),
loginOptions.getClientSecret(),
accountOptions.communityId,
accountOptions.communityUrl,
accountOptions.firstName,
accountOptions.lastName,
accountOptions.displayName,
accountOptions.email,
accountOptions.photoUrl,
accountOptions.thumbnailUrl,
accountOptions.additionalOauthValues);
/*
* Registers for push notifications, if push notification client ID is present.
* This step needs to happen after the account has been added by client
* manager, so that the push service has all the account info it needs.
*/
final Context appContext = SalesforceSDKManager.getInstance().getAppContext();
final String pushNotificationId = BootConfig.getBootConfig(appContext).getPushNotificationClientId();
final UserAccount account = new UserAccount(accountOptions.authToken,
accountOptions.refreshToken, loginOptions.getLoginUrl(),
accountOptions.identityUrl, accountOptions.instanceUrl,
accountOptions.orgId, accountOptions.userId,
accountOptions.username, accountName,
loginOptions.getClientSecret(), accountOptions.communityId,
accountOptions.communityUrl, accountOptions.firstName,
accountOptions.lastName, accountOptions.displayName, accountOptions.email,
accountOptions.photoUrl, accountOptions.thumbnailUrl, accountOptions.additionalOauthValues);
if (!TextUtils.isEmpty(pushNotificationId)) {
PushMessaging.register(appContext, account);
}
// Logs analytics event for new user.
final JSONObject userAttr = new JSONObject();
try {
final List<UserAccount> users = UserAccountManager.getInstance().getAuthenticatedUsers();
userAttr.put("numUsers", (users == null) ? 0 : users.size());
} catch (JSONException e) {
Log.e(TAG, "Exception thrown while creating JSON", e);
}
callback.onAccountAuthenticatorResult(extras);
if (SalesforceSDKManager.getInstance().getIsTestRun()) {
logAddAccount(account);
} else {
threadPool.execute(new Runnable() {
@Override
public void run() {
logAddAccount(account);
}
});
}
}
/**
* Log the addition of a new account.
* @param account
*/
private void logAddAccount(UserAccount account) {
final JSONObject serverAttr = new JSONObject();
try {
final List<LoginServerManager.LoginServer> servers = SalesforceSDKManager.getInstance().getLoginServerManager().getLoginServers();
serverAttr.put("numLoginServers", (servers == null) ? 0 : servers.size());
if (servers != null) {
final JSONArray serversJson = new JSONArray();
for (final LoginServerManager.LoginServer server : servers) {
if (server != null) {
serversJson.put(server.url);
}
}
serverAttr.put("loginServers", serversJson);
}
EventBuilderHelper.createAndStoreEventSync("addUser", account, TAG, serverAttr);
} catch (JSONException e) {
Log.e(TAG, "Exception thrown while creating JSON", e);
}
}
/**
* @return name to be shown for account in Settings -> Accounts & Sync
*/
protected String buildAccountName(String username, String instanceServer) {
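// Yields e.g. "user@example.com (https://login.salesforce.com) (MyApp)"
// (example values hypothetical).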
return String.format("%s (%s) (%s)", username, instanceServer,
SalesforceSDKManager.getInstance().getApplicationName());
}
/**
* WebChromeClient used to report back loading progress.
*/
protected class AuthWebChromeClient extends WebChromeClient {
@Override
public void onProgressChanged(WebView view, int newProgress) {
callback.onLoadingProgress(newProgress * 100);
}
}
/**
* Class encapsulating the parameters required to create a new account
*/
public static class AccountOptions {
private static final String USER_ID = "userId";
private static final String ORG_ID = "orgId";
private static final String IDENTITY_URL = "identityUrl";
private static final String INSTANCE_URL = "instanceUrl";
private static final String AUTH_TOKEN = "authToken";
private static final String REFRESH_TOKEN = "refreshToken";
private static final String USERNAME = "username";
private static final String COMMUNITY_ID = "communityId";
private static final String COMMUNITY_URL = "communityUrl";
private static final String FIRST_NAME = "firstName";
private static final String LAST_NAME = "lastName";
private static final String DISPLAY_NAME = "displayName";
private static final String EMAIL = "email";
private static final String PHOTO_URL = "photoUrl";
private static final String THUMBNAIL_URL = "thumbnailUrl";
public final String username;
public final String refreshToken;
public final String authToken;
public final String identityUrl;
public final String instanceUrl;
public final String orgId;
public final String userId;
public final String communityId;
public final String communityUrl;
public final String firstName;
public final String lastName;
public final String displayName;
public final String email;
public final String photoUrl;
public final String thumbnailUrl;
public final Map<String, String> additionalOauthValues;
private Bundle bundle;
public AccountOptions(String username, String refreshToken,
String authToken, String identityUrl, String instanceUrl,
String orgId, String userId, String communityId, String communityUrl,
String firstName, String lastName, String displayName, String email,
String photoUrl, String thumbnailUrl, Map<String, String> additionalOauthValues) {
super();
this.username = username;
this.refreshToken = refreshToken;
this.authToken = authToken;
this.identityUrl = identityUrl;
this.instanceUrl = instanceUrl;
this.orgId = orgId;
this.userId = userId;
this.communityId = communityId;
this.communityUrl = communityUrl;
this.firstName = firstName;
this.lastName = lastName;
this.displayName = displayName;
this.email = email;
this.photoUrl = photoUrl;
this.thumbnailUrl = thumbnailUrl;
this.additionalOauthValues = additionalOauthValues;
bundle = new Bundle();
bundle.putString(USERNAME, username);
bundle.putString(REFRESH_TOKEN, refreshToken);
bundle.putString(AUTH_TOKEN, authToken);
bundle.putString(IDENTITY_URL, identityUrl);
bundle.putString(INSTANCE_URL, instanceUrl);
bundle.putString(ORG_ID, orgId);
bundle.putString(USER_ID, userId);
bundle.putString(COMMUNITY_ID, communityId);
bundle.putString(COMMUNITY_URL, communityUrl);
bundle.putString(FIRST_NAME, firstName);
bundle.putString(LAST_NAME, lastName);
bundle.putString(DISPLAY_NAME, displayName);
bundle.putString(EMAIL, email);
bundle.putString(PHOTO_URL, photoUrl);
bundle.putString(THUMBNAIL_URL, thumbnailUrl);
bundle = MapUtil.addMapToBundle(additionalOauthValues,
SalesforceSDKManager.getInstance().getAdditionalOauthKeys(), bundle);
}
public Bundle asBundle() {
return bundle;
}
public static AccountOptions fromBundle(Bundle options) {
if (options == null) {
return null;
}
return new AccountOptions(
options.getString(USERNAME),
options.getString(REFRESH_TOKEN),
options.getString(AUTH_TOKEN),
options.getString(IDENTITY_URL),
options.getString(INSTANCE_URL),
options.getString(ORG_ID),
options.getString(USER_ID),
options.getString(COMMUNITY_ID),
options.getString(COMMUNITY_URL),
options.getString(FIRST_NAME),
options.getString(LAST_NAME),
options.getString(DISPLAY_NAME),
options.getString(EMAIL),
options.getString(PHOTO_URL),
options.getString(THUMBNAIL_URL),
getAdditionalOauthValues(options)
);
}
private static Map<String, String> getAdditionalOauthValues(Bundle options) {
return MapUtil.addBundleToMap(options,
SalesforceSDKManager.getInstance().getAdditionalOauthKeys(), null);
}
}
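/*
 * A minimal round-trip sketch (variable names hypothetical), as used when
 * saving and restoring helper state across process death:
 *
 *   Bundle saved = options.asBundle();
 *   AccountOptions restored = AccountOptions.fromBundle(saved);
 */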
@Override
public void alias(String alias) {
try {
certChain = KeyChain.getCertificateChain(activity, alias);
key = KeyChain.getPrivateKey(activity, alias);
activity.runOnUiThread(new Runnable() {
@Override
public void run() {
loadLoginPage();
}
});
} catch (KeyChainException e) {
e.printStackTrace();
} catch (InterruptedException e) {
e.printStackTrace();
}
}
}
| 1 | 15,953 | We need to pass in the Activity context here, so that LoginActivity is used, and so that LoginActivity can get onActivityResult from the PasscodeActivity. Without this, the application context is used, and we don't get a callback when the passcode is done. | forcedotcom-SalesforceMobileSDK-Android | java |
@@ -39,4 +39,10 @@ class Tracepoint < ActiveRecord::Base
el1 << (XML::Node.new("time") << timestamp.xmlschema) if print_timestamp
el1
end
+
+ # Return points of trackable traces in original order
+ scope :trackable_ordered, -> { joins(:trace).where(:gpx_files => { :visibility => %w[trackable identifiable] }).order("gpx_id DESC, trackid ASC, timestamp ASC") }
+
+ # Hide the order of points of non-trackable traces for privacy
+ scope :non_trackable_unordered, -> { joins(:trace).where(:gpx_files => { :visibility => %w[public private] }).order("gps_points.latitude", "gps_points.longitude") }
end
| 1 |
# == Schema Information
#
# Table name: gps_points
#
# altitude :float
# trackid :integer not null
# latitude :integer not null
# longitude :integer not null
# gpx_id :integer not null
# timestamp :datetime
# tile :integer
#
# Indexes
#
# points_gpxid_idx (gpx_id)
# points_tile_idx (tile)
#
# Foreign Keys
#
# gps_points_gpx_id_fkey (gpx_id => gpx_files.id)
#
class Tracepoint < ActiveRecord::Base
include GeoRecord
self.table_name = "gps_points"
validates :trackid, :numericality => { :only_integer => true }
validates :latitude, :longitude, :numericality => { :only_integer => true }
validates :trace, :associated => true
validates :timestamp, :presence => true
belongs_to :trace, :foreign_key => "gpx_id"
def to_xml_node(print_timestamp = false)
el1 = XML::Node.new "trkpt"
el1["lat"] = lat.to_s
el1["lon"] = lon.to_s
el1 << (XML::Node.new("time") << timestamp.xmlschema) if print_timestamp
el1
end
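# A minimal usage sketch (coordinates and timestamp are hypothetical):
#
#   point.to_xml_node(true).to_s
#   # => "<trkpt lat=\"51.5\" lon=\"-0.1\"><time>2012-01-01T00:00:00Z</time></trkpt>"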
end
| 1 | 11,717 | Can you move these to the top please, for consistency with other models - normally we put scopes immediately after the associations at the top of the model. As to names I agree with @gravitystorm that these names may be confusing but I'm not sure the ones I suggested are perfect either so I'm not really sure what's best. The goal of my names was just to separate point which could be ordered (as `ordered`) from those which can't (as `unordered`) but I realise that may not be very clear from the names. | openstreetmap-openstreetmap-website | rb |
@@ -1117,7 +1117,10 @@ public class PlaybackService extends MediaBrowserServiceCompat {
// only mark the item as played if we're not keeping it anyways
DBWriter.markItemPlayed(item, FeedItem.PLAYED, ended);
// don't know if it actually matters to not autodownload when smart mark as played is triggered
- DBWriter.removeQueueItem(PlaybackService.this, ended, item);
+ boolean shouldRepeat = UserPreferences.getShouldRepeatEpisode();
+ if (!shouldRepeat) {
+ DBWriter.removeQueueItem(PlaybackService.this, ended, item);
+ }
// Delete episode if enabled
FeedPreferences.AutoDeleteAction action =
item.getFeed().getPreferences().getCurrentAutoDelete();
| 1 |
package de.danoeh.antennapod.core.service.playback;
import android.app.NotificationManager;
import android.app.PendingIntent;
import android.app.Service;
import android.app.UiModeManager;
import android.bluetooth.BluetoothA2dp;
import android.content.BroadcastReceiver;
import android.content.ComponentName;
import android.content.ContentResolver;
import android.content.Context;
import android.content.Intent;
import android.content.IntentFilter;
import android.content.SharedPreferences;
import android.content.res.Configuration;
import android.graphics.Bitmap;
import android.media.AudioManager;
import android.media.MediaPlayer;
import android.net.Uri;
import android.os.Binder;
import android.os.Build;
import android.os.Bundle;
import android.os.IBinder;
import android.os.Vibrator;
import androidx.preference.PreferenceManager;
import androidx.annotation.NonNull;
import androidx.annotation.StringRes;
import androidx.core.app.NotificationCompat;
import androidx.core.app.NotificationManagerCompat;
import android.support.v4.media.MediaBrowserCompat;
import androidx.media.MediaBrowserServiceCompat;
import android.support.v4.media.MediaDescriptionCompat;
import android.support.v4.media.MediaMetadataCompat;
import android.support.v4.media.session.MediaSessionCompat;
import android.support.v4.media.session.PlaybackStateCompat;
import android.text.TextUtils;
import android.util.Log;
import android.util.Pair;
import android.view.KeyEvent;
import android.view.SurfaceHolder;
import android.webkit.URLUtil;
import android.widget.Toast;
import com.bumptech.glide.Glide;
import com.bumptech.glide.request.RequestOptions;
import com.bumptech.glide.request.target.Target;
import java.util.ArrayList;
import java.util.Collections;
import java.util.List;
import java.util.concurrent.TimeUnit;
import de.danoeh.antennapod.core.R;
import de.danoeh.antennapod.core.event.MessageEvent;
import de.danoeh.antennapod.core.event.PlaybackPositionEvent;
import de.danoeh.antennapod.core.event.ServiceEvent;
import de.danoeh.antennapod.core.event.settings.SkipIntroEndingChangedEvent;
import de.danoeh.antennapod.core.event.settings.SpeedPresetChangedEvent;
import de.danoeh.antennapod.core.event.settings.VolumeAdaptionChangedEvent;
import de.danoeh.antennapod.model.feed.Chapter;
import de.danoeh.antennapod.model.feed.Feed;
import de.danoeh.antennapod.model.feed.FeedItem;
import de.danoeh.antennapod.model.feed.FeedMedia;
import de.danoeh.antennapod.model.feed.FeedPreferences;
import de.danoeh.antennapod.model.playback.MediaType;
import de.danoeh.antennapod.core.glide.ApGlideSettings;
import de.danoeh.antennapod.core.preferences.PlaybackPreferences;
import de.danoeh.antennapod.core.preferences.SleepTimerPreferences;
import de.danoeh.antennapod.core.preferences.UserPreferences;
import de.danoeh.antennapod.core.receiver.MediaButtonReceiver;
import de.danoeh.antennapod.core.storage.DBReader;
import de.danoeh.antennapod.core.storage.DBTasks;
import de.danoeh.antennapod.core.storage.DBWriter;
import de.danoeh.antennapod.core.storage.FeedSearcher;
import de.danoeh.antennapod.core.feed.util.ImageResourceUtils;
import de.danoeh.antennapod.core.sync.SyncService;
import de.danoeh.antennapod.core.util.FeedItemUtil;
import de.danoeh.antennapod.core.util.IntentUtils;
import de.danoeh.antennapod.core.util.NetworkUtils;
import de.danoeh.antennapod.core.util.gui.NotificationUtils;
import de.danoeh.antennapod.model.playback.Playable;
import de.danoeh.antennapod.core.util.playback.PlayableUtils;
import de.danoeh.antennapod.core.util.playback.PlaybackServiceStarter;
import de.danoeh.antennapod.core.widget.WidgetUpdater;
import de.danoeh.antennapod.ui.appstartintent.MainActivityStarter;
import de.danoeh.antennapod.ui.appstartintent.VideoPlayerActivityStarter;
import io.reactivex.Observable;
import io.reactivex.Single;
import io.reactivex.android.schedulers.AndroidSchedulers;
import io.reactivex.disposables.Disposable;
import io.reactivex.schedulers.Schedulers;
import org.greenrobot.eventbus.EventBus;
import org.greenrobot.eventbus.Subscribe;
import org.greenrobot.eventbus.ThreadMode;
import static de.danoeh.antennapod.model.feed.FeedPreferences.SPEED_USE_GLOBAL;
/**
* Controls the MediaPlayer that plays a FeedMedia file.
*/
public class PlaybackService extends MediaBrowserServiceCompat {
/**
* Logging tag
*/
private static final String TAG = "PlaybackService";
/**
* Parcelable of type Playable.
*/
public static final String EXTRA_PLAYABLE = "PlaybackService.PlayableExtra";
/**
* True if cast session should disconnect.
*/
public static final String EXTRA_CAST_DISCONNECT = "extra.de.danoeh.antennapod.core.service.castDisconnect";
/**
* True if media should be streamed.
*/
public static final String EXTRA_SHOULD_STREAM = "extra.de.danoeh.antennapod.core.service.shouldStream";
public static final String EXTRA_ALLOW_STREAM_THIS_TIME = "extra.de.danoeh.antennapod.core.service.allowStream";
public static final String EXTRA_ALLOW_STREAM_ALWAYS = "extra.de.danoeh.antennapod.core.service.allowStreamAlways";
/**
* True if playback should be started immediately after media has been
* prepared.
*/
public static final String EXTRA_START_WHEN_PREPARED = "extra.de.danoeh.antennapod.core.service.startWhenPrepared";
public static final String EXTRA_PREPARE_IMMEDIATELY = "extra.de.danoeh.antennapod.core.service.prepareImmediately";
public static final String ACTION_PLAYER_STATUS_CHANGED = "action.de.danoeh.antennapod.core.service.playerStatusChanged";
private static final String AVRCP_ACTION_PLAYER_STATUS_CHANGED = "com.android.music.playstatechanged";
private static final String AVRCP_ACTION_META_CHANGED = "com.android.music.metachanged";
public static final String ACTION_PLAYER_NOTIFICATION = "action.de.danoeh.antennapod.core.service.playerNotification";
public static final String EXTRA_NOTIFICATION_CODE = "extra.de.danoeh.antennapod.core.service.notificationCode";
public static final String EXTRA_NOTIFICATION_TYPE = "extra.de.danoeh.antennapod.core.service.notificationType";
/**
* If the PlaybackService receives this action, it will stop playback and
* try to shutdown.
*/
public static final String ACTION_SHUTDOWN_PLAYBACK_SERVICE = "action.de.danoeh.antennapod.core.service.actionShutdownPlaybackService";
/**
* If the PlaybackService receives this action, it will end playback of the
* current episode and load the next episode if there is one available.
*/
public static final String ACTION_SKIP_CURRENT_EPISODE = "action.de.danoeh.antennapod.core.service.skipCurrentEpisode";
/**
* If the PlaybackService receives this action, it will pause playback.
*/
public static final String ACTION_PAUSE_PLAY_CURRENT_EPISODE = "action.de.danoeh.antennapod.core.service.pausePlayCurrentEpisode";
/**
* Custom action used by Android Wear, Android Auto
*/
private static final String CUSTOM_ACTION_FAST_FORWARD = "action.de.danoeh.antennapod.core.service.fastForward";
private static final String CUSTOM_ACTION_REWIND = "action.de.danoeh.antennapod.core.service.rewind";
/**
* Used in NOTIFICATION_TYPE_RELOAD.
*/
public static final int EXTRA_CODE_AUDIO = 1;
public static final int EXTRA_CODE_VIDEO = 2;
public static final int EXTRA_CODE_CAST = 3;
public static final int NOTIFICATION_TYPE_ERROR = 0;
public static final int NOTIFICATION_TYPE_BUFFER_UPDATE = 2;
/**
* Receivers of this intent should update their information about the currently playing media.
*/
public static final int NOTIFICATION_TYPE_RELOAD = 3;
/**
* The state of the sleeptimer changed.
*/
public static final int NOTIFICATION_TYPE_SLEEPTIMER_UPDATE = 4;
public static final int NOTIFICATION_TYPE_BUFFER_START = 5;
public static final int NOTIFICATION_TYPE_BUFFER_END = 6;
/**
* No more episodes are going to be played.
*/
public static final int NOTIFICATION_TYPE_PLAYBACK_END = 7;
/**
* Playback speed has changed
*/
public static final int NOTIFICATION_TYPE_PLAYBACK_SPEED_CHANGE = 8;
/**
* Returned by getPositionSafe() or getDurationSafe() if the playbackService
* is in an invalid state.
*/
public static final int INVALID_TIME = -1;
/**
* Is true if service is running.
*/
public static boolean isRunning = false;
/**
* Is true if the service was running, but paused due to headphone disconnect
*/
private static boolean transientPause = false;
/**
* Is true if a Cast Device is connected to the service.
*/
private static volatile boolean isCasting = false;
private PlaybackServiceMediaPlayer mediaPlayer;
private PlaybackServiceTaskManager taskManager;
private PlaybackServiceFlavorHelper flavorHelper;
private PlaybackServiceStateManager stateManager;
private Disposable positionEventTimer;
private PlaybackServiceNotificationBuilder notificationBuilder;
private String autoSkippedFeedMediaId = null;
/**
* Used for Lollipop notifications, Android Wear, and Android Auto.
*/
private MediaSessionCompat mediaSession;
private static volatile MediaType currentMediaType = MediaType.UNKNOWN;
private final IBinder mBinder = new LocalBinder();
public class LocalBinder extends Binder {
public PlaybackService getService() {
return PlaybackService.this;
}
}
@Override
public boolean onUnbind(Intent intent) {
Log.d(TAG, "Received onUnbind event");
return super.onUnbind(intent);
}
/**
* Returns an intent which starts an audio or video player, depending on the
* type of media that is being played. If the playback service is not
* running, the type of the last played media will be looked up.
*/
public static Intent getPlayerActivityIntent(Context context) {
boolean showVideoPlayer;
if (isRunning) {
showVideoPlayer = currentMediaType == MediaType.VIDEO && !isCasting;
} else {
showVideoPlayer = PlaybackPreferences.getCurrentEpisodeIsVideo();
}
if (showVideoPlayer) {
return new VideoPlayerActivityStarter(context).getIntent();
} else {
return new MainActivityStarter(context).withOpenPlayer().getIntent();
}
}
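/*
 * A minimal usage sketch, e.g. from a widget or notification tap handler
 * (the context variable is assumed to be any valid Context):
 *
 *   context.startActivity(PlaybackService.getPlayerActivityIntent(context));
 */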
/**
* Same as {@link #getPlayerActivityIntent(Context)}, but here the type of activity
* depends on the FeedMedia that is provided as an argument.
*/
public static Intent getPlayerActivityIntent(Context context, Playable media) {
if (media.getMediaType() == MediaType.VIDEO && !isCasting) {
return new VideoPlayerActivityStarter(context).getIntent();
} else {
return new MainActivityStarter(context).withOpenPlayer().getIntent();
}
}
@Override
public void onCreate() {
super.onCreate();
Log.d(TAG, "Service created.");
isRunning = true;
stateManager = new PlaybackServiceStateManager(this);
notificationBuilder = new PlaybackServiceNotificationBuilder(this);
registerReceiver(autoStateUpdated, new IntentFilter("com.google.android.gms.car.media.STATUS"));
registerReceiver(headsetDisconnected, new IntentFilter(Intent.ACTION_HEADSET_PLUG));
registerReceiver(shutdownReceiver, new IntentFilter(ACTION_SHUTDOWN_PLAYBACK_SERVICE));
registerReceiver(bluetoothStateUpdated, new IntentFilter(BluetoothA2dp.ACTION_CONNECTION_STATE_CHANGED));
registerReceiver(audioBecomingNoisy, new IntentFilter(AudioManager.ACTION_AUDIO_BECOMING_NOISY));
registerReceiver(skipCurrentEpisodeReceiver, new IntentFilter(ACTION_SKIP_CURRENT_EPISODE));
registerReceiver(pausePlayCurrentEpisodeReceiver, new IntentFilter(ACTION_PAUSE_PLAY_CURRENT_EPISODE));
EventBus.getDefault().register(this);
taskManager = new PlaybackServiceTaskManager(this, taskManagerCallback);
flavorHelper = new PlaybackServiceFlavorHelper(PlaybackService.this, flavorHelperCallback);
PreferenceManager.getDefaultSharedPreferences(this)
.registerOnSharedPreferenceChangeListener(prefListener);
ComponentName eventReceiver = new ComponentName(getApplicationContext(), MediaButtonReceiver.class);
Intent mediaButtonIntent = new Intent(Intent.ACTION_MEDIA_BUTTON);
mediaButtonIntent.setComponent(eventReceiver);
PendingIntent buttonReceiverIntent = PendingIntent.getBroadcast(this, 0, mediaButtonIntent, PendingIntent.FLAG_UPDATE_CURRENT);
mediaSession = new MediaSessionCompat(getApplicationContext(), TAG, eventReceiver, buttonReceiverIntent);
setSessionToken(mediaSession.getSessionToken());
try {
mediaSession.setCallback(sessionCallback);
mediaSession.setFlags(MediaSessionCompat.FLAG_HANDLES_MEDIA_BUTTONS | MediaSessionCompat.FLAG_HANDLES_TRANSPORT_CONTROLS);
} catch (NullPointerException npe) {
// on some devices (Huawei) setting active can cause a NullPointerException
// even with correct use of the api.
// See http://stackoverflow.com/questions/31556679/android-huawei-mediassessioncompat
// and https://plus.google.com/+IanLake/posts/YgdTkKFxz7d
Log.e(TAG, "NullPointerException while setting up MediaSession");
npe.printStackTrace();
}
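// Load the queue on a background thread and publish it to the media session
// once ready, so that service creation is not blocked by database access.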
Single.<List<MediaSessionCompat.QueueItem>>create(emitter -> {
List<MediaSessionCompat.QueueItem> queueItems = new ArrayList<>();
for (FeedItem feedItem : taskManager.getQueue()) {
if (feedItem.getMedia() != null) {
MediaDescriptionCompat mediaDescription = feedItem.getMedia().getMediaItem().getDescription();
queueItems.add(new MediaSessionCompat.QueueItem(mediaDescription, feedItem.getId()));
}
}
emitter.onSuccess(queueItems);
})
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(queueItems -> mediaSession.setQueue(queueItems), Throwable::printStackTrace);
flavorHelper.initializeMediaPlayer(PlaybackService.this);
mediaSession.setActive(true);
EventBus.getDefault().post(new ServiceEvent(ServiceEvent.Action.SERVICE_STARTED));
}
@Override
public void onDestroy() {
super.onDestroy();
Log.d(TAG, "Service is about to be destroyed");
if (notificationBuilder.getPlayerStatus() == PlayerStatus.PLAYING) {
notificationBuilder.setPlayerStatus(PlayerStatus.STOPPED);
NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this);
notificationManager.notify(R.id.notification_playing, notificationBuilder.build());
}
stateManager.stopForeground(!UserPreferences.isPersistNotify());
isRunning = false;
currentMediaType = MediaType.UNKNOWN;
cancelPositionObserver();
PreferenceManager.getDefaultSharedPreferences(this).unregisterOnSharedPreferenceChangeListener(prefListener);
if (mediaSession != null) {
mediaSession.release();
}
unregisterReceiver(autoStateUpdated);
unregisterReceiver(headsetDisconnected);
unregisterReceiver(shutdownReceiver);
unregisterReceiver(bluetoothStateUpdated);
unregisterReceiver(audioBecomingNoisy);
unregisterReceiver(skipCurrentEpisodeReceiver);
unregisterReceiver(pausePlayCurrentEpisodeReceiver);
flavorHelper.removeCastConsumer();
flavorHelper.unregisterWifiBroadcastReceiver();
mediaPlayer.shutdown();
taskManager.shutdown();
}
@Override
public BrowserRoot onGetRoot(@NonNull String clientPackageName, int clientUid, Bundle rootHints) {
Log.d(TAG, "OnGetRoot: clientPackageName=" + clientPackageName +
"; clientUid=" + clientUid + " ; rootHints=" + rootHints);
return new BrowserRoot(
getResources().getString(R.string.app_name), // Name visible in Android Auto
null); // Bundle of optional extras
}
private MediaBrowserCompat.MediaItem createBrowsableMediaItemForRoot() {
Uri uri = new Uri.Builder()
.scheme(ContentResolver.SCHEME_ANDROID_RESOURCE)
.authority(getResources().getResourcePackageName(R.drawable.ic_playlist_black))
.appendPath(getResources().getResourceTypeName(R.drawable.ic_playlist_black))
.appendPath(getResources().getResourceEntryName(R.drawable.ic_playlist_black))
.build();
String subtitle = "";
try {
int count = taskManager.getQueue().size();
subtitle = getResources().getQuantityString(R.plurals.num_episodes, count, count);
} catch (InterruptedException e) {
e.printStackTrace();
}
MediaDescriptionCompat description = new MediaDescriptionCompat.Builder()
.setIconUri(uri)
.setMediaId(getResources().getString(R.string.queue_label))
.setTitle(getResources().getString(R.string.queue_label))
.setSubtitle(subtitle)
.build();
return new MediaBrowserCompat.MediaItem(description,
MediaBrowserCompat.MediaItem.FLAG_BROWSABLE);
}
private MediaBrowserCompat.MediaItem createBrowsableMediaItemForFeed(Feed feed) {
MediaDescriptionCompat.Builder builder = new MediaDescriptionCompat.Builder()
.setMediaId("FeedId:" + feed.getId())
.setTitle(feed.getTitle())
.setDescription(feed.getDescription())
.setSubtitle(feed.getCustomTitle());
if (feed.getImageUrl() != null) {
builder.setIconUri(Uri.parse(feed.getImageUrl()));
}
if (feed.getLink() != null) {
builder.setMediaUri(Uri.parse(feed.getLink()));
}
MediaDescriptionCompat description = builder.build();
return new MediaBrowserCompat.MediaItem(description,
MediaBrowserCompat.MediaItem.FLAG_BROWSABLE);
}
@Override
public void onLoadChildren(@NonNull String parentId,
@NonNull Result<List<MediaBrowserCompat.MediaItem>> result) {
Log.d(TAG, "OnLoadChildren: parentMediaId=" + parentId);
List<MediaBrowserCompat.MediaItem> mediaItems = new ArrayList<>();
if (parentId.equals(getResources().getString(R.string.app_name))) {
// Root List
try {
if (!(taskManager.getQueue().isEmpty())) {
mediaItems.add(createBrowsableMediaItemForRoot());
}
} catch (InterruptedException e) {
e.printStackTrace();
}
List<Feed> feeds = DBReader.getFeedList();
for (Feed feed : feeds) {
mediaItems.add(createBrowsableMediaItemForFeed(feed));
}
} else if (parentId.equals(getResources().getString(R.string.queue_label))) {
// Child List
try {
for (FeedItem feedItem : taskManager.getQueue()) {
FeedMedia media = feedItem.getMedia();
if (media != null) {
mediaItems.add(media.getMediaItem());
}
}
} catch (InterruptedException e) {
e.printStackTrace();
}
} else if (parentId.startsWith("FeedId:")) {
long feedId = Long.parseLong(parentId.split(":")[1]);
List<FeedItem> feedItems = DBReader.getFeedItemList(DBReader.getFeed(feedId));
for (FeedItem feedItem : feedItems) {
if (feedItem.getMedia() != null && feedItem.getMedia().getMediaItem() != null) {
mediaItems.add(feedItem.getMedia().getMediaItem());
}
}
}
result.sendResult(mediaItems);
}
@Override
public IBinder onBind(Intent intent) {
Log.d(TAG, "Received onBind event");
if (intent.getAction() != null && TextUtils.equals(intent.getAction(), MediaBrowserServiceCompat.SERVICE_INTERFACE)) {
return super.onBind(intent);
} else {
return mBinder;
}
}
@Override
public int onStartCommand(Intent intent, int flags, int startId) {
super.onStartCommand(intent, flags, startId);
Log.d(TAG, "OnStartCommand called");
stateManager.startForeground(R.id.notification_playing, notificationBuilder.build());
NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this);
notificationManager.cancel(R.id.notification_streaming_confirmation);
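        // A start intent carries either a media-button keycode, a Playable to load,
        // or a cast-disconnect flag; anything else is rejected below.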
final int keycode = intent.getIntExtra(MediaButtonReceiver.EXTRA_KEYCODE, -1);
final boolean hardwareButton = intent.getBooleanExtra(MediaButtonReceiver.EXTRA_HARDWAREBUTTON, false);
final boolean castDisconnect = intent.getBooleanExtra(EXTRA_CAST_DISCONNECT, false);
Playable playable = intent.getParcelableExtra(EXTRA_PLAYABLE);
if (keycode == -1 && playable == null && !castDisconnect) {
Log.e(TAG, "PlaybackService was started with no arguments");
stateManager.stopService();
return Service.START_NOT_STICKY;
}
if ((flags & Service.START_FLAG_REDELIVERY) != 0) {
Log.d(TAG, "onStartCommand is a redelivered intent, calling stopForeground now.");
stateManager.stopForeground(true);
} else {
if (keycode != -1) {
boolean notificationButton;
if (hardwareButton) {
Log.d(TAG, "Received hardware button event");
notificationButton = false;
} else {
Log.d(TAG, "Received media button event");
notificationButton = true;
}
boolean handled = handleKeycode(keycode, notificationButton);
if (!handled && !stateManager.hasReceivedValidStartCommand()) {
stateManager.stopService();
return Service.START_NOT_STICKY;
}
} else if (!flavorHelper.castDisconnect(castDisconnect) && playable != null) {
stateManager.validStartCommandWasReceived();
boolean stream = intent.getBooleanExtra(EXTRA_SHOULD_STREAM, true);
boolean allowStreamThisTime = intent.getBooleanExtra(EXTRA_ALLOW_STREAM_THIS_TIME, false);
boolean allowStreamAlways = intent.getBooleanExtra(EXTRA_ALLOW_STREAM_ALWAYS, false);
boolean startWhenPrepared = intent.getBooleanExtra(EXTRA_START_WHEN_PREPARED, false);
boolean prepareImmediately = intent.getBooleanExtra(EXTRA_PREPARE_IMMEDIATELY, false);
sendNotificationBroadcast(NOTIFICATION_TYPE_RELOAD, 0);
if (allowStreamAlways) {
UserPreferences.setAllowMobileStreaming(true);
}
boolean localFeed = URLUtil.isContentUrl(playable.getStreamUrl());
if (stream && !NetworkUtils.isStreamingAllowed() && !allowStreamThisTime && !localFeed) {
displayStreamingNotAllowedNotification(intent);
PlaybackPreferences.writeNoMediaPlaying();
stateManager.stopService();
return Service.START_NOT_STICKY;
}
Observable.fromCallable(
() -> {
if (playable instanceof FeedMedia) {
return DBReader.getFeedMedia(((FeedMedia) playable).getId());
} else {
return playable;
}
})
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(
playableLoaded -> {
if (!playable.getIdentifier().equals(
PlaybackPreferences.getCurrentlyPlayingFeedMediaId())) {
PlaybackPreferences.clearCurrentlyPlayingTemporaryPlaybackSpeed();
}
mediaPlayer.playMediaObject(playableLoaded, stream, startWhenPrepared,
prepareImmediately);
addPlayableToQueue(playableLoaded);
}, error -> {
Log.d(TAG, "Playable was not found. Stopping service.");
error.printStackTrace();
stateManager.stopService();
});
return Service.START_NOT_STICKY;
} else {
Log.d(TAG, "Did not handle intent to PlaybackService: " + intent);
Log.d(TAG, "Extras: " + intent.getExtras());
}
}
return Service.START_NOT_STICKY;
}
private void skipIntro(Playable playable) {
        if (!(playable instanceof FeedMedia)) {
return;
}
FeedMedia feedMedia = (FeedMedia) playable;
FeedPreferences preferences = feedMedia.getItem().getFeed().getPreferences();
int skipIntro = preferences.getFeedSkipIntro();
Context context = getApplicationContext();
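        // getFeedSkipIntro() is stored in seconds, while playback positions are in milliseconds.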
if (skipIntro > 0 && playable.getPosition() < skipIntro * 1000) {
int duration = getDuration();
if (skipIntro * 1000 < duration || duration <= 0) {
Log.d(TAG, "skipIntro " + playable.getEpisodeTitle());
mediaPlayer.seekTo(skipIntro * 1000);
String skipIntroMesg = context.getString(R.string.pref_feed_skip_intro_toast,
skipIntro);
Toast toast = Toast.makeText(context, skipIntroMesg,
Toast.LENGTH_LONG);
toast.show();
}
}
}
private void displayStreamingNotAllowedNotification(Intent originalIntent) {
Intent intentAllowThisTime = new Intent(originalIntent);
intentAllowThisTime.setAction(EXTRA_ALLOW_STREAM_THIS_TIME);
intentAllowThisTime.putExtra(EXTRA_ALLOW_STREAM_THIS_TIME, true);
PendingIntent pendingIntentAllowThisTime;
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.O) {
pendingIntentAllowThisTime = PendingIntent.getForegroundService(this,
R.id.pending_intent_allow_stream_this_time, intentAllowThisTime, PendingIntent.FLAG_UPDATE_CURRENT);
} else {
pendingIntentAllowThisTime = PendingIntent.getService(this,
R.id.pending_intent_allow_stream_this_time, intentAllowThisTime, PendingIntent.FLAG_UPDATE_CURRENT);
}
Intent intentAlwaysAllow = new Intent(intentAllowThisTime);
intentAlwaysAllow.setAction(EXTRA_ALLOW_STREAM_ALWAYS);
intentAlwaysAllow.putExtra(EXTRA_ALLOW_STREAM_ALWAYS, true);
PendingIntent pendingIntentAlwaysAllow;
if (android.os.Build.VERSION.SDK_INT >= android.os.Build.VERSION_CODES.O) {
pendingIntentAlwaysAllow = PendingIntent.getForegroundService(this,
R.id.pending_intent_allow_stream_always, intentAlwaysAllow, PendingIntent.FLAG_UPDATE_CURRENT);
} else {
pendingIntentAlwaysAllow = PendingIntent.getService(this,
R.id.pending_intent_allow_stream_always, intentAlwaysAllow, PendingIntent.FLAG_UPDATE_CURRENT);
}
NotificationCompat.Builder builder = new NotificationCompat.Builder(this,
NotificationUtils.CHANNEL_ID_USER_ACTION)
.setSmallIcon(R.drawable.ic_notification_stream)
.setContentTitle(getString(R.string.confirm_mobile_streaming_notification_title))
.setContentText(getString(R.string.confirm_mobile_streaming_notification_message))
.setStyle(new NotificationCompat.BigTextStyle()
.bigText(getString(R.string.confirm_mobile_streaming_notification_message)))
.setPriority(NotificationCompat.PRIORITY_DEFAULT)
.setContentIntent(pendingIntentAllowThisTime)
.addAction(R.drawable.ic_notification_stream,
getString(R.string.confirm_mobile_streaming_button_once),
pendingIntentAllowThisTime)
.addAction(R.drawable.ic_notification_stream,
getString(R.string.confirm_mobile_streaming_button_always),
pendingIntentAlwaysAllow)
.setAutoCancel(true);
NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this);
notificationManager.notify(R.id.notification_streaming_confirmation, builder.build());
}
    /**
     * Handles media button events.
     *
     * @return true if the keycode was handled
     */
private boolean handleKeycode(int keycode, boolean notificationButton) {
Log.d(TAG, "Handling keycode: " + keycode);
final PlaybackServiceMediaPlayer.PSMPInfo info = mediaPlayer.getPSMPInfo();
final PlayerStatus status = info.playerStatus;
switch (keycode) {
case KeyEvent.KEYCODE_HEADSETHOOK:
case KeyEvent.KEYCODE_MEDIA_PLAY_PAUSE:
if (status == PlayerStatus.PLAYING) {
mediaPlayer.pause(!UserPreferences.isPersistNotify(), false);
} else if (status == PlayerStatus.PAUSED || status == PlayerStatus.PREPARED) {
mediaPlayer.resume();
} else if (status == PlayerStatus.PREPARING) {
mediaPlayer.setStartWhenPrepared(!mediaPlayer.isStartWhenPrepared());
} else if (status == PlayerStatus.INITIALIZED) {
mediaPlayer.setStartWhenPrepared(true);
mediaPlayer.prepare();
} else if (mediaPlayer.getPlayable() == null) {
startPlayingFromPreferences();
} else {
return false;
}
taskManager.restartSleepTimer();
return true;
case KeyEvent.KEYCODE_MEDIA_PLAY:
if (status == PlayerStatus.PAUSED || status == PlayerStatus.PREPARED) {
mediaPlayer.resume();
} else if (status == PlayerStatus.INITIALIZED) {
mediaPlayer.setStartWhenPrepared(true);
mediaPlayer.prepare();
} else if (mediaPlayer.getPlayable() == null) {
startPlayingFromPreferences();
} else {
return false;
}
taskManager.restartSleepTimer();
return true;
case KeyEvent.KEYCODE_MEDIA_PAUSE:
if (status == PlayerStatus.PLAYING) {
mediaPlayer.pause(!UserPreferences.isPersistNotify(), false);
return true;
}
return false;
case KeyEvent.KEYCODE_MEDIA_NEXT:
if (!notificationButton) {
// Handle remapped button as notification button which is not remapped again.
return handleKeycode(UserPreferences.getHardwareForwardButton(), true);
} else if (getStatus() == PlayerStatus.PLAYING || getStatus() == PlayerStatus.PAUSED) {
mediaPlayer.skip();
return true;
}
return false;
case KeyEvent.KEYCODE_MEDIA_FAST_FORWARD:
if (getStatus() == PlayerStatus.PLAYING || getStatus() == PlayerStatus.PAUSED) {
mediaPlayer.seekDelta(UserPreferences.getFastForwardSecs() * 1000);
return true;
}
return false;
case KeyEvent.KEYCODE_MEDIA_PREVIOUS:
if (!notificationButton) {
// Handle remapped button as notification button which is not remapped again.
return handleKeycode(UserPreferences.getHardwarePreviousButton(), true);
} else if (getStatus() == PlayerStatus.PLAYING || getStatus() == PlayerStatus.PAUSED) {
mediaPlayer.seekTo(0);
return true;
}
return false;
case KeyEvent.KEYCODE_MEDIA_REWIND:
if (getStatus() == PlayerStatus.PLAYING || getStatus() == PlayerStatus.PAUSED) {
mediaPlayer.seekDelta(-UserPreferences.getRewindSecs() * 1000);
return true;
}
return false;
case KeyEvent.KEYCODE_MEDIA_STOP:
if (status == PlayerStatus.PLAYING) {
mediaPlayer.pause(true, true);
}
stateManager.stopForeground(true); // gets rid of persistent notification
return true;
default:
Log.d(TAG, "Unhandled key code: " + keycode);
if (info.playable != null && info.playerStatus == PlayerStatus.PLAYING) { // only notify the user about an unknown key event if it is actually doing something
String message = String.format(getResources().getString(R.string.unknown_media_key), keycode);
Toast.makeText(this, message, Toast.LENGTH_SHORT).show();
}
}
return false;
}
private void startPlayingFromPreferences() {
Observable.fromCallable(() -> PlayableUtils.createInstanceFromPreferences(getApplicationContext()))
.subscribeOn(Schedulers.io())
.observeOn(AndroidSchedulers.mainThread())
.subscribe(
playable -> {
boolean localFeed = URLUtil.isContentUrl(playable.getStreamUrl());
if (PlaybackPreferences.getCurrentEpisodeIsStream()
&& !NetworkUtils.isStreamingAllowed() && !localFeed) {
displayStreamingNotAllowedNotification(
new PlaybackServiceStarter(this, playable)
.prepareImmediately(true)
.startWhenPrepared(true)
.shouldStream(true)
.getIntent());
PlaybackPreferences.writeNoMediaPlaying();
stateManager.stopService();
return;
}
mediaPlayer.playMediaObject(playable, PlaybackPreferences.getCurrentEpisodeIsStream(),
true, true);
stateManager.validStartCommandWasReceived();
PlaybackService.this.updateMediaSessionMetadata(playable);
addPlayableToQueue(playable);
}, error -> {
Log.d(TAG, "Playable was not loaded from preferences. Stopping service.");
error.printStackTrace();
stateManager.stopService();
});
}
/**
* Called by a mediaplayer Activity as soon as it has prepared its
* mediaplayer.
*/
public void setVideoSurface(SurfaceHolder sh) {
Log.d(TAG, "Setting display");
mediaPlayer.setVideoSurface(sh);
}
public void notifyVideoSurfaceAbandoned() {
mediaPlayer.pause(true, false);
mediaPlayer.resetVideoSurface();
setupNotification(getPlayable());
stateManager.stopForeground(!UserPreferences.isPersistNotify());
}
private final PlaybackServiceTaskManager.PSTMCallback taskManagerCallback = new PlaybackServiceTaskManager.PSTMCallback() {
@Override
public void positionSaverTick() {
saveCurrentPosition(true, null, PlaybackServiceMediaPlayer.INVALID_TIME);
}
@Override
public void onSleepTimerAlmostExpired(long timeLeft) {
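            // Fade the volume out over the final seconds of the sleep timer:
            // the table is indexed by the number of whole seconds left.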
final float[] multiplicators = {0.1f, 0.2f, 0.3f, 0.3f, 0.3f, 0.4f, 0.4f, 0.4f, 0.6f, 0.8f};
            float multiplicator = multiplicators[Math.min(multiplicators.length - 1, Math.max(0, (int) (timeLeft / 1000)))];
Log.d(TAG, "onSleepTimerAlmostExpired: " + multiplicator);
float leftVolume = multiplicator * UserPreferences.getLeftVolume();
float rightVolume = multiplicator * UserPreferences.getRightVolume();
mediaPlayer.setVolume(leftVolume, rightVolume);
}
@Override
public void onSleepTimerExpired() {
mediaPlayer.pause(true, true);
float leftVolume = UserPreferences.getLeftVolume();
float rightVolume = UserPreferences.getRightVolume();
mediaPlayer.setVolume(leftVolume, rightVolume);
sendNotificationBroadcast(NOTIFICATION_TYPE_SLEEPTIMER_UPDATE, 0);
}
@Override
public void onSleepTimerReset() {
float leftVolume = UserPreferences.getLeftVolume();
float rightVolume = UserPreferences.getRightVolume();
mediaPlayer.setVolume(leftVolume, rightVolume);
}
@Override
public WidgetUpdater.WidgetState requestWidgetState() {
return new WidgetUpdater.WidgetState(getPlayable(), getStatus(),
getCurrentPosition(), getDuration(), getCurrentPlaybackSpeed(), isCasting());
}
@Override
public void onChapterLoaded(Playable media) {
sendNotificationBroadcast(NOTIFICATION_TYPE_RELOAD, 0);
}
};
private final PlaybackServiceMediaPlayer.PSMPCallback mediaPlayerCallback = new PlaybackServiceMediaPlayer.PSMPCallback() {
@Override
public void statusChanged(PlaybackServiceMediaPlayer.PSMPInfo newInfo) {
if (mediaPlayer != null) {
currentMediaType = mediaPlayer.getCurrentMediaType();
} else {
currentMediaType = MediaType.UNKNOWN;
}
updateMediaSession(newInfo.playerStatus);
switch (newInfo.playerStatus) {
case INITIALIZED:
PlaybackPreferences.writeMediaPlaying(mediaPlayer.getPSMPInfo().playable,
mediaPlayer.getPSMPInfo().playerStatus, mediaPlayer.isStreaming());
setupNotification(newInfo);
break;
case PREPARED:
taskManager.startChapterLoader(newInfo.playable);
break;
case PAUSED:
if ((UserPreferences.isPersistNotify() || isCasting) &&
android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
// do not remove notification on pause based on user pref and whether android version supports expanded notifications
// Change [Play] button to [Pause]
setupNotification(newInfo);
} else if (!UserPreferences.isPersistNotify() && !isCasting) {
// remove notification on pause
stateManager.stopForeground(true);
}
cancelPositionObserver();
PlaybackPreferences.writePlayerStatus(mediaPlayer.getPlayerStatus());
break;
case STOPPED:
//writePlaybackPreferencesNoMediaPlaying();
//stopService();
break;
case PLAYING:
PlaybackPreferences.writePlayerStatus(mediaPlayer.getPlayerStatus());
setupNotification(newInfo);
setupPositionObserver();
stateManager.validStartCommandWasReceived();
// set sleep timer if auto-enabled
if (newInfo.oldPlayerStatus != null && newInfo.oldPlayerStatus != PlayerStatus.SEEKING
&& SleepTimerPreferences.autoEnable() && !sleepTimerActive()) {
setSleepTimer(SleepTimerPreferences.timerMillis());
EventBus.getDefault().post(new MessageEvent(getString(R.string.sleep_timer_enabled_label),
PlaybackService.this::disableSleepTimer));
}
break;
case ERROR:
PlaybackPreferences.writeNoMediaPlaying();
stateManager.stopService();
break;
}
IntentUtils.sendLocalBroadcast(getApplicationContext(), ACTION_PLAYER_STATUS_CHANGED);
bluetoothNotifyChange(newInfo, AVRCP_ACTION_PLAYER_STATUS_CHANGED);
bluetoothNotifyChange(newInfo, AVRCP_ACTION_META_CHANGED);
taskManager.requestWidgetUpdate();
}
@Override
public void shouldStop() {
setupNotification(getPlayable()); // Stops foreground if not playing
}
@Override
public void playbackSpeedChanged(float s) {
sendNotificationBroadcast(NOTIFICATION_TYPE_PLAYBACK_SPEED_CHANGE, 0);
}
@Override
public void onBufferingUpdate(int percent) {
sendNotificationBroadcast(NOTIFICATION_TYPE_BUFFER_UPDATE, percent);
}
@Override
public void onMediaChanged(boolean reloadUI) {
Log.d(TAG, "reloadUI callback reached");
if (reloadUI) {
sendNotificationBroadcast(NOTIFICATION_TYPE_RELOAD, 0);
}
PlaybackService.this.updateMediaSessionMetadata(getPlayable());
}
@Override
public boolean onMediaPlayerInfo(int code, @StringRes int resourceId) {
switch (code) {
case MediaPlayer.MEDIA_INFO_BUFFERING_START:
sendNotificationBroadcast(NOTIFICATION_TYPE_BUFFER_START, 0);
return true;
case MediaPlayer.MEDIA_INFO_BUFFERING_END:
sendNotificationBroadcast(NOTIFICATION_TYPE_BUFFER_END, 0);
Playable playable = getPlayable();
if (getPlayable() instanceof FeedMedia
&& playable.getDuration() <= 0 && mediaPlayer.getDuration() > 0) {
// Playable is being streamed and does not have a duration specified in the feed
playable.setDuration(mediaPlayer.getDuration());
DBWriter.setFeedMedia((FeedMedia) playable);
updateMediaSessionMetadata(playable);
setupNotification(playable);
}
return true;
default:
return flavorHelper.onMediaPlayerInfo(PlaybackService.this, code, resourceId);
}
}
@Override
public boolean onMediaPlayerError(Object inObj, int what, int extra) {
final String TAG = "PlaybackSvc.onErrorLtsn";
Log.w(TAG, "An error has occured: " + what + " " + extra);
if (mediaPlayer.getPlayerStatus() == PlayerStatus.PLAYING) {
mediaPlayer.pause(true, false);
}
sendNotificationBroadcast(NOTIFICATION_TYPE_ERROR, what);
PlaybackPreferences.writeNoMediaPlaying();
stateManager.stopService();
return true;
}
@Override
public void onPostPlayback(@NonNull Playable media, boolean ended, boolean skipped,
boolean playingNext) {
PlaybackService.this.onPostPlayback(media, ended, skipped, playingNext);
}
@Override
public void onPlaybackStart(@NonNull Playable playable, int position) {
taskManager.startWidgetUpdater();
if (position != PlaybackServiceMediaPlayer.INVALID_TIME) {
playable.setPosition(position);
} else {
skipIntro(playable);
}
playable.onPlaybackStart();
taskManager.startPositionSaver();
}
@Override
public void onPlaybackPause(Playable playable, int position) {
taskManager.cancelPositionSaver();
cancelPositionObserver();
saveCurrentPosition(position == PlaybackServiceMediaPlayer.INVALID_TIME || playable == null,
playable, position);
taskManager.cancelWidgetUpdater();
if (playable != null) {
if (playable instanceof FeedMedia) {
SyncService.enqueueEpisodePlayed(getApplicationContext(), (FeedMedia) playable, false);
}
playable.onPlaybackPause(getApplicationContext());
}
}
@Override
public Playable getNextInQueue(Playable currentMedia) {
return PlaybackService.this.getNextInQueue(currentMedia);
}
@Override
public void onPlaybackEnded(MediaType mediaType, boolean stopPlaying) {
PlaybackService.this.onPlaybackEnded(mediaType, stopPlaying);
}
};
private Playable getNextInQueue(final Playable currentMedia) {
if (!(currentMedia instanceof FeedMedia)) {
Log.d(TAG, "getNextInQueue(), but playable not an instance of FeedMedia, so not proceeding");
return null;
}
Log.d(TAG, "getNextInQueue()");
FeedMedia media = (FeedMedia) currentMedia;
if (media.getItem() == null) {
media.setItem(DBReader.getFeedItem(media.getItemId()));
}
FeedItem item = media.getItem();
if (item == null) {
Log.w(TAG, "getNextInQueue() with FeedMedia object whose FeedItem is null");
return null;
}
FeedItem nextItem;
try {
final List<FeedItem> queue = taskManager.getQueue();
nextItem = DBTasks.getQueueSuccessorOfItem(item.getId(), queue);
} catch (InterruptedException e) {
Log.e(TAG, "Error handling the queue in order to retrieve the next item", e);
return null;
}
if (nextItem == null || nextItem.getMedia() == null) {
return null;
}
if (!nextItem.getMedia().localFileAvailable() && !NetworkUtils.isStreamingAllowed()
&& UserPreferences.isFollowQueue() && !nextItem.getFeed().isLocalFeed()) {
displayStreamingNotAllowedNotification(
new PlaybackServiceStarter(this, nextItem.getMedia())
.prepareImmediately(true)
.startWhenPrepared(true)
.shouldStream(true)
.getIntent());
PlaybackPreferences.writeNoMediaPlaying();
stateManager.stopService();
return null;
}
return nextItem.getMedia();
}
/**
* Set of instructions to be performed when playback ends.
*/
private void onPlaybackEnded(MediaType mediaType, boolean stopPlaying) {
Log.d(TAG, "Playback ended");
PlaybackPreferences.clearCurrentlyPlayingTemporaryPlaybackSpeed();
if (stopPlaying) {
taskManager.cancelPositionSaver();
cancelPositionObserver();
PlaybackPreferences.writeNoMediaPlaying();
if (!isCasting) {
stateManager.stopForeground(true);
stateManager.stopService();
}
}
if (mediaType == null) {
sendNotificationBroadcast(NOTIFICATION_TYPE_PLAYBACK_END, 0);
} else {
sendNotificationBroadcast(NOTIFICATION_TYPE_RELOAD,
isCasting ? EXTRA_CODE_CAST :
(mediaType == MediaType.VIDEO) ? EXTRA_CODE_VIDEO : EXTRA_CODE_AUDIO);
}
}
/**
* This method processes the media object after its playback ended, either because it completed
* or because a different media object was selected for playback.
* <p>
 * Even though these tasks aren't supposed to be resource intensive, it is good
 * practice to call this method on a background thread.
*
* @param playable the media object that was playing. It is assumed that its position
* property was updated before this method was called.
* @param ended if true, it signals that {@param playable} was played until its end.
* In such case, the position property of the media becomes irrelevant for
* most of the tasks (although it's still a good practice to keep it
* accurate).
* @param skipped if the user pressed a skip >| button.
* @param playingNext if true, it means another media object is being loaded in place of this
* one.
* Instances when we'd set it to false would be when we're not following the
* queue or when the queue has ended.
*/
private void onPostPlayback(final Playable playable, boolean ended, boolean skipped,
boolean playingNext) {
if (playable == null) {
Log.e(TAG, "Cannot do post-playback processing: media was null");
return;
}
Log.d(TAG, "onPostPlayback(): media=" + playable.getEpisodeTitle());
if (!(playable instanceof FeedMedia)) {
Log.d(TAG, "Not doing post-playback processing: media not of type FeedMedia");
if (ended) {
playable.onPlaybackCompleted(getApplicationContext());
} else {
playable.onPlaybackPause(getApplicationContext());
}
return;
}
FeedMedia media = (FeedMedia) playable;
FeedItem item = media.getItem();
boolean smartMarkAsPlayed = FeedItemUtil.hasAlmostEnded(media);
if (!ended && smartMarkAsPlayed) {
Log.d(TAG, "smart mark as played");
}
boolean autoSkipped = false;
if (autoSkippedFeedMediaId != null && autoSkippedFeedMediaId.equals(item.getIdentifyingValue())) {
autoSkippedFeedMediaId = null;
autoSkipped = true;
}
if (ended || smartMarkAsPlayed) {
SyncService.enqueueEpisodePlayed(getApplicationContext(), media, true);
media.onPlaybackCompleted(getApplicationContext());
} else {
SyncService.enqueueEpisodePlayed(getApplicationContext(), media, false);
media.onPlaybackPause(getApplicationContext());
}
if (item != null) {
if (ended || smartMarkAsPlayed
|| autoSkipped
|| (skipped && !UserPreferences.shouldSkipKeepEpisode())) {
// only mark the item as played if we're not keeping it anyways
DBWriter.markItemPlayed(item, FeedItem.PLAYED, ended);
// don't know if it actually matters to not autodownload when smart mark as played is triggered
DBWriter.removeQueueItem(PlaybackService.this, ended, item);
// Delete episode if enabled
FeedPreferences.AutoDeleteAction action =
item.getFeed().getPreferences().getCurrentAutoDelete();
boolean shouldAutoDelete = action == FeedPreferences.AutoDeleteAction.YES
|| (action == FeedPreferences.AutoDeleteAction.GLOBAL && UserPreferences.isAutoDelete());
if (shouldAutoDelete && (!item.isTagged(FeedItem.TAG_FAVORITE)
|| !UserPreferences.shouldFavoriteKeepEpisode())) {
DBWriter.deleteFeedMediaOfItem(PlaybackService.this, media.getId());
Log.d(TAG, "Episode Deleted");
}
}
}
if (ended || skipped || playingNext) {
DBWriter.addItemToPlaybackHistory(media);
}
}
public void setSleepTimer(long waitingTime) {
Log.d(TAG, "Setting sleep timer to " + waitingTime + " milliseconds");
taskManager.setSleepTimer(waitingTime);
sendNotificationBroadcast(NOTIFICATION_TYPE_SLEEPTIMER_UPDATE, 0);
}
public void disableSleepTimer() {
taskManager.disableSleepTimer();
sendNotificationBroadcast(NOTIFICATION_TYPE_SLEEPTIMER_UPDATE, 0);
}
private void sendNotificationBroadcast(int type, int code) {
Intent intent = new Intent(ACTION_PLAYER_NOTIFICATION);
intent.putExtra(EXTRA_NOTIFICATION_TYPE, type);
intent.putExtra(EXTRA_NOTIFICATION_CODE, code);
intent.setPackage(getPackageName());
sendBroadcast(intent);
}
private void skipEndingIfNecessary() {
Playable playable = mediaPlayer.getPlayable();
        if (!(playable instanceof FeedMedia)) {
return;
}
int duration = getDuration();
int remainingTime = duration - getCurrentPosition();
FeedMedia feedMedia = (FeedMedia) playable;
FeedPreferences preferences = feedMedia.getItem().getFeed().getPreferences();
int skipEnd = preferences.getFeedSkipEnding();
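        // The position observer ticks once per second, and playback advances about
        // getCurrentPlaybackSpeed() * 1000 media-milliseconds per wall-clock second,
        // so skip as soon as the ending region would be reached before the next tick.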
if (skipEnd > 0
&& skipEnd * 1000 < getDuration()
&& (remainingTime - (skipEnd * 1000) > 0)
&& ((remainingTime - skipEnd * 1000) < (getCurrentPlaybackSpeed() * 1000))) {
Log.d(TAG, "skipEndingIfNecessary: Skipping the remaining " + remainingTime + " " + skipEnd * 1000 + " speed " + getCurrentPlaybackSpeed());
Context context = getApplicationContext();
String skipMesg = context.getString(R.string.pref_feed_skip_ending_toast, skipEnd);
Toast toast = Toast.makeText(context, skipMesg, Toast.LENGTH_LONG);
toast.show();
this.autoSkippedFeedMediaId = feedMedia.getItem().getIdentifyingValue();
mediaPlayer.skip();
}
}
/**
* Updates the Media Session for the corresponding status.
*
* @param playerStatus the current {@link PlayerStatus}
*/
private void updateMediaSession(final PlayerStatus playerStatus) {
PlaybackStateCompat.Builder sessionState = new PlaybackStateCompat.Builder();
int state;
if (playerStatus != null) {
switch (playerStatus) {
case PLAYING:
state = PlaybackStateCompat.STATE_PLAYING;
break;
case PREPARED:
case PAUSED:
state = PlaybackStateCompat.STATE_PAUSED;
break;
case STOPPED:
state = PlaybackStateCompat.STATE_STOPPED;
break;
case SEEKING:
state = PlaybackStateCompat.STATE_FAST_FORWARDING;
break;
case PREPARING:
case INITIALIZING:
state = PlaybackStateCompat.STATE_CONNECTING;
break;
case ERROR:
state = PlaybackStateCompat.STATE_ERROR;
break;
case INITIALIZED: // Deliberate fall-through
case INDETERMINATE:
default:
state = PlaybackStateCompat.STATE_NONE;
break;
}
} else {
state = PlaybackStateCompat.STATE_NONE;
}
sessionState.setState(state, getCurrentPosition(), getCurrentPlaybackSpeed());
long capabilities = PlaybackStateCompat.ACTION_PLAY_PAUSE
| PlaybackStateCompat.ACTION_REWIND
| PlaybackStateCompat.ACTION_PAUSE
| PlaybackStateCompat.ACTION_FAST_FORWARD
| PlaybackStateCompat.ACTION_SKIP_TO_NEXT
| PlaybackStateCompat.ACTION_SEEK_TO;
if (useSkipToPreviousForRewindInLockscreen()) {
            // Workaround to fool Android so that the lockscreen will expose a skip-to-previous
            // button, which will be used for rewind.
            // The workaround is only needed on pre-Lollipop (Android 5) devices;
            // on Android 5+, lockscreen widgets are really (compact) notifications
            // with an independent codepath.
            //
            // @see #sessionCallback - in the backing callback, the skipToPrevious implementation
            // is actually the same as rewind, so no new inconsistency is created.
            // @see #setupNotification() for the method that creates the Android 5+ lockscreen UI
            // with a (compact) notification.
capabilities = capabilities | PlaybackStateCompat.ACTION_SKIP_TO_PREVIOUS;
}
UiModeManager uiModeManager = (UiModeManager) getApplicationContext()
.getSystemService(Context.UI_MODE_SERVICE);
if (uiModeManager.getCurrentModeType() == Configuration.UI_MODE_TYPE_CAR) {
sessionState.addCustomAction(
new PlaybackStateCompat.CustomAction.Builder(
CUSTOM_ACTION_REWIND,
getString(R.string.rewind_label), R.drawable.ic_notification_fast_rewind)
.build());
sessionState.addCustomAction(
new PlaybackStateCompat.CustomAction.Builder(
CUSTOM_ACTION_FAST_FORWARD,
getString(R.string.fast_forward_label), R.drawable.ic_notification_fast_forward)
.build());
} else {
// This would give the PIP of videos a play button
capabilities = capabilities | PlaybackStateCompat.ACTION_PLAY;
if (uiModeManager.getCurrentModeType() == Configuration.UI_MODE_TYPE_WATCH) {
flavorHelper.sessionStateAddActionForWear(sessionState,
CUSTOM_ACTION_REWIND,
getString(R.string.rewind_label),
android.R.drawable.ic_media_rew);
flavorHelper.sessionStateAddActionForWear(sessionState,
CUSTOM_ACTION_FAST_FORWARD,
getString(R.string.fast_forward_label),
android.R.drawable.ic_media_ff);
flavorHelper.mediaSessionSetExtraForWear(mediaSession);
}
}
sessionState.setActions(capabilities);
mediaSession.setPlaybackState(sessionState.build());
}
private static boolean useSkipToPreviousForRewindInLockscreen() {
        // showRewindOnCompactNotification() corresponds to the "Set Lockscreen Buttons"
        // setting in the UI.
        // Hence, from the user's perspective, it configures the lockscreen buttons.
return (UserPreferences.showRewindOnCompactNotification() &&
(Build.VERSION.SDK_INT < Build.VERSION_CODES.LOLLIPOP));
}
/**
* Used by updateMediaSessionMetadata to load notification data in another thread.
*/
private Thread mediaSessionSetupThread;
private void updateMediaSessionMetadata(final Playable p) {
if (p == null || mediaSession == null) {
return;
}
if (mediaSessionSetupThread != null) {
mediaSessionSetupThread.interrupt();
}
Runnable mediaSessionSetupTask = () -> {
MediaMetadataCompat.Builder builder = new MediaMetadataCompat.Builder();
builder.putString(MediaMetadataCompat.METADATA_KEY_ARTIST, p.getFeedTitle());
builder.putString(MediaMetadataCompat.METADATA_KEY_TITLE, p.getEpisodeTitle());
builder.putString(MediaMetadataCompat.METADATA_KEY_ALBUM, p.getFeedTitle());
builder.putLong(MediaMetadataCompat.METADATA_KEY_DURATION, p.getDuration());
builder.putString(MediaMetadataCompat.METADATA_KEY_DISPLAY_TITLE, p.getEpisodeTitle());
builder.putString(MediaMetadataCompat.METADATA_KEY_DISPLAY_SUBTITLE, p.getFeedTitle());
String imageLocation = p.getImageLocation();
if (!TextUtils.isEmpty(imageLocation)) {
if (UserPreferences.setLockscreenBackground()) {
Bitmap art;
builder.putString(MediaMetadataCompat.METADATA_KEY_ART_URI, imageLocation);
try {
art = Glide.with(this)
.asBitmap()
.load(imageLocation)
.apply(RequestOptions.diskCacheStrategyOf(ApGlideSettings.AP_DISK_CACHE_STRATEGY))
.submit(Target.SIZE_ORIGINAL, Target.SIZE_ORIGINAL)
.get();
builder.putBitmap(MediaMetadataCompat.METADATA_KEY_ART, art);
} catch (Throwable tr1) {
try {
art = Glide.with(this)
.asBitmap()
.load(ImageResourceUtils.getFallbackImageLocation(p))
.apply(RequestOptions.diskCacheStrategyOf(ApGlideSettings.AP_DISK_CACHE_STRATEGY))
.submit(Target.SIZE_ORIGINAL, Target.SIZE_ORIGINAL)
.get();
builder.putBitmap(MediaMetadataCompat.METADATA_KEY_ART, art);
} catch (Throwable tr2) {
Log.e(TAG, Log.getStackTraceString(tr2));
}
}
} else if (isCasting) {
// In the absence of metadata art, the controller dialog takes care of creating it.
builder.putString(MediaMetadataCompat.METADATA_KEY_DISPLAY_ICON_URI, imageLocation);
}
}
if (!Thread.currentThread().isInterrupted() && stateManager.hasReceivedValidStartCommand()) {
mediaSession.setSessionActivity(PendingIntent.getActivity(this, R.id.pending_intent_player_activity,
PlaybackService.getPlayerActivityIntent(this), PendingIntent.FLAG_UPDATE_CURRENT));
try {
mediaSession.setMetadata(builder.build());
} catch (OutOfMemoryError e) {
Log.e(TAG, "Setting media session metadata", e);
builder.putBitmap(MediaMetadataCompat.METADATA_KEY_ART, null);
mediaSession.setMetadata(builder.build());
}
}
};
mediaSessionSetupThread = new Thread(mediaSessionSetupTask);
mediaSessionSetupThread.start();
}
/**
* Used by setupNotification to load notification data in another thread.
*/
private Thread notificationSetupThread;
/**
* Prepares notification and starts the service in the foreground.
*/
private void setupNotification(final PlaybackServiceMediaPlayer.PSMPInfo info) {
setupNotification(info.playable);
}
private synchronized void setupNotification(final Playable playable) {
Log.d(TAG, "setupNotification");
if (notificationSetupThread != null) {
notificationSetupThread.interrupt();
}
if (playable == null || mediaPlayer == null) {
Log.d(TAG, "setupNotification: playable=" + playable);
Log.d(TAG, "setupNotification: mediaPlayer=" + mediaPlayer);
if (!stateManager.hasReceivedValidStartCommand()) {
stateManager.stopService();
}
return;
}
PlayerStatus playerStatus = mediaPlayer.getPlayerStatus();
notificationBuilder.setPlayable(playable);
notificationBuilder.setMediaSessionToken(mediaSession.getSessionToken());
notificationBuilder.setPlayerStatus(playerStatus);
notificationBuilder.setCasting(isCasting);
notificationBuilder.updatePosition(getCurrentPosition(), getCurrentPlaybackSpeed());
NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this);
notificationManager.notify(R.id.notification_playing, notificationBuilder.build());
startForegroundIfPlaying(playerStatus);
if (!notificationBuilder.isIconCached()) {
notificationSetupThread = new Thread(() -> {
Log.d(TAG, "Loading notification icon");
notificationBuilder.loadIcon();
if (!Thread.currentThread().isInterrupted()) {
notificationManager.notify(R.id.notification_playing, notificationBuilder.build());
}
});
notificationSetupThread.start();
}
}
private void startForegroundIfPlaying(@NonNull PlayerStatus status) {
Log.d(TAG, "startForegroundIfPlaying: " + status);
if (stateManager.hasReceivedValidStartCommand()) {
if (isCasting || status == PlayerStatus.PLAYING || status == PlayerStatus.PREPARING
|| status == PlayerStatus.SEEKING) {
stateManager.startForeground(R.id.notification_playing, notificationBuilder.build());
Log.d(TAG, "foreground");
} else {
stateManager.stopForeground(false);
NotificationManagerCompat notificationManager = NotificationManagerCompat.from(this);
notificationManager.notify(R.id.notification_playing, notificationBuilder.build());
}
}
}
/**
* Persists the current position and last played time of the media file.
*
* @param fromMediaPlayer if true, the information is gathered from the current Media Player
* and {@param playable} and {@param position} become irrelevant.
* @param playable the playable for which the current position should be saved, unless
* {@param fromMediaPlayer} is true.
* @param position the position that should be saved, unless {@param fromMediaPlayer} is true.
*/
private synchronized void saveCurrentPosition(boolean fromMediaPlayer, Playable playable, int position) {
int duration;
if (fromMediaPlayer) {
position = getCurrentPosition();
duration = getDuration();
playable = mediaPlayer.getPlayable();
} else {
duration = playable.getDuration();
}
if (position != INVALID_TIME && duration != INVALID_TIME && playable != null) {
Log.d(TAG, "Saving current position to " + position);
PlayableUtils.saveCurrentPosition(playable, position, System.currentTimeMillis());
}
}
public boolean sleepTimerActive() {
return taskManager.isSleepTimerActive();
}
public long getSleepTimerTimeLeft() {
return taskManager.getSleepTimerTimeLeft();
}
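    /**
     * Broadcasts the current track metadata and playback state so that Bluetooth
     * devices supporting AVRCP can display them.
     */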
private void bluetoothNotifyChange(PlaybackServiceMediaPlayer.PSMPInfo info, String whatChanged) {
        boolean isPlaying = info.playerStatus == PlayerStatus.PLAYING;
if (info.playable != null) {
Intent i = new Intent(whatChanged);
i.putExtra("id", 1L);
i.putExtra("artist", "");
i.putExtra("album", info.playable.getFeedTitle());
i.putExtra("track", info.playable.getEpisodeTitle());
i.putExtra("playing", isPlaying);
final List<FeedItem> queue = taskManager.getQueueIfLoaded();
if (queue != null) {
i.putExtra("ListSize", queue.size());
}
i.putExtra("duration", (long) info.playable.getDuration());
i.putExtra("position", (long) info.playable.getPosition());
sendBroadcast(i);
}
}
private final BroadcastReceiver autoStateUpdated = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
String status = intent.getStringExtra("media_connection_status");
boolean isConnectedToCar = "media_connected".equals(status);
Log.d(TAG, "Received Auto Connection update: " + status);
if (!isConnectedToCar) {
Log.d(TAG, "Car was unplugged during playback.");
pauseIfPauseOnDisconnect();
} else {
PlayerStatus playerStatus = mediaPlayer.getPlayerStatus();
if (playerStatus == PlayerStatus.PAUSED || playerStatus == PlayerStatus.PREPARED) {
mediaPlayer.resume();
} else if (playerStatus == PlayerStatus.PREPARING) {
mediaPlayer.setStartWhenPrepared(!mediaPlayer.isStartWhenPrepared());
} else if (playerStatus == PlayerStatus.INITIALIZED) {
mediaPlayer.setStartWhenPrepared(true);
mediaPlayer.prepare();
}
}
}
};
/**
* Pauses playback when the headset is disconnected and the preference is
* set
*/
private final BroadcastReceiver headsetDisconnected = new BroadcastReceiver() {
private static final String TAG = "headsetDisconnected";
private static final int UNPLUGGED = 0;
private static final int PLUGGED = 1;
@Override
public void onReceive(Context context, Intent intent) {
if (isInitialStickyBroadcast()) {
// Don't pause playback after we just started, just because the receiver
// delivers the current headset state (instead of a change)
return;
}
if (TextUtils.equals(intent.getAction(), Intent.ACTION_HEADSET_PLUG)) {
int state = intent.getIntExtra("state", -1);
if (state != -1) {
Log.d(TAG, "Headset plug event. State is " + state);
if (state == UNPLUGGED) {
Log.d(TAG, "Headset was unplugged during playback.");
pauseIfPauseOnDisconnect();
} else if (state == PLUGGED) {
Log.d(TAG, "Headset was plugged in during playback.");
unpauseIfPauseOnDisconnect(false);
}
} else {
Log.e(TAG, "Received invalid ACTION_HEADSET_PLUG intent");
}
}
}
};
private final BroadcastReceiver bluetoothStateUpdated = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if (TextUtils.equals(intent.getAction(), BluetoothA2dp.ACTION_CONNECTION_STATE_CHANGED)) {
int state = intent.getIntExtra(BluetoothA2dp.EXTRA_STATE, -1);
if (state == BluetoothA2dp.STATE_CONNECTED) {
Log.d(TAG, "Received bluetooth connection intent");
unpauseIfPauseOnDisconnect(true);
}
}
}
};
private final BroadcastReceiver audioBecomingNoisy = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
            // The audio output is about to change, e.g. from Bluetooth to the speaker.
Log.d(TAG, "Pausing playback because audio is becoming noisy");
pauseIfPauseOnDisconnect();
}
// android.media.AUDIO_BECOMING_NOISY
};
/**
* Pauses playback if PREF_PAUSE_ON_HEADSET_DISCONNECT was set to true.
*/
private void pauseIfPauseOnDisconnect() {
if (UserPreferences.isPauseOnHeadsetDisconnect() && !isCasting()) {
if (mediaPlayer.getPlayerStatus() == PlayerStatus.PLAYING) {
transientPause = true;
}
mediaPlayer.pause(!UserPreferences.isPersistNotify(), true);
}
}
/**
* @param bluetooth true if the event for unpausing came from bluetooth
*/
private void unpauseIfPauseOnDisconnect(boolean bluetooth) {
if (transientPause) {
transientPause = false;
if (!bluetooth && UserPreferences.isUnpauseOnHeadsetReconnect()) {
mediaPlayer.resume();
} else if (bluetooth && UserPreferences.isUnpauseOnBluetoothReconnect()) {
// let the user know we've started playback again...
Vibrator v = (Vibrator) getApplicationContext().getSystemService(Context.VIBRATOR_SERVICE);
if (v != null) {
v.vibrate(500);
}
mediaPlayer.resume();
}
}
}
private final BroadcastReceiver shutdownReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if (TextUtils.equals(intent.getAction(), ACTION_SHUTDOWN_PLAYBACK_SERVICE)) {
EventBus.getDefault().post(new ServiceEvent(ServiceEvent.Action.SERVICE_SHUT_DOWN));
stateManager.stopService();
}
}
};
private final BroadcastReceiver skipCurrentEpisodeReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if (TextUtils.equals(intent.getAction(), ACTION_SKIP_CURRENT_EPISODE)) {
Log.d(TAG, "Received SKIP_CURRENT_EPISODE intent");
mediaPlayer.skip();
}
}
};
private final BroadcastReceiver pausePlayCurrentEpisodeReceiver = new BroadcastReceiver() {
@Override
public void onReceive(Context context, Intent intent) {
if (TextUtils.equals(intent.getAction(), ACTION_PAUSE_PLAY_CURRENT_EPISODE)) {
Log.d(TAG, "Received PAUSE_PLAY_CURRENT_EPISODE intent");
mediaPlayer.pause(false, false);
}
}
};
@Subscribe(threadMode = ThreadMode.MAIN)
@SuppressWarnings("unused")
public void volumeAdaptionChanged(VolumeAdaptionChangedEvent event) {
PlaybackVolumeUpdater playbackVolumeUpdater = new PlaybackVolumeUpdater();
playbackVolumeUpdater.updateVolumeIfNecessary(mediaPlayer, event.getFeedId(), event.getVolumeAdaptionSetting());
}
@Subscribe(threadMode = ThreadMode.MAIN)
@SuppressWarnings("unused")
public void speedPresetChanged(SpeedPresetChangedEvent event) {
if (getPlayable() instanceof FeedMedia) {
if (((FeedMedia) getPlayable()).getItem().getFeed().getId() == event.getFeedId()) {
if (event.getSpeed() == SPEED_USE_GLOBAL) {
setSpeed(UserPreferences.getPlaybackSpeed(getPlayable().getMediaType()));
} else {
setSpeed(event.getSpeed());
}
}
}
}
@Subscribe(threadMode = ThreadMode.MAIN)
@SuppressWarnings("unused")
public void skipIntroEndingPresetChanged(SkipIntroEndingChangedEvent event) {
if (getPlayable() instanceof FeedMedia) {
if (((FeedMedia) getPlayable()).getItem().getFeed().getId() == event.getFeedId()) {
if (event.getSkipEnding() != 0) {
FeedPreferences feedPreferences
= ((FeedMedia) getPlayable()).getItem().getFeed().getPreferences();
feedPreferences.setFeedSkipIntro(event.getSkipIntro());
feedPreferences.setFeedSkipEnding(event.getSkipEnding());
}
}
}
}
public static MediaType getCurrentMediaType() {
return currentMediaType;
}
public static boolean isCasting() {
return isCasting;
}
public void resume() {
mediaPlayer.resume();
taskManager.restartSleepTimer();
}
public void prepare() {
mediaPlayer.prepare();
taskManager.restartSleepTimer();
}
public void pause(boolean abandonAudioFocus, boolean reinit) {
mediaPlayer.pause(abandonAudioFocus, reinit);
}
public void reinit() {
mediaPlayer.reinit();
}
public PlaybackServiceMediaPlayer.PSMPInfo getPSMPInfo() {
return mediaPlayer.getPSMPInfo();
}
public PlayerStatus getStatus() {
return mediaPlayer.getPlayerStatus();
}
public Playable getPlayable() {
return mediaPlayer.getPlayable();
}
public void setSpeed(float speed) {
mediaPlayer.setPlaybackParams(speed, UserPreferences.isSkipSilence());
}
public void skipSilence(boolean skipSilence) {
mediaPlayer.setPlaybackParams(getCurrentPlaybackSpeed(), skipSilence);
}
public void setVolume(float leftVolume, float rightVolume) {
mediaPlayer.setVolume(leftVolume, rightVolume);
}
public float getCurrentPlaybackSpeed() {
        if (mediaPlayer == null) {
return 1.0f;
}
return mediaPlayer.getPlaybackSpeed();
}
public boolean canDownmix() {
return mediaPlayer.canDownmix();
}
public void setDownmix(boolean enable) {
mediaPlayer.setDownmix(enable);
}
public boolean isStartWhenPrepared() {
return mediaPlayer.isStartWhenPrepared();
}
public void setStartWhenPrepared(boolean s) {
mediaPlayer.setStartWhenPrepared(s);
}
public void seekTo(final int t) {
mediaPlayer.seekTo(t);
}
private void seekDelta(final int d) {
mediaPlayer.seekDelta(d);
}
/**
* Seek to the start of the specified chapter.
*/
public void seekToChapter(Chapter c) {
seekTo((int) c.getStart());
}
    /**
     * Calls getDuration() on the media player, or returns INVALID_TIME if the
     * player is in an invalid state.
     */
public int getDuration() {
if (mediaPlayer == null) {
return INVALID_TIME;
}
return mediaPlayer.getDuration();
}
    /**
     * Calls getCurrentPosition() on the media player, or returns INVALID_TIME if
     * the player is in an invalid state.
     */
public int getCurrentPosition() {
if (mediaPlayer == null) {
return INVALID_TIME;
}
return mediaPlayer.getPosition();
}
public List<String> getAudioTracks() {
if (mediaPlayer == null) {
return Collections.emptyList();
}
return mediaPlayer.getAudioTracks();
}
public int getSelectedAudioTrack() {
if (mediaPlayer == null) {
return -1;
}
return mediaPlayer.getSelectedAudioTrack();
}
public void setAudioTrack(int track) {
if (mediaPlayer != null) {
mediaPlayer.setAudioTrack(track);
}
}
public boolean isStreaming() {
return mediaPlayer.isStreaming();
}
public Pair<Integer, Integer> getVideoSize() {
return mediaPlayer.getVideoSize();
}
private void setupPositionObserver() {
if (positionEventTimer != null) {
positionEventTimer.dispose();
}
Log.d(TAG, "Setting up position observer");
positionEventTimer = Observable.interval(1, TimeUnit.SECONDS)
.observeOn(AndroidSchedulers.mainThread())
.subscribe(number -> {
EventBus.getDefault().post(new PlaybackPositionEvent(getCurrentPosition(), getDuration()));
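                    // Before API 29, media notifications cannot show a system-drawn seek bar,
                    // so the position has to be pushed into the notification manually.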
if (Build.VERSION.SDK_INT < 29) {
notificationBuilder.updatePosition(getCurrentPosition(), getCurrentPlaybackSpeed());
NotificationManager notificationManager = (NotificationManager)
getSystemService(NOTIFICATION_SERVICE);
notificationManager.notify(R.id.notification_playing, notificationBuilder.build());
}
skipEndingIfNecessary();
});
}
private void cancelPositionObserver() {
if (positionEventTimer != null) {
positionEventTimer.dispose();
}
}
private void addPlayableToQueue(Playable playable) {
if (playable instanceof FeedMedia) {
long itemId = ((FeedMedia) playable).getItem().getId();
DBWriter.addQueueItem(this, false, false, itemId);
}
}
private final MediaSessionCompat.Callback sessionCallback = new MediaSessionCompat.Callback() {
private static final String TAG = "MediaSessionCompat";
@Override
public void onPlay() {
Log.d(TAG, "onPlay()");
PlayerStatus status = getStatus();
if (status == PlayerStatus.PAUSED || status == PlayerStatus.PREPARED) {
resume();
} else if (status == PlayerStatus.INITIALIZED) {
setStartWhenPrepared(true);
prepare();
}
}
@Override
public void onPlayFromMediaId(String mediaId, Bundle extras) {
Log.d(TAG, "onPlayFromMediaId: mediaId: " + mediaId + " extras: " + extras.toString());
FeedMedia p = DBReader.getFeedMedia(Long.parseLong(mediaId));
if (p != null) {
mediaPlayer.playMediaObject(p, !p.localFileAvailable(), true, true);
addPlayableToQueue(p);
}
}
@Override
public void onPlayFromSearch(String query, Bundle extras) {
Log.d(TAG, "onPlayFromSearch query=" + query + " extras=" + extras.toString());
List<FeedItem> results = FeedSearcher.searchFeedItems(getBaseContext(), query, 0);
if (results.size() > 0 && results.get(0).getMedia() != null) {
FeedMedia media = results.get(0).getMedia();
mediaPlayer.playMediaObject(media, !media.localFileAvailable(), true, true);
addPlayableToQueue(media);
return;
}
onPlay();
}
@Override
public void onPause() {
Log.d(TAG, "onPause()");
if (getStatus() == PlayerStatus.PLAYING) {
pause(!UserPreferences.isPersistNotify(), true);
}
}
@Override
public void onStop() {
Log.d(TAG, "onStop()");
mediaPlayer.stopPlayback(true);
}
@Override
public void onSkipToPrevious() {
Log.d(TAG, "onSkipToPrevious()");
seekDelta(-UserPreferences.getRewindSecs() * 1000);
}
@Override
public void onRewind() {
Log.d(TAG, "onRewind()");
seekDelta(-UserPreferences.getRewindSecs() * 1000);
}
@Override
public void onFastForward() {
Log.d(TAG, "onFastForward()");
seekDelta(UserPreferences.getFastForwardSecs() * 1000);
}
@Override
public void onSkipToNext() {
Log.d(TAG, "onSkipToNext()");
UiModeManager uiModeManager = (UiModeManager) getApplicationContext()
.getSystemService(Context.UI_MODE_SERVICE);
if (UserPreferences.getHardwareForwardButton() == KeyEvent.KEYCODE_MEDIA_NEXT
|| uiModeManager.getCurrentModeType() == Configuration.UI_MODE_TYPE_CAR) {
mediaPlayer.skip();
} else {
seekDelta(UserPreferences.getFastForwardSecs() * 1000);
}
}
@Override
public void onSeekTo(long pos) {
Log.d(TAG, "onSeekTo()");
seekTo((int) pos);
}
@Override
public boolean onMediaButtonEvent(final Intent mediaButton) {
Log.d(TAG, "onMediaButtonEvent(" + mediaButton + ")");
if (mediaButton != null) {
KeyEvent keyEvent = mediaButton.getParcelableExtra(Intent.EXTRA_KEY_EVENT);
if (keyEvent != null &&
keyEvent.getAction() == KeyEvent.ACTION_DOWN &&
keyEvent.getRepeatCount() == 0) {
return handleKeycode(keyEvent.getKeyCode(), false);
}
}
return false;
}
@Override
public void onCustomAction(String action, Bundle extra) {
Log.d(TAG, "onCustomAction(" + action + ")");
if (CUSTOM_ACTION_FAST_FORWARD.equals(action)) {
onFastForward();
} else if (CUSTOM_ACTION_REWIND.equals(action)) {
onRewind();
}
}
};
private final SharedPreferences.OnSharedPreferenceChangeListener prefListener =
(sharedPreferences, key) -> {
if (UserPreferences.PREF_LOCKSCREEN_BACKGROUND.equals(key)) {
updateMediaSessionMetadata(getPlayable());
} else {
flavorHelper.onSharedPreference(key);
}
};
interface FlavorHelperCallback {
PlaybackServiceMediaPlayer.PSMPCallback getMediaPlayerCallback();
void setMediaPlayer(PlaybackServiceMediaPlayer mediaPlayer);
PlaybackServiceMediaPlayer getMediaPlayer();
void setIsCasting(boolean isCasting);
void sendNotificationBroadcast(int type, int code);
void saveCurrentPosition(boolean fromMediaPlayer, Playable playable, int position);
void setupNotification(boolean connected, PlaybackServiceMediaPlayer.PSMPInfo info);
MediaSessionCompat getMediaSession();
Intent registerReceiver(BroadcastReceiver receiver, IntentFilter filter);
void unregisterReceiver(BroadcastReceiver receiver);
}
private final FlavorHelperCallback flavorHelperCallback = new FlavorHelperCallback() {
@Override
public PlaybackServiceMediaPlayer.PSMPCallback getMediaPlayerCallback() {
return PlaybackService.this.mediaPlayerCallback;
}
@Override
public void setMediaPlayer(PlaybackServiceMediaPlayer mediaPlayer) {
PlaybackService.this.mediaPlayer = mediaPlayer;
}
@Override
public PlaybackServiceMediaPlayer getMediaPlayer() {
return PlaybackService.this.mediaPlayer;
}
@Override
public void setIsCasting(boolean isCasting) {
PlaybackService.isCasting = isCasting;
stateManager.validStartCommandWasReceived();
}
@Override
public void sendNotificationBroadcast(int type, int code) {
PlaybackService.this.sendNotificationBroadcast(type, code);
}
@Override
public void saveCurrentPosition(boolean fromMediaPlayer, Playable playable, int position) {
PlaybackService.this.saveCurrentPosition(fromMediaPlayer, playable, position);
}
@Override
public void setupNotification(boolean connected, PlaybackServiceMediaPlayer.PSMPInfo info) {
if (connected) {
PlaybackService.this.setupNotification(info);
} else {
PlayerStatus status = info.playerStatus;
if ((status == PlayerStatus.PLAYING ||
status == PlayerStatus.SEEKING ||
status == PlayerStatus.PREPARING ||
UserPreferences.isPersistNotify()) &&
android.os.Build.VERSION.SDK_INT >= Build.VERSION_CODES.JELLY_BEAN) {
PlaybackService.this.setupNotification(info);
} else if (!UserPreferences.isPersistNotify()) {
stateManager.stopForeground(true);
}
}
}
@Override
public MediaSessionCompat getMediaSession() {
return PlaybackService.this.mediaSession;
}
@Override
public Intent registerReceiver(BroadcastReceiver receiver, IntentFilter filter) {
return PlaybackService.this.registerReceiver(receiver, filter);
}
@Override
public void unregisterReceiver(BroadcastReceiver receiver) {
PlaybackService.this.unregisterReceiver(receiver);
}
};
}
| 1 | 20,667 | Looks like the episode should also not be deleted when repeating | AntennaPod-AntennaPod | java |
@@ -13,6 +13,7 @@ package proxy
import (
"crypto/tls"
+ "github.com/mholt/caddy/caddyhttp/httpserver"
"io"
"net"
"net/http" | 1 | // This file is adapted from code in the net/http/httputil
// package of the Go standard library, which is by the
// Go Authors, and bears this copyright and license info:
//
// Copyright 2011 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.
//
// This file has been modified from the standard lib to
// meet the needs of the application.
package proxy
import (
"crypto/tls"
"io"
"net"
"net/http"
"net/url"
"strings"
"sync"
"time"
)
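// bufferPool recycles 32 KiB copy buffers across proxied requests to reduce
// allocations and garbage-collection pressure when streaming response bodies.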
var bufferPool = sync.Pool{New: createBuffer}
func createBuffer() interface{} {
return make([]byte, 32*1024)
}
// onExitFlushLoop is a callback set by tests to detect the state of the
// flushLoop() goroutine.
var onExitFlushLoop func()
// ReverseProxy is an HTTP Handler that takes an incoming request and
// sends it to another server, proxying the response back to the
// client.
type ReverseProxy struct {
// Director must be a function which modifies
// the request into a new request to be sent
// using Transport. Its response is then copied
// back to the original client unmodified.
Director func(*http.Request)
// The transport used to perform proxy requests.
// If nil, http.DefaultTransport is used.
Transport http.RoundTripper
// FlushInterval specifies the flush interval
// to flush to the client while copying the
// response body.
// If zero, no periodic flushing is done.
FlushInterval time.Duration
}
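// singleJoiningSlash joins a and b with exactly one slash between them;
// for example, "/base"+"dir" and "/base/"+"/dir" both yield "/base/dir".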
func singleJoiningSlash(a, b string) string {
aslash := strings.HasSuffix(a, "/")
bslash := strings.HasPrefix(b, "/")
switch {
case aslash && bslash:
return a + b[1:]
case !aslash && !bslash:
return a + "/" + b
}
return a + b
}
// Though the relevant directive prefix is just "unix:", url.Parse
// will - assuming the regular URL scheme - add additional slashes
// as if "unix" was a request protocol.
// What we need is just the path, so if "unix:/var/run/www.socket"
// was the proxy directive, the parsed hostName would be
// "unix:///var/run/www.socket", hence the ambiguous trimming.
func socketDial(hostName string) func(network, addr string) (conn net.Conn, err error) {
return func(network, addr string) (conn net.Conn, err error) {
return net.Dial("unix", hostName[len("unix://"):])
}
}
// NewSingleHostReverseProxy returns a new ReverseProxy that rewrites
// URLs to the scheme, host, and base path provided in target. If the
// target's path is "/base" and the incoming request was for "/dir",
// the target request will be for /base/dir.
// "Without" logic: if target's path is "/", the incoming path is "/api/messages",
// and without is "/api", then the target request will be for /messages.
func NewSingleHostReverseProxy(target *url.URL, without string) *ReverseProxy {
targetQuery := target.RawQuery
director := func(req *http.Request) {
if target.Scheme == "unix" {
// to make Dial work with unix URL,
// scheme and host have to be faked
req.URL.Scheme = "http"
req.URL.Host = "socket"
} else {
req.URL.Scheme = target.Scheme
req.URL.Host = target.Host
}
req.URL.Path = singleJoiningSlash(target.Path, req.URL.Path)
if targetQuery == "" || req.URL.RawQuery == "" {
req.URL.RawQuery = targetQuery + req.URL.RawQuery
} else {
req.URL.RawQuery = targetQuery + "&" + req.URL.RawQuery
}
// Trims the path of the socket from the URL path.
// This is done because req.URL passed to your proxied service
// will have the full path of the socket file prefixed to it.
// Calling /test on a server that proxies requests to
// unix:/var/run/www.socket will thus set the requested path
// to /var/run/www.socket/test, rendering paths useless.
if target.Scheme == "unix" {
// See comment on socketDial for the trim
socketPrefix := target.String()[len("unix://"):]
req.URL.Path = strings.TrimPrefix(req.URL.Path, socketPrefix)
}
// We are then safe to remove the `without` prefix.
if without != "" {
req.URL.Path = strings.TrimPrefix(req.URL.Path, without)
}
}
rp := &ReverseProxy{Director: director, FlushInterval: 250 * time.Millisecond} // flushing good for streaming & server-sent events
if target.Scheme == "unix" {
rp.Transport = &http.Transport{
Dial: socketDial(target.String()),
}
}
return rp
}
func copyHeader(dst, src http.Header) {
for k, vv := range src {
for _, v := range vv {
dst.Add(k, v)
}
}
}
// Hop-by-hop headers. These are removed when sent to the backend.
// http://www.w3.org/Protocols/rfc2616/rfc2616-sec13.html
var hopHeaders = []string{
"Connection",
"Keep-Alive",
"Proxy-Authenticate",
"Proxy-Authorization",
"Te", // canonicalized version of "TE"
"Trailers",
"Transfer-Encoding",
"Upgrade",
}
// InsecureTransport is used to facilitate HTTPS proxying
// when it is OK for upstream to be using a bad certificate,
// since this transport skips verification.
var InsecureTransport http.RoundTripper = &http.Transport{
Proxy: http.ProxyFromEnvironment,
Dial: (&net.Dialer{
Timeout: 30 * time.Second,
KeepAlive: 30 * time.Second,
}).Dial,
TLSHandshakeTimeout: 10 * time.Second,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
}
type respUpdateFn func(resp *http.Response)
func (p *ReverseProxy) ServeHTTP(rw http.ResponseWriter, outreq *http.Request, respUpdateFn respUpdateFn) error {
transport := p.Transport
if transport == nil {
transport = http.DefaultTransport
}
p.Director(outreq)
outreq.Proto = "HTTP/1.1"
outreq.ProtoMajor = 1
outreq.ProtoMinor = 1
outreq.Close = false
res, err := transport.RoundTrip(outreq)
if err != nil {
return err
}
if respUpdateFn != nil {
respUpdateFn(res)
}
if res.StatusCode == http.StatusSwitchingProtocols && strings.ToLower(res.Header.Get("Upgrade")) == "websocket" {
res.Body.Close()
hj, ok := rw.(http.Hijacker)
if !ok {
return nil
}
conn, _, err := hj.Hijack()
if err != nil {
return err
}
defer conn.Close()
backendConn, err := net.Dial("tcp", outreq.URL.Host)
if err != nil {
return err
}
defer backendConn.Close()
outreq.Write(backendConn)
go func() {
io.Copy(backendConn, conn) // write tcp stream to backend.
}()
io.Copy(conn, backendConn) // read tcp stream from backend.
} else {
defer res.Body.Close()
for _, h := range hopHeaders {
res.Header.Del(h)
}
copyHeader(rw.Header(), res.Header)
rw.WriteHeader(res.StatusCode)
p.copyResponse(rw, res.Body)
}
return nil
}
func (p *ReverseProxy) copyResponse(dst io.Writer, src io.Reader) {
buf := bufferPool.Get()
defer bufferPool.Put(buf)
if p.FlushInterval != 0 {
if wf, ok := dst.(writeFlusher); ok {
mlw := &maxLatencyWriter{
dst: wf,
latency: p.FlushInterval,
done: make(chan bool),
}
go mlw.flushLoop()
defer mlw.stop()
dst = mlw
}
}
io.CopyBuffer(dst, src, buf.([]byte))
}
type writeFlusher interface {
io.Writer
http.Flusher
}
type maxLatencyWriter struct {
dst writeFlusher
latency time.Duration
lk sync.Mutex // protects Write + Flush
done chan bool
}
func (m *maxLatencyWriter) Write(p []byte) (int, error) {
m.lk.Lock()
defer m.lk.Unlock()
return m.dst.Write(p)
}
func (m *maxLatencyWriter) flushLoop() {
t := time.NewTicker(m.latency)
defer t.Stop()
for {
select {
case <-m.done:
if onExitFlushLoop != nil {
onExitFlushLoop()
}
return
case <-t.C:
m.lk.Lock()
m.dst.Flush()
m.lk.Unlock()
}
}
}
func (m *maxLatencyWriter) stop() { m.done <- true }
| 1 | 8,742 | I feel like goimports would move this import line by the other non-std packages... can you double-check that? Run goimports or set up your editor to run it on save. :+1: | caddyserver-caddy | go |
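To illustrate the caddy reviewer's point above: `goimports` groups standard-library imports first, then third-party packages after a blank line, so the merged import block from that hunk would typically come out as sketched below (a formatting illustration only, not part of the original patch):

```go
import (
	"crypto/tls"
	"io"
	"net"
	"net/http"
	"net/url"
	"strings"
	"sync"
	"time"

	"github.com/mholt/caddy/caddyhttp/httpserver"
)
```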
@@ -60,6 +60,19 @@ module Selenium
class << self
def chrome(opts = {})
+ define_method(:options) { @capabilities[:chrome_options] ||= {} }
+ define_method("options=") { |value| @capabilities[:chrome_options] = value }
+ define_method("profile=") do |profile|
+ profile_json = profile.as_json
+ options['args'] ||= []
+ if options['args'].none? { |arg| arg =~ /user-data-dir/ }
+ options['args'] << "--user-data-dir=#{profile_json[:directory]}"
+ end
+ options['extensions'] = profile_json[:extensions]
+ end
+ alias_method :chrome_options, :options
+ alias_method :chrome_options=, :options=
+
new({
browser_name: 'chrome',
        javascript_enabled: true,
| 1 |
# encoding: utf-8
#
# Licensed to the Software Freedom Conservancy (SFC) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The SFC licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
module Selenium
module WebDriver
module Remote
#
# Specification of the desired and/or actual capabilities of the browser that the
# server is being asked to create.
#
class Capabilities
DEFAULTS = {
browser_name: '',
version: '',
platform: :any,
javascript_enabled: false,
css_selectors_enabled: false,
takes_screenshot: false,
native_events: false,
rotatable: false,
firefox_profile: nil,
proxy: nil
}.freeze
DEFAULTS.each_key do |key|
define_method key do
@capabilities.fetch(key)
end
define_method "#{key}=" do |value|
@capabilities[key] = value
end
end
alias_method :css_selectors_enabled?, :css_selectors_enabled
alias_method :javascript_enabled?, :javascript_enabled
alias_method :native_events?, :native_events
alias_method :takes_screenshot?, :takes_screenshot
alias_method :rotatable?, :rotatable
#
# Convenience methods for the common choices.
#
class << self
def chrome(opts = {})
new({
browser_name: 'chrome',
javascript_enabled: true,
css_selectors_enabled: true
}.merge(opts))
end
def edge(opts = {})
new({
browser_name: 'MicrosoftEdge',
platform: :windows,
javascript_enabled: true,
takes_screenshot: true,
css_selectors_enabled: true
}.merge(opts))
end
def firefox(opts = {})
new({
browser_name: 'firefox',
javascript_enabled: true,
takes_screenshot: true,
css_selectors_enabled: true
}.merge(opts))
end
def htmlunit(opts = {})
new({
browser_name: 'htmlunit'
}.merge(opts))
end
def htmlunitwithjs(opts = {})
new({
browser_name: 'htmlunit',
javascript_enabled: true
}.merge(opts))
end
def internet_explorer(opts = {})
new({
browser_name: 'internet explorer',
platform: :windows,
takes_screenshot: true,
css_selectors_enabled: true,
native_events: true
}.merge(opts))
end
alias_method :ie, :internet_explorer
def phantomjs(opts = {})
new({
browser_name: 'phantomjs',
javascript_enabled: true,
takes_screenshot: true,
css_selectors_enabled: true
}.merge(opts))
end
def safari(opts = {})
new({
browser_name: 'safari',
platform: :mac,
javascript_enabled: true,
takes_screenshot: true,
css_selectors_enabled: true
}.merge(opts))
end
#
# @api private
#
def json_create(data)
data = data.dup
caps = new
caps.browser_name = data.delete('browserName')
caps.version = data.delete('version')
caps.platform = data.delete('platform').downcase.to_sym if data.key?('platform')
caps.javascript_enabled = data.delete('javascriptEnabled')
caps.css_selectors_enabled = data.delete('cssSelectorsEnabled')
caps.takes_screenshot = data.delete('takesScreenshot')
caps.native_events = data.delete('nativeEvents')
caps.rotatable = data.delete('rotatable')
caps.proxy = Proxy.json_create(data['proxy']) if data.key?('proxy') && !data['proxy'].empty?
# any remaining pairs will be added as is, with no conversion
caps.merge!(data)
caps
end
end
# @option :browser_name [String] required browser name
# @option :version [String] required browser version number
# @option :platform [Symbol] one of :any, :win, :mac, or :x
# @option :javascript_enabled [Boolean] does the driver have javascript enabled?
# @option :css_selectors_enabled [Boolean] does the driver support CSS selectors?
# @option :takes_screenshot [Boolean] can this driver take screenshots?
# @option :native_events [Boolean] does this driver use native events?
# @option :proxy [Selenium::WebDriver::Proxy, Hash] proxy configuration
#
# Firefox-specific options:
#
# @option :firefox_profile [Selenium::WebDriver::Firefox::Profile] the firefox profile to use
#
# @api public
#
def initialize(opts = {})
@capabilities = DEFAULTS.merge(opts)
self.proxy = opts.delete(:proxy)
end
#
# Allows setting arbitrary capabilities.
#
def []=(key, value)
@capabilities[key] = value
end
def [](key)
@capabilities[key]
end
def merge!(other)
if other.respond_to?(:capabilities, true) && other.capabilities.is_a?(Hash)
@capabilities.merge! other.capabilities
elsif other.is_a? Hash
@capabilities.merge! other
else
raise ArgumentError, 'argument should be a Hash or implement #capabilities'
end
end
def proxy=(proxy)
case proxy
when Hash
@capabilities[:proxy] = Proxy.new(proxy)
when Proxy, nil
@capabilities[:proxy] = proxy
else
raise TypeError, "expected Hash or #{Proxy.name}, got #{proxy.inspect}:#{proxy.class}"
end
end
# @api private
#
def as_json(*)
hash = {}
@capabilities.each do |key, value|
case key
when :platform
hash['platform'] = value.to_s.upcase
when :firefox_profile
hash['firefox_profile'] = value.as_json['zip'] if value
when :proxy
hash['proxy'] = value.as_json if value
when String, :firefox_binary
hash[key.to_s] = value
when Symbol
hash[camel_case(key.to_s)] = value
else
raise TypeError, "expected String or Symbol, got #{key.inspect}:#{key.class} / #{value.inspect}"
end
end
hash
end
def to_json(*)
JSON.generate as_json
end
def ==(other)
return false unless other.is_a? self.class
as_json == other.as_json
end
alias_method :eql?, :==
protected
attr_reader :capabilities
private
def camel_case(str)
str.gsub(/_([a-z])/) { Regexp.last_match(1).upcase }
end
end # Capabilities
end # Remote
end # WebDriver
end # Selenium
| 1 | 14,241 | `options['binary'] = WebDriver::Chrome.path` if set? | SeleniumHQ-selenium | java |
@@ -134,6 +134,9 @@ var (
DB: DB{
UseBadgerDB: false,
NumRetries: 3,
+ SQLITE3: SQLITE3{
+ SQLite3File: "./explorer.db",
+ },
},
}
| 1 |
// Copyright (c) 2018 IoTeX
// This is an alpha (internal) release and is not suitable for production. This source code is provided 'as is' and no
// warranties are given as to title or non-infringement, merchantability or fitness for purpose and, to the extent
// permitted by law, all liability for your use of the code is disclaimed. This source code is governed by Apache
// License 2.0 that can be found in the LICENSE file.
package config
import (
"flag"
"os"
"time"
"github.com/pkg/errors"
uconfig "go.uber.org/config"
"github.com/iotexproject/iotex-core/address"
"github.com/iotexproject/iotex-core/crypto"
"github.com/iotexproject/iotex-core/pkg/keypair"
)
// IMPORTANT: to define a config, add a field or a new config type to the existing config types. In addition, provide
// the default value in Default var.
func init() {
flag.StringVar(&_overwritePath, "config-path", "", "Config path")
flag.StringVar(&_secretPath, "secret-path", "", "Secret path")
flag.StringVar(&_subChainPath, "sub-config-path", "", "Sub chain Config path")
}
var (
// overwritePath is the path to the config file which overwrite default values
_overwritePath string
// secretPath is the path to the config file store secret values
_secretPath string
_subChainPath string
)
const (
// DelegateType represents the delegate node type
DelegateType = "delegate"
// FullNodeType represents the full node type
FullNodeType = "full_node"
// LightweightType represents the lightweight type
LightweightType = "lightweight"
// RollDPoSScheme means randomized delegated proof of stake
RollDPoSScheme = "ROLLDPOS"
// StandaloneScheme means that the node creates a block periodically regardless of others (if there is any)
StandaloneScheme = "STANDALONE"
// NOOPScheme means that the node does not create only block
NOOPScheme = "NOOP"
)
var (
// Default is the default config
Default = Config{
NodeType: FullNodeType,
Network: Network{
Host: "127.0.0.1",
Port: 4689,
ExternalHost: "",
ExternalPort: 4689,
BootstrapNodes: make([]string, 0),
},
Chain: Chain{
ChainDBPath: "/tmp/chain.db",
TrieDBPath: "/tmp/trie.db",
ID: 1,
Address: "",
ProducerPubKey: keypair.EncodePublicKey(keypair.ZeroPublicKey),
ProducerPrivKey: keypair.EncodePrivateKey(keypair.ZeroPrivateKey),
GenesisActionsPath: "",
EmptyGenesis: false,
NumCandidates: 101,
EnableFallBackToFreshDB: false,
EnableSubChainStartInGenesis: false,
EnableGasCharge: false,
},
ActPool: ActPool{
MaxNumActsPerPool: 32000,
MaxNumActsPerAcct: 2000,
MaxNumActsToPick: 0,
},
Consensus: Consensus{
Scheme: NOOPScheme,
RollDPoS: RollDPoS{
DelegateInterval: 10 * time.Second,
ProposerInterval: 10 * time.Second,
UnmatchedEventTTL: 3 * time.Second,
UnmatchedEventInterval: 100 * time.Millisecond,
RoundStartTTL: 10 * time.Second,
AcceptProposeTTL: time.Second,
AcceptProposalEndorseTTL: time.Second,
AcceptCommitEndorseTTL: time.Second,
Delay: 5 * time.Second,
NumSubEpochs: 1,
EventChanSize: 10000,
NumDelegates: 21,
TimeBasedRotation: false,
EnableDKG: false,
},
BlockCreationInterval: 10 * time.Second,
},
BlockSync: BlockSync{
Interval: 10 * time.Second,
BufferSize: 16,
},
Dispatcher: Dispatcher{
EventChanSize: 10000,
},
Explorer: Explorer{
Enabled: false,
UseRDS: false,
Port: 14004,
TpsWindow: 10,
GasStation: GasStation{
SuggestBlockWindow: 20,
DefaultGas: 1,
Percentile: 60,
},
MaxTransferPayloadBytes: 1024,
},
Indexer: Indexer{
Enabled: false,
NodeAddr: "",
},
System: System{
HeartbeatInterval: 10 * time.Second,
HTTPProfilingPort: 0,
HTTPMetricsPort: 8080,
StartSubChainInterval: 10 * time.Second,
},
DB: DB{
UseBadgerDB: false,
NumRetries: 3,
},
}
// ErrInvalidCfg indicates the invalid config value
ErrInvalidCfg = errors.New("invalid config value")
	// Validates is the collection of config validation functions
Validates = []Validate{
ValidateKeyPair,
ValidateConsensusScheme,
ValidateRollDPoS,
ValidateDispatcher,
ValidateExplorer,
ValidateActPool,
ValidateChain,
}
)
// Network is the config struct for network package
type (
Network struct {
Host string `yaml:"host"`
Port int `yaml:"port"`
ExternalHost string `yaml:"externalHost"`
ExternalPort int `yaml:"externalPort"`
BootstrapNodes []string `yaml:"bootstrapNodes"`
}
// Chain is the config struct for blockchain package
Chain struct {
ChainDBPath string `yaml:"chainDBPath"`
TrieDBPath string `yaml:"trieDBPath"`
ID uint32 `yaml:"id"`
Address string `yaml:"address"`
ProducerPubKey string `yaml:"producerPubKey"`
ProducerPrivKey string `yaml:"producerPrivKey"`
GenesisActionsPath string `yaml:"genesisActionsPath"`
EmptyGenesis bool `yaml:"emptyGenesis"`
NumCandidates uint `yaml:"numCandidates"`
EnableFallBackToFreshDB bool `yaml:"enableFallbackToFreshDb"`
EnableSubChainStartInGenesis bool `yaml:"enableSubChainStartInGenesis"`
// enable gas charge for block producer
EnableGasCharge bool `yaml:"enableGasCharge"`
}
// Consensus is the config struct for consensus package
Consensus struct {
// There are three schemes that are supported
Scheme string `yaml:"scheme"`
RollDPoS RollDPoS `yaml:"rollDPoS"`
BlockCreationInterval time.Duration `yaml:"blockCreationInterval"`
}
// BlockSync is the config struct for the BlockSync
BlockSync struct {
Interval time.Duration `yaml:"interval"` // update duration
BufferSize uint64 `yaml:"bufferSize"`
}
// RollDPoS is the config struct for RollDPoS consensus package
RollDPoS struct {
DelegateInterval time.Duration `yaml:"delegateInterval"`
ProposerInterval time.Duration `yaml:"proposerInterval"`
UnmatchedEventTTL time.Duration `yaml:"unmatchedEventTTL"`
UnmatchedEventInterval time.Duration `yaml:"unmatchedEventInterval"`
RoundStartTTL time.Duration `yaml:"roundStartTTL"`
AcceptProposeTTL time.Duration `yaml:"acceptProposeTTL"`
AcceptProposalEndorseTTL time.Duration `yaml:"acceptProposalEndorseTTL"`
AcceptCommitEndorseTTL time.Duration `yaml:"acceptCommitEndorseTTL"`
Delay time.Duration `yaml:"delay"`
NumSubEpochs uint `yaml:"numSubEpochs"`
EventChanSize uint `yaml:"eventChanSize"`
NumDelegates uint `yaml:"numDelegates"`
TimeBasedRotation bool `yaml:"timeBasedRotation"`
EnableDKG bool `yaml:"enableDKG"`
}
// Dispatcher is the dispatcher config
Dispatcher struct {
EventChanSize uint `yaml:"eventChanSize"`
}
// Explorer is the explorer service config
Explorer struct {
Enabled bool `yaml:"enabled"`
IsTest bool `yaml:"isTest"`
UseRDS bool `yaml:"useRDS"`
Port int `yaml:"port"`
TpsWindow int `yaml:"tpsWindow"`
GasStation GasStation `yaml:"gasStation"`
		// MaxTransferPayloadBytes limits how many bytes a payload can contain at most
MaxTransferPayloadBytes uint64 `yaml:"maxTransferPayloadBytes"`
}
// GasStation is the gas station config
GasStation struct {
SuggestBlockWindow int `yaml:"suggestBlockWindow"`
DefaultGas int `yaml:"defaultGas"`
Percentile int `yaml:"Percentile"`
}
// Indexer is the index service config
Indexer struct {
Enabled bool `yaml:"enabled"`
NodeAddr string `yaml:"nodeAddr"`
}
// System is the system config
System struct {
HeartbeatInterval time.Duration `yaml:"heartbeatInterval"`
// HTTPProfilingPort is the port number to access golang performance profiling data of a blockchain node. It is
// 0 by default, meaning performance profiling has been disabled
HTTPProfilingPort int `yaml:"httpProfilingPort"`
HTTPMetricsPort int `yaml:"httpMetricsPort"`
StartSubChainInterval time.Duration `yaml:"startSubChainInterval"`
}
// ActPool is the actpool config
ActPool struct {
// MaxNumActsPerPool indicates maximum number of actions the whole actpool can hold
MaxNumActsPerPool uint64 `yaml:"maxNumActsPerPool"`
// MaxNumActsPerAcct indicates maximum number of actions an account queue can hold
MaxNumActsPerAcct uint64 `yaml:"maxNumActsPerAcct"`
// MaxNumActsToPick indicates maximum number of actions to pick to mint a block. Default is 0, which means no
// limit on the number of actions to pick.
MaxNumActsToPick uint64 `yaml:"maxNumActsToPick"`
}
// DB is the config for database
DB struct {
DbPath string `yaml:"dbPath"`
// Use BadgerDB, otherwise use BoltDB
UseBadgerDB bool `yaml:"useBadgerDB"`
// NumRetries is the number of retries
NumRetries uint8 `yaml:"numRetries"`
// RDS is the config for rds
RDS RDS `yaml:"RDS"`
}
// RDS is the cloud rds config
RDS struct {
// AwsRDSEndpoint is the endpoint of aws rds
AwsRDSEndpoint string `yaml:"awsRDSEndpoint"`
// AwsRDSPort is the port of aws rds
AwsRDSPort uint64 `yaml:"awsRDSPort"`
// AwsRDSUser is the user to access aws rds
AwsRDSUser string `yaml:"awsRDSUser"`
// AwsPass is the pass to access aws rds
AwsPass string `yaml:"awsPass"`
// AwsDBName is the db name of aws rds
AwsDBName string `yaml:"awsDBName"`
}
// Config is the root config struct, each package's config should be put as its sub struct
Config struct {
NodeType string `yaml:"nodeType"`
Network Network `yaml:"network"`
Chain Chain `yaml:"chain"`
ActPool ActPool `yaml:"actPool"`
Consensus Consensus `yaml:"consensus"`
BlockSync BlockSync `yaml:"blockSync"`
Dispatcher Dispatcher `yaml:"dispatcher"`
Explorer Explorer `yaml:"explorer"`
Indexer Indexer `yaml:"indexer"`
System System `yaml:"system"`
DB DB `yaml:"db"`
}
// Validate is the interface of validating the config
Validate func(Config) error
)
// New creates a config instance. It first loads the default configs. If the config path is not empty, it will read from
// the file and override the default configs. By default, it will apply all validation functions. To bypass validation,
// use DoNotValidate instead.
func New(validates ...Validate) (Config, error) {
opts := make([]uconfig.YAMLOption, 0)
opts = append(opts, uconfig.Static(Default))
opts = append(opts, uconfig.Expand(os.LookupEnv))
if _overwritePath != "" {
opts = append(opts, uconfig.File(_overwritePath))
}
if _secretPath != "" {
opts = append(opts, uconfig.File(_secretPath))
}
yaml, err := uconfig.NewYAML(opts...)
if err != nil {
return Config{}, errors.Wrap(err, "failed to init config")
}
var cfg Config
if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to unmarshal YAML config to struct")
}
// By default, the config needs to pass all the validation
if len(validates) == 0 {
validates = Validates
}
for _, validate := range validates {
if err := validate(cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to validate config")
}
}
return cfg, nil
}
// NewSub create config for sub chain.
func NewSub(validates ...Validate) (Config, error) {
if _subChainPath == "" {
return Config{}, nil
}
opts := make([]uconfig.YAMLOption, 0)
opts = append(opts, uconfig.Static(Default))
opts = append(opts, uconfig.Expand(os.LookupEnv))
opts = append(opts, uconfig.File(_subChainPath))
if _secretPath != "" {
opts = append(opts, uconfig.File(_secretPath))
}
yaml, err := uconfig.NewYAML(opts...)
if err != nil {
return Config{}, errors.Wrap(err, "failed to init config")
}
var cfg Config
if err := yaml.Get(uconfig.Root).Populate(&cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to unmarshal YAML config to struct")
}
// By default, the config needs to pass all the validation
if len(validates) == 0 {
validates = Validates
}
for _, validate := range validates {
if err := validate(cfg); err != nil {
return Config{}, errors.Wrap(err, "failed to validate config")
}
}
return cfg, nil
}
// IsDelegate returns true if the node type is Delegate
func (cfg Config) IsDelegate() bool {
return cfg.NodeType == DelegateType
}
// IsFullnode returns true if the node type is Fullnode
func (cfg Config) IsFullnode() bool {
return cfg.NodeType == FullNodeType
}
// IsLightweight returns true if the node type is Lightweight
func (cfg Config) IsLightweight() bool {
return cfg.NodeType == LightweightType
}
// BlockchainAddress returns the address derived from the configured chain ID and public key
func (cfg Config) BlockchainAddress() (address.Address, error) {
pk, err := keypair.DecodePublicKey(cfg.Chain.ProducerPubKey)
if err != nil {
return nil, errors.Wrapf(err, "error when decoding public key %s", cfg.Chain.ProducerPubKey)
}
pkHash := keypair.HashPubKey(pk)
return address.New(cfg.Chain.ID, pkHash[:]), nil
}
// KeyPair returns the decoded public and private key pair
func (cfg Config) KeyPair() (keypair.PublicKey, keypair.PrivateKey, error) {
pk, err := keypair.DecodePublicKey(cfg.Chain.ProducerPubKey)
if err != nil {
return keypair.ZeroPublicKey,
keypair.ZeroPrivateKey,
errors.Wrapf(err, "error when decoding public key %s", cfg.Chain.ProducerPubKey)
}
sk, err := keypair.DecodePrivateKey(cfg.Chain.ProducerPrivKey)
if err != nil {
return keypair.ZeroPublicKey,
keypair.ZeroPrivateKey,
errors.Wrapf(err, "error when decoding private key %s", cfg.Chain.ProducerPrivKey)
}
return pk, sk, nil
}
// ValidateKeyPair validates the block producer address
func ValidateKeyPair(cfg Config) error {
priKey, err := keypair.DecodePrivateKey(cfg.Chain.ProducerPrivKey)
if err != nil {
return err
}
pubKey, err := keypair.DecodePublicKey(cfg.Chain.ProducerPubKey)
if err != nil {
return err
}
// Validate producer pubkey and prikey by signing a dummy message and verify it
validationMsg := "connecting the physical world block by block"
sig := crypto.EC283.Sign(priKey, []byte(validationMsg))
if !crypto.EC283.Verify(pubKey, []byte(validationMsg), sig) {
return errors.Wrap(ErrInvalidCfg, "block producer has unmatched pubkey and prikey")
}
return nil
}
// ValidateChain validates the chain configure
func ValidateChain(cfg Config) error {
if cfg.Chain.NumCandidates <= 0 {
return errors.Wrapf(ErrInvalidCfg, "candidate number should be greater than 0")
}
if cfg.Consensus.Scheme == RollDPoSScheme && cfg.Chain.NumCandidates < cfg.Consensus.RollDPoS.NumDelegates {
return errors.Wrapf(ErrInvalidCfg, "candidate number should be greater than or equal to delegate number")
}
return nil
}
// ValidateConsensusScheme validates if the scheme and node type match
func ValidateConsensusScheme(cfg Config) error {
switch cfg.NodeType {
case DelegateType:
case FullNodeType:
if cfg.Consensus.Scheme != NOOPScheme {
return errors.Wrap(ErrInvalidCfg, "consensus scheme of fullnode should be NOOP")
}
case LightweightType:
if cfg.Consensus.Scheme != NOOPScheme {
return errors.Wrap(ErrInvalidCfg, "consensus scheme of lightweight node should be NOOP")
}
default:
return errors.Wrapf(ErrInvalidCfg, "unknown node type %s", cfg.NodeType)
}
return nil
}
// ValidateDispatcher validates the dispatcher configs
func ValidateDispatcher(cfg Config) error {
if cfg.Dispatcher.EventChanSize <= 0 {
return errors.Wrap(ErrInvalidCfg, "dispatcher event chan size should be greater than 0")
}
return nil
}
// ValidateRollDPoS validates the roll-DPoS configs
func ValidateRollDPoS(cfg Config) error {
if cfg.Consensus.Scheme != RollDPoSScheme {
return nil
}
rollDPoS := cfg.Consensus.RollDPoS
if rollDPoS.EventChanSize <= 0 {
return errors.Wrap(ErrInvalidCfg, "roll-DPoS event chan size should be greater than 0")
}
if rollDPoS.NumDelegates <= 0 {
return errors.Wrap(ErrInvalidCfg, "roll-DPoS event delegate number should be greater than 0")
}
ttl := rollDPoS.AcceptCommitEndorseTTL + rollDPoS.AcceptProposeTTL + rollDPoS.AcceptProposalEndorseTTL
if ttl >= rollDPoS.ProposerInterval {
return errors.Wrap(ErrInvalidCfg, "roll-DPoS ttl sum is larger than proposer interval")
}
return nil
}
// ValidateExplorer validates the explorer configs
func ValidateExplorer(cfg Config) error {
if cfg.Explorer.Enabled && cfg.Explorer.TpsWindow <= 0 {
return errors.Wrap(ErrInvalidCfg, "tps window is not a positive integer when the explorer is enabled")
}
return nil
}
// ValidateActPool validates the given config
func ValidateActPool(cfg Config) error {
maxNumActPerPool := cfg.ActPool.MaxNumActsPerPool
maxNumActPerAcct := cfg.ActPool.MaxNumActsPerAcct
if maxNumActPerPool <= 0 || maxNumActPerAcct <= 0 {
return errors.Wrap(
ErrInvalidCfg,
"maximum number of actions per pool or per account cannot be zero or negative",
)
}
if maxNumActPerPool < maxNumActPerAcct {
return errors.Wrap(
ErrInvalidCfg,
"maximum number of actions per pool cannot be less than maximum number of actions per account",
)
}
return nil
}
// DoNotValidate validates the given config
func DoNotValidate(cfg Config) error { return nil }
| 1 | 14,107 | File is not `goimports`-ed (from `goimports`) | iotexproject-iotex-core | go |
@@ -19,13 +19,18 @@ namespace Microsoft.AspNetCore.Server.Kestrel.Core
/// </summary>
public class ListenOptions : IEndPointInformation, IConnectionBuilder
{
+ internal const string Http2ExperimentSwitch = "Switch.Microsoft.AspNetCore.Server.Kestrel.Experimental.Http2";
+
private FileHandleType _handleType;
+ private HttpProtocols _protocols = HttpProtocols.Http1;
+ internal bool _isHttp2Supported;
private readonly List<Func<ConnectionDelegate, ConnectionDelegate>> _components = new List<Func<ConnectionDelegate, ConnectionDelegate>>();
internal ListenOptions(IPEndPoint endPoint)
{
Type = ListenType.IPEndPoint;
IPEndPoint = endPoint;
+ AppContext.TryGetSwitch(Http2ExperimentSwitch, out _isHttp2Supported);
}
        internal ListenOptions(string socketPath)
| 1 |
// Copyright (c) .NET Foundation. All rights reserved.
// Licensed under the Apache License, Version 2.0. See License.txt in the project root for license information.
using System;
using System.Collections.Generic;
using System.Linq;
using System.Net;
using System.Threading.Tasks;
using Microsoft.AspNetCore.Protocols;
using Microsoft.AspNetCore.Server.Kestrel.Core.Adapter.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Core.Internal;
using Microsoft.AspNetCore.Server.Kestrel.Transport.Abstractions.Internal;
namespace Microsoft.AspNetCore.Server.Kestrel.Core
{
/// <summary>
/// Describes either an <see cref="IPEndPoint"/>, Unix domain socket path, or a file descriptor for an already open
/// socket that Kestrel should bind to or open.
/// </summary>
public class ListenOptions : IEndPointInformation, IConnectionBuilder
{
private FileHandleType _handleType;
private readonly List<Func<ConnectionDelegate, ConnectionDelegate>> _components = new List<Func<ConnectionDelegate, ConnectionDelegate>>();
internal ListenOptions(IPEndPoint endPoint)
{
Type = ListenType.IPEndPoint;
IPEndPoint = endPoint;
}
internal ListenOptions(string socketPath)
{
Type = ListenType.SocketPath;
SocketPath = socketPath;
}
internal ListenOptions(ulong fileHandle)
: this(fileHandle, FileHandleType.Auto)
{
}
internal ListenOptions(ulong fileHandle, FileHandleType handleType)
{
Type = ListenType.FileHandle;
FileHandle = fileHandle;
switch (handleType)
{
case FileHandleType.Auto:
case FileHandleType.Tcp:
case FileHandleType.Pipe:
_handleType = handleType;
break;
default:
throw new NotSupportedException();
}
}
/// <summary>
/// The type of interface being described: either an <see cref="IPEndPoint"/>, Unix domain socket path, or a file descriptor.
/// </summary>
public ListenType Type { get; }
public FileHandleType HandleType
{
get => _handleType;
set
{
if (value == _handleType)
{
return;
}
if (Type != ListenType.FileHandle || _handleType != FileHandleType.Auto)
{
throw new InvalidOperationException();
}
switch (value)
{
case FileHandleType.Tcp:
case FileHandleType.Pipe:
_handleType = value;
break;
default:
throw new ArgumentException(nameof(HandleType));
}
}
}
// IPEndPoint is mutable so port 0 can be updated to the bound port.
/// <summary>
/// The <see cref="IPEndPoint"/> to bind to.
/// Only set if the <see cref="ListenOptions"/> <see cref="Type"/> is <see cref="ListenType.IPEndPoint"/>.
/// </summary>
public IPEndPoint IPEndPoint { get; set; }
/// <summary>
/// The absolute path to a Unix domain socket to bind to.
/// Only set if the <see cref="ListenOptions"/> <see cref="Type"/> is <see cref="ListenType.SocketPath"/>.
/// </summary>
public string SocketPath { get; }
/// <summary>
/// A file descriptor for the socket to open.
/// Only set if the <see cref="ListenOptions"/> <see cref="Type"/> is <see cref="ListenType.FileHandle"/>.
/// </summary>
public ulong FileHandle { get; }
/// <summary>
/// Enables an <see cref="IConnectionAdapter"/> to resolve and use services registered by the application during startup.
/// Only set if accessed from the callback of a <see cref="KestrelServerOptions"/> Listen* method.
/// </summary>
public KestrelServerOptions KestrelServerOptions { get; internal set; }
/// <summary>
/// Set to false to enable Nagle's algorithm for all connections.
/// </summary>
/// <remarks>
/// Defaults to true.
/// </remarks>
public bool NoDelay { get; set; } = true;
/// <summary>
/// The protocols enabled on this endpoint.
/// </summary>
/// <remarks>Defaults to HTTP/1.x only.</remarks>
public HttpProtocols Protocols { get; set; } = HttpProtocols.Http1;
/// <summary>
/// Gets the <see cref="List{IConnectionAdapter}"/> that allows each connection <see cref="System.IO.Stream"/>
/// to be intercepted and transformed.
/// Configured by the <c>UseHttps()</c> and <see cref="Hosting.ListenOptionsConnectionLoggingExtensions.UseConnectionLogging(ListenOptions)"/>
/// extension methods.
/// </summary>
/// <remarks>
/// Defaults to empty.
/// </remarks>
public List<IConnectionAdapter> ConnectionAdapters { get; } = new List<IConnectionAdapter>();
public IServiceProvider ApplicationServices => KestrelServerOptions?.ApplicationServices;
/// <summary>
/// Gets the name of this endpoint to display on command-line when the web server starts.
/// </summary>
internal virtual string GetDisplayName()
{
var scheme = ConnectionAdapters.Any(f => f.IsHttps)
? "https"
: "http";
switch (Type)
{
case ListenType.IPEndPoint:
return $"{scheme}://{IPEndPoint}";
case ListenType.SocketPath:
return $"{scheme}://unix:{SocketPath}";
case ListenType.FileHandle:
return $"{scheme}://<file handle>";
default:
throw new InvalidOperationException();
}
}
public override string ToString() => GetDisplayName();
public IConnectionBuilder Use(Func<ConnectionDelegate, ConnectionDelegate> middleware)
{
_components.Add(middleware);
return this;
}
public ConnectionDelegate Build()
{
ConnectionDelegate app = context =>
{
return Task.CompletedTask;
};
for (int i = _components.Count - 1; i >= 0; i--)
{
var component = _components[i];
app = component(app);
}
return app;
}
internal virtual async Task BindAsync(AddressBindContext context)
{
await AddressBinder.BindEndpointAsync(this, context).ConfigureAwait(false);
context.Addresses.Add(GetDisplayName());
}
}
}
| 1 | 14,623 | Nit: Supported -> Enabled. | aspnet-KestrelHttpServer | .cs |
@@ -128,7 +128,7 @@ class ImportTest(QuiltTestCase):
# Modify an existing dataframe
csv = package1.dataframes2.csv._data()
- csv.set_value(0, 'Int0', 42)
+ csv.at[0, 'Int0'] = 42
# Add a new dataframe
        df = pd.DataFrame(dict(a=[1, 2, 3]))
| 1 |
"""
Tests for magic imports.
"""
import os
import pandas as pd
from six import string_types
from quilt.data import GroupNode, DataNode
from quilt.tools import command
from quilt.tools.const import PACKAGE_DIR_NAME
from quilt.tools.package import PackageException
from .utils import QuiltTestCase
class ImportTest(QuiltTestCase):
def test_imports(self):
mydir = os.path.dirname(__file__)
build_path = os.path.join(mydir, './build.yml')
command.build('foo/package', build_path)
# Good imports
from quilt.data.foo import package
from quilt.data.foo.package import dataframes
from quilt.data.foo.package import README
# Contents of the imports
assert isinstance(package, GroupNode)
assert isinstance(dataframes, GroupNode)
assert isinstance(dataframes.csv, DataNode)
assert isinstance(README, DataNode)
assert package.dataframes == dataframes
assert package.README == README
assert set(dataframes._keys()) == {'xls', 'csv', 'tsv'}
assert set(dataframes._group_keys()) == set()
assert set(dataframes._data_keys()) == {'xls', 'csv', 'tsv'}
assert isinstance(README(), string_types)
assert isinstance(README._data(), string_types)
assert isinstance(dataframes.csv(), pd.DataFrame)
assert isinstance(dataframes.csv._data(), pd.DataFrame)
str(package)
str(dataframes)
str(README)
# Bad attributes of imported packages
with self.assertRaises(AttributeError):
package.foo
with self.assertRaises(AttributeError):
package.dataframes.foo
with self.assertRaises(AttributeError):
package.dataframes.csv.foo
# Bad imports
with self.assertRaises(ImportError):
import quilt.data.foo.bad_package
with self.assertRaises(ImportError):
import quilt.data.bad_user.bad_package
with self.assertRaises(ImportError):
from quilt.data.foo.dataframes import blah
with self.assertRaises(ImportError):
from quilt.data.foo.baz import blah
def test_import_group_as_data(self):
mydir = os.path.dirname(__file__)
build_path = os.path.join(mydir, './build_group_data.yml')
command.build('foo/grppkg', build_path)
# Good imports
from quilt.data.foo.grppkg import dataframes
assert isinstance(dataframes, GroupNode)
assert isinstance(dataframes.csvs.csv, DataNode)
assert isinstance(dataframes._data(), pd.DataFrame)
# Incompatible Schema
from quilt.data.foo.grppkg import incompatible
with self.assertRaises(PackageException):
incompatible._data()
def test_multiple_package_dirs(self):
# First level
mydir = os.path.dirname(__file__)
build_path = os.path.join(mydir, './build_simple.yml')
command.build('foo/nested', build_path)
# Second level: different package
os.mkdir("aaa")
os.chdir("aaa")
build_path = os.path.join(mydir, './build.yml')
command.build('foo/nested', build_path)
# Third level: empty package directory
os.mkdir("bbb")
os.chdir("bbb")
os.mkdir(PACKAGE_DIR_NAME)
# Imports should find the second package
from quilt.data.foo.nested import dataframes
def test_save(self):
mydir = os.path.dirname(__file__)
build_path = os.path.join(mydir, './build.yml')
command.build('foo/package1', build_path)
from quilt.data.foo import package1
# Build an identical package
command.build('foo/package2', package1)
contents1 = open('quilt_packages/foo/package1.json').read()
contents2 = open('quilt_packages/foo/package2.json').read()
assert contents1 == contents2
# Rename an attribute
package1.dataframes2 = package1.dataframes
del package1.dataframes
# Modify an existing dataframe
csv = package1.dataframes2.csv._data()
csv.set_value(0, 'Int0', 42)
# Add a new dataframe
df = pd.DataFrame(dict(a=[1, 2, 3]))
package1._set(['new', 'df'], df)
assert package1.new.df._data() is df
# Add a new file
file_path = os.path.join(mydir, 'data/foo.csv')
package1._set(['new', 'file'], file_path)
assert package1.new.file._data() == file_path
# Add a new group
package1._add_group('newgroup')
assert isinstance(package1.newgroup, GroupNode)
package1.newgroup._add_group('foo')
assert isinstance(package1.newgroup.foo, GroupNode)
# Overwrite a leaf node
new_path = os.path.join(mydir, 'data/nuts.csv')
package1._set(['newgroup', 'foo'], new_path)
assert package1.newgroup.foo._data() == new_path
# Overwrite the whole group
package1._set(['newgroup'], new_path)
assert package1.newgroup._data() == new_path
# Built a new package and verify the new contents
command.build('foo/package3', package1)
from quilt.data.foo import package3
assert hasattr(package3, 'dataframes2')
assert not hasattr(package3, 'dataframes')
new_csv = package3.dataframes2.csv._data()
assert new_csv.xs(0)['Int0'] == 42
new_df = package3.new.df._data()
assert new_df.xs(2)['a'] == 3
new_file = package3.new.file._data()
assert isinstance(new_file, string_types)
def test_set_non_node_attr(self):
mydir = os.path.dirname(__file__)
build_path = os.path.join(mydir, './build.yml')
command.build('foo/package1', build_path)
from quilt.data.foo import package1
# Assign a DataFrame as a node
# (should throw exception)
df = pd.DataFrame(dict(a=[1, 2, 3]))
with self.assertRaises(AttributeError):
package1.newdf = df
| 1 | 15,263 | Yay! I was too lazy to fix this. | quiltdata-quilt | py |
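A note on the next record's hunk: it binds both `github.com/pkg/errors` and `k8s.io/apimachinery/pkg/api/errors` to the same package name `errors` in one file, which would not compile. Below is a minimal sketch of one way to keep both packages usable side by side; the `k8serror` alias and the `classifyGetError` helper are assumptions for illustration, not what the full patch necessarily does:

```go
package replicacontroller

import (
	errors "github.com/pkg/errors"
	// assumed alias so this package does not clash with pkg/errors above;
	// the actual patch may resolve the collision differently
	k8serror "k8s.io/apimachinery/pkg/api/errors"
)

// classifyGetError is a hypothetical helper showing both packages together:
// the k8s errors package answers "was this a NotFound?", while pkg/errors
// wraps the error with context for the caller.
func classifyGetError(err error, key string) error {
	if k8serror.IsNotFound(err) {
		return nil // the resource is already gone; nothing left to do
	}
	return errors.Wrapf(err, "failed to get cvr %s", key)
}
```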
@@ -28,8 +28,7 @@ import (
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned"
"github.com/openebs/maya/pkg/debug"
- merrors "github.com/openebs/maya/pkg/errors/v1alpha1"
- pkg_errors "github.com/pkg/errors"
+ errors "github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" | 1 | /*
Copyright 2018 The OpenEBS Authors.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
*/
package replicacontroller
import (
"encoding/json"
"fmt"
"os"
"reflect"
"strings"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/controller/common"
"github.com/openebs/maya/cmd/cstor-pool-mgmt/volumereplica"
apis "github.com/openebs/maya/pkg/apis/openebs.io/v1alpha1"
clientset "github.com/openebs/maya/pkg/client/generated/clientset/versioned"
"github.com/openebs/maya/pkg/debug"
merrors "github.com/openebs/maya/pkg/errors/v1alpha1"
pkg_errors "github.com/pkg/errors"
corev1 "k8s.io/api/core/v1"
"k8s.io/apimachinery/pkg/api/errors"
metav1 "k8s.io/apimachinery/pkg/apis/meta/v1"
"k8s.io/apimachinery/pkg/types"
"k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/cache"
"k8s.io/klog"
)
const (
v130 = "1.3.0"
)
type upgradeParams struct {
cvr *apis.CStorVolumeReplica
client clientset.Interface
}
type upgradeFunc func(u *upgradeParams) (*apis.CStorVolumeReplica, error)
var (
upgradeMap = map[string]upgradeFunc{
"1.0.0": setReplicaID,
"1.1.0": setReplicaID,
"1.2.0": setReplicaID,
}
)
// CVRPatch struct represent the struct used to patch
// the cvr object
type CVRPatch struct {
// Op defines the operation
Op string `json:"op"`
// Path defines the key path
// eg. for
// {
// "Name": "openebs"
// Category: {
// "Inclusive": "v1",
// "Rank": "A"
// }
// }
// The path of 'Inclusive' would be
// "/Name/Category/Inclusive"
Path string `json:"path"`
Value string `json:"value"`
}
// syncHandler handles CVR changes based on the provided
// operation. It reconciles desired state of CVR with the
// actual state.
//
// Finally, it updates CVR Status
func (c *CStorVolumeReplicaController) syncHandler(
key string,
operation common.QueueOperation,
) error {
cvrGot, err := c.getVolumeReplicaResource(key)
if err != nil {
return err
}
if cvrGot == nil {
return merrors.Errorf(
"failed to reconcile cvr {%s}: object not found",
key,
)
}
cvrGot, err = c.populateVersion(cvrGot)
if err != nil {
klog.Errorf("failed to add versionDetails to cvr %s:%s", cvrGot.Name, err.Error())
c.recorder.Event(
cvrGot,
corev1.EventTypeWarning,
"FailedPopulate",
fmt.Sprintf("Failed to add current version: %s", err.Error()),
)
return nil
}
cvrGot, err = c.reconcileVersion(cvrGot)
if err != nil {
klog.Errorf("failed to upgrade cvr %s:%s", cvrGot.Name, err.Error())
c.recorder.Event(
cvrGot,
corev1.EventTypeWarning,
"FailedUpgrade",
fmt.Sprintf("Failed to upgrade cvr to %s version: %s",
cvrGot.VersionDetails.Desired,
err.Error(),
),
)
cvrGot.VersionDetails.Status.SetErrorStatus(
"Failed to reconcile cvr version",
err,
)
_, err = c.clientset.OpenebsV1alpha1().
CStorVolumeReplicas(cvrGot.Namespace).Update(cvrGot)
if err != nil {
klog.Errorf("failed to update versionDetails status for cvr %s:%s", cvrGot.Name, err.Error())
}
return nil
}
status, err := c.cVREventHandler(operation, cvrGot)
if status == "" {
// TODO
// need to rethink on this logic !!
// status holds more importance than error
return nil
}
cvrGot.Status.LastUpdateTime = metav1.Now()
if cvrGot.Status.Phase != apis.CStorVolumeReplicaPhase(status) {
cvrGot.Status.LastTransitionTime = cvrGot.Status.LastUpdateTime
// set phase based on received status
cvrGot.Status.Phase = apis.CStorVolumeReplicaPhase(status)
}
// need to update cvr before returning this error
if err != nil {
if debug.EI.IsCVRUpdateErrorInjected() {
return merrors.Errorf("CVR update error via injection")
}
_, err1 := c.clientset.
OpenebsV1alpha1().
CStorVolumeReplicas(cvrGot.Namespace).
Update(cvrGot)
if err1 != nil {
return merrors.Wrapf(
err,
"failed to reconcile cvr {%s}: failed to update cvr with phase {%s}: {%s}",
key,
cvrGot.Status.Phase,
err1.Error(),
)
}
return merrors.Wrapf(err, "failed to reconcile cvr {%s}", key)
}
// Synchronize cstor volume total allocated and
// used capacity fields on CVR object.
// Any kind of sync activity should be done from here.
c.syncCvr(cvrGot)
_, err = c.clientset.
OpenebsV1alpha1().
CStorVolumeReplicas(cvrGot.Namespace).
Update(cvrGot)
if err != nil {
return merrors.Wrapf(
err,
"failed to reconcile cvr {%s}: failed to update cvr with phase {%s}",
key,
cvrGot.Status.Phase,
)
}
klog.V(4).Infof(
"cvr {%s} reconciled successfully with current phase being {%s}",
key,
cvrGot.Status.Phase,
)
return nil
}
func (c *CStorVolumeReplicaController) cVREventHandler(
operation common.QueueOperation,
cvrObj *apis.CStorVolumeReplica,
) (string, error) {
err := volumereplica.CheckValidVolumeReplica(cvrObj)
if err != nil {
c.recorder.Event(
cvrObj,
corev1.EventTypeWarning,
string(common.FailureValidate),
string(common.MessageResourceFailValidate),
)
return string(apis.CVRStatusOffline), err
}
// PoolNameHandler tries to get pool name and blocks for
// particular number of attempts.
var noOfAttempts = 2
if !common.PoolNameHandler(cvrObj, noOfAttempts) {
return string(cvrObj.Status.Phase), merrors.New("pool not found")
}
// cvr is created at zfs in the form poolname/volname
fullVolName :=
volumereplica.PoolNameFromCVR(cvrObj) + "/" +
cvrObj.Labels["cstorvolume.openebs.io/name"]
switch operation {
case common.QOpAdd:
klog.Infof(
"will process add event for cvr {%s} as volume {%s}",
cvrObj.Name,
fullVolName,
)
status, err := c.cVRAddEventHandler(cvrObj, fullVolName)
return status, err
case common.QOpDestroy:
klog.Infof(
"will process delete event for cvr {%s} as volume {%s}",
cvrObj.Name,
fullVolName,
)
err := volumereplica.DeleteVolume(fullVolName)
if err != nil {
c.recorder.Event(
cvrObj,
corev1.EventTypeWarning,
string(common.FailureDestroy),
string(common.MessageResourceFailDestroy),
)
return string(apis.CVRStatusDeletionFailed), err
}
err = c.removeFinalizer(cvrObj)
if err != nil {
c.recorder.Event(
cvrObj,
corev1.EventTypeWarning,
string(common.FailureRemoveFinalizer),
string(common.MessageResourceFailDestroy),
)
return string(apis.CVRStatusDeletionFailed), err
}
return "", nil
case common.QOpModify:
fallthrough
case common.QOpSync:
klog.V(4).Infof(
"will process sync event for cvr {%s} as volume {%s}",
cvrObj.Name,
operation,
)
if isCVRCreateStatus(cvrObj) {
return c.cVRAddEventHandler(cvrObj, fullVolName)
}
return c.getCVRStatus(cvrObj)
}
klog.Errorf(
"failed to handle event for cvr {%s}: operation {%s} not supported",
cvrObj.Name,
string(operation),
)
return string(apis.CVRStatusInvalid), nil
}
// removeFinalizer removes finalizers present in
// CVR resource
func (c *CStorVolumeReplicaController) removeFinalizer(
cvrObj *apis.CStorVolumeReplica,
) error {
cvrPatch := []CVRPatch{
CVRPatch{
Op: "remove",
Path: "/metadata/finalizers",
},
}
cvrPatchBytes, err := json.Marshal(cvrPatch)
if err != nil {
return merrors.Wrapf(
err,
"failed to remove finalizers from cvr {%s}",
cvrObj.Name,
)
}
_, err = c.clientset.
OpenebsV1alpha1().
CStorVolumeReplicas(cvrObj.Namespace).
Patch(cvrObj.Name, types.JSONPatchType, cvrPatchBytes)
if err != nil {
return merrors.Wrapf(
err,
"failed to remove finalizers from cvr {%s}",
cvrObj.Name,
)
}
klog.Infof("finalizers removed successfully from cvr {%s}", cvrObj.Name)
return nil
}
func (c *CStorVolumeReplicaController) cVRAddEventHandler(
cVR *apis.CStorVolumeReplica,
fullVolName string,
) (string, error) {
var err error
	// lock is to synchronize pool and volumereplica. Until certain pool-related
	// operations are over, the volumereplica threads will be held.
common.SyncResources.Mux.Lock()
if common.SyncResources.IsImported {
common.SyncResources.Mux.Unlock()
// To check if volume is already imported with pool.
importedFlag := common.CheckForInitialImportedPoolVol(
common.InitialImportedPoolVol,
fullVolName,
)
if importedFlag && !IsEmptyStatus(cVR) {
klog.Infof(
"CStorVolumeReplica %v is already imported",
string(cVR.ObjectMeta.UID),
)
c.recorder.Event(
cVR,
corev1.EventTypeNormal,
string(common.SuccessImported),
string(common.MessageResourceImported),
)
			// If the volume is already present, get the status of the replica
			// from ZFS and update it with the corresponding status phase. If
			// fetching the status gives an error, return the old phase.
return getVolumeReplicaStatus(cVR, fullVolName)
}
} else {
common.SyncResources.Mux.Unlock()
}
	// The below block is useful when only cstor-pool-mgmt gets restarted;
	// then it is required to cross-check whether the volume exists or not.
existingvol, _ := volumereplica.GetVolumes()
if common.CheckIfPresent(existingvol, fullVolName) {
klog.Warningf(
"CStorVolumeReplica %v is already present",
string(cVR.GetUID()),
)
c.recorder.Event(
cVR,
corev1.EventTypeWarning,
string(common.AlreadyPresent),
string(common.MessageResourceAlreadyPresent),
)
		// The zfs dataset may have been created in the zpool, but the
		// update to etcd might have failed.
if isEmptyReplicaID(cVR) {
cVR.Spec.ReplicaID, err = volumereplica.GetReplicaIDFromZFS(fullVolName)
if err != nil {
				// If an error happened, return the existing CVR phase so that
				// the next reconciliation will retry with the proper changes.
return string(cVR.Status.Phase), pkg_errors.Wrapf(err,
"volume replica %s exists", cVR.Name)
}
}
		// If the volume is already present, get the status of the replica
		// from ZFS and update it with the corresponding status.
return getVolumeReplicaStatus(cVR, fullVolName)
}
//TODO: Follow best practice while refactor reconciliation logic
if isCVRCreateStatus(cVR) {
return c.createVolumeReplica(cVR, fullVolName)
}
return string(apis.CVRStatusOffline),
fmt.Errorf(
"VolumeReplica offline: %v, %v",
cVR.Name,
cVR.Labels["cstorvolume.openebs.io/name"],
)
}
// createVolumeReplica will do the following things:
// 1. If replicaID is empty and it is a new volume, generate a replicaID.
// 2. Trigger the ZFS volume dataset create command; on success, get the status
// from ZFS and update it. If the ZFS command fails, return the status phase
// currently held by the CVR.
func (c *CStorVolumeReplicaController) createVolumeReplica(
cVR *apis.CStorVolumeReplica, fullVolName string) (string, error) {
// Setting quorum to true for newly creating Volumes.
var quorum = true
if IsRecreateStatus(cVR) {
klog.Infof(
"Pool is recreated hence creating the volumes by setting off the quorum property",
)
quorum = false
}
	// We should generate a replicaID for new volume replicas only if they
	// don't have a replica ID already.
if isEmptyReplicaID(cVR) && (IsEmptyStatus(cVR) || IsInitStatus(cVR)) {
if err := volumereplica.GenerateReplicaID(cVR); err != nil {
klog.Errorf("cVR ReplicaID creation failure: %v", err.Error())
return string(cVR.Status.Phase), err
}
}
if len(cVR.Spec.ReplicaID) == 0 {
return string(cVR.Status.Phase), merrors.New("ReplicaID is not set")
}
err := volumereplica.CreateVolumeReplica(cVR, fullVolName, quorum)
if err != nil {
klog.Errorf("cVR creation failure: %v", err.Error())
c.recorder.Event(
cVR,
corev1.EventTypeWarning,
string(common.FailureCreate),
fmt.Sprintf("failed to create volume replica error: %v", err.Error()),
)
return string(cVR.Status.Phase), err
}
c.recorder.Event(
cVR,
corev1.EventTypeNormal,
string(common.SuccessCreated),
string(common.MessageResourceCreated),
)
klog.Infof(
"cVR creation successful: %v, %v",
cVR.ObjectMeta.Name,
string(cVR.GetUID()),
)
return getVolumeReplicaStatus(cVR, fullVolName)
}
// getVolumeReplicaStatus returns the status of the replica after executing the
// ZFS stats command, and returns the previous state and an error if any error
// occurred while getting the status from ZFS.
func getVolumeReplicaStatus(
cVR *apis.CStorVolumeReplica, fullVolName string) (string, error) {
status, err := volumereplica.Status(fullVolName)
if err != nil {
return string(cVR.Status.Phase), err
}
return status, nil
}
// getVolumeReplicaResource returns object corresponding to the resource key
func (c *CStorVolumeReplicaController) getVolumeReplicaResource(
key string,
) (*apis.CStorVolumeReplica, error) {
// Convert the key(namespace/name) string into a distinct name
namespace, name, err := cache.SplitMetaNamespaceKey(key)
if err != nil {
runtime.HandleError(fmt.Errorf("invalid resource key: %s", key))
return nil, nil
}
cStorVolumeReplicaUpdated, err := c.clientset.OpenebsV1alpha1().
CStorVolumeReplicas(namespace).
Get(name, metav1.GetOptions{})
if err != nil {
// The cStorPool resource may no longer exist, in which case we stop
// processing.
if errors.IsNotFound(err) {
runtime.HandleError(
fmt.Errorf(
"cStorVolumeReplicaUpdated '%s' in work queue no longer exists",
key,
),
)
return nil, nil
}
return nil, err
}
return cStorVolumeReplicaUpdated, nil
}
// IsRightCStorVolumeReplica is to check if the cvr
// request is for particular pod/application.
func IsRightCStorVolumeReplica(cVR *apis.CStorVolumeReplica) bool {
if strings.TrimSpace(string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"])) != "" {
return os.Getenv(string(common.OpenEBSIOCStorID)) ==
string(cVR.ObjectMeta.Labels["cstorpool.openebs.io/uid"])
}
if strings.TrimSpace(string(cVR.ObjectMeta.Labels["cstorpoolinstance.openebs.io/uid"])) != "" {
return os.Getenv(string(common.OpenEBSIOCSPIID)) ==
string(cVR.ObjectMeta.Labels["cstorpoolinstance.openebs.io/uid"])
}
return false
}
// IsDestroyEvent is to check if the call is for CStorVolumeReplica destroy.
func IsDestroyEvent(cVR *apis.CStorVolumeReplica) bool {
if cVR.ObjectMeta.DeletionTimestamp != nil {
return true
}
return false
}
// IsOnlyStatusChange is to check only status change of cStorVolumeReplica object.
func IsOnlyStatusChange(oldCVR, newCVR *apis.CStorVolumeReplica) bool {
if reflect.DeepEqual(oldCVR.Spec, newCVR.Spec) &&
!reflect.DeepEqual(oldCVR.Status, newCVR.Status) {
return true
}
return false
}
// IsDeletionFailedBefore flags if status of
// cvr is CVRStatusDeletionFailed
func IsDeletionFailedBefore(cvrObj *apis.CStorVolumeReplica) bool {
return cvrObj.Status.Phase == apis.CVRStatusDeletionFailed
}
// IsOnlineStatus is to check if the status of cStorVolumeReplica object is
// Healthy.
func IsOnlineStatus(cVR *apis.CStorVolumeReplica) bool {
if string(cVR.Status.Phase) == string(apis.CVRStatusOnline) {
klog.Infof("cVR Healthy status: %v", string(cVR.ObjectMeta.UID))
return true
}
klog.Infof(
"cVR '%s': uid '%s': phase '%s': is_healthy_status: false",
string(cVR.ObjectMeta.Name),
string(cVR.ObjectMeta.UID),
cVR.Status.Phase,
)
return false
}
// IsEmptyStatus is to check if the status of cStorVolumeReplica object is empty.
func IsEmptyStatus(cVR *apis.CStorVolumeReplica) bool {
if string(cVR.Status.Phase) == string(apis.CVRStatusEmpty) {
klog.Infof("cVR empty status: %v", string(cVR.ObjectMeta.UID))
return true
}
klog.Infof(
"cVR '%s': uid '%s': phase '%s': is_empty_status: false",
string(cVR.ObjectMeta.Name),
string(cVR.ObjectMeta.UID),
cVR.Status.Phase,
)
return false
}
// IsInitStatus is to check if the status of cStorVolumeReplica object is pending.
func IsInitStatus(cVR *apis.CStorVolumeReplica) bool {
if string(cVR.Status.Phase) == string(apis.CVRStatusInit) {
klog.Infof("cVR pending: %v", string(cVR.ObjectMeta.UID))
return true
}
klog.V(4).Infof("Not pending status: %v", string(cVR.ObjectMeta.UID))
return false
}
// IsRecreateStatus is to check if the status of cStorVolumeReplica object is
// in recreated state.
func IsRecreateStatus(cVR *apis.CStorVolumeReplica) bool {
if string(cVR.Status.Phase) == string(apis.CVRStatusRecreate) {
klog.Infof("cVR Recreate: %v", string(cVR.ObjectMeta.UID))
return true
}
klog.V(4).Infof("Not Recreate status: %v", string(cVR.ObjectMeta.UID))
return false
}
// isCVRCreateStatus returns true if volume replica needs to be created else
// return false
func isCVRCreateStatus(cVR *apis.CStorVolumeReplica) bool {
cVRStatus := string(cVR.Status.Phase)
if strings.EqualFold(cVRStatus, string(apis.CVRStatusEmpty)) ||
strings.EqualFold(cVRStatus, string(apis.CVRStatusRecreate)) ||
strings.EqualFold(cVRStatus, string(apis.CVRStatusInit)) {
return true
}
return false
}
func isEmptyReplicaID(cVR *apis.CStorVolumeReplica) bool {
return cVR.Spec.ReplicaID == ""
}
// getCVRStatus is a wrapper that fetches the status of cstor volume.
func (c *CStorVolumeReplicaController) getCVRStatus(
cVR *apis.CStorVolumeReplica,
) (string, error) {
volumeName, err := volumereplica.GetVolumeName(cVR)
if err != nil {
return "", fmt.Errorf("unable to get volume name:%s", err.Error())
}
replicaStatus, err := volumereplica.Status(volumeName)
if err != nil {
		// TODO: put the error details in the event recorder
c.recorder.Event(
cVR,
corev1.EventTypeWarning,
string(common.FailureStatusSync),
string(common.MessageResourceFailStatusSync),
)
return "", err
}
return replicaStatus, nil
}
// syncCvr updates fields on the CVR object after fetching the values from the zfs utility.
func (c *CStorVolumeReplicaController) syncCvr(cvr *apis.CStorVolumeReplica) {
// Get the zfs volume name corresponding to this cvr.
volumeName, err := volumereplica.GetVolumeName(cvr)
	if err != nil {
		klog.Errorf("Unable to sync CVR capacity: %v", err)
		c.recorder.Event(
			cvr,
			corev1.EventTypeWarning,
			string(common.FailureCapacitySync),
			string(common.MessageResourceFailCapacitySync),
		)
		// Without a valid volume name there is nothing left to sync.
		return
	}
// Get capacity of the volume.
capacity, err := volumereplica.Capacity(volumeName)
if err != nil {
klog.Errorf("Unable to sync CVR capacity: %v", err)
c.recorder.Event(
cvr,
corev1.EventTypeWarning,
string(common.FailureCapacitySync),
string(common.MessageResourceFailCapacitySync),
)
} else {
cvr.Status.Capacity = *capacity
}
}
func (c *CStorVolumeReplicaController) reconcileVersion(cvr *apis.CStorVolumeReplica) (
*apis.CStorVolumeReplica, error,
) {
var err error
	// The code below deep-copies the object before any update call so that,
	// on failure, the last known state of the object can be returned.
if cvr.VersionDetails.Status.Current != cvr.VersionDetails.Desired {
if !apis.IsCurrentVersionValid(cvr.VersionDetails.Status.Current) {
return cvr, pkg_errors.Errorf("invalid current version %s", cvr.VersionDetails.Status.Current)
}
if !apis.IsDesiredVersionValid(cvr.VersionDetails.Desired) {
return cvr, pkg_errors.Errorf("invalid desired version %s", cvr.VersionDetails.Desired)
}
cvrObj := cvr.DeepCopy()
if cvrObj.VersionDetails.Status.State != apis.ReconcileInProgress {
cvrObj.VersionDetails.Status.SetInProgressStatus()
cvrObj, err = c.clientset.OpenebsV1alpha1().
CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj)
if err != nil {
return cvr, err
}
}
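		// The current version may carry a suffix (for example "1.3.0-RC1");
		// only the numeric prefix is used to select the upgrade path.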
path := strings.Split(cvrObj.VersionDetails.Status.Current, "-")[0]
u := &upgradeParams{
cvr: cvrObj,
client: c.clientset,
}
		// Get the upgrade function for the corresponding path. If the path
		// does not exist, no upgrade is required and funcValue will be nil.
funcValue := upgradeMap[path]
if funcValue != nil {
cvrObj, err = funcValue(u)
if err != nil {
return cvrObj, err
}
}
cvr = cvrObj.DeepCopy()
cvrObj.VersionDetails.SetSuccessStatus()
cvrObj, err = c.clientset.OpenebsV1alpha1().
CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj)
if err != nil {
return cvr, err
}
return cvrObj, nil
}
return cvr, nil
}
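// A minimal sketch of the upgrade plumbing assumed by reconcileVersion above.
// upgradeFunc, upgradeParams, and upgradeMap are defined elsewhere in this
// package; their shape and the version keys below are illustrative assumptions:
//
//	type upgradeFunc func(u *upgradeParams) (*apis.CStorVolumeReplica, error)
//
//	var upgradeMap = map[string]upgradeFunc{
//		"1.0.0": setReplicaID,
//		"1.1.0": setReplicaID,
//		"1.2.0": setReplicaID,
//	}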
// populateVersion assigns VersionDetails for old cvr object
func (c *CStorVolumeReplicaController) populateVersion(cvr *apis.CStorVolumeReplica) (
*apis.CStorVolumeReplica, error,
) {
v := cvr.Labels[string(apis.OpenEBSVersionKey)]
	// From 1.3.0 onwards, new CVRs have this field populated during creation.
if v < v130 && cvr.VersionDetails.Status.Current == "" {
cvrObj := cvr.DeepCopy()
cvrObj.VersionDetails.Status.Current = v
cvrObj.VersionDetails.Desired = v
cvrObj, err := c.clientset.OpenebsV1alpha1().CStorVolumeReplicas(cvrObj.Namespace).
Update(cvrObj)
if err != nil {
return cvr, err
}
klog.Infof("Version %s added on cvr %s", v, cvrObj.Name)
return cvrObj, nil
}
return cvr, nil
}
// setReplicaID sets the replica_id if not present for old cvrs when
// they are upgraded to version 1.3.0 or above.
func setReplicaID(u *upgradeParams) (*apis.CStorVolumeReplica, error) {
cvr := u.cvr
cvrObj := cvr.DeepCopy()
err := volumereplica.GetAndUpdateReplicaID(cvrObj)
if err != nil {
return cvr, err
}
cvrObj, err = u.client.OpenebsV1alpha1().
CStorVolumeReplicas(cvrObj.Namespace).Update(cvrObj)
if err != nil {
return cvr, err
}
return cvrObj, nil
}
| 1 | 17,663 | other declaration of errors (from `typecheck`) | openebs-maya | go |
@@ -13,12 +13,15 @@ import (
)
var (
- errValueEmpty = errors.New("value must not be empty")
- errValueTooLong = errors.New("value must not exceed 255 characters")
- errValueBadFormat = errors.New("value must start with a letter and contain only lower-case letters, numbers, and hyphens")
- errValueNotAString = errors.New("value must be a string")
+ errValueEmpty = errors.New("value must not be empty")
+ errValueTooLong = errors.New("value must not exceed 255 characters")
+ errValueBadFormat = errors.New("value must start with a letter and contain only lower-case letters, numbers, and hyphens")
+ errValueNotAString = errors.New("value must be a string")
+ errInvalidGitHubRepo = errors.New("Please enter a valid GitHub repository, e.g. https://github.com/myCompany/myRepo")
)
+var githubRepoExp = regexp.MustCompile(`https:\/\/github\.com\/(?P<owner>.+)\/(?P<repo>.+)`)
+
func validateProjectName(val interface{}) error {
if err := basicNameValidation(val); err != nil {
return fmt.Errorf("project name %v is invalid: %w", val, err) | 1 | // Copyright 2019 Amazon.com, Inc. or its affiliates. All Rights Reserved.
// SPDX-License-Identifier: Apache-2.0
package cli
import (
"errors"
"fmt"
"regexp"
"strings"
"github.com/aws/amazon-ecs-cli-v2/internal/pkg/manifest"
)
var (
errValueEmpty = errors.New("value must not be empty")
errValueTooLong = errors.New("value must not exceed 255 characters")
errValueBadFormat = errors.New("value must start with a letter and contain only lower-case letters, numbers, and hyphens")
errValueNotAString = errors.New("value must be a string")
)
func validateProjectName(val interface{}) error {
if err := basicNameValidation(val); err != nil {
return fmt.Errorf("project name %v is invalid: %w", val, err)
}
return nil
}
func validateApplicationName(val interface{}) error {
if err := basicNameValidation(val); err != nil {
return fmt.Errorf("application name %v is invalid: %w", val, err)
}
return nil
}
func validateApplicationType(val interface{}) error {
appType, ok := val.(string)
if !ok {
return errValueNotAString
}
for _, validType := range manifest.AppTypes {
if appType == validType {
return nil
}
}
var prettyTypes []string
for _, validType := range manifest.AppTypes {
prettyTypes = append(prettyTypes, fmt.Sprintf(`"%s"`, validType))
}
return fmt.Errorf("invalid app type %s: must be one of %s", appType, strings.Join(prettyTypes, ", "))
}
func validateEnvironmentName(val interface{}) error {
if err := basicNameValidation(val); err != nil {
return fmt.Errorf("environment name %v is invalid: %w", val, err)
}
return nil
}
func basicNameValidation(val interface{}) error {
s, ok := val.(string)
if !ok {
return errValueNotAString
}
if s == "" {
return errValueEmpty
}
if len(s) > 255 {
return errValueTooLong
}
if !isCorrectFormat(s) {
return errValueBadFormat
}
return nil
}
func isCorrectFormat(s string) bool {
valid, err := regexp.MatchString(`^[a-z][a-z0-9\-]+$`, s)
if err != nil {
return false // bubble up error?
}
return valid
}
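// A quick usage sketch (illustrative; not part of the original file). Under
// the regexp in isCorrectFormat, a name must start with a lower-case letter,
// contain only lower-case letters, digits, and hyphens, and be at least two
// characters long:
//
//	basicNameValidation("my-project") // nil
//	basicNameValidation("My-Project") // errValueBadFormat
//	basicNameValidation("")           // errValueEmpty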
| 1 | 11,019 | nit: error starts with capital letter | aws-copilot-cli | go |
@@ -1616,16 +1616,7 @@ public class ImapStore extends Store {
ImapList flags = fetchList.getKeyedList("FLAGS");
if (flags != null) {
for (int i = 0, count = flags.size(); i < count; i++) {
- String flag = flags.getString(i);
- if (flag.equalsIgnoreCase("\\Deleted")) {
- message.setFlagInternal(Flag.DELETED, true);
- } else if (flag.equalsIgnoreCase("\\Answered")) {
- message.setFlagInternal(Flag.ANSWERED, true);
- } else if (flag.equalsIgnoreCase("\\Seen")) {
- message.setFlagInternal(Flag.SEEN, true);
- } else if (flag.equalsIgnoreCase("\\Flagged")) {
- message.setFlagInternal(Flag.FLAGGED, true);
- }
+ message.setFlagInternal(Flag.valueOfByRealName(flags.getString(i)), true);
}
}
	}
 | 1 | 
package com.fsck.k9.mail.store;
import java.io.BufferedInputStream;
import java.io.BufferedOutputStream;
import java.io.ByteArrayInputStream;
import java.io.IOException;
import java.io.InputStream;
import java.io.OutputStream;
import java.io.UnsupportedEncodingException;
import java.net.ConnectException;
import java.net.InetAddress;
import java.net.InetSocketAddress;
import java.net.Socket;
import java.net.SocketAddress;
import java.net.SocketException;
import java.net.URI;
import java.net.URISyntaxException;
import java.net.URLDecoder;
import java.net.URLEncoder;
import java.nio.ByteBuffer;
import java.nio.CharBuffer;
import java.nio.charset.CharacterCodingException;
import java.nio.charset.Charset;
import java.nio.charset.CharsetDecoder;
import java.nio.charset.CodingErrorAction;
import java.security.GeneralSecurityException;
import java.security.SecureRandom;
import java.security.Security;
import java.text.SimpleDateFormat;
import java.util.ArrayList;
import java.util.Arrays;
import java.util.Collections;
import java.util.Date;
import java.util.HashMap;
import java.util.HashSet;
import java.util.Iterator;
import java.util.LinkedHashSet;
import java.util.LinkedList;
import java.util.List;
import java.util.Locale;
import java.util.Map;
import java.util.Set;
import java.util.StringTokenizer;
import java.util.concurrent.ConcurrentHashMap;
import java.util.concurrent.atomic.AtomicBoolean;
import java.util.concurrent.atomic.AtomicInteger;
import javax.net.ssl.SSLContext;
import javax.net.ssl.SSLException;
import javax.net.ssl.TrustManager;
import android.content.Context;
import android.net.ConnectivityManager;
import android.net.NetworkInfo;
import android.os.PowerManager;
import android.util.Log;
import com.beetstra.jutf7.CharsetProvider;
import com.fsck.k9.Account;
import com.fsck.k9.K9;
import com.fsck.k9.R;
import com.fsck.k9.controller.MessageRetrievalListener;
import com.fsck.k9.helper.StringUtils;
import com.fsck.k9.helper.Utility;
import com.fsck.k9.helper.power.TracingPowerManager;
import com.fsck.k9.helper.power.TracingPowerManager.TracingWakeLock;
import com.fsck.k9.mail.Authentication;
import com.fsck.k9.mail.AuthenticationFailedException;
import com.fsck.k9.mail.Body;
import com.fsck.k9.mail.CertificateValidationException;
import com.fsck.k9.mail.ConnectionSecurity;
import com.fsck.k9.mail.FetchProfile;
import com.fsck.k9.mail.Flag;
import com.fsck.k9.mail.Folder;
import com.fsck.k9.mail.Message;
import com.fsck.k9.mail.MessagingException;
import com.fsck.k9.mail.Part;
import com.fsck.k9.mail.PushReceiver;
import com.fsck.k9.mail.Pusher;
import com.fsck.k9.mail.Store;
import com.fsck.k9.mail.ServerSettings;
import com.fsck.k9.mail.filter.EOLConvertingOutputStream;
import com.fsck.k9.mail.filter.FixedLengthInputStream;
import com.fsck.k9.mail.filter.PeekableInputStream;
import com.fsck.k9.mail.internet.MimeBodyPart;
import com.fsck.k9.mail.internet.MimeHeader;
import com.fsck.k9.mail.internet.MimeMessage;
import com.fsck.k9.mail.internet.MimeMultipart;
import com.fsck.k9.mail.internet.MimeUtility;
import com.fsck.k9.mail.store.ImapResponseParser.ImapList;
import com.fsck.k9.mail.store.ImapResponseParser.ImapResponse;
import com.fsck.k9.mail.store.imap.ImapUtility;
import com.fsck.k9.mail.transport.imap.ImapSettings;
import com.jcraft.jzlib.JZlib;
import com.jcraft.jzlib.ZOutputStream;
import java.util.zip.Inflater;
import java.util.zip.InflaterInputStream;
import org.apache.commons.io.IOUtils;
/**
* <pre>
* TODO Need to start keeping track of UIDVALIDITY
* TODO Need a default response handler for things like folder updates
* </pre>
*/
public class ImapStore extends Store {
public static final String STORE_TYPE = "IMAP";
public static final int CONNECTION_SECURITY_NONE = 0;
public static final int CONNECTION_SECURITY_TLS_OPTIONAL = 1;
public static final int CONNECTION_SECURITY_TLS_REQUIRED = 2;
public static final int CONNECTION_SECURITY_SSL_REQUIRED = 3;
public static final int CONNECTION_SECURITY_SSL_OPTIONAL = 4;
public enum AuthType { PLAIN, CRAM_MD5 }
private static final int IDLE_READ_TIMEOUT_INCREMENT = 5 * 60 * 1000;
private static final int IDLE_FAILURE_COUNT_LIMIT = 10;
private static int MAX_DELAY_TIME = 5 * 60 * 1000; // 5 minutes
private static int NORMAL_DELAY_TIME = 5000;
private static int FETCH_WINDOW_SIZE = 100;
private static final Flag[] PERMANENT_FLAGS = { Flag.DELETED, Flag.SEEN };
private static final String CAPABILITY_IDLE = "IDLE";
private static final String COMMAND_IDLE = "IDLE";
private static final String CAPABILITY_NAMESPACE = "NAMESPACE";
private static final String COMMAND_NAMESPACE = "NAMESPACE";
private static final String CAPABILITY_CAPABILITY = "CAPABILITY";
private static final String COMMAND_CAPABILITY = "CAPABILITY";
private static final String CAPABILITY_COMPRESS_DEFLATE = "COMPRESS=DEFLATE";
private static final String COMMAND_COMPRESS_DEFLATE = "COMPRESS DEFLATE";
private static final Message[] EMPTY_MESSAGE_ARRAY = new Message[0];
private static final String[] EMPTY_STRING_ARRAY = new String[0];
/**
* Decodes an ImapStore URI.
*
* <p>Possible forms:</p>
* <pre>
* imap://auth:user:password@server:port CONNECTION_SECURITY_NONE
* imap+tls://auth:user:password@server:port CONNECTION_SECURITY_TLS_OPTIONAL
* imap+tls+://auth:user:password@server:port CONNECTION_SECURITY_TLS_REQUIRED
* imap+ssl+://auth:user:password@server:port CONNECTION_SECURITY_SSL_REQUIRED
* imap+ssl://auth:user:password@server:port CONNECTION_SECURITY_SSL_OPTIONAL
* </pre>
*/
public static ImapStoreSettings decodeUri(String uri) {
String host;
int port;
ConnectionSecurity connectionSecurity;
String authenticationType = null;
String username = null;
String password = null;
String pathPrefix = null;
boolean autoDetectNamespace = true;
URI imapUri;
try {
imapUri = new URI(uri);
} catch (URISyntaxException use) {
throw new IllegalArgumentException("Invalid ImapStore URI", use);
}
String scheme = imapUri.getScheme();
if (scheme.equals("imap")) {
connectionSecurity = ConnectionSecurity.NONE;
port = 143;
} else if (scheme.equals("imap+tls")) {
connectionSecurity = ConnectionSecurity.STARTTLS_OPTIONAL;
port = 143;
} else if (scheme.equals("imap+tls+")) {
connectionSecurity = ConnectionSecurity.STARTTLS_REQUIRED;
port = 143;
} else if (scheme.equals("imap+ssl+")) {
connectionSecurity = ConnectionSecurity.SSL_TLS_REQUIRED;
port = 993;
} else if (scheme.equals("imap+ssl")) {
connectionSecurity = ConnectionSecurity.SSL_TLS_OPTIONAL;
port = 993;
} else {
throw new IllegalArgumentException("Unsupported protocol (" + scheme + ")");
}
host = imapUri.getHost();
if (imapUri.getPort() != -1) {
port = imapUri.getPort();
}
if (imapUri.getUserInfo() != null) {
try {
String userinfo = imapUri.getUserInfo();
String[] userInfoParts = userinfo.split(":");
if (userinfo.endsWith(":")) {
// Password is empty. This can only happen after an account was imported.
authenticationType = AuthType.valueOf(userInfoParts[0]).name();
username = URLDecoder.decode(userInfoParts[1], "UTF-8");
} else if (userInfoParts.length == 2) {
authenticationType = AuthType.PLAIN.name();
username = URLDecoder.decode(userInfoParts[0], "UTF-8");
password = URLDecoder.decode(userInfoParts[1], "UTF-8");
} else {
authenticationType = AuthType.valueOf(userInfoParts[0]).name();
username = URLDecoder.decode(userInfoParts[1], "UTF-8");
password = URLDecoder.decode(userInfoParts[2], "UTF-8");
}
} catch (UnsupportedEncodingException enc) {
// This shouldn't happen since the encoding is hardcoded to UTF-8
throw new IllegalArgumentException("Couldn't urldecode username or password.", enc);
}
}
String path = imapUri.getPath();
if (path != null && path.length() > 1) {
// Strip off the leading "/"
String cleanPath = path.substring(1);
if (cleanPath.length() >= 2 && cleanPath.charAt(1) == '|') {
autoDetectNamespace = cleanPath.charAt(0) == '1';
if (!autoDetectNamespace) {
pathPrefix = cleanPath.substring(2);
}
} else {
if (cleanPath.length() > 0) {
pathPrefix = cleanPath;
autoDetectNamespace = false;
}
}
}
return new ImapStoreSettings(host, port, connectionSecurity, authenticationType, username,
password, autoDetectNamespace, pathPrefix);
}
/**
* Creates an ImapStore URI with the supplied settings.
*
* @param server
* The {@link ServerSettings} object that holds the server settings.
*
* @return An ImapStore URI that holds the same information as the {@code server} parameter.
*
* @see Account#getStoreUri()
* @see ImapStore#decodeUri(String)
*/
public static String createUri(ServerSettings server) {
String userEnc;
String passwordEnc;
try {
userEnc = URLEncoder.encode(server.username, "UTF-8");
passwordEnc = (server.password != null) ?
URLEncoder.encode(server.password, "UTF-8") : "";
}
catch (UnsupportedEncodingException e) {
throw new IllegalArgumentException("Could not encode username or password", e);
}
String scheme;
switch (server.connectionSecurity) {
case SSL_TLS_OPTIONAL:
scheme = "imap+ssl";
break;
case SSL_TLS_REQUIRED:
scheme = "imap+ssl+";
break;
case STARTTLS_OPTIONAL:
scheme = "imap+tls";
break;
case STARTTLS_REQUIRED:
scheme = "imap+tls+";
break;
default:
case NONE:
scheme = "imap";
break;
}
AuthType authType;
try {
authType = AuthType.valueOf(server.authenticationType);
} catch (Exception e) {
throw new IllegalArgumentException("Invalid authentication type: " +
server.authenticationType);
}
String userInfo = authType.toString() + ":" + userEnc + ":" + passwordEnc;
try {
Map<String, String> extra = server.getExtra();
String path = null;
if (extra != null) {
boolean autoDetectNamespace = Boolean.TRUE.toString().equals(
extra.get(ImapStoreSettings.AUTODETECT_NAMESPACE_KEY));
String pathPrefix = (autoDetectNamespace) ?
null : extra.get(ImapStoreSettings.PATH_PREFIX_KEY);
path = "/" + (autoDetectNamespace ? "1" : "0") + "|" +
((pathPrefix == null) ? "" : pathPrefix);
} else {
path = "/1|";
}
return new URI(scheme, userInfo, server.host, server.port,
path,
null, null).toString();
} catch (URISyntaxException e) {
throw new IllegalArgumentException("Can't create ImapStore URI", e);
}
}
/**
* This class is used to store the decoded contents of an ImapStore URI.
*
* @see ImapStore#decodeUri(String)
*/
public static class ImapStoreSettings extends ServerSettings {
public static final String AUTODETECT_NAMESPACE_KEY = "autoDetectNamespace";
public static final String PATH_PREFIX_KEY = "pathPrefix";
public final boolean autoDetectNamespace;
public final String pathPrefix;
protected ImapStoreSettings(String host, int port, ConnectionSecurity connectionSecurity,
String authenticationType, String username, String password,
boolean autodetectNamespace, String pathPrefix) {
super(STORE_TYPE, host, port, connectionSecurity, authenticationType, username,
password);
this.autoDetectNamespace = autodetectNamespace;
this.pathPrefix = pathPrefix;
}
@Override
public Map<String, String> getExtra() {
Map<String, String> extra = new HashMap<String, String>();
extra.put(AUTODETECT_NAMESPACE_KEY, Boolean.valueOf(autoDetectNamespace).toString());
putIfNotNull(extra, PATH_PREFIX_KEY, pathPrefix);
return extra;
}
@Override
public ServerSettings newPassword(String newPassword) {
return new ImapStoreSettings(host, port, connectionSecurity, authenticationType,
username, newPassword, autoDetectNamespace, pathPrefix);
}
}
private String mHost;
private int mPort;
private String mUsername;
private String mPassword;
private int mConnectionSecurity;
private AuthType mAuthType;
private volatile String mPathPrefix;
private volatile String mCombinedPrefix = null;
private volatile String mPathDelimeter = null;
public class StoreImapSettings implements ImapSettings {
@Override
public String getHost() {
return mHost;
}
@Override
public int getPort() {
return mPort;
}
@Override
public int getConnectionSecurity() {
return mConnectionSecurity;
}
@Override
public AuthType getAuthType() {
return mAuthType;
}
@Override
public String getUsername() {
return mUsername;
}
@Override
public String getPassword() {
return mPassword;
}
@Override
public boolean useCompression(final int type) {
return mAccount.useCompression(type);
}
@Override
public String getPathPrefix() {
return mPathPrefix;
}
@Override
public void setPathPrefix(String prefix) {
mPathPrefix = prefix;
}
@Override
public String getPathDelimeter() {
return mPathDelimeter;
}
@Override
public void setPathDelimeter(String delimeter) {
mPathDelimeter = delimeter;
}
@Override
public String getCombinedPrefix() {
return mCombinedPrefix;
}
@Override
public void setCombinedPrefix(String prefix) {
mCombinedPrefix = prefix;
}
}
private static final SimpleDateFormat RFC3501_DATE = new SimpleDateFormat("dd-MMM-yyyy", Locale.US);
private LinkedList<ImapConnection> mConnections =
new LinkedList<ImapConnection>();
/**
* Charset used for converting folder names to and from UTF-7 as defined by RFC 3501.
*/
private Charset mModifiedUtf7Charset;
/**
* Cache of ImapFolder objects. ImapFolders are attached to a given folder on the server
* and as long as their associated connection remains open they are reusable between
* requests. This cache lets us make sure we always reuse, if possible, for a given
* folder name.
*/
private HashMap<String, ImapFolder> mFolderCache = new HashMap<String, ImapFolder>();
public ImapStore(Account account) throws MessagingException {
super(account);
ImapStoreSettings settings;
try {
settings = decodeUri(mAccount.getStoreUri());
} catch (IllegalArgumentException e) {
throw new MessagingException("Error while decoding store URI", e);
}
mHost = settings.host;
mPort = settings.port;
switch (settings.connectionSecurity) {
case NONE:
mConnectionSecurity = CONNECTION_SECURITY_NONE;
break;
case STARTTLS_OPTIONAL:
mConnectionSecurity = CONNECTION_SECURITY_TLS_OPTIONAL;
break;
case STARTTLS_REQUIRED:
mConnectionSecurity = CONNECTION_SECURITY_TLS_REQUIRED;
break;
case SSL_TLS_OPTIONAL:
mConnectionSecurity = CONNECTION_SECURITY_SSL_OPTIONAL;
break;
case SSL_TLS_REQUIRED:
mConnectionSecurity = CONNECTION_SECURITY_SSL_REQUIRED;
break;
}
mAuthType = AuthType.valueOf(settings.authenticationType);
mUsername = settings.username;
mPassword = settings.password;
// Make extra sure mPathPrefix is null if "auto-detect namespace" is configured
mPathPrefix = (settings.autoDetectNamespace) ? null : settings.pathPrefix;
mModifiedUtf7Charset = new CharsetProvider().charsetForName("X-RFC-3501");
}
@Override
public Folder getFolder(String name) {
ImapFolder folder;
synchronized (mFolderCache) {
folder = mFolderCache.get(name);
if (folder == null) {
folder = new ImapFolder(this, name);
mFolderCache.put(name, folder);
}
}
return folder;
}
private String getCombinedPrefix() {
if (mCombinedPrefix == null) {
if (mPathPrefix != null) {
String tmpPrefix = mPathPrefix.trim();
String tmpDelim = (mPathDelimeter != null ? mPathDelimeter.trim() : "");
if (tmpPrefix.endsWith(tmpDelim)) {
mCombinedPrefix = tmpPrefix;
} else if (tmpPrefix.length() > 0) {
mCombinedPrefix = tmpPrefix + tmpDelim;
} else {
mCombinedPrefix = "";
}
} else {
mCombinedPrefix = "";
}
}
return mCombinedPrefix;
}
@Override
public List <? extends Folder > getPersonalNamespaces(boolean forceListAll) throws MessagingException {
ImapConnection connection = getConnection();
try {
List <? extends Folder > allFolders = listFolders(connection, false);
if (forceListAll || !mAccount.subscribedFoldersOnly()) {
return allFolders;
} else {
List<Folder> resultFolders = new LinkedList<Folder>();
HashSet<String> subscribedFolderNames = new HashSet<String>();
List <? extends Folder > subscribedFolders = listFolders(connection, true);
for (Folder subscribedFolder : subscribedFolders) {
subscribedFolderNames.add(subscribedFolder.getName());
}
for (Folder folder : allFolders) {
if (subscribedFolderNames.contains(folder.getName())) {
resultFolders.add(folder);
}
}
return resultFolders;
}
} catch (IOException ioe) {
connection.close();
throw new MessagingException("Unable to get folder list.", ioe);
} catch (MessagingException me) {
connection.close();
throw new MessagingException("Unable to get folder list.", me);
} finally {
releaseConnection(connection);
}
}
private List <? extends Folder > listFolders(ImapConnection connection, boolean LSUB) throws IOException, MessagingException {
String commandResponse = LSUB ? "LSUB" : "LIST";
LinkedList<Folder> folders = new LinkedList<Folder>();
List<ImapResponse> responses =
connection.executeSimpleCommand(String.format("%s \"\" %s", commandResponse,
encodeString(getCombinedPrefix() + "*")));
for (ImapResponse response : responses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), commandResponse)) {
boolean includeFolder = true;
String decodedFolderName;
try {
decodedFolderName = decodeFolderName(response.getString(3));
} catch (CharacterCodingException e) {
Log.w(K9.LOG_TAG, "Folder name not correctly encoded with the UTF-7 variant " +
"as defined by RFC 3501: " + response.getString(3), e);
//TODO: Use the raw name returned by the server for all commands that require
// a folder name. Use the decoded name only for showing it to the user.
// We currently just skip folders with malformed names.
continue;
}
String folder = decodedFolderName;
if (mPathDelimeter == null) {
mPathDelimeter = response.getString(2);
mCombinedPrefix = null;
}
if (folder.equalsIgnoreCase(mAccount.getInboxFolderName())) {
continue;
} else if (folder.equals(mAccount.getOutboxFolderName())) {
/*
* There is a folder on the server with the same name as our local
* outbox. Until we have a good plan to deal with this situation
* we simply ignore the folder on the server.
*/
continue;
} else {
int prefixLength = getCombinedPrefix().length();
if (prefixLength > 0) {
// Strip prefix from the folder name
if (folder.length() >= prefixLength) {
folder = folder.substring(prefixLength);
}
if (!decodedFolderName.equalsIgnoreCase(getCombinedPrefix() + folder)) {
includeFolder = false;
}
}
}
ImapList attributes = response.getList(1);
for (int i = 0, count = attributes.size(); i < count; i++) {
String attribute = attributes.getString(i);
if (attribute.equalsIgnoreCase("\\NoSelect")) {
includeFolder = false;
}
}
if (includeFolder) {
folders.add(getFolder(folder));
}
}
}
folders.add(getFolder(mAccount.getInboxFolderName()));
return folders;
}
/**
* Attempt to auto-configure folders by attributes if the server advertises that capability.
*
* The parsing here is essentially the same as
* {@link #listFolders(com.fsck.k9.mail.store.ImapStore.ImapConnection, boolean)}; we should try to consolidate
* this at some point. :(
* @param connection IMAP Connection
* @throws IOException uh oh!
* @throws MessagingException uh oh!
*/
private void autoconfigureFolders(final ImapConnection connection) throws IOException, MessagingException {
String commandResponse = null;
String commandOptions = "";
if (connection.capabilities.contains("XLIST")) {
if (K9.DEBUG) Log.d(K9.LOG_TAG, "Folder auto-configuration: Using XLIST.");
commandResponse = "XLIST";
        } else if (connection.capabilities.contains("SPECIAL-USE")) {
if (K9.DEBUG) Log.d(K9.LOG_TAG, "Folder auto-configuration: Using RFC6154/SPECIAL-USE.");
commandResponse = "LIST";
commandOptions = " (SPECIAL-USE)";
} else {
if (K9.DEBUG) Log.d(K9.LOG_TAG, "No detected folder auto-configuration methods.");
return;
}
final List<ImapResponse> responses =
connection.executeSimpleCommand(String.format("%s%s \"\" %s", commandResponse, commandOptions,
encodeString(getCombinedPrefix() + "*")));
for (ImapResponse response : responses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), commandResponse)) {
String decodedFolderName;
try {
decodedFolderName = decodeFolderName(response.getString(3));
} catch (CharacterCodingException e) {
Log.w(K9.LOG_TAG, "Folder name not correctly encoded with the UTF-7 variant " +
"as defined by RFC 3501: " + response.getString(3), e);
// We currently just skip folders with malformed names.
continue;
}
if (mPathDelimeter == null) {
mPathDelimeter = response.getString(2);
mCombinedPrefix = null;
}
ImapList attributes = response.getList(1);
for (int i = 0, count = attributes.size(); i < count; i++) {
String attribute = attributes.getString(i);
if (attribute.equals("\\Drafts")) {
mAccount.setDraftsFolderName(decodedFolderName);
if (K9.DEBUG) Log.d(K9.LOG_TAG, "Folder auto-configuration detected draft folder: " + decodedFolderName);
} else if (attribute.equals("\\Sent")) {
mAccount.setSentFolderName(decodedFolderName);
if (K9.DEBUG) Log.d(K9.LOG_TAG, "Folder auto-configuration detected sent folder: " + decodedFolderName);
} else if (attribute.equals("\\Spam")) {
mAccount.setSpamFolderName(decodedFolderName);
if (K9.DEBUG) Log.d(K9.LOG_TAG, "Folder auto-configuration detected spam folder: " + decodedFolderName);
} else if (attribute.equals("\\Trash")) {
mAccount.setTrashFolderName(decodedFolderName);
if (K9.DEBUG) Log.d(K9.LOG_TAG, "Folder auto-configuration detected trash folder: " + decodedFolderName);
}
}
}
}
}
@Override
public void checkSettings() throws MessagingException {
try {
ImapConnection connection = new ImapConnection(new StoreImapSettings());
connection.open();
autoconfigureFolders(connection);
connection.close();
} catch (IOException ioe) {
throw new MessagingException(K9.app.getString(R.string.error_unable_to_connect), ioe);
}
}
/**
* Gets a connection if one is available for reuse, or creates a new one if not.
* @return
*/
private ImapConnection getConnection() throws MessagingException {
synchronized (mConnections) {
ImapConnection connection = null;
while ((connection = mConnections.poll()) != null) {
try {
connection.executeSimpleCommand("NOOP");
break;
} catch (IOException ioe) {
connection.close();
}
}
if (connection == null) {
connection = new ImapConnection(new StoreImapSettings());
}
return connection;
}
}
private void releaseConnection(ImapConnection connection) {
if (connection != null && connection.isOpen()) {
synchronized (mConnections) {
mConnections.offer(connection);
}
}
}
/**
* Encode a string to be able to use it in an IMAP command.
*
* "A quoted string is a sequence of zero or more 7-bit characters,
* excluding CR and LF, with double quote (<">) characters at each
* end." - Section 4.3, RFC 3501
*
* Double quotes and backslash are escaped by prepending a backslash.
*
* @param str
* The input string (only 7-bit characters allowed).
* @return
* The string encoded as quoted (IMAP) string.
*/
private static String encodeString(String str) {
return "\"" + str.replace("\\", "\\\\").replace("\"", "\\\"") + "\"";
}
private String encodeFolderName(String name) {
try {
ByteBuffer bb = mModifiedUtf7Charset.encode(name);
byte[] b = new byte[bb.limit()];
bb.get(b);
return new String(b, "US-ASCII");
} catch (UnsupportedEncodingException uee) {
/*
* The only thing that can throw this is getBytes("US-ASCII") and if US-ASCII doesn't
* exist we're totally screwed.
*/
throw new RuntimeException("Unable to encode folder name: " + name, uee);
}
}
private String decodeFolderName(String name) throws CharacterCodingException {
/*
* Convert the encoded name to US-ASCII, then pass it through the modified UTF-7
* decoder and return the Unicode String.
*/
try {
// Make sure the decoder throws an exception if it encounters an invalid encoding.
CharsetDecoder decoder = mModifiedUtf7Charset.newDecoder().onMalformedInput(CodingErrorAction.REPORT);
CharBuffer cb = decoder.decode(ByteBuffer.wrap(name.getBytes("US-ASCII")));
return cb.toString();
} catch (UnsupportedEncodingException uee) {
/*
* The only thing that can throw this is getBytes("US-ASCII") and if US-ASCII doesn't
* exist we're totally screwed.
*/
throw new RuntimeException("Unable to decode folder name: " + name, uee);
}
}
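    // For illustration: under this modified UTF-7 mapping (RFC 3501, section
    // 5.1.3) a folder named "Entwürfe" travels on the wire as "Entw&APw-rfe",
    // and a literal "&" in a folder name is encoded as "&-".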
@Override
public boolean isMoveCapable() {
return true;
}
@Override
public boolean isCopyCapable() {
return true;
}
@Override
public boolean isPushCapable() {
return true;
}
@Override
public boolean isExpungeCapable() {
return true;
}
class ImapFolder extends Folder {
private String mName;
protected volatile int mMessageCount = -1;
protected volatile int uidNext = -1;
protected volatile ImapConnection mConnection;
private OpenMode mMode;
private volatile boolean mExists;
private ImapStore store = null;
Map<Integer, String> msgSeqUidMap = new ConcurrentHashMap<Integer, String>();
public ImapFolder(ImapStore nStore, String name) {
super(nStore.getAccount());
store = nStore;
this.mName = name;
}
public String getPrefixedName() throws MessagingException {
String prefixedName = "";
if (!mAccount.getInboxFolderName().equalsIgnoreCase(mName)) {
ImapConnection connection = null;
synchronized (this) {
if (mConnection == null) {
connection = getConnection();
} else {
connection = mConnection;
}
}
try {
connection.open();
} catch (IOException ioe) {
throw new MessagingException("Unable to get IMAP prefix", ioe);
} finally {
if (mConnection == null) {
releaseConnection(connection);
}
}
prefixedName = getCombinedPrefix();
}
prefixedName += mName;
return prefixedName;
}
protected List<ImapResponse> executeSimpleCommand(String command) throws MessagingException, IOException {
return handleUntaggedResponses(mConnection.executeSimpleCommand(command));
}
        protected List<ImapResponse> executeSimpleCommand(String command, boolean sensitive, UntaggedHandler untaggedHandler) throws MessagingException, IOException {
            return handleUntaggedResponses(mConnection.executeSimpleCommand(command, sensitive, untaggedHandler));
}
@Override
public void open(OpenMode mode) throws MessagingException {
internalOpen(mode);
if (mMessageCount == -1) {
throw new MessagingException(
"Did not find message count during open");
}
}
public List<ImapResponse> internalOpen(OpenMode mode) throws MessagingException {
if (isOpen() && mMode == mode) {
// Make sure the connection is valid. If it's not we'll close it down and continue
// on to get a new one.
try {
List<ImapResponse> responses = executeSimpleCommand("NOOP");
return responses;
} catch (IOException ioe) {
ioExceptionHandler(mConnection, ioe);
}
}
releaseConnection(mConnection);
synchronized (this) {
mConnection = getConnection();
}
// * FLAGS (\Answered \Flagged \Deleted \Seen \Draft NonJunk
// $MDNSent)
// * OK [PERMANENTFLAGS (\Answered \Flagged \Deleted \Seen \Draft
// NonJunk $MDNSent \*)] Flags permitted.
// * 23 EXISTS
// * 0 RECENT
// * OK [UIDVALIDITY 1125022061] UIDs valid
// * OK [UIDNEXT 57576] Predicted next UID
// 2 OK [READ-WRITE] Select completed.
try {
msgSeqUidMap.clear();
String command = String.format("%s %s", mode == OpenMode.READ_WRITE ? "SELECT" : "EXAMINE",
encodeString(encodeFolderName(getPrefixedName())));
List<ImapResponse> responses = executeSimpleCommand(command);
/*
* If the command succeeds we expect the folder has been opened read-write
* unless we are notified otherwise in the responses.
*/
mMode = mode;
for (ImapResponse response : responses) {
if (response.mTag != null && response.size() >= 2) {
Object bracketedObj = response.get(1);
if (bracketedObj instanceof ImapList) {
ImapList bracketed = (ImapList)bracketedObj;
if (!bracketed.isEmpty()) {
Object keyObj = bracketed.get(0);
if (keyObj instanceof String) {
String key = (String)keyObj;
if ("READ-ONLY".equalsIgnoreCase(key)) {
mMode = OpenMode.READ_ONLY;
} else if ("READ-WRITE".equalsIgnoreCase(key)) {
mMode = OpenMode.READ_WRITE;
}
}
}
}
}
}
mExists = true;
return responses;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Unable to open connection for " + getLogId(), me);
throw me;
}
}
@Override
public boolean isOpen() {
return mConnection != null;
}
@Override
public OpenMode getMode() {
return mMode;
}
@Override
public void close() {
if (mMessageCount != -1) {
mMessageCount = -1;
}
if (!isOpen()) {
return;
}
synchronized (this) {
releaseConnection(mConnection);
mConnection = null;
}
}
@Override
public String getName() {
return mName;
}
/**
* Check if a given folder exists on the server.
*
* @param folderName
* The name of the folder encoded as quoted string.
* See {@link ImapStore#encodeString}
*
* @return
* {@code True}, if the folder exists. {@code False}, otherwise.
*/
private boolean exists(String folderName) throws MessagingException {
try {
                // Since we don't care about RECENT, we use it for the check: we're querying
                // a folder other than the one currently selected, and we don't want any
                // untagged responses to change our own fields.
mConnection.executeSimpleCommand(String.format("STATUS %s (RECENT)", folderName));
return true;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
} catch (ImapException ie) {
// We got a response, but it was not "OK"
return false;
}
}
@Override
public boolean exists() throws MessagingException {
if (mExists) {
return true;
}
/*
* This method needs to operate in the unselected mode as well as the selected mode
* so we must get the connection ourselves if it's not there. We are specifically
* not calling checkOpen() since we don't care if the folder is open.
*/
ImapConnection connection = null;
synchronized (this) {
if (mConnection == null) {
connection = getConnection();
} else {
connection = mConnection;
}
}
try {
connection.executeSimpleCommand(String.format("STATUS %s (UIDVALIDITY)",
encodeString(encodeFolderName(getPrefixedName()))));
mExists = true;
return true;
} catch (ImapException ie) {
// We got a response, but it was not "OK"
return false;
} catch (IOException ioe) {
throw ioExceptionHandler(connection, ioe);
} finally {
if (mConnection == null) {
releaseConnection(connection);
}
}
}
@Override
public boolean create(FolderType type) throws MessagingException {
/*
* This method needs to operate in the unselected mode as well as the selected mode
* so we must get the connection ourselves if it's not there. We are specifically
* not calling checkOpen() since we don't care if the folder is open.
*/
ImapConnection connection = null;
synchronized (this) {
if (mConnection == null) {
connection = getConnection();
} else {
connection = mConnection;
}
}
try {
connection.executeSimpleCommand(String.format("CREATE %s",
encodeString(encodeFolderName(getPrefixedName()))));
return true;
} catch (ImapException ie) {
// We got a response, but it was not "OK"
return false;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
} finally {
if (mConnection == null) {
releaseConnection(connection);
}
}
}
/**
* Copies the given messages to the specified folder.
*
* <p>
* <strong>Note:</strong>
* Only the UIDs of the given {@link Message} instances are used. It is assumed that all
* UIDs represent valid messages in this folder.
* </p>
*
         * @param messages
         *            The messages to copy to the specified folder.
* @param folder
* The name of the target folder.
*
* @return The mapping of original message UIDs to the new server UIDs.
*/
@Override
public Map<String, String> copyMessages(Message[] messages, Folder folder)
throws MessagingException {
if (!(folder instanceof ImapFolder)) {
throw new MessagingException("ImapFolder.copyMessages passed non-ImapFolder");
}
if (messages.length == 0) {
return null;
}
ImapFolder iFolder = (ImapFolder)folder;
checkOpen();
String[] uids = new String[messages.length];
for (int i = 0, count = messages.length; i < count; i++) {
uids[i] = messages[i].getUid();
}
try {
String remoteDestName = encodeString(encodeFolderName(iFolder.getPrefixedName()));
//TODO: Split this into multiple commands if the command exceeds a certain length.
mConnection.sendCommand(String.format("UID COPY %s %s",
Utility.combine(uids, ','),
remoteDestName), false);
ImapResponse response;
do {
response = mConnection.readResponse();
handleUntaggedResponse(response);
} while (response.mTag == null);
Map<String, String> uidMap = null;
if (response.size() > 1) {
/*
* If the server supports UIDPLUS, then along with the COPY response it will
* return an COPYUID response code, e.g.
*
* 24 OK [COPYUID 38505 304,319:320 3956:3958] Success
*
* COPYUID is followed by UIDVALIDITY, the set of UIDs of copied messages from
* the source folder and the set of corresponding UIDs assigned to them in the
* destination folder.
*
* We can use the new UIDs included in this response to update our records.
*/
Object responseList = response.get(1);
if (responseList instanceof ImapList) {
final ImapList copyList = (ImapList) responseList;
if (copyList.size() >= 4 && copyList.getString(0).equals("COPYUID")) {
List<String> srcUids = ImapUtility.getImapSequenceValues(
copyList.getString(2));
List<String> destUids = ImapUtility.getImapSequenceValues(
copyList.getString(3));
if (srcUids != null && destUids != null) {
if (srcUids.size() == destUids.size()) {
Iterator<String> srcUidsIterator = srcUids.iterator();
Iterator<String> destUidsIterator = destUids.iterator();
uidMap = new HashMap<String, String>();
while (srcUidsIterator.hasNext() &&
destUidsIterator.hasNext()) {
String srcUid = srcUidsIterator.next();
String destUid = destUidsIterator.next();
uidMap.put(srcUid, destUid);
}
} else {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Parse error: size of source UIDs " +
"list is not the same as size of destination " +
"UIDs list.");
}
}
} else {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Parsing of the sequence set failed.");
}
}
}
}
}
return uidMap;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
@Override
public Map<String, String> moveMessages(Message[] messages, Folder folder) throws MessagingException {
if (messages.length == 0)
return null;
Map<String, String> uidMap = copyMessages(messages, folder);
setFlags(messages, new Flag[] { Flag.DELETED }, true);
return uidMap;
}
@Override
public void delete(Message[] messages, String trashFolderName) throws MessagingException {
if (messages.length == 0)
return;
if (trashFolderName == null || getName().equalsIgnoreCase(trashFolderName)) {
setFlags(messages, new Flag[] { Flag.DELETED }, true);
} else {
ImapFolder remoteTrashFolder = (ImapFolder)getStore().getFolder(trashFolderName);
String remoteTrashName = encodeString(encodeFolderName(remoteTrashFolder.getPrefixedName()));
if (!exists(remoteTrashName)) {
/*
* If the remote trash folder doesn't exist we try to create it.
*/
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "IMAPMessage.delete: attempting to create remote '" + trashFolderName + "' folder for " + getLogId());
remoteTrashFolder.create(FolderType.HOLDS_MESSAGES);
}
if (exists(remoteTrashName)) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "IMAPMessage.delete: copying remote " + messages.length + " messages to '" + trashFolderName + "' for " + getLogId());
moveMessages(messages, remoteTrashFolder);
} else {
throw new MessagingException("IMAPMessage.delete: remote Trash folder " + trashFolderName + " does not exist and could not be created for " + getLogId()
, true);
}
}
}
@Override
public int getMessageCount() {
return mMessageCount;
}
private int getRemoteMessageCount(String criteria) throws MessagingException {
checkOpen();
try {
int count = 0;
int start = 1;
List<ImapResponse> responses = executeSimpleCommand(String.format("SEARCH %d:* %s", start, criteria));
for (ImapResponse response : responses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "SEARCH")) {
count += response.size() - 1;
}
}
return count;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
@Override
public int getUnreadMessageCount() throws MessagingException {
return getRemoteMessageCount("UNSEEN NOT DELETED");
}
@Override
public int getFlaggedMessageCount() throws MessagingException {
return getRemoteMessageCount("FLAGGED NOT DELETED");
}
protected int getHighestUid() {
try {
ImapSearcher searcher = new ImapSearcher() {
public List<ImapResponse> search() throws IOException, MessagingException {
return executeSimpleCommand("UID SEARCH *:*");
}
};
Message[] messages = search(searcher, null);
if (messages.length > 0) {
return Integer.parseInt(messages[0].getUid());
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Unable to find highest UID in folder " + getName(), e);
}
return -1;
}
@Override
public void delete(boolean recurse) throws MessagingException {
throw new Error("ImapStore.delete() not yet implemented");
}
@Override
public Message getMessage(String uid) throws MessagingException {
return new ImapMessage(uid, this);
}
@Override
public Message[] getMessages(int start, int end, Date earliestDate, MessageRetrievalListener listener)
throws MessagingException {
return getMessages(start, end, earliestDate, false, listener);
}
protected Message[] getMessages(final int start, final int end, Date earliestDate, final boolean includeDeleted, final MessageRetrievalListener listener)
throws MessagingException {
if (start < 1 || end < 1 || end < start) {
throw new MessagingException(
String.format("Invalid message set %d %d",
start, end));
}
final StringBuilder dateSearchString = new StringBuilder();
if (earliestDate != null) {
dateSearchString.append(" SINCE ");
synchronized (RFC3501_DATE) {
dateSearchString.append(RFC3501_DATE.format(earliestDate));
}
}
ImapSearcher searcher = new ImapSearcher() {
public List<ImapResponse> search() throws IOException, MessagingException {
return executeSimpleCommand(String.format("UID SEARCH %d:%d%s%s", start, end, dateSearchString, includeDeleted ? "" : " NOT DELETED"));
}
};
return search(searcher, listener);
}
protected Message[] getMessages(final List<Integer> mesgSeqs, final boolean includeDeleted, final MessageRetrievalListener listener)
throws MessagingException {
ImapSearcher searcher = new ImapSearcher() {
public List<ImapResponse> search() throws IOException, MessagingException {
return executeSimpleCommand(String.format("UID SEARCH %s%s", Utility.combine(mesgSeqs.toArray(), ','), includeDeleted ? "" : " NOT DELETED"));
}
};
return search(searcher, listener);
}
protected Message[] getMessagesFromUids(final List<String> mesgUids, final boolean includeDeleted, final MessageRetrievalListener listener)
throws MessagingException {
ImapSearcher searcher = new ImapSearcher() {
public List<ImapResponse> search() throws IOException, MessagingException {
return executeSimpleCommand(String.format("UID SEARCH UID %s%s", Utility.combine(mesgUids.toArray(), ','), includeDeleted ? "" : " NOT DELETED"));
}
};
return search(searcher, listener);
}
private Message[] search(ImapSearcher searcher, MessageRetrievalListener listener) throws MessagingException {
checkOpen();
ArrayList<Message> messages = new ArrayList<Message>();
try {
ArrayList<Integer> uids = new ArrayList<Integer>();
List<ImapResponse> responses = searcher.search(); //
for (ImapResponse response : responses) {
if (response.mTag == null) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "SEARCH")) {
for (int i = 1, count = response.size(); i < count; i++) {
uids.add(Integer.parseInt(response.getString(i)));
}
}
}
}
// Sort the uids in numerically ascending order
Collections.sort(uids);
for (int i = 0, count = uids.size(); i < count; i++) {
if (listener != null) {
listener.messageStarted("" + uids.get(i), i, count);
}
ImapMessage message = new ImapMessage("" + uids.get(i), this);
messages.add(message);
if (listener != null) {
listener.messageFinished(message, i, count);
}
}
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
return messages.toArray(EMPTY_MESSAGE_ARRAY);
}
@Override
public Message[] getMessages(MessageRetrievalListener listener) throws MessagingException {
return getMessages(null, listener);
}
@Override
public Message[] getMessages(String[] uids, MessageRetrievalListener listener)
throws MessagingException {
checkOpen();
ArrayList<Message> messages = new ArrayList<Message>();
try {
if (uids == null) {
List<ImapResponse> responses = executeSimpleCommand("UID SEARCH 1:* NOT DELETED");
ArrayList<String> tempUids = new ArrayList<String>();
for (ImapResponse response : responses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "SEARCH")) {
for (int i = 1, count = response.size(); i < count; i++) {
tempUids.add(response.getString(i));
}
}
}
uids = tempUids.toArray(EMPTY_STRING_ARRAY);
}
for (int i = 0, count = uids.length; i < count; i++) {
if (listener != null) {
listener.messageStarted(uids[i], i, count);
}
ImapMessage message = new ImapMessage(uids[i], this);
messages.add(message);
if (listener != null) {
listener.messageFinished(message, i, count);
}
}
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
return messages.toArray(EMPTY_MESSAGE_ARRAY);
}
@Override
public void fetch(Message[] messages, FetchProfile fp, MessageRetrievalListener listener)
throws MessagingException {
if (messages == null || messages.length == 0) {
return;
}
checkOpen();
List<String> uids = new ArrayList<String>(messages.length);
HashMap<String, Message> messageMap = new HashMap<String, Message>();
for (int i = 0, count = messages.length; i < count; i++) {
String uid = messages[i].getUid();
uids.add(uid);
messageMap.put(uid, messages[i]);
}
/*
* Figure out what command we are going to run:
* Flags - UID FETCH (FLAGS)
* Envelope - UID FETCH ([FLAGS] INTERNALDATE UID RFC822.SIZE FLAGS BODY.PEEK[HEADER.FIELDS (date subject from content-type to cc)])
*
*/
LinkedHashSet<String> fetchFields = new LinkedHashSet<String>();
fetchFields.add("UID");
if (fp.contains(FetchProfile.Item.FLAGS)) {
fetchFields.add("FLAGS");
}
if (fp.contains(FetchProfile.Item.ENVELOPE)) {
fetchFields.add("INTERNALDATE");
fetchFields.add("RFC822.SIZE");
fetchFields.add("BODY.PEEK[HEADER.FIELDS (date subject from content-type to cc " +
"reply-to message-id " + K9.IDENTITY_HEADER + ")]");
}
if (fp.contains(FetchProfile.Item.STRUCTURE)) {
fetchFields.add("BODYSTRUCTURE");
}
if (fp.contains(FetchProfile.Item.BODY_SANE)) {
// If the user wants to download unlimited-size messages, don't go only for the truncated body
if (mAccount.getMaximumAutoDownloadMessageSize() > 0) {
fetchFields.add(String.format("BODY.PEEK[]<0.%d>", mAccount.getMaximumAutoDownloadMessageSize()));
} else {
fetchFields.add("BODY.PEEK[]");
}
}
if (fp.contains(FetchProfile.Item.BODY)) {
fetchFields.add("BODY.PEEK[]");
}
for (int windowStart = 0; windowStart < messages.length; windowStart += (FETCH_WINDOW_SIZE)) {
List<String> uidWindow = uids.subList(windowStart, Math.min((windowStart + FETCH_WINDOW_SIZE), messages.length));
try {
mConnection.sendCommand(String.format("UID FETCH %s (%s)",
Utility.combine(uidWindow.toArray(new String[uidWindow.size()]), ','),
Utility.combine(fetchFields.toArray(new String[fetchFields.size()]), ' ')
), false);
ImapResponse response;
int messageNumber = 0;
ImapResponseParser.IImapResponseCallback callback = null;
if (fp.contains(FetchProfile.Item.BODY) || fp.contains(FetchProfile.Item.BODY_SANE)) {
callback = new FetchBodyCallback(messageMap);
}
do {
response = mConnection.readResponse(callback);
if (response.mTag == null && ImapResponseParser.equalsIgnoreCase(response.get(1), "FETCH")) {
ImapList fetchList = (ImapList)response.getKeyedValue("FETCH");
String uid = fetchList.getKeyedString("UID");
int msgSeq = response.getNumber(0);
if (uid != null) {
try {
msgSeqUidMap.put(msgSeq, uid);
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Stored uid '" + uid + "' for msgSeq " + msgSeq + " into map " /*+ msgSeqUidMap.toString() */);
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Unable to store uid '" + uid + "' for msgSeq " + msgSeq);
}
}
Message message = messageMap.get(uid);
if (message == null) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Do not have message in messageMap for UID " + uid + " for " + getLogId());
handleUntaggedResponse(response);
continue;
}
if (listener != null) {
listener.messageStarted(uid, messageNumber++, messageMap.size());
}
ImapMessage imapMessage = (ImapMessage) message;
Object literal = handleFetchResponse(imapMessage, fetchList);
if (literal != null) {
if (literal instanceof String) {
String bodyString = (String)literal;
InputStream bodyStream = new ByteArrayInputStream(bodyString.getBytes());
imapMessage.parse(bodyStream);
} else if (literal instanceof Integer) {
// All the work was done in FetchBodyCallback.foundLiteral()
} else {
// This shouldn't happen
throw new MessagingException("Got FETCH response with bogus parameters");
}
}
if (listener != null) {
listener.messageFinished(message, messageNumber, messageMap.size());
}
} else {
handleUntaggedResponse(response);
}
} while (response.mTag == null);
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
}
@Override
public void fetchPart(Message message, Part part, MessageRetrievalListener listener)
throws MessagingException {
checkOpen();
String[] parts = part.getHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA);
if (parts == null) {
return;
}
String fetch;
String partId = parts[0];
if ("TEXT".equalsIgnoreCase(partId)) {
fetch = String.format("BODY.PEEK[TEXT]<0.%d>", mAccount.getMaximumAutoDownloadMessageSize());
} else {
fetch = String.format("BODY.PEEK[%s]", partId);
}
try {
mConnection.sendCommand(
String.format("UID FETCH %s (UID %s)", message.getUid(), fetch),
false);
ImapResponse response;
int messageNumber = 0;
ImapResponseParser.IImapResponseCallback callback = new FetchPartCallback(part);
do {
response = mConnection.readResponse(callback);
if ((response.mTag == null) &&
(ImapResponseParser.equalsIgnoreCase(response.get(1), "FETCH"))) {
ImapList fetchList = (ImapList)response.getKeyedValue("FETCH");
String uid = fetchList.getKeyedString("UID");
if (!message.getUid().equals(uid)) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Did not ask for UID " + uid + " for " + getLogId());
handleUntaggedResponse(response);
continue;
}
if (listener != null) {
listener.messageStarted(uid, messageNumber++, 1);
}
ImapMessage imapMessage = (ImapMessage) message;
Object literal = handleFetchResponse(imapMessage, fetchList);
if (literal != null) {
if (literal instanceof Body) {
                            // Most of the work was done in FetchPartCallback.foundLiteral()
part.setBody((Body)literal);
} else if (literal instanceof String) {
String bodyString = (String)literal;
InputStream bodyStream = new ByteArrayInputStream(bodyString.getBytes());
String contentTransferEncoding = part.getHeader(
MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING)[0];
part.setBody(MimeUtility.decodeBody(bodyStream, contentTransferEncoding));
} else {
// This shouldn't happen
throw new MessagingException("Got FETCH response with bogus parameters");
}
}
if (listener != null) {
listener.messageFinished(message, messageNumber, 1);
}
} else {
handleUntaggedResponse(response);
}
} while (response.mTag == null);
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
// Returns value of body field
private Object handleFetchResponse(ImapMessage message, ImapList fetchList) throws MessagingException {
Object result = null;
if (fetchList.containsKey("FLAGS")) {
ImapList flags = fetchList.getKeyedList("FLAGS");
if (flags != null) {
for (int i = 0, count = flags.size(); i < count; i++) {
String flag = flags.getString(i);
if (flag.equalsIgnoreCase("\\Deleted")) {
message.setFlagInternal(Flag.DELETED, true);
} else if (flag.equalsIgnoreCase("\\Answered")) {
message.setFlagInternal(Flag.ANSWERED, true);
} else if (flag.equalsIgnoreCase("\\Seen")) {
message.setFlagInternal(Flag.SEEN, true);
} else if (flag.equalsIgnoreCase("\\Flagged")) {
message.setFlagInternal(Flag.FLAGGED, true);
}
}
}
}
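            // A later patch (shown above this file) replaces the if/else chain
            // above with Flag.valueOfByRealName(). A plausible sketch of that
            // lookup (hypothetical here; the real method would live in
            // com.fsck.k9.mail.Flag):
            //
            //     public static Flag valueOfByRealName(String flag) {
            //         // maps e.g. "\Deleted" -> DELETED, "\Answered" -> ANSWERED
            //         return valueOf(flag.substring(1).toUpperCase(Locale.US));
            //     }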
if (fetchList.containsKey("INTERNALDATE")) {
Date internalDate = fetchList.getKeyedDate("INTERNALDATE");
message.setInternalDate(internalDate);
}
if (fetchList.containsKey("RFC822.SIZE")) {
int size = fetchList.getKeyedNumber("RFC822.SIZE");
message.setSize(size);
}
if (fetchList.containsKey("BODYSTRUCTURE")) {
ImapList bs = fetchList.getKeyedList("BODYSTRUCTURE");
if (bs != null) {
try {
parseBodyStructure(bs, message, "TEXT");
} catch (MessagingException e) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Error handling message for " + getLogId(), e);
message.setBody(null);
}
}
}
if (fetchList.containsKey("BODY")) {
int index = fetchList.getKeyIndex("BODY") + 2;
int size = fetchList.size();
if (index < size) {
result = fetchList.getObject(index);
// Check if there's an origin octet
if (result instanceof String) {
String originOctet = (String) result;
if (originOctet.startsWith("<") && (index + 1) < size) {
result = fetchList.getObject(index + 1);
}
}
}
}
return result;
}
@Override
public Flag[] getPermanentFlags() {
return PERMANENT_FLAGS;
}
/**
* Handle any untagged responses that the caller doesn't care to handle themselves.
* @param responses
*/
protected List<ImapResponse> handleUntaggedResponses(List<ImapResponse> responses) {
for (ImapResponse response : responses) {
handleUntaggedResponse(response);
}
return responses;
}
protected void handlePossibleUidNext(ImapResponse response) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "OK") && response.size() > 1) {
Object bracketedObj = response.get(1);
if (bracketedObj instanceof ImapList) {
ImapList bracketed = (ImapList)bracketedObj;
if (bracketed.size() > 1) {
Object keyObj = bracketed.get(0);
if (keyObj instanceof String) {
String key = (String)keyObj;
if ("UIDNEXT".equalsIgnoreCase(key)) {
uidNext = bracketed.getNumber(1);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got UidNext = " + uidNext + " for " + getLogId());
}
}
}
}
}
}
/**
* Handle an untagged response that the caller doesn't care to handle themselves.
* @param response
*/
protected void handleUntaggedResponse(ImapResponse response) {
if (response.mTag == null && response.size() > 1) {
if (ImapResponseParser.equalsIgnoreCase(response.get(1), "EXISTS")) {
mMessageCount = response.getNumber(0);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got untagged EXISTS with value " + mMessageCount + " for " + getLogId());
}
handlePossibleUidNext(response);
if (ImapResponseParser.equalsIgnoreCase(response.get(1), "EXPUNGE") && mMessageCount > 0) {
mMessageCount--;
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got untagged EXPUNGE with mMessageCount " + mMessageCount + " for " + getLogId());
}
// if (response.size() > 1) {
// Object bracketedObj = response.get(1);
// if (bracketedObj instanceof ImapList)
// {
// ImapList bracketed = (ImapList)bracketedObj;
//
// if (!bracketed.isEmpty())
// {
// Object keyObj = bracketed.get(0);
// if (keyObj instanceof String)
// {
// String key = (String)keyObj;
// if ("ALERT".equalsIgnoreCase(key))
// {
// StringBuilder sb = new StringBuilder();
// for (int i = 2, count = response.size(); i < count; i++) {
// sb.append(response.get(i).toString());
// sb.append(' ');
// }
//
// Log.w(K9.LOG_TAG, "ALERT: " + sb.toString() + " for " + getLogId());
// }
// }
// }
//
//
// }
// }
}
//Log.i(K9.LOG_TAG, "mMessageCount = " + mMessageCount + " for " + getLogId());
}
private void parseBodyStructure(ImapList bs, Part part, String id)
throws MessagingException {
if (bs.get(0) instanceof ImapList) {
/*
* This is a multipart/*
*/
MimeMultipart mp = new MimeMultipart();
for (int i = 0, count = bs.size(); i < count; i++) {
if (bs.get(i) instanceof ImapList) {
/*
* For each part in the message we're going to add a new BodyPart and parse
* into it.
*/
ImapBodyPart bp = new ImapBodyPart();
if (id.equalsIgnoreCase("TEXT")) {
parseBodyStructure(bs.getList(i), bp, Integer.toString(i + 1));
} else {
parseBodyStructure(bs.getList(i), bp, id + "." + (i + 1));
}
mp.addBodyPart(bp);
} else {
/*
* We've got to the end of the children of the part, so now we can find out
* what type it is and bail out.
*/
String subType = bs.getString(i);
mp.setSubType(subType.toLowerCase(Locale.US));
break;
}
}
part.setBody(mp);
} else {
/*
* This is a body. We need to add as much information as we can find out about
* it to the Part.
*/
/*
* 0| 0 body type
* 1| 1 body subtype
* 2| 2 body parameter parenthesized list
* 3| 3 body id (unused)
* 4| 4 body description (unused)
* 5| 5 body encoding
* 6| 6 body size
* -| 7 text lines (only for type TEXT, unused)
* Extensions (optional):
* 7| 8 body MD5 (unused)
* 8| 9 body disposition
* 9|10 body language (unused)
* 10|11 body location (unused)
*/
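/*
 * Illustrative single-part BODYSTRUCTURE (constructed for this comment,
 * not captured from a server):
 *   ("TEXT" "PLAIN" ("CHARSET" "US-ASCII") NIL NIL "7BIT" 2279 48)
 * which, per the index table above, maps to type=TEXT, subtype=PLAIN,
 * params=(CHARSET US-ASCII), encoding=7BIT, size=2279 and 48 text lines.
 */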
String type = bs.getString(0);
String subType = bs.getString(1);
String mimeType = (type + "/" + subType).toLowerCase(Locale.US);
ImapList bodyParams = null;
if (bs.get(2) instanceof ImapList) {
bodyParams = bs.getList(2);
}
String encoding = bs.getString(5);
int size = bs.getNumber(6);
if (MimeUtility.mimeTypeMatches(mimeType, "message/rfc822")) {
// A body type of type MESSAGE and subtype RFC822
// contains, immediately after the basic fields, the
// envelope structure, body structure, and size in
// text lines of the encapsulated message.
// [MESSAGE, RFC822, [NAME, Fwd: [#HTR-517941]: update plans at 1am Friday - Memory allocation - displayware.eml], NIL, NIL, 7BIT, 5974, NIL, [INLINE, [FILENAME*0, Fwd: [#HTR-517941]: update plans at 1am Friday - Memory all, FILENAME*1, ocation - displayware.eml]], NIL]
/*
* This will be caught by fetch and handled appropriately.
*/
throw new MessagingException("BODYSTRUCTURE message/rfc822 not yet supported.");
}
/*
* Set the content type with as much information as we know right now.
*/
StringBuilder contentType = new StringBuilder();
contentType.append(mimeType);
if (bodyParams != null) {
/*
* If there are body params we might be able to get some more information out
* of them.
*/
for (int i = 0, count = bodyParams.size(); i < count; i += 2) {
contentType.append(String.format(";\n %s=\"%s\"",
bodyParams.getString(i),
bodyParams.getString(i + 1)));
}
}
part.setHeader(MimeHeader.HEADER_CONTENT_TYPE, contentType.toString());
// Extension items
ImapList bodyDisposition = null;
if (("text".equalsIgnoreCase(type))
&& (bs.size() > 9)
&& (bs.get(9) instanceof ImapList)) {
bodyDisposition = bs.getList(9);
} else if (!("text".equalsIgnoreCase(type))
&& (bs.size() > 8)
&& (bs.get(8) instanceof ImapList)) {
bodyDisposition = bs.getList(8);
}
StringBuilder contentDisposition = new StringBuilder();
if (bodyDisposition != null && !bodyDisposition.isEmpty()) {
if (!"NIL".equalsIgnoreCase(bodyDisposition.getString(0))) {
contentDisposition.append(bodyDisposition.getString(0).toLowerCase(Locale.US));
}
if ((bodyDisposition.size() > 1)
&& (bodyDisposition.get(1) instanceof ImapList)) {
ImapList bodyDispositionParams = bodyDisposition.getList(1);
/*
* If there is body disposition information we can pull some more information
* about the attachment out.
*/
for (int i = 0, count = bodyDispositionParams.size(); i < count; i += 2) {
contentDisposition.append(String.format(";\n %s=\"%s\"",
bodyDispositionParams.getString(i).toLowerCase(Locale.US),
bodyDispositionParams.getString(i + 1)));
}
}
}
if (MimeUtility.getHeaderParameter(contentDisposition.toString(), "size") == null) {
contentDisposition.append(String.format(";\n size=%d", size));
}
/*
* Set the content disposition containing at least the size. Attachment
* handling code will use this down the road.
*/
part.setHeader(MimeHeader.HEADER_CONTENT_DISPOSITION, contentDisposition.toString());
/*
* Set the Content-Transfer-Encoding header. Attachment code will use this
* to parse the body.
*/
part.setHeader(MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING, encoding);
if (part instanceof ImapMessage) {
((ImapMessage) part).setSize(size);
} else if (part instanceof ImapBodyPart) {
((ImapBodyPart) part).setSize(size);
} else {
throw new MessagingException("Unknown part type " + part.toString());
}
part.setHeader(MimeHeader.HEADER_ANDROID_ATTACHMENT_STORE_DATA, id);
}
}
/**
* Appends the given messages to the selected folder.
*
* <p>
* This implementation also determines the new UIDs of the given messages on the IMAP
* server and changes the messages' UIDs to the new server UIDs.
* </p>
*
* @param messages
* The messages to append to the folder.
*
* @return The mapping of original message UIDs to the new server UIDs.
*/
@Override
public Map<String, String> appendMessages(Message[] messages) throws MessagingException {
checkOpen();
try {
Map<String, String> uidMap = new HashMap<String, String>();
for (Message message : messages) {
mConnection.sendCommand(
String.format("APPEND %s (%s) {%d}",
encodeString(encodeFolderName(getPrefixedName())),
combineFlags(message.getFlags()),
message.calculateSize()), false);
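// Illustrative exchange (RFC 3501 literal syntax, not captured from a
// server): the trailing "{310}" announces a 310-byte literal, the server
// answers with a continuation request ("+ Ready for literal data"), and
// only then is the message body streamed in the loop below.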
ImapResponse response;
do {
response = mConnection.readResponse();
handleUntaggedResponse(response);
if (response.mCommandContinuationRequested) {
EOLConvertingOutputStream eolOut = new EOLConvertingOutputStream(mConnection.mOut);
message.writeTo(eolOut);
eolOut.write('\r');
eolOut.write('\n');
eolOut.flush();
}
} while (response.mTag == null);
if (response.size() > 1) {
/*
* If the server supports UIDPLUS, then along with the APPEND response it
* will return an APPENDUID response code, e.g.
*
* 11 OK [APPENDUID 2 238268] APPEND completed
*
* We can use the UID included in this response to update our records.
*/
Object responseList = response.get(1);
if (responseList instanceof ImapList) {
ImapList appendList = (ImapList) responseList;
if (appendList.size() >= 3 &&
appendList.getString(0).equals("APPENDUID")) {
String newUid = appendList.getString(2);
if (!StringUtils.isNullOrEmpty(newUid)) {
message.setUid(newUid);
uidMap.put(message.getUid(), newUid);
continue;
}
}
}
}
/*
* This part is executed in case the server does not support UIDPLUS or does
* not implement the APPENDUID response code.
*/
String newUid = getUidFromMessageId(message);
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Got UID " + newUid + " for message for " + getLogId());
}
if (!StringUtils.isNullOrEmpty(newUid)) {
uidMap.put(message.getUid(), newUid);
message.setUid(newUid);
}
}
/*
* We need uidMap to be null if new UIDs are not available to maintain consistency
* with the behavior of other similar methods (copyMessages, moveMessages) which
* return null.
*/
return uidMap.isEmpty() ? null : uidMap;
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
@Override
public String getUidFromMessageId(Message message) throws MessagingException {
try {
/*
* Try to find the UID of the message we just appended using the
* Message-ID header.
*/
String[] messageIdHeader = message.getHeader("Message-ID");
if (messageIdHeader == null || messageIdHeader.length == 0) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Did not get a message-id in order to search for UID for " + getLogId());
return null;
}
String messageId = messageIdHeader[0];
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Looking for UID for message with message-id " + messageId + " for " + getLogId());
List<ImapResponse> responses =
executeSimpleCommand(
String.format("UID SEARCH HEADER MESSAGE-ID %s", encodeString(messageId)));
for (ImapResponse response1 : responses) {
if (response1.mTag == null && ImapResponseParser.equalsIgnoreCase(response1.get(0), "SEARCH")
&& response1.size() > 1) {
return response1.getString(1);
}
}
return null;
} catch (IOException ioe) {
throw new MessagingException("Could not find UID for message based on Message-ID", ioe);
}
}
@Override
public void expunge() throws MessagingException {
checkOpen();
try {
executeSimpleCommand("EXPUNGE");
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
private String combineFlags(Flag[] flags) {
ArrayList<String> flagNames = new ArrayList<String>();
for (Flag flag : flags) {
if (flag == Flag.SEEN) {
flagNames.add("\\Seen");
} else if (flag == Flag.DELETED) {
flagNames.add("\\Deleted");
} else if (flag == Flag.ANSWERED) {
flagNames.add("\\Answered");
} else if (flag == Flag.FLAGGED) {
flagNames.add("\\Flagged");
}
}
return Utility.combine(flagNames.toArray(new String[flagNames.size()]), ' ');
}
@Override
public void setFlags(Flag[] flags, boolean value)
throws MessagingException {
checkOpen();
try {
executeSimpleCommand(String.format("UID STORE 1:* %sFLAGS.SILENT (%s)",
value ? "+" : "-", combineFlags(flags)));
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
@Override
public String getNewPushState(String oldPushStateS, Message message) {
try {
String messageUidS = message.getUid();
int messageUid = Integer.parseInt(messageUidS);
ImapPushState oldPushState = ImapPushState.parse(oldPushStateS);
if (messageUid >= oldPushState.uidNext) {
int uidNext = messageUid + 1;
ImapPushState newPushState = new ImapPushState(uidNext);
return newPushState.toString();
} else {
return null;
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Exception while updated push state for " + getLogId(), e);
return null;
}
}
@Override
public void setFlags(Message[] messages, Flag[] flags, boolean value)
throws MessagingException {
checkOpen();
String[] uids = new String[messages.length];
for (int i = 0, count = messages.length; i < count; i++) {
uids[i] = messages[i].getUid();
}
try {
executeSimpleCommand(String.format("UID STORE %s %sFLAGS.SILENT (%s)",
Utility.combine(uids, ','),
value ? "+" : "-",
combineFlags(flags)));
} catch (IOException ioe) {
throw ioExceptionHandler(mConnection, ioe);
}
}
private void checkOpen() throws MessagingException {
if (!isOpen()) {
throw new MessagingException("Folder " + getPrefixedName() + " is not open.");
}
}
private MessagingException ioExceptionHandler(ImapConnection connection, IOException ioe) {
Log.e(K9.LOG_TAG, "IOException for " + getLogId(), ioe);
if (connection != null) {
connection.close();
}
close();
return new MessagingException("IO Error", ioe);
}
@Override
public boolean equals(Object o) {
if (o instanceof ImapFolder) {
return ((ImapFolder)o).getName().equalsIgnoreCase(getName());
}
return super.equals(o);
}
@Override
public int hashCode() {
// Keep the hash consistent with the case-insensitive equals() above.
return getName().toLowerCase(Locale.US).hashCode();
}
protected ImapStore getStore() {
return store;
}
protected String getLogId() {
String id = getAccount().getDescription() + ":" + getName() + "/" + Thread.currentThread().getName();
if (mConnection != null) {
id += "/" + mConnection.getLogId();
}
return id;
}
}
/**
* A cacheable class that stores the details for a single IMAP connection.
*/
public static class ImapConnection {
protected Socket mSocket;
protected PeekableInputStream mIn;
protected OutputStream mOut;
protected ImapResponseParser mParser;
protected int mNextCommandTag;
protected Set<String> capabilities = new HashSet<String>();
private ImapSettings mSettings;
public ImapConnection(final ImapSettings settings) {
this.mSettings = settings;
}
protected String getLogId() {
return "conn" + hashCode();
}
private List<ImapResponse> receiveCapabilities(List<ImapResponse> responses) {
for (ImapResponse response : responses) {
ImapList capabilityList = null;
if (!response.isEmpty() && ImapResponseParser.equalsIgnoreCase(response.get(0), "OK")) {
for (Object thisPart : response) {
if (thisPart instanceof ImapList) {
ImapList thisList = (ImapList)thisPart;
if (ImapResponseParser.equalsIgnoreCase(thisList.get(0), CAPABILITY_CAPABILITY)) {
capabilityList = thisList;
break;
}
}
}
} else if (response.mTag == null) {
capabilityList = response;
}
if (capabilityList != null) {
if (!capabilityList.isEmpty() && ImapResponseParser.equalsIgnoreCase(capabilityList.get(0), CAPABILITY_CAPABILITY)) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Saving " + capabilityList.size() + " capabilities for " + getLogId());
}
for (Object capability : capabilityList) {
if (capability instanceof String) {
// if (K9.DEBUG)
// {
// Log.v(K9.LOG_TAG, "Saving capability '" + capability + "' for " + getLogId());
// }
capabilities.add(((String)capability).toUpperCase(Locale.US));
}
}
}
}
}
return responses;
}
public void open() throws IOException, MessagingException {
if (isOpen()) {
return;
}
boolean authSuccess = false;
mNextCommandTag = 1;
try {
Security.setProperty("networkaddress.cache.ttl", "0");
} catch (Exception e) {
Log.w(K9.LOG_TAG, "Could not set DNS ttl to 0 for " + getLogId(), e);
}
try {
Security.setProperty("networkaddress.cache.negative.ttl", "0");
} catch (Exception e) {
Log.w(K9.LOG_TAG, "Could not set DNS negative ttl to 0 for " + getLogId(), e);
}
try {
int connectionSecurity = mSettings.getConnectionSecurity();
// Try all IPv4 and IPv6 addresses of the host
InetAddress[] addresses = InetAddress.getAllByName(mSettings.getHost());
for (int i = 0; i < addresses.length; i++) {
try {
if (K9.DEBUG && K9.DEBUG_PROTOCOL_IMAP) {
Log.d(K9.LOG_TAG, "Connecting to " + mSettings.getHost() + " as " +
addresses[i]);
}
SocketAddress socketAddress = new InetSocketAddress(addresses[i],
mSettings.getPort());
if (connectionSecurity == CONNECTION_SECURITY_SSL_REQUIRED ||
connectionSecurity == CONNECTION_SECURITY_SSL_OPTIONAL) {
SSLContext sslContext = SSLContext.getInstance("TLS");
boolean secure = connectionSecurity == CONNECTION_SECURITY_SSL_REQUIRED;
sslContext.init(null, new TrustManager[] {
TrustManagerFactory.get(mSettings.getHost(), secure)
}, new SecureRandom());
mSocket = sslContext.getSocketFactory().createSocket();
} else {
mSocket = new Socket();
}
mSocket.connect(socketAddress, SOCKET_CONNECT_TIMEOUT);
// Successfully connected to the server; don't try any other addresses
break;
} catch (SocketException e) {
if (i < (addresses.length - 1)) {
// There are still other addresses for that host to try
continue;
}
throw new MessagingException("Cannot connect to host", e);
}
}
setReadTimeout(Store.SOCKET_READ_TIMEOUT);
mIn = new PeekableInputStream(new BufferedInputStream(mSocket.getInputStream(),
1024));
mParser = new ImapResponseParser(mIn);
mOut = mSocket.getOutputStream();
capabilities.clear();
ImapResponse nullResponse = mParser.readResponse();
if (K9.DEBUG && K9.DEBUG_PROTOCOL_IMAP)
Log.v(K9.LOG_TAG, getLogId() + "<<<" + nullResponse);
List<ImapResponse> nullResponses = new LinkedList<ImapResponse>();
nullResponses.add(nullResponse);
receiveCapabilities(nullResponses);
if (!hasCapability(CAPABILITY_CAPABILITY)) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Did not get capabilities in banner, requesting CAPABILITY for " + getLogId());
List<ImapResponse> responses = receiveCapabilities(executeSimpleCommand(COMMAND_CAPABILITY));
if (responses.size() != 2) {
throw new MessagingException("Invalid CAPABILITY response received");
}
}
if (mSettings.getConnectionSecurity() == CONNECTION_SECURITY_TLS_OPTIONAL
|| mSettings.getConnectionSecurity() == CONNECTION_SECURITY_TLS_REQUIRED) {
if (hasCapability("STARTTLS")) {
// STARTTLS
executeSimpleCommand("STARTTLS");
SSLContext sslContext = SSLContext.getInstance("TLS");
boolean secure = mSettings.getConnectionSecurity() == CONNECTION_SECURITY_TLS_REQUIRED;
sslContext.init(null, new TrustManager[] {
TrustManagerFactory.get(mSettings.getHost(), secure)
}, new SecureRandom());
mSocket = sslContext.getSocketFactory().createSocket(mSocket, mSettings.getHost(), mSettings.getPort(),
true);
mSocket.setSoTimeout(Store.SOCKET_READ_TIMEOUT);
mIn = new PeekableInputStream(new BufferedInputStream(mSocket
.getInputStream(), 1024));
mParser = new ImapResponseParser(mIn);
mOut = mSocket.getOutputStream();
} else if (mSettings.getConnectionSecurity() == CONNECTION_SECURITY_TLS_REQUIRED) {
throw new MessagingException("TLS not supported but required");
}
}
mOut = new BufferedOutputStream(mOut, 1024);
try {
// Yahoo! requires a custom IMAP command to work right over a non-3G network
if (mSettings.getHost().endsWith("yahoo.com")) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Found Yahoo! account. Sending proprietary commands.");
executeSimpleCommand("ID (\"GUID\" \"1\")");
}
if (mSettings.getAuthType() == AuthType.CRAM_MD5) {
authCramMD5();
// The authCramMD5 method called on the previous line does not allow for handling updated capabilities
// sent by the server. So, to make sure we update to the post-authentication
// capability list, we fetch the capabilities here.
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Updating capabilities after CRAM-MD5 authentication for " + getLogId());
List<ImapResponse> responses = receiveCapabilities(executeSimpleCommand(COMMAND_CAPABILITY));
if (responses.size() != 2) {
throw new MessagingException("Invalid CAPABILITY response received");
}
} else if (mSettings.getAuthType() == AuthType.PLAIN) {
receiveCapabilities(executeSimpleCommand(String.format("LOGIN %s %s", ImapStore.encodeString(mSettings.getUsername()), ImapStore.encodeString(mSettings.getPassword())), true));
}
authSuccess = true;
} catch (ImapException ie) {
throw new AuthenticationFailedException(ie.getAlertText(), ie);
} catch (MessagingException me) {
throw new AuthenticationFailedException(null, me);
}
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, CAPABILITY_COMPRESS_DEFLATE + " = " + hasCapability(CAPABILITY_COMPRESS_DEFLATE));
}
if (hasCapability(CAPABILITY_COMPRESS_DEFLATE)) {
ConnectivityManager connectivityManager = (ConnectivityManager)K9.app.getSystemService(Context.CONNECTIVITY_SERVICE);
boolean useCompression = true;
NetworkInfo netInfo = connectivityManager.getActiveNetworkInfo();
if (netInfo != null) {
int type = netInfo.getType();
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "On network type " + type);
useCompression = mSettings.useCompression(type);
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "useCompression " + useCompression);
if (useCompression) {
try {
executeSimpleCommand(COMMAND_COMPRESS_DEFLATE);
Inflater inf = new Inflater(true);
InflaterInputStream zInputStream = new InflaterInputStream(mSocket.getInputStream(), inf);
mIn = new PeekableInputStream(new BufferedInputStream(zInputStream, 1024));
mParser = new ImapResponseParser(mIn);
ZOutputStream zOutputStream = new ZOutputStream(mSocket.getOutputStream(), JZlib.Z_BEST_SPEED, true);
mOut = new BufferedOutputStream(zOutputStream, 1024);
zOutputStream.setFlushMode(JZlib.Z_PARTIAL_FLUSH);
if (K9.DEBUG) {
Log.i(K9.LOG_TAG, "Compression enabled for " + getLogId());
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Unable to negotiate compression", e);
}
}
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "NAMESPACE = " + hasCapability(CAPABILITY_NAMESPACE)
+ ", mPathPrefix = " + mSettings.getPathPrefix());
if (mSettings.getPathPrefix() == null) {
if (hasCapability(CAPABILITY_NAMESPACE)) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "mPathPrefix is unset and server has NAMESPACE capability");
List<ImapResponse> namespaceResponses =
executeSimpleCommand(COMMAND_NAMESPACE);
for (ImapResponse response : namespaceResponses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), COMMAND_NAMESPACE)) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got NAMESPACE response " + response + " on " + getLogId());
Object personalNamespaces = response.get(1);
if (personalNamespaces instanceof ImapList) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got personal namespaces: " + personalNamespaces);
ImapList bracketed = (ImapList)personalNamespaces;
Object firstNamespace = bracketed.get(0);
if (firstNamespace instanceof ImapList) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got first personal namespaces: " + firstNamespace);
bracketed = (ImapList)firstNamespace;
mSettings.setPathPrefix(bracketed.getString(0));
mSettings.setPathDelimeter(bracketed.getString(1));
mSettings.setCombinedPrefix(null);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got path '" + mSettings.getPathPrefix() + "' and separator '" + mSettings.getPathDelimeter() + "'");
}
}
}
}
} else {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "mPathPrefix is unset but server does not have NAMESPACE capability");
mSettings.setPathPrefix("");
}
}
if (mSettings.getPathDelimeter() == null) {
try {
List<ImapResponse> nameResponses =
executeSimpleCommand("LIST \"\" \"\"");
for (ImapResponse response : nameResponses) {
if (ImapResponseParser.equalsIgnoreCase(response.get(0), "LIST")) {
mSettings.setPathDelimeter(response.getString(2));
mSettings.setCombinedPrefix(null);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got path delimeter '" + mSettings.getPathDelimeter() + "' for " + getLogId());
}
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Unable to get path delimeter using LIST", e);
}
}
} catch (SSLException e) {
throw new CertificateValidationException(e.getMessage(), e);
} catch (GeneralSecurityException gse) {
throw new MessagingException(
"Unable to open connection to IMAP server due to security error.", gse);
} catch (ConnectException ce) {
String ceMess = ce.getMessage();
String[] tokens = ceMess.split("-");
if (tokens != null && tokens.length > 1 && tokens[1] != null) {
Log.e(K9.LOG_TAG, "Stripping host/port from ConnectionException for " + getLogId(), ce);
throw new ConnectException(tokens[1].trim());
} else {
throw ce;
}
} finally {
if (!authSuccess) {
Log.e(K9.LOG_TAG, "Failed to login, closing connection for " + getLogId());
close();
}
}
}
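// An illustrative CRAM-MD5 exchange (the worked example from RFC 2195,
// not this code's own traffic): the server sends the base64 challenge
//   PDE4OTYuNjk3MTcwOTUyQHBvc3RvZmZpY2UucmVzdG9uLm1jaS5uZXQ+
// i.e. "<1896.697170952@postoffice.reston.mci.net>", and user "tim" with
// password "tanstaaftanstaaf" answers with base64("tim " + hex HMAC-MD5):
//   dGltIGI5MTNhNjAyYzdlZGE3YTQ5NWI0ZTZlNzMzNGQzODkw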
protected void authCramMD5() throws AuthenticationFailedException, MessagingException {
try {
String tag = sendCommand("AUTHENTICATE CRAM-MD5", false);
byte[] buf = new byte[1024];
int b64NonceLen = 0;
for (int i = 0; i < buf.length; i++) {
buf[i] = (byte)mIn.read();
if (buf[i] == 0x0a) {
b64NonceLen = i;
break;
}
}
if (b64NonceLen == 0) {
throw new AuthenticationFailedException("Error negotiating CRAM-MD5: nonce too long.");
}
byte[] b64NonceTrim = new byte[b64NonceLen - 2];
System.arraycopy(buf, 1, b64NonceTrim, 0, b64NonceLen - 2);
byte[] b64CRAM = Authentication.computeCramMd5Bytes(mSettings.getUsername(),
mSettings.getPassword(), b64NonceTrim);
mOut.write(b64CRAM);
mOut.write(new byte[] { 0x0d, 0x0a });
mOut.flush();
int respLen = 0;
for (int i = 0; i < buf.length; i++) {
buf[i] = (byte)mIn.read();
if (buf[i] == 0x0a) {
respLen = i;
break;
}
}
String toMatch = tag + " OK";
String respStr = new String(buf, 0, respLen);
if (!respStr.startsWith(toMatch)) {
throw new AuthenticationFailedException("CRAM-MD5 error: " + respStr);
}
} catch (IOException ioe) {
throw new AuthenticationFailedException("CRAM-MD5 Auth Failed.", ioe);
}
}
protected void setReadTimeout(int millis) throws SocketException {
Socket sock = mSocket;
if (sock != null) {
sock.setSoTimeout(millis);
}
}
protected boolean isIdleCapable() {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Connection " + getLogId() + " has " + capabilities.size() + " capabilities");
return capabilities.contains(CAPABILITY_IDLE);
}
protected boolean hasCapability(String capability) {
return capabilities.contains(capability.toUpperCase(Locale.US));
}
public boolean isOpen() {
return (mIn != null && mOut != null && mSocket != null && mSocket.isConnected() && !mSocket.isClosed());
}
public void close() {
// if (isOpen()) {
// try {
// executeSimpleCommand("LOGOUT");
// } catch (Exception e) {
//
// }
// }
IOUtils.closeQuietly(mIn);
IOUtils.closeQuietly(mOut);
IOUtils.closeQuietly(mSocket);
mIn = null;
mOut = null;
mSocket = null;
}
public ImapResponse readResponse() throws IOException, MessagingException {
return readResponse(null);
}
public ImapResponse readResponse(ImapResponseParser.IImapResponseCallback callback) throws IOException {
try {
ImapResponse response = mParser.readResponse(callback);
if (K9.DEBUG && K9.DEBUG_PROTOCOL_IMAP)
Log.v(K9.LOG_TAG, getLogId() + "<<<" + response);
return response;
} catch (IOException ioe) {
close();
throw ioe;
}
}
public void sendContinuation(String continuation) throws IOException {
mOut.write(continuation.getBytes());
mOut.write('\r');
mOut.write('\n');
mOut.flush();
if (K9.DEBUG && K9.DEBUG_PROTOCOL_IMAP)
Log.v(K9.LOG_TAG, getLogId() + ">>> " + continuation);
}
public String sendCommand(String command, boolean sensitive)
throws MessagingException, IOException {
try {
open();
String tag = Integer.toString(mNextCommandTag++);
String commandToSend = tag + " " + command;
mOut.write(commandToSend.getBytes());
mOut.write('\r');
mOut.write('\n');
mOut.flush();
if (K9.DEBUG && K9.DEBUG_PROTOCOL_IMAP) {
if (sensitive && !K9.DEBUG_SENSITIVE) {
Log.v(K9.LOG_TAG, getLogId() + ">>> "
+ "[Command Hidden, Enable Sensitive Debug Logging To Show]");
} else {
Log.v(K9.LOG_TAG, getLogId() + ">>> " + commandToSend);
}
}
return tag;
} catch (IOException ioe) {
close();
throw ioe;
} catch (ImapException ie) {
close();
throw ie;
} catch (MessagingException me) {
close();
throw me;
}
}
public List<ImapResponse> executeSimpleCommand(String command) throws IOException,
ImapException, MessagingException {
return executeSimpleCommand(command, false);
}
public List<ImapResponse> executeSimpleCommand(String command, boolean sensitive) throws IOException,
ImapException, MessagingException {
return executeSimpleCommand(command, sensitive, null);
}
public List<ImapResponse> executeSimpleCommand(String command, boolean sensitive, UntaggedHandler untaggedHandler)
throws IOException, ImapException, MessagingException {
String commandToLog = command;
if (sensitive && !K9.DEBUG_SENSITIVE) {
commandToLog = "*sensitive*";
}
//if (K9.DEBUG)
// Log.v(K9.LOG_TAG, "Sending IMAP command " + commandToLog + " on connection " + getLogId());
String tag = sendCommand(command, sensitive);
//if (K9.DEBUG)
// Log.v(K9.LOG_TAG, "Sent IMAP command " + commandToLog + " with tag " + tag + " for " + getLogId());
ArrayList<ImapResponse> responses = new ArrayList<ImapResponse>();
ImapResponse response;
do {
response = mParser.readResponse();
if (K9.DEBUG && K9.DEBUG_PROTOCOL_IMAP)
Log.v(K9.LOG_TAG, getLogId() + "<<<" + response);
if (response.mTag != null && !response.mTag.equalsIgnoreCase(tag)) {
Log.w(K9.LOG_TAG, "After sending tag " + tag + ", got tag response from previous command " + response + " for " + getLogId());
Iterator<ImapResponse> iter = responses.iterator();
while (iter.hasNext()) {
ImapResponse delResponse = iter.next();
if (delResponse.mTag != null || delResponse.size() < 2
|| (!ImapResponseParser.equalsIgnoreCase(delResponse.get(1), "EXISTS") && !ImapResponseParser.equalsIgnoreCase(delResponse.get(1), "EXPUNGE"))) {
iter.remove();
}
}
response.mTag = null;
continue;
}
if (untaggedHandler != null) {
untaggedHandler.handleAsyncUntaggedResponse(response);
}
responses.add(response);
} while (response.mTag == null);
if (response.size() < 1 || !ImapResponseParser.equalsIgnoreCase(response.get(0), "OK")) {
throw new ImapException("Command: " + commandToLog + "; response: " + response.toString(), response.getAlertText());
}
return responses;
}
}
static class ImapMessage extends MimeMessage {
ImapMessage(String uid, Folder folder) {
this.mUid = uid;
this.mFolder = folder;
}
public void setSize(int size) {
this.mSize = size;
}
@Override
public void parse(InputStream in) throws IOException, MessagingException {
super.parse(in);
}
public void setFlagInternal(Flag flag, boolean set) throws MessagingException {
super.setFlag(flag, set);
}
@Override
public void setFlag(Flag flag, boolean set) throws MessagingException {
super.setFlag(flag, set);
mFolder.setFlags(new Message[] { this }, new Flag[] { flag }, set);
}
@Override
public void delete(String trashFolderName) throws MessagingException {
getFolder().delete(new Message[] { this }, trashFolderName);
}
}
static class ImapBodyPart extends MimeBodyPart {
public ImapBodyPart() throws MessagingException {
super();
}
public void setSize(int size) {
this.mSize = size;
}
}
static class ImapException extends MessagingException {
private static final long serialVersionUID = 3725007182205882394L;
String mAlertText;
public ImapException(String message, String alertText) {
super(message, true);
this.mAlertText = alertText;
}
public String getAlertText() {
return mAlertText;
}
public void setAlertText(String alertText) {
mAlertText = alertText;
}
}
public class ImapFolderPusher extends ImapFolder implements UntaggedHandler {
final PushReceiver receiver;
Thread listeningThread = null;
final AtomicBoolean stop = new AtomicBoolean(false);
final AtomicBoolean idling = new AtomicBoolean(false);
final AtomicBoolean doneSent = new AtomicBoolean(false);
final AtomicInteger delayTime = new AtomicInteger(NORMAL_DELAY_TIME);
final AtomicInteger idleFailureCount = new AtomicInteger(0);
final AtomicBoolean needsPoll = new AtomicBoolean(false);
List<ImapResponse> storedUntaggedResponses = new ArrayList<ImapResponse>();
TracingWakeLock wakeLock = null;
public ImapFolderPusher(ImapStore store, String name, PushReceiver nReceiver) {
super(store, name);
receiver = nReceiver;
TracingPowerManager pm = TracingPowerManager.getPowerManager(receiver.getContext());
wakeLock = pm.newWakeLock(PowerManager.PARTIAL_WAKE_LOCK, "ImapFolderPusher " + store.getAccount().getDescription() + ":" + getName());
wakeLock.setReferenceCounted(false);
}
public void refresh() throws IOException, MessagingException {
if (idling.get()) {
wakeLock.acquire(K9.PUSH_WAKE_LOCK_TIMEOUT);
sendDone();
}
}
private void sendDone() throws IOException, MessagingException {
if (doneSent.compareAndSet(false, true)) {
ImapConnection conn = mConnection;
if (conn != null) {
conn.setReadTimeout(Store.SOCKET_READ_TIMEOUT);
sendContinuation("DONE");
}
}
}
private void sendContinuation(String continuation)
throws IOException {
ImapConnection conn = mConnection;
if (conn != null) {
conn.sendContinuation(continuation);
}
}
public void start() {
Runnable runner = new Runnable() {
public void run() {
wakeLock.acquire(K9.PUSH_WAKE_LOCK_TIMEOUT);
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Pusher starting for " + getLogId());
while (!stop.get()) {
try {
int oldUidNext = -1;
try {
String pushStateS = receiver.getPushState(getName());
ImapPushState pushState = ImapPushState.parse(pushStateS);
oldUidNext = pushState.uidNext;
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Got oldUidNext " + oldUidNext + " for " + getLogId());
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Unable to get oldUidNext for " + getLogId(), e);
}
ImapConnection oldConnection = mConnection;
internalOpen(OpenMode.READ_ONLY);
ImapConnection conn = mConnection;
if (conn == null) {
receiver.pushError("Could not establish connection for IDLE", null);
throw new MessagingException("Could not establish connection for IDLE");
}
if (!conn.isIdleCapable()) {
stop.set(true);
receiver.pushError("IMAP server is not IDLE capable: " + conn.toString(), null);
throw new MessagingException("IMAP server is not IDLE capable:" + conn.toString());
}
if (!stop.get() && mAccount.isPushPollOnConnect() && (conn != oldConnection || needsPoll.getAndSet(false))) {
List<ImapResponse> untaggedResponses = new ArrayList<ImapResponse>(storedUntaggedResponses);
storedUntaggedResponses.clear();
processUntaggedResponses(untaggedResponses);
if (mMessageCount == -1) {
throw new MessagingException("Message count = -1 for idling");
}
receiver.syncFolder(ImapFolderPusher.this);
}
if (stop.get()) {
continue;
}
int startUid = oldUidNext;
int newUidNext = uidNext;
if (newUidNext == -1) {
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "uidNext is -1, using search to find highest UID");
}
int highestUid = getHighestUid();
if (highestUid != -1) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "highest UID = " + highestUid);
newUidNext = highestUid + 1;
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "highest UID = " + highestUid
+ ", set newUidNext to " + newUidNext);
}
}
if (startUid < newUidNext - mAccount.getDisplayCount()) {
startUid = newUidNext - mAccount.getDisplayCount();
}
if (startUid < 1) {
startUid = 1;
}
if (newUidNext > startUid) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Needs sync from uid " + startUid + " to " + newUidNext + " for " + getLogId());
List<Message> messages = new ArrayList<Message>();
for (int uid = startUid; uid < newUidNext; uid++) {
ImapMessage message = new ImapMessage("" + uid, ImapFolderPusher.this);
messages.add(message);
}
if (!messages.isEmpty()) {
pushMessages(messages, true);
}
} else {
List<ImapResponse> untaggedResponses = null;
while (!storedUntaggedResponses.isEmpty()) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Processing " + storedUntaggedResponses.size() + " untagged responses from previous commands for " + getLogId());
untaggedResponses = new ArrayList<ImapResponse>(storedUntaggedResponses);
storedUntaggedResponses.clear();
processUntaggedResponses(untaggedResponses);
}
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "About to IDLE for " + getLogId());
receiver.setPushActive(getName(), true);
idling.set(true);
doneSent.set(false);
conn.setReadTimeout((getAccount().getIdleRefreshMinutes() * 60 * 1000) + IDLE_READ_TIMEOUT_INCREMENT);
untaggedResponses = executeSimpleCommand(COMMAND_IDLE, false, ImapFolderPusher.this);
idling.set(false);
delayTime.set(NORMAL_DELAY_TIME);
idleFailureCount.set(0);
}
} catch (Exception e) {
wakeLock.acquire(K9.PUSH_WAKE_LOCK_TIMEOUT);
storedUntaggedResponses.clear();
idling.set(false);
receiver.setPushActive(getName(), false);
try {
close();
} catch (Exception me) {
Log.e(K9.LOG_TAG, "Got exception while closing for exception for " + getLogId(), me);
}
if (stop.get()) {
Log.i(K9.LOG_TAG, "Got exception while idling, but stop is set for " + getLogId());
} else {
receiver.pushError("Push error for " + getName(), e);
Log.e(K9.LOG_TAG, "Got exception while idling for " + getLogId(), e);
int delayTimeInt = delayTime.get();
receiver.sleep(wakeLock, delayTimeInt);
delayTimeInt *= 2;
if (delayTimeInt > MAX_DELAY_TIME) {
delayTimeInt = MAX_DELAY_TIME;
}
delayTime.set(delayTimeInt);
if (idleFailureCount.incrementAndGet() > IDLE_FAILURE_COUNT_LIMIT) {
Log.e(K9.LOG_TAG, "Disabling pusher for " + getLogId() + " after " + idleFailureCount.get() + " consecutive errors");
receiver.pushError("Push disabled for " + getName() + " after " + idleFailureCount.get() + " consecutive errors", e);
stop.set(true);
}
}
}
}
receiver.setPushActive(getName(), false);
try {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Pusher for " + getLogId() + " is exiting");
close();
} catch (Exception me) {
Log.e(K9.LOG_TAG, "Got exception while closing for " + getLogId(), me);
} finally {
wakeLock.release();
}
}
};
listeningThread = new Thread(runner);
listeningThread.start();
}
@Override
protected void handleUntaggedResponse(ImapResponse response) {
if (response.mTag == null && response.size() > 1) {
Object responseType = response.get(1);
if (ImapResponseParser.equalsIgnoreCase(responseType, "FETCH")
|| ImapResponseParser.equalsIgnoreCase(responseType, "EXPUNGE")
|| ImapResponseParser.equalsIgnoreCase(responseType, "EXISTS")) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Storing response " + response + " for later processing");
storedUntaggedResponses.add(response);
}
handlePossibleUidNext(response);
}
}
protected void processUntaggedResponses(List<ImapResponse> responses) throws MessagingException {
boolean skipSync = false;
int oldMessageCount = mMessageCount;
if (oldMessageCount == -1) {
skipSync = true;
}
List<Integer> flagSyncMsgSeqs = new ArrayList<Integer>();
List<String> removeMsgUids = new LinkedList<String>();
for (ImapResponse response : responses) {
oldMessageCount += processUntaggedResponse(oldMessageCount, response, flagSyncMsgSeqs, removeMsgUids);
}
if (!skipSync) {
if (oldMessageCount < 0) {
oldMessageCount = 0;
}
if (mMessageCount > oldMessageCount) {
syncMessages(mMessageCount, true);
}
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "UIDs for messages needing flag sync are " + flagSyncMsgSeqs + " for " + getLogId());
if (!flagSyncMsgSeqs.isEmpty()) {
syncMessages(flagSyncMsgSeqs);
}
if (!removeMsgUids.isEmpty()) {
removeMessages(removeMsgUids);
}
}
private void syncMessages(int end, boolean newArrivals) throws MessagingException {
int oldUidNext = -1;
try {
String pushStateS = receiver.getPushState(getName());
ImapPushState pushState = ImapPushState.parse(pushStateS);
oldUidNext = pushState.uidNext;
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Got oldUidNext " + oldUidNext + " for " + getLogId());
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Unable to get oldUidNext for " + getLogId(), e);
}
Message[] messageArray = getMessages(end, end, null, true, null);
if (messageArray != null && messageArray.length > 0) {
int newUid = Integer.parseInt(messageArray[0].getUid());
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Got newUid " + newUid + " for message " + end + " on " + getLogId());
int startUid = oldUidNext;
if (startUid < newUid - 10) {
startUid = newUid - 10;
}
if (startUid < 1) {
startUid = 1;
}
if (newUid >= startUid) {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Needs sync from uid " + startUid + " to " + newUid + " for " + getLogId());
List<Message> messages = new ArrayList<Message>();
for (int uid = startUid; uid <= newUid; uid++) {
ImapMessage message = new ImapMessage("" + uid, ImapFolderPusher.this);
messages.add(message);
}
if (!messages.isEmpty()) {
pushMessages(messages, true);
}
}
}
}
private void syncMessages(List<Integer> flagSyncMsgSeqs) {
try {
Message[] messageArray = getMessages(flagSyncMsgSeqs, true, null);
List<Message> messages = new ArrayList<Message>();
messages.addAll(Arrays.asList(messageArray));
pushMessages(messages, false);
} catch (Exception e) {
receiver.pushError("Exception while processing Push untagged responses", e);
}
}
private void removeMessages(List<String> removeUids) {
List<Message> messages = new ArrayList<Message>(removeUids.size());
try {
Message[] existingMessages = getMessagesFromUids(removeUids, true, null);
for (Message existingMessage : existingMessages) {
needsPoll.set(true);
msgSeqUidMap.clear();
String existingUid = existingMessage.getUid();
Log.w(K9.LOG_TAG, "Message with UID " + existingUid + " still exists on server, not expunging");
removeUids.remove(existingUid);
}
for (String uid : removeUids) {
ImapMessage message = new ImapMessage(uid, this);
try {
message.setFlagInternal(Flag.DELETED, true);
} catch (MessagingException me) {
Log.e(K9.LOG_TAG, "Unable to set DELETED flag on message " + message.getUid());
}
messages.add(message);
}
receiver.messagesRemoved(this, messages);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Cannot remove EXPUNGEd messages", e);
}
}
protected int processUntaggedResponse(int oldMessageCount, ImapResponse response, List<Integer> flagSyncMsgSeqs, List<String> removeMsgUids) {
super.handleUntaggedResponse(response);
int messageCountDelta = 0;
if (response.mTag == null && response.size() > 1) {
try {
Object responseType = response.get(1);
if (ImapResponseParser.equalsIgnoreCase(responseType, "FETCH")) {
Log.i(K9.LOG_TAG, "Got FETCH " + response);
int msgSeq = response.getNumber(0);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got untagged FETCH for msgseq " + msgSeq + " for " + getLogId());
if (!flagSyncMsgSeqs.contains(msgSeq)) {
flagSyncMsgSeqs.add(msgSeq);
}
}
if (ImapResponseParser.equalsIgnoreCase(responseType, "EXPUNGE")) {
int msgSeq = response.getNumber(0);
if (msgSeq <= oldMessageCount) {
messageCountDelta = -1;
}
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got untagged EXPUNGE for msgseq " + msgSeq + " for " + getLogId());
List<Integer> newSeqs = new ArrayList<Integer>();
Iterator<Integer> flagIter = flagSyncMsgSeqs.iterator();
while (flagIter.hasNext()) {
Integer flagMsg = flagIter.next();
if (flagMsg >= msgSeq) {
flagIter.remove();
if (flagMsg > msgSeq) {
// An EXPUNGE at msgSeq shifts every later sequence number down by one.
newSeqs.add(flagMsg - 1);
}
}
}
flagSyncMsgSeqs.addAll(newSeqs);
List<Integer> msgSeqs = new ArrayList<Integer>(msgSeqUidMap.keySet());
Collections.sort(msgSeqs); // Have to do comparisons in order because of msgSeq reductions
for (Integer msgSeqNumI : msgSeqs) {
if (K9.DEBUG) {
Log.v(K9.LOG_TAG, "Comparing EXPUNGEd msgSeq " + msgSeq + " to " + msgSeqNumI);
}
int msgSeqNum = msgSeqNumI;
if (msgSeqNum == msgSeq) {
String uid = msgSeqUidMap.get(msgSeqNum);
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Scheduling removal of UID " + uid + " because msgSeq " + msgSeqNum + " was expunged");
}
removeMsgUids.add(uid);
msgSeqUidMap.remove(msgSeqNum);
} else if (msgSeqNum > msgSeq) {
String uid = msgSeqUidMap.get(msgSeqNum);
if (K9.DEBUG) {
Log.d(K9.LOG_TAG, "Reducing msgSeq for UID " + uid + " from " + msgSeqNum + " to " + (msgSeqNum - 1));
}
msgSeqUidMap.remove(msgSeqNum);
msgSeqUidMap.put(msgSeqNum - 1, uid);
}
}
}
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Could not handle untagged FETCH for " + getLogId(), e);
}
}
return messageCountDelta;
}
private void pushMessages(List<Message> messages, boolean newArrivals) {
RuntimeException holdException = null;
try {
if (newArrivals) {
receiver.messagesArrived(this, messages);
} else {
receiver.messagesFlagsChanged(this, messages);
}
} catch (RuntimeException e) {
holdException = e;
}
if (holdException != null) {
throw holdException;
}
}
public void stop() {
stop.set(true);
if (listeningThread != null) {
listeningThread.interrupt();
}
ImapConnection conn = mConnection;
if (conn != null) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Closing mConnection to stop pushing for " + getLogId());
conn.close();
} else {
Log.w(K9.LOG_TAG, "Attempt to interrupt null mConnection to stop pushing on folderPusher for " + getLogId());
}
}
public void handleAsyncUntaggedResponse(ImapResponse response) {
if (K9.DEBUG)
Log.v(K9.LOG_TAG, "Got async response: " + response);
if (stop.get()) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got async untagged response: " + response + ", but stop is set for " + getLogId());
try {
sendDone();
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Exception while sending DONE for " + getLogId(), e);
}
} else {
if (response.mTag == null) {
if (response.size() > 1) {
Object responseType = response.get(1);
if (ImapResponseParser.equalsIgnoreCase(responseType, "EXISTS") || ImapResponseParser.equalsIgnoreCase(responseType, "EXPUNGE") ||
ImapResponseParser.equalsIgnoreCase(responseType, "FETCH")) {
wakeLock.acquire(K9.PUSH_WAKE_LOCK_TIMEOUT);
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Got useful async untagged response: " + response + " for " + getLogId());
try {
sendDone();
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Exception while sending DONE for " + getLogId(), e);
}
}
} else if (response.mCommandContinuationRequested) {
if (K9.DEBUG)
Log.d(K9.LOG_TAG, "Idling " + getLogId());
wakeLock.release();
}
}
}
}
}
@Override
public Pusher getPusher(PushReceiver receiver) {
return new ImapPusher(this, receiver);
}
public class ImapPusher implements Pusher {
final ImapStore mStore;
final PushReceiver mReceiver;
private long lastRefresh = -1;
HashMap<String, ImapFolderPusher> folderPushers = new HashMap<String, ImapFolderPusher>();
public ImapPusher(ImapStore store, PushReceiver receiver) {
mStore = store;
mReceiver = receiver;
}
public void start(List<String> folderNames) {
stop();
synchronized (folderPushers) {
setLastRefresh(System.currentTimeMillis());
for (String folderName : folderNames) {
ImapFolderPusher pusher = folderPushers.get(folderName);
if (pusher == null) {
pusher = new ImapFolderPusher(mStore, folderName, mReceiver);
folderPushers.put(folderName, pusher);
pusher.start();
}
}
}
}
public void refresh() {
synchronized (folderPushers) {
for (ImapFolderPusher folderPusher : folderPushers.values()) {
try {
folderPusher.refresh();
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Got exception while refreshing for " + folderPusher.getName(), e);
}
}
}
}
public void stop() {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Requested stop of IMAP pusher");
synchronized (folderPushers) {
for (ImapFolderPusher folderPusher : folderPushers.values()) {
try {
if (K9.DEBUG)
Log.i(K9.LOG_TAG, "Requesting stop of IMAP folderPusher " + folderPusher.getName());
folderPusher.stop();
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Got exception while stopping " + folderPusher.getName(), e);
}
}
folderPushers.clear();
}
}
public int getRefreshInterval() {
return (getAccount().getIdleRefreshMinutes() * 60 * 1000);
}
public long getLastRefresh() {
return lastRefresh;
}
public void setLastRefresh(long lastRefresh) {
this.lastRefresh = lastRefresh;
}
}
private interface UntaggedHandler {
void handleAsyncUntaggedResponse(ImapResponse response);
}
protected static class ImapPushState {
protected int uidNext;
protected ImapPushState(int nUidNext) {
uidNext = nUidNext;
}
protected static ImapPushState parse(String pushState) {
int newUidNext = -1;
if (pushState != null) {
StringTokenizer tokenizer = new StringTokenizer(pushState, ";");
while (tokenizer.hasMoreTokens()) {
StringTokenizer thisState = new StringTokenizer(tokenizer.nextToken(), "=");
if (thisState.hasMoreTokens()) {
String key = thisState.nextToken();
if ("uidNext".equalsIgnoreCase(key) && thisState.hasMoreTokens()) {
String value = thisState.nextToken();
try {
newUidNext = Integer.parseInt(value);
} catch (Exception e) {
Log.e(K9.LOG_TAG, "Unable to part uidNext value " + value, e);
}
}
}
}
}
return new ImapPushState(newUidNext);
}
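// For illustration: parse("uidNext=4711") yields uidNext == 4711, while
// a null, empty or malformed string yields the sentinel value -1.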
@Override
public String toString() {
return "uidNext=" + uidNext;
}
}
private interface ImapSearcher {
List<ImapResponse> search() throws IOException, MessagingException;
}
private static class FetchBodyCallback implements ImapResponseParser.IImapResponseCallback {
private HashMap<String, Message> mMessageMap;
FetchBodyCallback(HashMap<String, Message> messageMap) {
mMessageMap = messageMap;
}
@Override
public Object foundLiteral(ImapResponse response,
FixedLengthInputStream literal) throws IOException, Exception {
if (response.mTag == null &&
ImapResponseParser.equalsIgnoreCase(response.get(1), "FETCH")) {
ImapList fetchList = (ImapList)response.getKeyedValue("FETCH");
String uid = fetchList.getKeyedString("UID");
ImapMessage message = (ImapMessage) mMessageMap.get(uid);
message.parse(literal);
// Return placeholder object
return Integer.valueOf(1);
}
return null;
}
}
private static class FetchPartCallback implements ImapResponseParser.IImapResponseCallback {
private Part mPart;
FetchPartCallback(Part part) {
mPart = part;
}
@Override
public Object foundLiteral(ImapResponse response,
FixedLengthInputStream literal) throws IOException, Exception {
if (response.mTag == null &&
ImapResponseParser.equalsIgnoreCase(response.get(1), "FETCH")) {
//TODO: check for correct UID
String contentTransferEncoding = mPart.getHeader(
MimeHeader.HEADER_CONTENT_TRANSFER_ENCODING)[0];
return MimeUtility.decodeBody(literal, contentTransferEncoding);
}
return null;
}
}
}
| 1 | 11,743 | the original code ignores \Draft and \Recent, whereas your change will not. what this matters, i'm not sure. | k9mail-k-9 | java |
@@ -381,6 +381,8 @@ func TestQuotaReclamationDeletedBlocks(t *testing.T) {
}
// Stall the puts that comes as part of the sync call.
+ oldBServer := config2.BlockServer()
+ defer config2.SetBlockServer(oldBServer)
onWriteStalledCh, writeUnstallCh, ctxStall := StallBlockOp(
ctx, config2, StallableBlockPut)
| 1 | // Copyright 2016 Keybase Inc. All rights reserved.
// Use of this source code is governed by a BSD
// license that can be found in the LICENSE file.
package libkbfs
import (
"bytes"
"reflect"
"testing"
"time"
"github.com/keybase/client/go/libkb"
"golang.org/x/net/context"
)
func totalBlockRefs(m map[BlockID]map[BlockRefNonce]blockRefLocalStatus) int {
n := 0
for _, refs := range m {
n += len(refs)
}
return n
}
// Test that quota reclamation works for a simple case where the user
// does a few updates, then lets quota reclamation run, and we make
// sure that all historical blocks have been deleted.
func testQuotaReclamation(t *testing.T, ctx context.Context, config Config,
userName libkb.NormalizedUsername) {
clock, now := newTestClockAndTimeNow()
config.SetClock(clock)
rootNode := GetRootNodeOrBust(t, config, userName.String(), false)
kbfsOps := config.KBFSOps()
_, _, err := kbfsOps.CreateDir(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't create dir: %v", err)
}
err = kbfsOps.RemoveDir(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove dir: %v", err)
}
// Wait for outstanding archives
err = kbfsOps.SyncFromServerForTesting(ctx, rootNode.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// Make sure no blocks are deleted before there's a new-enough update.
bserverLocal, ok := config.BlockServer().(blockServerLocal)
if !ok {
t.Fatalf("Bad block server")
}
preQR1Blocks, err := bserverLocal.getAll(
ctx, rootNode.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
ops := kbfsOps.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode)
ops.fbm.forceQuotaReclamation()
err = ops.fbm.waitForQuotaReclamations(ctx)
if err != nil {
t.Fatalf("Couldn't wait for QR: %v", err)
}
postQR1Blocks, err := bserverLocal.getAll(
ctx, rootNode.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
if !reflect.DeepEqual(preQR1Blocks, postQR1Blocks) {
t.Fatalf("Blocks deleted too early (%v vs %v)!",
preQR1Blocks, postQR1Blocks)
}
// Increase the time and make a new revision, but don't run quota
// reclamation yet.
clock.Set(now.Add(2 * config.QuotaReclamationMinUnrefAge()))
_, _, err = kbfsOps.CreateDir(ctx, rootNode, "b")
if err != nil {
t.Fatalf("Couldn't create dir: %v", err)
}
preQR2Blocks, err := bserverLocal.getAll(
ctx, rootNode.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
ops.fbm.forceQuotaReclamation()
err = ops.fbm.waitForQuotaReclamations(ctx)
if err != nil {
t.Fatalf("Couldn't wait for QR: %v", err)
}
postQR2Blocks, err := bserverLocal.getAll(
ctx, rootNode.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
if pre, post := totalBlockRefs(preQR2Blocks),
totalBlockRefs(postQR2Blocks); post >= pre {
t.Errorf("Blocks didn't shrink after reclamation: pre: %d, post %d",
pre, post)
}
}
func TestQuotaReclamationSimple(t *testing.T) {
var userName libkb.NormalizedUsername = "test_user"
config, _, ctx := kbfsOpsInitNoMocks(t, userName)
defer CheckConfigAndShutdown(t, config)
testQuotaReclamation(t, ctx, config, userName)
}
// Just like the simple case, except tests that it unembeds large sets
// of pointers correctly.
func TestQuotaReclamationUnembedded(t *testing.T) {
var userName libkb.NormalizedUsername = "test_user"
config, _, ctx := kbfsOpsInitNoMocks(t, userName)
defer CheckConfigAndShutdown(t, config)
config.bsplit.(*BlockSplitterSimple).blockChangeEmbedMaxSize = 32
testQuotaReclamation(t, ctx, config, userName)
// Make sure the MD has an unembedded change block.
rootNode := GetRootNodeOrBust(t, config, userName.String(), false)
md, err := config.MDOps().GetForTLF(ctx, rootNode.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get MD: %v", err)
}
if md.data.cachedChanges.Info.BlockPointer == zeroPtr {
t.Fatalf("No unembedded changes for ops %v", md.data.Changes.Ops)
}
}
// Test that a single quota reclamation run doesn't try to reclaim too
// much quota at once.
func TestQuotaReclamationIncrementalReclamation(t *testing.T) {
var userName libkb.NormalizedUsername = "test_user"
config, _, ctx := kbfsOpsInitNoMocks(t, userName)
defer CheckConfigAndShutdown(t, config)
now := time.Now()
var clock TestClock
clock.Set(now)
config.SetClock(&clock)
// Allow for big embedded block changes, so they don't confuse our
// block-checking logic.
config.bsplit.(*BlockSplitterSimple).blockChangeEmbedMaxSize = 16 << 20
rootNode := GetRootNodeOrBust(t, config, userName.String(), false)
// Do a bunch of operations.
kbfsOps := config.KBFSOps()
for i := 0; i < numPointersPerGCThreshold; i++ {
_, _, err := kbfsOps.CreateDir(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't create dir: %v", err)
}
err = kbfsOps.RemoveDir(ctx, rootNode, "a")
if err != nil {
t.Fatalf("Couldn't remove dir: %v", err)
}
}
// Increase the time, and make sure that there is still more than
// one block in the history
clock.Set(now.Add(2 * config.QuotaReclamationMinUnrefAge()))
// Run it.
ops := kbfsOps.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode)
ops.fbm.forceQuotaReclamation()
err := ops.fbm.waitForQuotaReclamations(ctx)
if err != nil {
t.Fatalf("Couldn't wait for QR: %v", err)
}
bserverLocal, ok := config.BlockServer().(blockServerLocal)
if !ok {
t.Fatalf("Bad block server")
}
blocks, err := bserverLocal.getAll(ctx, rootNode.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
b := totalBlockRefs(blocks)
if b <= 1 {
t.Errorf("Too many blocks left after first QR: %d", b)
}
// Now let it run to completion
for b > 1 {
ops.fbm.forceQuotaReclamation()
err = ops.fbm.waitForQuotaReclamations(ctx)
if err != nil {
t.Fatalf("Couldn't wait for QR: %v", err)
}
blocks, err := bserverLocal.getAll(
ctx, rootNode.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
oldB := b
b = totalBlockRefs(blocks)
if b >= oldB {
t.Fatalf("Blocks didn't shrink after reclamation: %d vs. %d",
b, oldB)
}
}
}
// Test that deleted blocks are correctly flushed from the user cache.
func TestQuotaReclamationDeletedBlocks(t *testing.T) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx := kbfsOpsInitNoMocks(t, u1, u2)
defer CheckConfigAndShutdown(t, config1)
clock, now := newTestClockAndTimeNow()
config1.SetClock(clock)
// Initialize the MD using a different config
config2 := ConfigAsUser(config1, u2)
defer CheckConfigAndShutdown(t, config2)
config2.SetClock(clock)
name := u1.String() + "," + u2.String()
rootNode1 := GetRootNodeOrBust(t, config1, name, false)
data := []byte{1, 2, 3, 4, 5}
kbfsOps1 := config1.KBFSOps()
aNode1, _, err := kbfsOps1.CreateFile(ctx, rootNode1, "a", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create dir: %v", err)
}
err = kbfsOps1.Write(ctx, aNode1, data, 0)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
err = kbfsOps1.Sync(ctx, aNode1)
if err != nil {
t.Fatalf("Couldn't sync file: %v", err)
}
// Make two more files that share a block, only one of which will
// be deleted.
otherData := []byte{5, 4, 3, 2, 1}
for _, name := range []string{"b", "c"} {
node, _, err := kbfsOps1.CreateFile(ctx, rootNode1, name, false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create dir: %v", err)
}
err = kbfsOps1.Write(ctx, node, otherData, 0)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
err = kbfsOps1.Sync(ctx, node)
if err != nil {
t.Fatalf("Couldn't sync file: %v", err)
}
}
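// Because "b" and "c" contain identical bytes, the content-addressed
// block server stores a single block with two references; removing "b"
// below therefore drops only one of those references.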
// u2 reads the file
rootNode2 := GetRootNodeOrBust(t, config2, name, false)
kbfsOps2 := config2.KBFSOps()
aNode2, _, err := kbfsOps2.Lookup(ctx, rootNode2, "a")
if err != nil {
t.Fatalf("Couldn't create dir: %v", err)
}
data2 := make([]byte, len(data))
_, err = kbfsOps2.Read(ctx, aNode2, data2, 0)
if err != nil {
t.Fatalf("Couldn't read file: %v", err)
}
if !bytes.Equal(data, data2) {
t.Fatalf("Read bad data: %v", data2)
}
bNode2, _, err := kbfsOps2.Lookup(ctx, rootNode2, "b")
if err != nil {
t.Fatalf("Couldn't create dir: %v", err)
}
data2 = make([]byte, len(data))
_, err = kbfsOps2.Read(ctx, bNode2, data2, 0)
if err != nil {
t.Fatalf("Couldn't read file: %v", err)
}
if !bytes.Equal(otherData, data2) {
t.Fatalf("Read bad data: %v", data2)
}
// Remove two of the files
err = kbfsOps1.RemoveEntry(ctx, rootNode1, "a")
if err != nil {
t.Fatalf("Couldn't remove file: %v", err)
}
err = kbfsOps1.RemoveEntry(ctx, rootNode1, "b")
if err != nil {
t.Fatalf("Couldn't remove file: %v", err)
}
// Wait for outstanding archives
err = kbfsOps1.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// Get the current set of blocks
bserverLocal, ok := config1.BlockServer().(blockServerLocal)
if !ok {
t.Fatalf("Bad block server")
}
preQRBlocks, err := bserverLocal.getAll(
ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
clock.Set(now.Add(2 * config1.QuotaReclamationMinUnrefAge()))
ops1 := kbfsOps1.(*KBFSOpsStandard).getOpsByNode(ctx, rootNode1)
ops1.fbm.forceQuotaReclamation()
err = ops1.fbm.waitForQuotaReclamations(ctx)
if err != nil {
t.Fatalf("Couldn't wait for QR: %v", err)
}
postQRBlocks, err := bserverLocal.getAll(
ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
if pre, post := totalBlockRefs(preQRBlocks),
totalBlockRefs(postQRBlocks); post >= pre {
t.Errorf("Blocks didn't shrink after reclamation: pre: %d, post %d",
pre, post)
}
// Sync u2
err = kbfsOps2.SyncFromServerForTesting(ctx, rootNode2.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// Make a file with the other data on node 2, which uses a block
// for which one reference has been deleted, but the other should
// still be live. This will cause one dedup reference, and 3 new
// blocks (2 from the create, and 1 from the sync).
dNode, _, err := kbfsOps2.CreateFile(ctx, rootNode2, "d", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create file: %v", err)
}
err = kbfsOps2.Write(ctx, dNode, otherData, 0)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
err = kbfsOps2.Sync(ctx, dNode)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
// Wait for outstanding archives
err = kbfsOps2.SyncFromServerForTesting(ctx, rootNode2.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// Make the same file on node 2, making sure this doesn't try to
// reuse the same block (i.e., there are only 2 put calls).
eNode, _, err := kbfsOps2.CreateFile(ctx, rootNode2, "e", false, NoExcl)
if err != nil {
t.Fatalf("Couldn't create dir: %v", err)
}
err = kbfsOps2.Write(ctx, eNode, data, 0)
if err != nil {
t.Fatalf("Couldn't write file: %v", err)
}
	// Stall the puts that come as part of the sync call.
onWriteStalledCh, writeUnstallCh, ctxStall := StallBlockOp(
ctx, config2, StallableBlockPut)
// Start the sync and wait for it to stall twice only.
errChan := make(chan error)
go func() {
errChan <- kbfsOps2.Sync(ctxStall, eNode)
}()
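	// Exactly two puts are expected for this sync; if a third one were
	// attempted it would block on the stall channel and the sync below
	// would never complete, failing the test.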
<-onWriteStalledCh
<-onWriteStalledCh
writeUnstallCh <- struct{}{}
writeUnstallCh <- struct{}{}
// Don't close the channel, we want to make sure other Puts get
// stalled.
err = <-errChan
if err != nil {
t.Fatalf("Couldn't sync file: %v", err)
}
// Wait for outstanding archives
err = kbfsOps2.SyncFromServerForTesting(ctx, rootNode2.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
// Delete any blocks that happened to be put during a failed (due
// to recoverable block errors) update.
clock.Set(now.Add(2 * config1.QuotaReclamationMinUnrefAge()))
ops1.fbm.forceQuotaReclamation()
err = ops1.fbm.waitForQuotaReclamations(ctx)
if err != nil {
t.Fatalf("Couldn't wait for QR: %v", err)
}
endBlocks, err := bserverLocal.getAll(
ctx, rootNode2.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't get blocks: %v", err)
}
	// There should be exactly 8 extra block refs (2 for the create and 2
	// for the write/sync, for each of the two files above), and exactly
	// one block should have more than one reference.
if pre, post := totalBlockRefs(postQRBlocks),
totalBlockRefs(endBlocks); post != pre+8 {
t.Errorf("Different number of blocks than expected: pre: %d, post %d",
pre, post)
}
oneDedupFound := false
for id, refs := range endBlocks {
if len(refs) > 2 {
t.Errorf("Block %v unexpectedly had %d refs %v", id, len(refs), refs)
} else if len(refs) == 2 {
if oneDedupFound {
t.Errorf("Extra dedup block %v with refs %v", id, refs)
} else {
oneDedupFound = true
}
}
}
if !oneDedupFound {
t.Error("No dedup reference found")
}
}
// Test that quota reclamation doesn't happen while waiting for a
// requested rekey.
func TestQuotaReclamationFailAfterRekeyRequest(t *testing.T) {
var u1, u2 libkb.NormalizedUsername = "u1", "u2"
config1, _, ctx := kbfsOpsConcurInit(t, u1, u2)
defer CheckConfigAndShutdown(t, config1)
clock := newTestClockNow()
config1.SetClock(clock)
config2 := ConfigAsUser(config1.(*ConfigLocal), u2)
defer CheckConfigAndShutdown(t, config2)
_, uid2, err := config2.KBPKI().GetCurrentUserInfo(context.Background())
if err != nil {
t.Fatal(err)
}
// Create a shared folder.
name := u1.String() + "," + u2.String()
rootNode1 := GetRootNodeOrBust(t, config1, name, false)
config2Dev2 := ConfigAsUser(config1.(*ConfigLocal), u2)
defer CheckConfigAndShutdown(t, config2Dev2)
// Now give u2 a new device. The configs don't share a Keybase
// Daemon so we have to do it in all places.
AddDeviceForLocalUserOrBust(t, config1, uid2)
AddDeviceForLocalUserOrBust(t, config2, uid2)
devIndex := AddDeviceForLocalUserOrBust(t, config2Dev2, uid2)
SwitchDeviceForLocalUserOrBust(t, config2Dev2, devIndex)
// user 2 should be unable to read the data now since its device
// wasn't registered when the folder was originally created.
_, err = GetRootNodeForTest(config2Dev2, name, false)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Got unexpected error when reading with new key: %v", err)
}
// Request a rekey from the new device, which will only be
// able to set the rekey bit (copying the root MD).
kbfsOps2Dev2 := config2Dev2.KBFSOps()
err = kbfsOps2Dev2.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// Make sure QR returns an error.
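	// The new device can't decrypt the folder yet, so the reclamation
	// attempt should surface NeedSelfRekeyError. (The Add(1) below matches
	// the Done() that doReclamation presumably calls on the wait group.)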
ops := config2Dev2.KBFSOps().(*KBFSOpsStandard).getOpsByNode(ctx, rootNode1)
timer := time.NewTimer(config2Dev2.QuotaReclamationPeriod())
ops.fbm.reclamationGroup.Add(1)
err = ops.fbm.doReclamation(timer)
if _, ok := err.(NeedSelfRekeyError); !ok {
t.Fatalf("Unexpected rekey error: %v", err)
}
// Rekey from another device.
kbfsOps1 := config1.KBFSOps()
err = kbfsOps1.SyncFromServerForTesting(ctx, rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
err = kbfsOps1.Rekey(ctx, rootNode1.GetFolderBranch().Tlf)
if err != nil {
t.Fatalf("Couldn't rekey: %v", err)
}
// Retry the QR; should work now.
err = kbfsOps2Dev2.SyncFromServerForTesting(ctx,
rootNode1.GetFolderBranch())
if err != nil {
t.Fatalf("Couldn't sync from server: %v", err)
}
ops.fbm.reclamationGroup.Add(1)
err = ops.fbm.doReclamation(timer)
if err != nil {
t.Fatalf("Unexpected rekey error: %v", err)
}
}
| 1 | 13,076 | Why was this necessary? | keybase-kbfs | go |
@@ -111,6 +111,7 @@ var opts struct {
Test struct {
FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"`
NumRuns int `long:"num_runs" short:"n" default:"1" description:"Number of times to run each test target."`
+ Rerun bool `long:"rerun" description:"Forces the tests for the given targets to be rerun"`
Sequentially bool `long:"sequentially" description:"Whether to run multiple runs of the same test sequentially"`
TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."`
SurefireDir cli.Filepath `long:"surefire_dir" default:"plz-out/surefire-reports" description:"Directory to copy XML test results to."` | 1 | package main
import (
"context"
"fmt"
"net/http"
_ "net/http/pprof"
"os"
"path"
"runtime/pprof"
"strings"
"sync"
"syscall"
"github.com/jessevdk/go-flags"
"gopkg.in/op/go-logging.v1"
"github.com/thought-machine/please/src/build"
"github.com/thought-machine/please/src/cache"
"github.com/thought-machine/please/src/clean"
"github.com/thought-machine/please/src/cli"
"github.com/thought-machine/please/src/core"
"github.com/thought-machine/please/src/export"
"github.com/thought-machine/please/src/gc"
"github.com/thought-machine/please/src/hashes"
"github.com/thought-machine/please/src/help"
"github.com/thought-machine/please/src/ide/intellij"
"github.com/thought-machine/please/src/output"
"github.com/thought-machine/please/src/plz"
"github.com/thought-machine/please/src/query"
"github.com/thought-machine/please/src/run"
"github.com/thought-machine/please/src/scm"
"github.com/thought-machine/please/src/test"
"github.com/thought-machine/please/src/tool"
"github.com/thought-machine/please/src/update"
"github.com/thought-machine/please/src/utils"
"github.com/thought-machine/please/src/watch"
"github.com/thought-machine/please/src/worker"
)
var log = logging.MustGetLogger("plz")
var config *core.Configuration
var opts struct {
Usage string `usage:"Please is a high-performance multi-language build system.\n\nIt uses BUILD files to describe what to build and how to build it.\nSee https://please.build for more information about how it works and what Please can do for you."`
BuildFlags struct {
Config string `short:"c" long:"config" env:"PLZ_BUILD_CONFIG" description:"Build config to use. Defaults to opt."`
Arch cli.Arch `short:"a" long:"arch" description:"Architecture to compile for."`
RepoRoot cli.Filepath `short:"r" long:"repo_root" description:"Root of repository to build."`
NumThreads int `short:"n" long:"num_threads" description:"Number of concurrent build operations. Default is number of CPUs + 2."`
Include []string `short:"i" long:"include" description:"Label of targets to include in automatic detection."`
Exclude []string `short:"e" long:"exclude" description:"Label of targets to exclude from automatic detection."`
Option ConfigOverrides `short:"o" long:"override" env:"PLZ_OVERRIDES" env-delim:";" description:"Options to override from .plzconfig (e.g. -o please.selfupdate:false)"`
Profile core.ConfigProfiles `long:"profile" env:"PLZ_CONFIG_PROFILE" description:"Configuration profile to load; e.g. --profile=dev will load .plzconfig.dev if it exists."`
PreTargets []core.BuildLabel `long:"pre" hidden:"true" description:"Targets to build before the other command-line ones. Sometimes useful to debug targets generated as part of a post-build function."`
} `group:"Options controlling what to build & how to build it"`
OutputFlags struct {
Verbosity cli.Verbosity `short:"v" long:"verbosity" description:"Verbosity of output (error, warning, notice, info, debug)" default:"warning"`
LogFile cli.Filepath `long:"log_file" description:"File to echo full logging output to" default:"plz-out/log/build.log"`
LogFileLevel cli.Verbosity `long:"log_file_level" description:"Log level for file output" default:"debug"`
InteractiveOutput bool `long:"interactive_output" description:"Show interactive output in a terminal"`
PlainOutput bool `short:"p" long:"plain_output" description:"Don't show interactive output."`
Colour bool `long:"colour" description:"Forces coloured output from logging & other shell output."`
NoColour bool `long:"nocolour" description:"Forces colourless output from logging & other shell output."`
TraceFile cli.Filepath `long:"trace_file" description:"File to write Chrome tracing output into"`
ShowAllOutput bool `long:"show_all_output" description:"Show all output live from all commands. Implies --plain_output."`
CompletionScript bool `long:"completion_script" description:"Prints the bash / zsh completion script to stdout"`
} `group:"Options controlling output & logging"`
FeatureFlags struct {
NoUpdate bool `long:"noupdate" description:"Disable Please attempting to auto-update itself."`
NoCache bool `long:"nocache" description:"Disable caches (NB. not incrementality)"`
NoHashVerification bool `long:"nohash_verification" description:"Hash verification errors are nonfatal."`
NoLock bool `long:"nolock" description:"Don't attempt to lock the repo exclusively. Use with care."`
KeepWorkdirs bool `long:"keep_workdirs" description:"Don't clean directories in plz-out/tmp after successfully building targets."`
HTTPProxy cli.URL `long:"http_proxy" env:"HTTP_PROXY" description:"HTTP proxy to use for downloads"`
} `group:"Options that enable / disable certain features"`
HelpFlags struct {
Help bool `short:"h" long:"help" description:"Show this help message"`
Version bool `long:"version" description:"Print the version of Please"`
} `group:"Help Options"`
Profile string `long:"profile_file" hidden:"true" description:"Write profiling output to this file"`
MemProfile string `long:"mem_profile_file" hidden:"true" description:"Write a memory profile to this file"`
ProfilePort int `long:"profile_port" hidden:"true" description:"Serve profiling info on this port."`
ParsePackageOnly bool `description:"Parses a single package only. All that's necessary for some commands." no-flag:"true"`
Complete string `long:"complete" hidden:"true" env:"PLZ_COMPLETE" description:"Provide completion options for this build target."`
Build struct {
Prepare bool `long:"prepare" description:"Prepare build directory for these targets but don't build them."`
Shell bool `long:"shell" description:"Like --prepare, but opens a shell in the build directory with the appropriate environment variables."`
		Rebuild bool `long:"rebuild" description:"Forces a rebuild of one or more targets, bypassing the cache."`
NoDownload bool `long:"nodownload" hidden:"true" description:"Don't download outputs after building. Only applies when using remote build execution."`
Download bool `long:"download" hidden:"true" description:"Force download of all outputs regardless of original target spec. Only applies when using remote build execution."`
Args struct { // Inner nesting is necessary to make positional-args work :(
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to build"`
} `positional-args:"true" required:"true"`
} `command:"build" description:"Builds one or more targets"`
Hash struct {
Detailed bool `long:"detailed" description:"Produces a detailed breakdown of the hash"`
Update bool `short:"u" long:"update" description:"Rewrites the hashes in the BUILD file to the new values"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to build"`
} `positional-args:"true" required:"true"`
} `command:"hash" description:"Calculates hash for one or more targets"`
Test struct {
FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"`
NumRuns int `long:"num_runs" short:"n" default:"1" description:"Number of times to run each test target."`
Sequentially bool `long:"sequentially" description:"Whether to run multiple runs of the same test sequentially"`
TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."`
SurefireDir cli.Filepath `long:"surefire_dir" default:"plz-out/surefire-reports" description:"Directory to copy XML test results to."`
ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."`
Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."`
Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."`
Detailed bool `long:"detailed" description:"Prints more detailed output after tests."`
Shell bool `long:"shell" description:"Opens a shell in the test directory with the appropriate environment variables."`
StreamResults bool `long:"stream_results" description:"Prints test results on stdout as they are run."`
// Slightly awkward since we can specify a single test with arguments or multiple test targets.
Args struct {
Target core.BuildLabel `positional-arg-name:"target" description:"Target to test"`
Args []string `positional-arg-name:"arguments" description:"Arguments or test selectors"`
} `positional-args:"true"`
} `command:"test" description:"Builds and tests one or more targets"`
Cover struct {
active bool `no-flag:"true"`
FailingTestsOk bool `long:"failing_tests_ok" hidden:"true" description:"Exit with status 0 even if tests fail (nonzero only if catastrophe happens)"`
NoCoverageReport bool `long:"nocoverage_report" description:"Suppress the per-file coverage report displayed in the shell"`
		LineCoverageReport bool `short:"l" long:"line_coverage_report" description:"Show a line-by-line coverage report for all affected files."`
NumRuns int `short:"n" long:"num_runs" default:"1" description:"Number of times to run each test target."`
Sequentially bool `long:"sequentially" description:"Whether to run multiple runs of the same test sequentially"`
IncludeAllFiles bool `short:"a" long:"include_all_files" description:"Include all dependent files in coverage (default is just those from relevant packages)"`
IncludeFile cli.Filepaths `long:"include_file" description:"Filenames to filter coverage display to"`
TestResultsFile cli.Filepath `long:"test_results_file" default:"plz-out/log/test_results.xml" description:"File to write combined test results to."`
SurefireDir cli.Filepath `long:"surefire_dir" default:"plz-out/surefire-reports" description:"Directory to copy XML test results to."`
CoverageResultsFile cli.Filepath `long:"coverage_results_file" default:"plz-out/log/coverage.json" description:"File to write combined coverage results to."`
CoverageXMLReport cli.Filepath `long:"coverage_xml_report" default:"plz-out/log/coverage.xml" description:"XML File to write combined coverage results to."`
Incremental bool `short:"i" long:"incremental" description:"Calculates summary statistics for incremental coverage, i.e. stats for just the lines currently modified."`
ShowOutput bool `short:"s" long:"show_output" description:"Always show output of tests, even on success."`
Debug bool `short:"d" long:"debug" description:"Allows starting an interactive debugger on test failure. Does not work with all test types (currently only python/pytest, C and C++). Implies -c dbg unless otherwise set."`
Failed bool `short:"f" long:"failed" description:"Runs just the test cases that failed from the immediately previous run."`
Detailed bool `long:"detailed" description:"Prints more detailed output after tests."`
Shell bool `long:"shell" description:"Opens a shell in the test directory with the appropriate environment variables."`
StreamResults bool `long:"stream_results" description:"Prints test results on stdout as they are run."`
Args struct {
Target core.BuildLabel `positional-arg-name:"target" description:"Target to test" group:"one test"`
Args []string `positional-arg-name:"arguments" description:"Arguments or test selectors" group:"one test"`
} `positional-args:"true"`
} `command:"cover" description:"Builds and tests one or more targets, and calculates coverage."`
Run struct {
Env bool `long:"env" description:"Overrides environment variables (e.g. PATH) in the new process."`
Parallel struct {
NumTasks int `short:"n" long:"num_tasks" default:"10" description:"Maximum number of subtasks to run in parallel"`
Quiet bool `short:"q" long:"quiet" description:"Suppress output from successful subprocesses."`
PositionalArgs struct {
Targets []core.BuildLabel `positional-arg-name:"target" description:"Targets to run"`
} `positional-args:"true" required:"true"`
Args cli.Filepaths `short:"a" long:"arg" description:"Arguments to pass to the called processes."`
Detach bool `long:"detach" description:"Detach from the parent process when all children have spawned"`
} `command:"parallel" description:"Runs a sequence of targets in parallel"`
Sequential struct {
Quiet bool `short:"q" long:"quiet" description:"Suppress output from successful subprocesses."`
PositionalArgs struct {
Targets []core.BuildLabel `positional-arg-name:"target" description:"Targets to run"`
} `positional-args:"true" required:"true"`
Args cli.Filepaths `short:"a" long:"arg" description:"Arguments to pass to the called processes."`
} `command:"sequential" description:"Runs a sequence of targets sequentially."`
Args struct {
Target core.BuildLabel `positional-arg-name:"target" required:"true" description:"Target to run"`
Args cli.Filepaths `positional-arg-name:"arguments" description:"Arguments to pass to target when running (to pass flags to the target, put -- before them)"`
} `positional-args:"true"`
Remote bool `long:"remote" description:"Send targets to be executed remotely."`
} `command:"run" subcommands-optional:"true" description:"Builds and runs a single target"`
Clean struct {
NoBackground bool `long:"nobackground" short:"f" description:"Don't fork & detach until clean is finished."`
Args struct { // Inner nesting is necessary to make positional-args work :(
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to clean (default is to clean everything)"`
} `positional-args:"true"`
} `command:"clean" description:"Cleans build artifacts" subcommands-optional:"true"`
Watch struct {
Run bool `short:"r" long:"run" description:"Runs the specified targets when they change (default is to build or test as appropriate)."`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" required:"true" description:"Targets to watch the sources of for changes"`
} `positional-args:"true" required:"true"`
} `command:"watch" description:"Watches sources of targets for changes and rebuilds them"`
Update struct {
Force bool `long:"force" description:"Forces a re-download of the new version."`
NoVerify bool `long:"noverify" description:"Skips signature verification of downloaded version"`
Latest bool `long:"latest" description:"Update to latest available version (overrides config)."`
Version cli.Version `long:"version" description:"Updates to a particular version (overrides config)."`
} `command:"update" description:"Checks for an update and updates if needed."`
Op struct {
} `command:"op" description:"Re-runs previous command."`
Init struct {
Dir cli.Filepath `long:"dir" description:"Directory to create config in" default:"."`
BazelCompatibility bool `long:"bazel_compat" description:"Initialises config for Bazel compatibility mode."`
Config struct {
User bool `short:"u" long:"user" description:"Modifies the user-level config file"`
Local bool `short:"l" long:"local" description:"Modifies the local config file (.plzconfig.local)"`
Args struct {
Options ConfigOverrides `positional-arg-name:"config" required:"true" description:"Attributes to set"`
} `positional-args:"true" required:"true"`
} `command:"config" description:"Initialises specific attributes of config files"`
} `command:"init" subcommands-optional:"true" description:"Initialises a .plzconfig file in the current directory"`
Gc struct {
Conservative bool `short:"c" long:"conservative" description:"Runs a more conservative / safer GC."`
TargetsOnly bool `short:"t" long:"targets_only" description:"Only print the targets to delete"`
SrcsOnly bool `short:"s" long:"srcs_only" description:"Only print the source files to delete"`
NoPrompt bool `short:"y" long:"no_prompt" description:"Remove targets without prompting"`
DryRun bool `short:"n" long:"dry_run" description:"Don't remove any targets or files, just print what would be done"`
Git bool `short:"g" long:"git" description:"Use 'git rm' to remove unused files instead of just 'rm'."`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to limit gc to."`
} `positional-args:"true"`
} `command:"gc" description:"Analyzes the repo to determine unneeded targets."`
Export struct {
Output string `short:"o" long:"output" required:"true" description:"Directory to export into"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Labels to export."`
} `positional-args:"true"`
Outputs struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to export."`
} `positional-args:"true"`
} `command:"outputs" description:"Exports outputs of a set of targets"`
} `command:"export" subcommands-optional:"true" description:"Exports a set of targets and files from the repo."`
Help struct {
Args struct {
Topic help.Topic `positional-arg-name:"topic" description:"Topic to display help on"`
} `positional-args:"true"`
} `command:"help" alias:"halp" description:"Displays help about various parts of plz or its build rules"`
Tool struct {
Args struct {
Tool tool.Tool `positional-arg-name:"tool" description:"Tool to invoke (jarcat, lint, etc)"`
Args cli.Filepaths `positional-arg-name:"arguments" description:"Arguments to pass to the tool"`
} `positional-args:"true"`
} `command:"tool" hidden:"true" description:"Invoke one of Please's sub-tools"`
Query struct {
Deps struct {
Hidden bool `long:"hidden" short:"h" description:"Output internal / hidden dependencies too"`
Level int `long:"level" default:"-1" description:"Levels of the dependencies to retrieve."`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"deps" description:"Queries the dependencies of a target."`
ReverseDeps struct {
Level int `long:"level" default:"1" description:"Levels of the dependencies to retrieve (-1 for unlimited)."`
Hidden bool `long:"hidden" short:"h" description:"Output internal / hidden dependencies too"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"revdeps" alias:"reverseDeps" description:"Queries all the reverse dependencies of a target."`
SomePath struct {
Args struct {
Target1 core.BuildLabel `positional-arg-name:"target1" description:"First build target" required:"true"`
Target2 core.BuildLabel `positional-arg-name:"target2" description:"Second build target" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"somepath" description:"Queries for a path between two targets"`
AllTargets struct {
Hidden bool `long:"hidden" description:"Show hidden targets as well"`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query"`
} `positional-args:"true"`
} `command:"alltargets" description:"Lists all targets in the graph"`
Print struct {
Fields []string `short:"f" long:"field" description:"Individual fields to print of the target"`
Labels []string `short:"l" long:"label" description:"Prints all labels with the given prefix (with the prefix stripped off). Overrides --field."`
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to print" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"print" description:"Prints a representation of a single target"`
Completions struct {
Cmd string `long:"cmd" description:"Command to complete for" default:"build"`
Args struct {
Fragments cli.StdinStrings `positional-arg-name:"fragment" description:"Initial fragment to attempt to complete"`
} `positional-args:"true"`
} `command:"completions" subcommands-optional:"true" description:"Prints possible completions for a string."`
Input struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to display inputs for" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"input" alias:"inputs" description:"Prints all transitive inputs of a target."`
Output struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to display outputs for" required:"true"`
} `positional-args:"true" required:"true"`
} `command:"output" alias:"outputs" description:"Prints all outputs of a target."`
Graph struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to render graph for"`
} `positional-args:"true"`
} `command:"graph" description:"Prints a JSON representation of the build graph."`
WhatOutputs struct {
EchoFiles bool `long:"echo_files" description:"Echo the file for which the printed output is responsible."`
Args struct {
Files cli.StdinStrings `positional-arg-name:"files" required:"true" description:"Files to query targets responsible for"`
} `positional-args:"true"`
} `command:"whatoutputs" description:"Prints out target(s) responsible for outputting provided file(s)"`
Rules struct {
Args struct {
Targets []core.BuildLabel `hidden:"true" description:"deprecated, has no effect"`
} `positional-args:"true"`
} `command:"rules" description:"Prints built-in rules to stdout as JSON"`
Changes struct {
Since string `short:"s" long:"since" default:"origin/master" description:"Revision to compare against"`
IncludeDependees string `long:"include_dependees" default:"none" choice:"none" choice:"direct" choice:"transitive" description:"Include direct or transitive dependees of changed targets."`
Inexact bool `long:"inexact" description:"Calculate changes more quickly and without doing any SCM checkouts, but may miss some targets."`
In string `long:"in" description:"Calculate changes contained within given scm spec (commit range/sha/ref/etc). Implies --inexact."`
Args struct {
Files cli.StdinStrings `positional-arg-name:"files" description:"Files to calculate changes for. Overrides flags relating to SCM operations."`
} `positional-args:"true"`
} `command:"changes" description:"Calculates the set of changed targets in regard to a set of modified files or SCM commits."`
Roots struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to query" required:"true"`
} `positional-args:"true"`
} `command:"roots" description:"Show build labels with no dependents in the given list, from the list."`
Filter struct {
Args struct {
Targets []core.BuildLabel `positional-arg-name:"targets" description:"Targets to filter"`
} `positional-args:"true"`
} `command:"filter" description:"Filter the given set of targets according to some rules"`
} `command:"query" description:"Queries information about the build graph"`
Ide struct {
IntelliJ struct {
Args struct {
Labels []core.BuildLabel `positional-arg-name:"labels" description:"Targets to include."`
} `positional-args:"true"`
} `command:"intellij" description:"Export intellij structure for the given targets and their dependencies."`
} `command:"ide" description:"IDE Support and generation."`
}
// Definitions of what we do for each command.
// Functions are called after args are parsed and return the process exit code (0 on success).
var buildFunctions = map[string]func() int{
"build": func() int {
		if opts.Build.Rebuild {
opts.FeatureFlags.NoCache = true
}
success, state := runBuild(opts.Build.Args.Targets, true, false, false)
return toExitCode(success, state)
},
"hash": func() int {
success, state := runBuild(opts.Hash.Args.Targets, true, false, false)
if success {
if opts.Hash.Detailed {
for _, target := range state.ExpandOriginalTargets() {
build.PrintHashes(state, state.Graph.TargetOrDie(target))
}
}
if opts.Hash.Update {
hashes.RewriteHashes(state, state.ExpandOriginalTargets())
}
}
return toExitCode(success, state)
},
"test": func() int {
targets := testTargets(opts.Test.Args.Target, opts.Test.Args.Args, opts.Test.Failed, opts.Test.TestResultsFile)
success, state := doTest(targets, opts.Test.SurefireDir, opts.Test.TestResultsFile)
return toExitCode(success, state)
},
"cover": func() int {
opts.Cover.active = true
if opts.BuildFlags.Config != "" {
log.Warning("Build config overridden; coverage may not be available for some languages")
} else {
opts.BuildFlags.Config = "cover"
}
targets := testTargets(opts.Cover.Args.Target, opts.Cover.Args.Args, opts.Cover.Failed, opts.Cover.TestResultsFile)
os.RemoveAll(string(opts.Cover.CoverageResultsFile))
success, state := doTest(targets, opts.Cover.SurefireDir, opts.Cover.TestResultsFile)
test.AddOriginalTargetsToCoverage(state, opts.Cover.IncludeAllFiles)
test.RemoveFilesFromCoverage(state.Coverage, state.Config.Cover.ExcludeExtension)
var stats *test.IncrementalStats
if opts.Cover.Incremental {
lines, err := scm.NewFallback(core.RepoRoot).ChangedLines()
if err != nil {
log.Fatalf("Failed to determine changes: %s", err)
}
stats = test.CalculateIncrementalStats(state, lines)
}
test.WriteCoverageToFileOrDie(state.Coverage, string(opts.Cover.CoverageResultsFile), stats)
test.WriteXMLCoverageToFileOrDie(targets, state.Coverage, string(opts.Cover.CoverageXMLReport))
if opts.Cover.LineCoverageReport {
output.PrintLineCoverageReport(state, opts.Cover.IncludeFile.AsStrings())
} else if !opts.Cover.NoCoverageReport {
output.PrintCoverage(state, opts.Cover.IncludeFile.AsStrings())
}
if opts.Cover.Incremental {
output.PrintIncrementalCoverage(stats)
}
return toExitCode(success, state)
},
"run": func() int {
if success, state := runBuild([]core.BuildLabel{opts.Run.Args.Target}, true, false, false); success {
run.Run(state, opts.Run.Args.Target, opts.Run.Args.Args.AsStrings(), opts.Run.Remote, opts.Run.Env)
}
return 1 // We should never return from run.Run so if we make it here something's wrong.
},
"parallel": func() int {
if success, state := runBuild(opts.Run.Parallel.PositionalArgs.Targets, true, false, false); success {
os.Exit(run.Parallel(context.Background(), state, state.ExpandOriginalTargets(), opts.Run.Parallel.Args.AsStrings(), opts.Run.Parallel.NumTasks, opts.Run.Parallel.Quiet, opts.Run.Remote, opts.Run.Env, opts.Run.Parallel.Detach))
}
return 1
},
"sequential": func() int {
if success, state := runBuild(opts.Run.Sequential.PositionalArgs.Targets, true, false, false); success {
os.Exit(run.Sequential(state, state.ExpandOriginalTargets(), opts.Run.Sequential.Args.AsStrings(), opts.Run.Sequential.Quiet, opts.Run.Remote, opts.Run.Env))
}
return 1
},
"clean": func() int {
config.Cache.DirClean = false // don't run the normal cleaner
if len(opts.Clean.Args.Targets) == 0 && core.InitialPackage()[0].PackageName == "" {
if len(opts.BuildFlags.Include) == 0 && len(opts.BuildFlags.Exclude) == 0 {
// Clean everything, doesn't require parsing at all.
state := core.NewBuildState(config)
clean.Clean(config, newCache(state), !opts.Clean.NoBackground)
return 0
}
opts.Clean.Args.Targets = core.WholeGraph
}
if success, state := runBuild(opts.Clean.Args.Targets, false, false, false); success {
clean.Targets(state, state.ExpandOriginalTargets(), !opts.FeatureFlags.NoCache)
return 0
}
return 1
},
"update": func() int {
fmt.Printf("Up to date (version %s).\n", core.PleaseVersion)
return 0 // We'd have died already if something was wrong.
},
"op": func() int {
cmd := core.ReadLastOperationOrDie()
log.Notice("OP PLZ: %s", strings.Join(cmd, " "))
// Annoyingly we don't seem to have any access to execvp() which would be rather useful here...
executable, err := os.Executable()
if err == nil {
err = syscall.Exec(executable, append([]string{executable}, cmd...), os.Environ())
}
log.Fatalf("SORRY OP: %s", err) // On success Exec never returns.
return 1
},
"gc": func() int {
success, state := runBuild(core.WholeGraph, false, false, true)
if success {
state.OriginalTargets = state.Config.Gc.Keep
gc.GarbageCollect(state, opts.Gc.Args.Targets, state.ExpandOriginalTargets(), state.Config.Gc.Keep, state.Config.Gc.KeepLabel,
opts.Gc.Conservative, opts.Gc.TargetsOnly, opts.Gc.SrcsOnly, opts.Gc.NoPrompt, opts.Gc.DryRun, opts.Gc.Git)
}
return toExitCode(success, state)
},
"init": func() int {
utils.InitConfig(string(opts.Init.Dir), opts.Init.BazelCompatibility)
return 0
},
"config": func() int {
if opts.Init.Config.User {
utils.InitConfigFile(core.ExpandHomePath(core.UserConfigFileName), opts.Init.Config.Args.Options)
} else if opts.Init.Config.Local {
utils.InitConfigFile(core.LocalConfigFileName, opts.Init.Config.Args.Options)
} else {
utils.InitConfigFile(core.ConfigFileName, opts.Init.Config.Args.Options)
}
return 0
},
"export": func() int {
success, state := runBuild(opts.Export.Args.Targets, false, false, false)
if success {
export.ToDir(state, opts.Export.Output, state.ExpandOriginalTargets())
}
return toExitCode(success, state)
},
"outputs": func() int {
success, state := runBuild(opts.Export.Outputs.Args.Targets, true, false, true)
if success {
export.Outputs(state, opts.Export.Output, state.ExpandOriginalTargets())
}
return toExitCode(success, state)
},
"help": func() int {
return toExitCode(help.Help(string(opts.Help.Args.Topic)), nil)
},
"tool": func() int {
tool.Run(config, opts.Tool.Args.Tool, opts.Tool.Args.Args.AsStrings())
return 1 // If the function returns (which it shouldn't), something went wrong.
},
"deps": func() int {
return runQuery(true, opts.Query.Deps.Args.Targets, func(state *core.BuildState) {
query.Deps(state, state.ExpandOriginalTargets(), opts.Query.Deps.Hidden, opts.Query.Deps.Level)
})
},
"revdeps": func() int {
return runQuery(true, core.WholeGraph, func(state *core.BuildState) {
query.ReverseDeps(state, state.ExpandLabels(utils.ReadStdinLabels(opts.Query.ReverseDeps.Args.Targets)), opts.Query.ReverseDeps.Level, opts.Query.ReverseDeps.Hidden)
})
},
"somepath": func() int {
return runQuery(true,
[]core.BuildLabel{opts.Query.SomePath.Args.Target1, opts.Query.SomePath.Args.Target2},
func(state *core.BuildState) {
query.SomePath(state.Graph, opts.Query.SomePath.Args.Target1, opts.Query.SomePath.Args.Target2)
},
)
},
"alltargets": func() int {
return runQuery(true, opts.Query.AllTargets.Args.Targets, func(state *core.BuildState) {
query.AllTargets(state.Graph, state.ExpandOriginalTargets(), opts.Query.AllTargets.Hidden)
})
},
"print": func() int {
return runQuery(false, opts.Query.Print.Args.Targets, func(state *core.BuildState) {
query.Print(state.Graph, state.ExpandOriginalTargets(), opts.Query.Print.Fields, opts.Query.Print.Labels)
})
},
"input": func() int {
return runQuery(true, opts.Query.Input.Args.Targets, func(state *core.BuildState) {
query.TargetInputs(state.Graph, state.ExpandOriginalTargets())
})
},
"output": func() int {
return runQuery(true, opts.Query.Output.Args.Targets, func(state *core.BuildState) {
query.TargetOutputs(state.Graph, state.ExpandOriginalTargets())
})
},
"completions": func() int {
// Somewhat fiddly because the inputs are not necessarily well-formed at this point.
opts.ParsePackageOnly = true
fragments := opts.Query.Completions.Args.Fragments.Get()
if opts.Query.Completions.Cmd == "help" {
// Special-case completing help topics rather than build targets.
if len(fragments) == 0 {
help.Topics("")
} else {
help.Topics(fragments[0])
}
return 0
}
if len(fragments) == 0 || len(fragments) == 1 && strings.Trim(fragments[0], "/ ") == "" {
os.Exit(0) // Don't do anything for empty completion, it's normally too slow.
}
labels, parseLabels, hidden := query.CompletionLabels(config, fragments, core.RepoRoot)
if success, state := Please(parseLabels, config, false, false); success {
binary := opts.Query.Completions.Cmd == "run"
test := opts.Query.Completions.Cmd == "test" || opts.Query.Completions.Cmd == "cover"
query.Completions(state.Graph, labels, binary, test, hidden)
return 0
}
return 1
},
"graph": func() int {
return runQuery(true, opts.Query.Graph.Args.Targets, func(state *core.BuildState) {
if len(opts.Query.Graph.Args.Targets) == 0 {
state.OriginalTargets = opts.Query.Graph.Args.Targets // It special-cases doing the full graph.
}
query.Graph(state, state.ExpandOriginalTargets())
})
},
"whatoutputs": func() int {
return runQuery(true, core.WholeGraph, func(state *core.BuildState) {
query.WhatOutputs(state.Graph, opts.Query.WhatOutputs.Args.Files.Get(), opts.Query.WhatOutputs.EchoFiles)
})
},
"rules": func() int {
help.PrintRuleArgs()
return 0
},
"changes": func() int {
// query changes always excludes 'manual' targets.
opts.BuildFlags.Exclude = append(opts.BuildFlags.Exclude, "manual", "manual:"+core.OsArch)
transitive := opts.Query.Changes.IncludeDependees == "transitive"
direct := opts.Query.Changes.IncludeDependees == "direct" || transitive
runInexact := func(files []string) int {
return runQuery(true, core.WholeGraph, func(state *core.BuildState) {
for _, target := range query.Changes(state, files, direct, transitive) {
fmt.Println(target.String())
}
})
}
if len(opts.Query.Changes.Args.Files) > 0 {
return runInexact(opts.Query.Changes.Args.Files.Get())
}
scm := scm.MustNew(core.RepoRoot)
if opts.Query.Changes.In != "" {
return runInexact(scm.ChangesIn(opts.Query.Changes.In, ""))
} else if opts.Query.Changes.Inexact {
return runInexact(scm.ChangedFiles(scm.CurrentRevIdentifier(), true, ""))
}
original := scm.CurrentRevIdentifier()
files := scm.ChangedFiles(opts.Query.Changes.Since, true, "")
if err := scm.Checkout(opts.Query.Changes.Since); err != nil {
log.Fatalf("%s", err)
}
readConfig(false)
_, before := runBuild(core.WholeGraph, false, false, false)
// N.B. Ignore failure here; if we can't parse the graph before then it will suffice to
// assume that anything we don't know about has changed.
if err := scm.Checkout(original); err != nil {
log.Fatalf("%s", err)
}
readConfig(false)
success, after := runBuild(core.WholeGraph, false, false, false)
if !success {
return 1
}
for _, target := range query.DiffGraphs(before, after, files, direct, transitive) {
fmt.Println(target.String())
}
return 0
},
"roots": func() int {
return runQuery(true, opts.Query.Roots.Args.Targets, func(state *core.BuildState) {
query.Roots(state.Graph, opts.Query.Roots.Args.Targets)
})
},
"watch": func() int {
// Don't ask it to test now since we don't know if any of them are tests yet.
success, state := runBuild(opts.Watch.Args.Targets, true, false, false)
state.NeedRun = opts.Watch.Run
watch.Watch(state, state.ExpandOriginalTargets(), runPlease)
return toExitCode(success, state)
},
"filter": func() int {
return runQuery(false, opts.Query.Filter.Args.Targets, func(state *core.BuildState) {
query.Filter(state, state.ExpandOriginalTargets())
})
},
"intellij": func() int {
success, state := runBuild(opts.Ide.IntelliJ.Args.Labels, false, false, false)
if success {
intellij.ExportIntellijStructure(state.Config, state.Graph, state.ExpandOriginalLabels())
}
return toExitCode(success, state)
},
}
// ConfigOverrides are used to implement completion on the -o flag.
type ConfigOverrides map[string]string
// Complete implements the flags.Completer interface.
func (overrides ConfigOverrides) Complete(match string) []flags.Completion {
return core.DefaultConfiguration().Completions(match)
}
// Used above as a convenience wrapper for query functions.
func runQuery(needFullParse bool, labels []core.BuildLabel, onSuccess func(state *core.BuildState)) int {
if !needFullParse {
opts.ParsePackageOnly = true
}
if len(labels) == 0 {
labels = core.WholeGraph
}
if success, state := runBuild(labels, false, false, true); success {
onSuccess(state)
return 0
}
return 1
}
func doTest(targets []core.BuildLabel, surefireDir cli.Filepath, resultsFile cli.Filepath) (bool, *core.BuildState) {
os.RemoveAll(string(surefireDir))
os.RemoveAll(string(resultsFile))
os.MkdirAll(string(surefireDir), core.DirPermissions)
success, state := runBuild(targets, true, true, false)
test.CopySurefireXMLFilesToDir(state, string(surefireDir))
test.WriteResultsToFileOrDie(state.Graph, string(resultsFile))
return success, state
}
// prettyOutput determines from the input flags whether we should show 'pretty' (i.e. interactive) output.
func prettyOutput(interactiveOutput bool, plainOutput bool, verbosity cli.Verbosity) bool {
if interactiveOutput && plainOutput {
log.Fatal("Can't pass both --interactive_output and --plain_output")
}
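	// Verbosity 4+ (debug) disables interactive output, presumably so the
	// log stream doesn't fight the interactive display for the terminal.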
return interactiveOutput || (!plainOutput && cli.StdErrIsATerminal && verbosity < 4)
}
// newCache constructs a new cache based on the current config / flags.
func newCache(state *core.BuildState) core.Cache {
if opts.FeatureFlags.NoCache {
return nil
}
return cache.NewCache(state)
}
// Please starts & runs the main build process through to its completion.
func Please(targets []core.BuildLabel, config *core.Configuration, shouldBuild, shouldTest bool) (bool, *core.BuildState) {
if opts.BuildFlags.NumThreads > 0 {
config.Please.NumThreads = opts.BuildFlags.NumThreads
}
debugTests := opts.Test.Debug || opts.Cover.Debug
if opts.BuildFlags.Config != "" {
config.Build.Config = opts.BuildFlags.Config
} else if debugTests {
config.Build.Config = "dbg"
}
state := core.NewBuildState(config)
state.VerifyHashes = !opts.FeatureFlags.NoHashVerification
state.NumTestRuns = utils.Max(opts.Test.NumRuns, opts.Cover.NumRuns) // Only one of these can be passed
state.TestSequentially = opts.Test.Sequentially || opts.Cover.Sequentially // Similarly here.
state.TestArgs = append(opts.Test.Args.Args, opts.Cover.Args.Args...) // And here
state.NeedCoverage = opts.Cover.active
state.NeedBuild = shouldBuild
state.NeedTests = shouldTest
state.NeedRun = !opts.Run.Args.Target.IsEmpty() || len(opts.Run.Parallel.PositionalArgs.Targets) > 0 || len(opts.Run.Sequential.PositionalArgs.Targets) > 0
state.NeedHashesOnly = len(opts.Hash.Args.Targets) > 0
state.PrepareOnly = opts.Build.Prepare || opts.Build.Shell
state.PrepareShell = opts.Build.Shell || opts.Test.Shell || opts.Cover.Shell
state.Watch = len(opts.Watch.Args.Targets) > 0
state.CleanWorkdirs = !opts.FeatureFlags.KeepWorkdirs
state.ForceRebuild = opts.Build.Rebuild
state.ShowTestOutput = opts.Test.ShowOutput || opts.Cover.ShowOutput
state.DebugTests = debugTests
state.ShowAllOutput = opts.OutputFlags.ShowAllOutput
state.ParsePackageOnly = opts.ParsePackageOnly
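	// Outputs are downloaded by default, but not with --nodownload, with
	// run --remote, or when the first target is an all-subpackages wildcard
	// and no --include filter is given; --download overrides all of these.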
state.DownloadOutputs = (!opts.Build.NoDownload && !opts.Run.Remote && len(targets) > 0 && (!targets[0].IsAllSubpackages() || len(opts.BuildFlags.Include) > 0)) || opts.Build.Download
state.SetIncludeAndExclude(opts.BuildFlags.Include, opts.BuildFlags.Exclude)
if opts.BuildFlags.Arch.OS != "" {
state.OriginalArch = opts.BuildFlags.Arch
}
if state.DebugTests && len(targets) != 1 {
log.Fatalf("-d/--debug flag can only be used with a single test target")
}
runPlease(state, targets)
return state.Success, state
}
func runPlease(state *core.BuildState, targets []core.BuildLabel) {
// Acquire the lock before we start building
if (state.NeedBuild || state.NeedTests) && !opts.FeatureFlags.NoLock {
core.AcquireRepoLock(state)
defer core.ReleaseRepoLock()
}
detailedTests := state.NeedTests && (opts.Test.Detailed || opts.Cover.Detailed ||
(len(targets) == 1 && !targets[0].IsAllTargets() &&
!targets[0].IsAllSubpackages() && targets[0] != core.BuildLabelStdin))
streamTests := opts.Test.StreamResults || opts.Cover.StreamResults
pretty := prettyOutput(opts.OutputFlags.InteractiveOutput, opts.OutputFlags.PlainOutput, opts.OutputFlags.Verbosity) && state.NeedBuild && !streamTests
state.Cache = newCache(state)
// Run the display
state.Results() // important this is called now, don't ask...
var wg sync.WaitGroup
wg.Add(1)
ctx, cancel := context.WithCancel(context.Background())
go func() {
output.MonitorState(ctx, state, !pretty, detailedTests, streamTests, string(opts.OutputFlags.TraceFile))
wg.Done()
}()
plz.Run(targets, opts.BuildFlags.PreTargets, state, config, opts.BuildFlags.Arch)
cancel()
wg.Wait()
}
// testTargets handles test targets which can be given in two formats; a list of targets or a single
// target with a list of trailing arguments.
// Alternatively they can be completely omitted in which case we test everything under the working dir.
// One can also pass a 'failed' flag which runs the failed tests from last time.
func testTargets(target core.BuildLabel, args []string, failed bool, resultsFile cli.Filepath) []core.BuildLabel {
if failed {
targets, args := test.LoadPreviousFailures(string(resultsFile))
// Have to reset these - it doesn't matter which gets which.
opts.Test.Args.Args = args
opts.Cover.Args.Args = nil
return targets
} else if target.Name == "" {
return core.InitialPackage()
} else if len(args) > 0 && core.LooksLikeABuildLabel(args[0]) {
opts.Cover.Args.Args = []string{}
opts.Test.Args.Args = []string{}
return append(core.ParseBuildLabels(args), target)
}
return []core.BuildLabel{target}
}
// readConfig reads the initial configuration files
func readConfig(forceUpdate bool) *core.Configuration {
cfg, err := core.ReadDefaultConfigFiles(opts.BuildFlags.Profile.Strings())
if err != nil {
log.Fatalf("Error reading config file: %s", err)
} else if err := cfg.ApplyOverrides(opts.BuildFlags.Option); err != nil {
log.Fatalf("Can't override requested config setting: %s", err)
}
if opts.FeatureFlags.HTTPProxy != "" {
cfg.Build.HTTPProxy = opts.FeatureFlags.HTTPProxy
}
config = cfg
return cfg
}
// Runs the actual build
// Which phases get run are controlled by shouldBuild and shouldTest.
func runBuild(targets []core.BuildLabel, shouldBuild, shouldTest, isQuery bool) (bool, *core.BuildState) {
if !isQuery {
opts.BuildFlags.Exclude = append(opts.BuildFlags.Exclude, "manual", "manual:"+core.OsArch)
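		// (Queries skip this exclusion so 'manual' targets remain visible in results.)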
}
if stat, _ := os.Stdin.Stat(); (stat.Mode()&os.ModeCharDevice) == 0 && !utils.ReadingStdin(targets) {
if len(targets) == 0 {
// Assume they want us to read from stdin since nothing else was given.
targets = []core.BuildLabel{core.BuildLabelStdin}
}
}
if len(targets) == 0 {
targets = core.InitialPackage()
}
return Please(targets, config, shouldBuild, shouldTest)
}
// readConfigAndSetRoot reads the .plzconfig files and moves to the repo root.
func readConfigAndSetRoot(forceUpdate bool) *core.Configuration {
if opts.BuildFlags.RepoRoot == "" {
log.Debug("Found repo root at %s", core.MustFindRepoRoot())
} else {
core.RepoRoot = string(opts.BuildFlags.RepoRoot)
}
// Please always runs from the repo root, so move there now.
if err := os.Chdir(core.RepoRoot); err != nil {
log.Fatalf("%s", err)
}
// Reset this now we're at the repo root.
if opts.OutputFlags.LogFile != "" {
if !path.IsAbs(string(opts.OutputFlags.LogFile)) {
opts.OutputFlags.LogFile = cli.Filepath(path.Join(core.RepoRoot, string(opts.OutputFlags.LogFile)))
}
cli.InitFileLogging(string(opts.OutputFlags.LogFile), opts.OutputFlags.LogFileLevel)
}
if opts.FeatureFlags.NoHashVerification {
log.Warning("You've disabled hash verification; this is intended to help temporarily while modifying build targets. You shouldn't use this regularly.")
}
config := readConfig(forceUpdate)
// Now apply any flags that override this
if opts.Update.Latest {
config.Please.Version.Unset()
} else if opts.Update.Version.IsSet {
config.Please.Version = opts.Update.Version
}
update.CheckAndUpdate(config, !opts.FeatureFlags.NoUpdate, forceUpdate, opts.Update.Force, !opts.Update.NoVerify)
return config
}
// handleCompletions handles shell completion. Typically it just prints to stdout but
// may do a little more if we think we need to handle aliases.
func handleCompletions(parser *flags.Parser, items []flags.Completion) {
cli.InitLogging(cli.MinVerbosity) // Ensure this is quiet
opts.FeatureFlags.NoUpdate = true // Ensure we don't try to update
if len(items) > 0 && strings.HasPrefix(items[0].Item, "//") {
// Don't muck around with the config if we're predicting build labels.
cli.PrintCompletions(items)
} else if config := readConfigAndSetRoot(false); config.AttachAliasFlags(parser) {
// Run again without this registered as a completion handler
parser.CompletionHandler = nil
parser.ParseArgs(os.Args[1:])
} else {
cli.PrintCompletions(items)
}
// Regardless of what happened, always exit with 0 at this point.
os.Exit(0)
}
func initBuild(args []string) string {
if _, present := os.LookupEnv("GO_FLAGS_COMPLETION"); present {
cli.InitLogging(cli.MinVerbosity)
}
parser, extraArgs, flagsErr := cli.ParseFlags("Please", &opts, args, flags.PassDoubleDash, handleCompletions)
// Note that we must leave flagsErr for later, because it may be affected by aliases.
if opts.HelpFlags.Version {
fmt.Printf("Please version %s\n", core.PleaseVersion)
os.Exit(0) // Ignore other flags if --version was passed.
} else if opts.HelpFlags.Help {
// Attempt to read config files to produce help for aliases.
cli.InitLogging(cli.MinVerbosity)
parser.WriteHelp(os.Stderr)
if core.FindRepoRoot() {
if config, err := core.ReadDefaultConfigFiles(nil); err == nil {
config.PrintAliases(os.Stderr)
}
}
os.Exit(0)
}
if opts.OutputFlags.Colour {
cli.ShowColouredOutput = true
} else if opts.OutputFlags.NoColour {
cli.ShowColouredOutput = false
}
if opts.OutputFlags.ShowAllOutput {
opts.OutputFlags.PlainOutput = true
}
// Init logging, but don't do file output until we've chdir'd.
cli.InitLogging(opts.OutputFlags.Verbosity)
command := cli.ActiveCommand(parser.Command)
if opts.Complete != "" {
// Completion via PLZ_COMPLETE env var sidesteps other commands
opts.Query.Completions.Cmd = command
opts.Query.Completions.Args.Fragments = []string{opts.Complete}
command = "completions"
} else if command == "help" || command == "follow" || command == "init" || command == "config" || command == "tool" {
// These commands don't use a config file, allowing them to be run outside a repo.
if flagsErr != nil { // This error otherwise doesn't get checked until later.
cli.ParseFlagsFromArgsOrDie("Please", &opts, os.Args)
}
config = core.DefaultConfiguration()
if command == "tool" {
if cfg, err := core.ReadDefaultConfigFiles(opts.BuildFlags.Profile.Strings()); err == nil {
config = cfg
}
}
os.Exit(buildFunctions[command]())
} else if opts.OutputFlags.CompletionScript {
utils.PrintCompletionScript()
os.Exit(0)
}
// Read the config now
config = readConfigAndSetRoot(command == "update")
if parser.Command.Active != nil && parser.Command.Active.Name == "query" {
// Query commands don't need either of these set.
opts.OutputFlags.PlainOutput = true
config.Cache.DirClean = false
}
// Now we've read the config file, we may need to re-run the parser; the aliases in the config
// can affect how we parse otherwise illegal flag combinations.
if (flagsErr != nil || len(extraArgs) > 0) && command != "completions" {
args := config.UpdateArgsWithAliases(os.Args)
command = cli.ParseFlagsFromArgsOrDie("Please", &opts, args)
}
if opts.ProfilePort != 0 {
go func() {
log.Warning("%s", http.ListenAndServe(fmt.Sprintf("127.0.0.1:%d", opts.ProfilePort), nil))
}()
}
return command
}
// toExitCode returns an integer process exit code based on the outcome of a build.
// 0 -> success
// 1 -> general failure (and why is he reading my hard drive?)
// 2 -> a target failed to build
// 7 -> a test failed (this is 7 for compatibility)
func toExitCode(success bool, state *core.BuildState) int {
if success {
return 0
} else if state == nil {
return 1
} else if state.BuildFailed {
return 2
} else if state.TestFailed {
if opts.Test.FailingTestsOk || opts.Cover.FailingTestsOk {
return 0
}
return 7
}
return 1
}
func execute(command string) int {
if opts.Profile != "" {
f, err := os.Create(opts.Profile)
if err != nil {
log.Fatalf("Failed to open profile file: %s", err)
}
if err := pprof.StartCPUProfile(f); err != nil {
log.Fatalf("could not start profiler: %s", err)
}
defer f.Close()
defer pprof.StopCPUProfile()
}
if opts.MemProfile != "" {
f, err := os.Create(opts.MemProfile)
if err != nil {
log.Fatalf("Failed to open memory profile file: %s", err)
}
defer f.Close()
defer pprof.WriteHeapProfile(f)
}
defer worker.StopAll()
return buildFunctions[command]()
}
func main() {
os.Exit(execute(initBuild(os.Args)))
}
| 1 | 9,091 | Can you also do this for Cover? | thought-machine-please | go |
@@ -4978,6 +4978,16 @@ bool CoreChecks::ValidateImageSubresourceRange(const uint32_t image_mip_count, c
}
}
+ if (subresourceRange.aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) {
+ if (subresourceRange.aspectMask &
+ (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT)) {
+ skip |= LogError(image, "VUID-VkImageSubresourceRange-aspectMask-01670",
+ "%s: aspectMask includes both VK_IMAGE_ASPECT_COLOR_BIT and one of VK_IMAGE_ASPECT_PLANE_0_BIT, "
+ "VK_IMAGE_ASPECT_PLANE_1_BIT, or VK_IMAGE_ASPECT_PLANE_2_BIT.",
+ cmd_name);
+ }
+ }
+
return skip;
}
| 1 | /* Copyright (c) 2015-2021 The Khronos Group Inc.
* Copyright (c) 2015-2021 Valve Corporation
* Copyright (c) 2015-2021 LunarG, Inc.
* Copyright (C) 2015-2021 Google Inc.
* Modifications Copyright (C) 2020-2021 Advanced Micro Devices, Inc. All rights reserved.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*
* Author: Mark Lobodzinski <[email protected]>
* Author: Dave Houlton <[email protected]>
* Shannon McPherson <[email protected]>
* Author: Tobias Hector <[email protected]>
*/
#include <cmath>
#include <set>
#include <sstream>
#include <string>
#include <iostream>
#include "vk_enum_string_helper.h"
#include "vk_format_utils.h"
#include "vk_layer_data.h"
#include "vk_layer_utils.h"
#include "vk_layer_logging.h"
#include "vk_typemap_helper.h"
#include "chassis.h"
#include "core_validation.h"
#include "core_error_location.h"
#include "shader_validation.h"
#include "descriptor_sets.h"
#include "buffer_validation.h"
#include "sync_utils.h"
#include "sync_vuid_maps.h"
// All VUID from copy_bufferimage_to_imagebuffer_common.txt
static const char *GetBufferImageCopyCommandVUID(std::string id, bool image_to_buffer, bool copy2) {
// clang-format off
static const std::map<std::string, std::array<const char *, 4>> copy_imagebuffer_vuid = {
{"00193", {
"VUID-vkCmdCopyBufferToImage-bufferOffset-00193", // !copy2 & !image_to_buffer
"VUID-vkCmdCopyImageToBuffer-bufferOffset-00193", // !copy2 & image_to_buffer
"VUID-VkCopyBufferToImageInfo2KHR-bufferOffset-00193", // copy2 & !image_to_buffer
"VUID-VkCopyImageToBufferInfo2KHR-bufferOffset-00193", // copy2 & image_to_buffer
}},
{"01558", {
"VUID-vkCmdCopyBufferToImage-bufferOffset-01558",
"VUID-vkCmdCopyImageToBuffer-bufferOffset-01558",
"VUID-VkCopyBufferToImageInfo2KHR-bufferOffset-01558",
"VUID-VkCopyImageToBufferInfo2KHR-bufferOffset-01558",
}},
{"01559", {
"VUID-vkCmdCopyBufferToImage-bufferOffset-01559",
"VUID-vkCmdCopyImageToBuffer-bufferOffset-01559",
"VUID-VkCopyBufferToImageInfo2KHR-bufferOffset-01559",
"VUID-VkCopyImageToBufferInfo2KHR-bufferOffset-01559",
}},
{"00197", {
"VUID-vkCmdCopyBufferToImage-imageOffset-00197",
"VUID-vkCmdCopyImageToBuffer-imageOffset-00197",
"VUID-VkCopyBufferToImageInfo2KHR-imageOffset-00197",
"VUID-VkCopyImageToBufferInfo2KHR-imageOffset-00197",
}},
{"00198", {
"VUID-vkCmdCopyBufferToImage-imageOffset-00198",
"VUID-vkCmdCopyImageToBuffer-imageOffset-00198",
"VUID-VkCopyBufferToImageInfo2KHR-imageOffset-00198",
"VUID-VkCopyImageToBufferInfo2KHR-imageOffset-00198",
}},
{"00199", {
"VUID-vkCmdCopyBufferToImage-srcImage-00199",
"VUID-vkCmdCopyImageToBuffer-srcImage-00199",
"VUID-VkCopyBufferToImageInfo2KHR-srcImage-00199",
"VUID-VkCopyImageToBufferInfo2KHR-srcImage-00199",
}},
{"00200", {
"VUID-vkCmdCopyBufferToImage-imageOffset-00200",
"VUID-vkCmdCopyImageToBuffer-imageOffset-00200",
"VUID-VkCopyBufferToImageInfo2KHR-imageOffset-00200",
"VUID-VkCopyImageToBufferInfo2KHR-imageOffset-00200",
}},
{"00201", {
"VUID-vkCmdCopyBufferToImage-srcImage-00201",
"VUID-vkCmdCopyImageToBuffer-srcImage-00201",
"VUID-VkCopyBufferToImageInfo2KHR-srcImage-00201",
"VUID-VkCopyImageToBufferInfo2KHR-srcImage-00201",
}},
{"00203", {
"VUID-vkCmdCopyBufferToImage-bufferRowLength-00203",
"VUID-vkCmdCopyImageToBuffer-bufferRowLength-00203",
"VUID-VkCopyBufferToImageInfo2KHR-bufferRowLength-00203",
"VUID-VkCopyImageToBufferInfo2KHR-bufferRowLength-00203",
}},
{"00204", {
"VUID-vkCmdCopyBufferToImage-bufferImageHeight-00204",
"VUID-vkCmdCopyImageToBuffer-bufferImageHeight-00204",
"VUID-VkCopyBufferToImageInfo2KHR-bufferImageHeight-00204",
"VUID-VkCopyImageToBufferInfo2KHR-bufferImageHeight-00204",
}},
{"00205", {
"VUID-vkCmdCopyBufferToImage-imageOffset-00205",
"VUID-vkCmdCopyImageToBuffer-imageOffset-00205",
"VUID-VkCopyBufferToImageInfo2KHR-imageOffset-00205",
"VUID-VkCopyImageToBufferInfo2KHR-imageOffset-00205",
}},
{"00206", {
"VUID-vkCmdCopyBufferToImage-bufferOffset-00206",
"VUID-vkCmdCopyImageToBuffer-bufferOffset-00206",
"VUID-VkCopyBufferToImageInfo2KHR-bufferOffset-00206",
"VUID-VkCopyImageToBufferInfo2KHR-bufferOffset-00206",
}},
{"00207", {
"VUID-vkCmdCopyBufferToImage-imageExtent-00207",
"VUID-vkCmdCopyImageToBuffer-imageExtent-00207",
"VUID-VkCopyBufferToImageInfo2KHR-imageExtent-00207",
"VUID-VkCopyImageToBufferInfo2KHR-imageExtent-00207",
}},
{"00208", {
"VUID-vkCmdCopyBufferToImage-imageExtent-00208",
"VUID-vkCmdCopyImageToBuffer-imageExtent-00208",
"VUID-VkCopyBufferToImageInfo2KHR-imageExtent-00208",
"VUID-VkCopyImageToBufferInfo2KHR-imageExtent-00208",
}},
{"00209", {
"VUID-vkCmdCopyBufferToImage-imageExtent-00209",
"VUID-vkCmdCopyImageToBuffer-imageExtent-00209",
"VUID-VkCopyBufferToImageInfo2KHR-imageExtent-00209",
"VUID-VkCopyImageToBufferInfo2KHR-imageExtent-00209",
}},
{"00211", {
"VUID-vkCmdCopyBufferToImage-aspectMask-00211",
"VUID-vkCmdCopyImageToBuffer-aspectMask-00211",
"VUID-VkCopyBufferToImageInfo2KHR-aspectMask-00211",
"VUID-VkCopyImageToBufferInfo2KHR-aspectMask-00211",
}},
{"01560", {
"VUID-vkCmdCopyBufferToImage-aspectMask-01560",
"VUID-vkCmdCopyImageToBuffer-aspectMask-01560",
"VUID-VkCopyBufferToImageInfo2KHR-aspectMask-01560",
"VUID-VkCopyImageToBufferInfo2KHR-aspectMask-01560",
}},
{"00213", {
"VUID-vkCmdCopyBufferToImage-baseArrayLayer-00213",
"VUID-vkCmdCopyImageToBuffer-baseArrayLayer-00213",
"VUID-VkCopyBufferToImageInfo2KHR-baseArrayLayer-00213",
"VUID-VkCopyImageToBufferInfo2KHR-baseArrayLayer-00213",
}},
{"04052", {
"VUID-vkCmdCopyBufferToImage-commandBuffer-04052",
"VUID-vkCmdCopyImageToBuffer-commandBuffer-04052",
"VUID-VkCopyBufferToImageInfo2KHR-commandBuffer-04052",
"VUID-VkCopyImageToBufferInfo2KHR-commandBuffer-04052",
}},
{"04053", {
"VUID-vkCmdCopyBufferToImage-srcImage-04053",
"VUID-vkCmdCopyImageToBuffer-srcImage-04053",
"VUID-VkCopyBufferToImageInfo2KHR-srcImage-04053",
"VUID-VkCopyImageToBufferInfo2KHR-srcImage-04053",
}}
};
// clang-format on
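// Index into the 4-element arrays above: bit 0 selects the image-to-buffer
// variant, bit 1 selects the copy2 (KHR) variant.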
uint8_t index = 0;
index |= uint8_t((image_to_buffer) ? 0x1 : 0);
index |= uint8_t((copy2) ? 0x2 : 0);
return copy_imagebuffer_vuid.at(id).at(index);
}
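// For example, GetBufferImageCopyCommandVUID("00193", /*image_to_buffer=*/true, /*copy2=*/false)
// yields index 0x1 and returns "VUID-vkCmdCopyImageToBuffer-bufferOffset-00193".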
// Transfer VkImageSubresourceRange into VkImageSubresourceLayers struct
static VkImageSubresourceLayers LayersFromRange(const VkImageSubresourceRange &subresource_range) {
VkImageSubresourceLayers subresource_layers;
subresource_layers.aspectMask = subresource_range.aspectMask;
subresource_layers.baseArrayLayer = subresource_range.baseArrayLayer;
subresource_layers.layerCount = subresource_range.layerCount;
subresource_layers.mipLevel = subresource_range.baseMipLevel;
return subresource_layers;
}
// Transfer VkImageSubresourceLayers into VkImageSubresourceRange struct
static VkImageSubresourceRange RangeFromLayers(const VkImageSubresourceLayers &subresource_layers) {
VkImageSubresourceRange subresource_range;
subresource_range.aspectMask = subresource_layers.aspectMask;
subresource_range.baseArrayLayer = subresource_layers.baseArrayLayer;
subresource_range.layerCount = subresource_layers.layerCount;
subresource_range.baseMipLevel = subresource_layers.mipLevel;
subresource_range.levelCount = 1;
return subresource_range;
}
static VkImageLayout NormalizeDepthImageLayout(VkImageLayout layout) {
switch (layout) {
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
return VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
return VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL;
default:
return layout;
}
}
static VkImageLayout NormalizeStencilImageLayout(VkImageLayout layout) {
switch (layout) {
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
return VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
return VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL;
default:
return layout;
}
}
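// Map the synchronization2 generic ATTACHMENT_OPTIMAL_KHR / READ_ONLY_OPTIMAL_KHR layouts
// onto the equivalent aspect-specific core layouts so they can be compared directly.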
static VkImageLayout NormalizeSynchronization2Layout(const VkImageAspectFlags aspect_mask, VkImageLayout layout) {
if (layout == VK_IMAGE_LAYOUT_ATTACHMENT_OPTIMAL_KHR) {
if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
layout = VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL;
} else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL;
} else if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
layout = VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_OPTIMAL;
} else if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
layout = VK_IMAGE_LAYOUT_STENCIL_ATTACHMENT_OPTIMAL;
}
} else if (layout == VK_IMAGE_LAYOUT_READ_ONLY_OPTIMAL_KHR) {
if (aspect_mask == VK_IMAGE_ASPECT_COLOR_BIT) {
layout = VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL;
} else if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
layout = VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL;
} else if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
layout = VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_OPTIMAL;
} else if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
layout = VK_IMAGE_LAYOUT_STENCIL_READ_ONLY_OPTIMAL;
}
}
return layout;
}
static bool ImageLayoutMatches(const VkImageAspectFlags aspect_mask, VkImageLayout a, VkImageLayout b) {
bool matches = (a == b);
if (!matches) {
a = NormalizeSynchronization2Layout(aspect_mask, a);
b = NormalizeSynchronization2Layout(aspect_mask, b);
matches = (a == b);
if (!matches) {
// Relaxed rules when referencing *only* the depth or stencil aspects.
// When accessing both, normalize layouts for aspects separately.
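// For example, with only VK_IMAGE_ASPECT_DEPTH_BIT set, DEPTH_STENCIL_READ_ONLY_OPTIMAL and
// DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL both normalize to DEPTH_READ_ONLY_OPTIMAL and match.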
if (aspect_mask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) {
matches = NormalizeDepthImageLayout(a) == NormalizeDepthImageLayout(b) &&
NormalizeStencilImageLayout(a) == NormalizeStencilImageLayout(b);
} else if (aspect_mask == VK_IMAGE_ASPECT_DEPTH_BIT) {
matches = NormalizeDepthImageLayout(a) == NormalizeDepthImageLayout(b);
} else if (aspect_mask == VK_IMAGE_ASPECT_STENCIL_BIT) {
matches = NormalizeStencilImageLayout(a) == NormalizeStencilImageLayout(b);
}
}
}
return matches;
}
// Utility type for ForRange callbacks
struct LayoutUseCheckAndMessage {
const static VkImageAspectFlags kDepthOrStencil = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
const ImageSubresourceLayoutMap *layout_map;
const VkImageAspectFlags aspect_mask;
const char *message;
VkImageLayout layout;
LayoutUseCheckAndMessage() = delete;
LayoutUseCheckAndMessage(const ImageSubresourceLayoutMap *layout_map_, const VkImageAspectFlags aspect_mask_ = 0)
: layout_map(layout_map_), aspect_mask{aspect_mask_}, message(nullptr), layout(kInvalidLayout) {}
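// Check returns true when `check` is consistent with both the current and initial
// layouts; on a mismatch it records the conflicting layout and a short description
// in `layout` and `message` for the caller's error report.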
bool Check(const VkImageSubresource &subres, VkImageLayout check, VkImageLayout current_layout, VkImageLayout initial_layout) {
message = nullptr;
layout = kInvalidLayout; // Success status
if (current_layout != kInvalidLayout && !ImageLayoutMatches(aspect_mask, check, current_layout)) {
message = "previous known";
layout = current_layout;
} else if ((initial_layout != kInvalidLayout) && !ImageLayoutMatches(aspect_mask, check, initial_layout)) {
// To check the relaxed rule matching we need to see how the initial use was used
const auto initial_layout_state = layout_map->GetSubresourceInitialLayoutState(subres);
assert(initial_layout_state); // If we have an initial layout, we better have a state for it
if (!((initial_layout_state->aspect_mask & kDepthOrStencil) &&
ImageLayoutMatches(initial_layout_state->aspect_mask, check, initial_layout))) {
message = "previously used";
layout = initial_layout;
}
}
return layout == kInvalidLayout;
}
};
bool IMAGE_VIEW_STATE::OverlapSubresource(const IMAGE_VIEW_STATE &compare_view) const {
if (image_view() == compare_view.image_view()) {
return true;
}
if (image_state->image() != compare_view.image_state->image()) {
return false;
}
if (normalized_subresource_range.aspectMask != compare_view.normalized_subresource_range.aspectMask) {
return false;
}
// compare if overlap mip level
if ((normalized_subresource_range.baseMipLevel < compare_view.normalized_subresource_range.baseMipLevel) &&
((normalized_subresource_range.baseMipLevel + normalized_subresource_range.levelCount) <=
compare_view.normalized_subresource_range.baseMipLevel)) {
return false;
}
if ((normalized_subresource_range.baseMipLevel > compare_view.normalized_subresource_range.baseMipLevel) &&
(normalized_subresource_range.baseMipLevel >=
(compare_view.normalized_subresource_range.baseMipLevel + compare_view.normalized_subresource_range.levelCount))) {
return false;
}
// compare if overlap array layer
if ((normalized_subresource_range.baseArrayLayer < compare_view.normalized_subresource_range.baseArrayLayer) &&
((normalized_subresource_range.baseArrayLayer + normalized_subresource_range.layerCount) <=
compare_view.normalized_subresource_range.baseArrayLayer)) {
return false;
}
if ((normalized_subresource_range.baseArrayLayer > compare_view.normalized_subresource_range.baseArrayLayer) &&
(normalized_subresource_range.baseArrayLayer >=
(compare_view.normalized_subresource_range.baseArrayLayer + compare_view.normalized_subresource_range.layerCount))) {
return false;
}
return true;
}
uint32_t FullMipChainLevels(uint32_t height, uint32_t width, uint32_t depth) {
// uint cast applies floor()
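// e.g. a 1024x512x1 extent yields 1 + floor(log2(1024)) = 11 mip levels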
return 1u + static_cast<uint32_t>(log2(std::max({height, width, depth})));
}
uint32_t FullMipChainLevels(VkExtent3D extent) { return FullMipChainLevels(extent.height, extent.width, extent.depth); }
uint32_t FullMipChainLevels(VkExtent2D extent) { return FullMipChainLevels(extent.height, extent.width); }
bool CoreChecks::FindLayouts(const IMAGE_STATE &image_state, std::vector<VkImageLayout> &layouts) const {
const auto *layout_range_map = GetLayoutRangeMap(imageLayoutMap, image_state.image());
if (!layout_range_map) return false;
// TODO: FindLayouts function should mutate into a ValidatePresentableLayout with the loop wrapping the LogError
// from the caller. You can then use decode to add the subresource of the range::begin to the error message.
// TODO: what is this test and what is it supposed to do?! -- the logic doesn't match the comment below?!
// TODO: Make this robust for >1 aspect mask. Now it will just say ignore potential errors in this case.
if (layout_range_map->size() >= (image_state.createInfo.arrayLayers * image_state.createInfo.mipLevels + 1)) {
return false;
}
for (const auto &entry : *layout_range_map) {
layouts.push_back(entry.second);
}
return true;
}
// Set image layout for given VkImageSubresourceRange struct
void CoreChecks::SetImageLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &image_subresource_range, VkImageLayout layout,
VkImageLayout expected_layout) {
auto *subresource_map = cb_node->GetImageSubresourceLayoutMap(image_state);
assert(subresource_map); // the non-const getter must return a valid pointer
if (subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout)) {
cb_node->image_layout_change_count++; // Change the version of this data to force revalidation
}
for (const auto *alias_state : image_state.aliasing_images) {
assert(alias_state);
// The map state of the aliases should all be in sync, so no need to check the return value
subresource_map = cb_node->GetImageSubresourceLayoutMap(*alias_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeLayout(*cb_node, image_subresource_range, layout, expected_layout);
}
}
// Set the initial image layout for all slices of an image view
void CoreChecks::SetImageViewInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout) {
if (disabled[image_layout_validation]) {
return;
}
IMAGE_STATE *image_state = view_state.image_state.get();
auto *subresource_map = cb_node->GetImageSubresourceLayoutMap(*image_state);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, layout, view_state);
for (const auto *alias_state : image_state->aliasing_images) {
assert(alias_state);
subresource_map = cb_node->GetImageSubresourceLayoutMap(*alias_state);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, layout, view_state);
}
}
// Set the initial image layout for a passed non-normalized subresource range
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceRange &range, VkImageLayout layout) {
auto *subresource_map = cb_node->GetImageSubresourceLayoutMap(image_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, image_state.NormalizeSubresourceRange(range), layout);
for (const auto *alias_state : image_state.aliasing_images) {
assert(alias_state);
subresource_map = cb_node->GetImageSubresourceLayoutMap(*alias_state);
assert(subresource_map);
subresource_map->SetSubresourceRangeInitialLayout(*cb_node, alias_state->NormalizeSubresourceRange(range), layout);
}
}
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, VkImage image, const VkImageSubresourceRange &range,
VkImageLayout layout) {
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) return;
SetImageInitialLayout(cb_node, *image_state, range, layout);
};
void CoreChecks::SetImageInitialLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_STATE &image_state,
const VkImageSubresourceLayers &layers, VkImageLayout layout) {
SetImageInitialLayout(cb_node, image_state, RangeFromLayers(layers), layout);
}
// Set image layout for all slices of an image view
void CoreChecks::SetImageViewLayout(CMD_BUFFER_STATE *cb_node, const IMAGE_VIEW_STATE &view_state, VkImageLayout layout,
VkImageLayout layoutStencil) {
IMAGE_STATE *image_state = view_state.image_state.get();
VkImageSubresourceRange sub_range = view_state.normalized_subresource_range;
// When changing the layout of a 3D image subresource via a 2D or 2D_ARRAY image view, all depth slices of
// the subresource mip level(s) are transitioned, ignoring any layers restriction in the subresource info.
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) && (view_state.create_info.viewType != VK_IMAGE_VIEW_TYPE_3D)) {
sub_range.baseArrayLayer = 0;
sub_range.layerCount = image_state->createInfo.extent.depth;
}
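// If a separate stencil layout was provided, record the depth and stencil aspects
// independently so each tracks its own layout.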
if (sub_range.aspectMask == (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT) && layoutStencil != kInvalidLayout) {
sub_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
SetImageLayout(cb_node, *image_state, sub_range, layout);
sub_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
SetImageLayout(cb_node, *image_state, sub_range, layoutStencil);
} else {
SetImageLayout(cb_node, *image_state, sub_range, layout);
}
}
bool CoreChecks::ValidateRenderPassLayoutAgainstFramebufferImageUsage(RenderPassCreateVersion rp_version, VkImageLayout layout,
VkImage image, VkImageView image_view,
VkFramebuffer framebuffer, VkRenderPass renderpass,
uint32_t attachment_index, const char *variable_name) const {
bool skip = false;
auto image_state = GetImageState(image);
const char *vuid;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
const char *function_name = use_rp2 ? "vkCmdBeginRenderPass2()" : "vkCmdBeginRenderPass()";
if (!image_state) {
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |=
LogError(image, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"%s: RenderPass %s uses %s where pAttachments[%" PRIu32 "] = %s, which refers to an invalid image",
function_name, report_data->FormatHandle(renderpass).c_str(), report_data->FormatHandle(framebuffer).c_str(),
attachment_index, report_data->FormatHandle(image_view).c_str());
return skip;
}
auto image_usage = image_state->createInfo.usage;
const auto stencil_usage_info = LvlFindInChain<VkImageStencilUsageCreateInfo>(image_state->createInfo.pNext);
if (stencil_usage_info) {
image_usage |= stencil_usage_info->stencilUsage;
}
// Check for layouts that mismatch image usages in the framebuffer
if (layout == VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03094" : "VUID-vkCmdBeginRenderPass-initialLayout-00895";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL &&
!(image_usage & (VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SAMPLED_BIT))) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03097" : "VUID-vkCmdBeginRenderPass-initialLayout-00897";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT or VK_IMAGE_USAGE_SAMPLED_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_SRC_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03098" : "VUID-vkCmdBeginRenderPass-initialLayout-00898";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_SRC_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (layout == VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL && !(image_usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03099" : "VUID-vkCmdBeginRenderPass-initialLayout-00899";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_TRANSFER_DST_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
if (device_extensions.vk_khr_maintenance2) {
if ((layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
vuid = use_rp2 ? "VUID-vkCmdBeginRenderPass2-initialLayout-03096" : "VUID-vkCmdBeginRenderPass-initialLayout-01758";
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, vuid,
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
} else {
// The create render pass 2 extension requires maintenance 2 (the previous branch), so no vuid switch needed here.
if ((layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL ||
layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) &&
!(image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
LogObjectList objlist(image);
objlist.add(renderpass);
objlist.add(framebuffer);
objlist.add(image_view);
skip |= LogError(objlist, "VUID-vkCmdBeginRenderPass-initialLayout-00896",
"%s: Layout/usage mismatch for attachment %u in %s"
" - the %s is %s but the image attached to %s via %s"
" was not created with VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT",
function_name, attachment_index, report_data->FormatHandle(renderpass).c_str(), variable_name,
string_VkImageLayout(layout), report_data->FormatHandle(framebuffer).c_str(),
report_data->FormatHandle(image_view).c_str());
}
}
return skip;
}
bool CoreChecks::VerifyFramebufferAndRenderPassLayouts(RenderPassCreateVersion rp_version, const CMD_BUFFER_STATE *pCB,
const VkRenderPassBeginInfo *pRenderPassBegin,
const FRAMEBUFFER_STATE *framebuffer_state) const {
bool skip = false;
auto const render_pass_info = GetRenderPassState(pRenderPassBegin->renderPass)->createInfo.ptr();
auto const &framebuffer_info = framebuffer_state->createInfo;
const VkImageView *attachments = framebuffer_info.pAttachments;
auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass)->renderPass();
auto framebuffer = framebuffer_state->framebuffer();
if (render_pass_info->attachmentCount != framebuffer_info.attachmentCount) {
skip |= LogError(pCB->commandBuffer(), kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using a framebuffer with a different number of attachments.");
}
const auto *attachment_info = LvlFindInChain<VkRenderPassAttachmentBeginInfo>(pRenderPassBegin->pNext);
if (((framebuffer_info.flags & VK_FRAMEBUFFER_CREATE_IMAGELESS_BIT) != 0) && attachment_info != nullptr) {
attachments = attachment_info->pAttachments;
}
if (attachments != nullptr) {
const auto *const_p_cb = static_cast<const CMD_BUFFER_STATE *>(pCB);
for (uint32_t i = 0; i < render_pass_info->attachmentCount; ++i) {
auto image_view = attachments[i];
auto view_state = GetImageViewState(image_view);
if (!view_state) {
LogObjectList objlist(pRenderPassBegin->renderPass);
objlist.add(framebuffer_state->framebuffer());
objlist.add(image_view);
skip |= LogError(objlist, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s is not a valid VkImageView handle",
report_data->FormatHandle(framebuffer_state->framebuffer()).c_str(), i,
report_data->FormatHandle(image_view).c_str());
continue;
}
const VkImage image = view_state->create_info.image;
const IMAGE_STATE *image_state = GetImageState(image);
if (!image_state) {
LogObjectList objlist(pRenderPassBegin->renderPass);
objlist.add(framebuffer_state->framebuffer());
objlist.add(image_view);
objlist.add(image);
skip |= LogError(objlist, "VUID-VkRenderPassBeginInfo-framebuffer-parameter",
"vkCmdBeginRenderPass(): %s pAttachments[%" PRIu32 "] = %s references non-extant %s.",
report_data->FormatHandle(framebuffer_state->framebuffer()).c_str(), i,
report_data->FormatHandle(image_view).c_str(), report_data->FormatHandle(image).c_str());
continue;
}
auto attachment_initial_layout = render_pass_info->pAttachments[i].initialLayout;
auto final_layout = render_pass_info->pAttachments[i].finalLayout;
// Default to expecting stencil in the same layout.
auto attachment_stencil_initial_layout = attachment_initial_layout;
// If a separate layout is specified, look for that.
const auto *attachment_description_stencil_layout =
LvlFindInChain<VkAttachmentDescriptionStencilLayout>(render_pass_info->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
attachment_stencil_initial_layout = attachment_description_stencil_layout->stencilInitialLayout;
}
const ImageSubresourceLayoutMap *subresource_map = nullptr;
bool has_queried_map = false;
bool subres_skip = false;
for (uint32_t aspect_index = 0; aspect_index < 32; aspect_index++) {
VkImageAspectFlags test_aspect = 1u << aspect_index;
if ((view_state->normalized_subresource_range.aspectMask & test_aspect) == 0) {
continue;
}
// Allow for differing depth and stencil layouts
VkImageLayout check_layout = attachment_initial_layout;
if (test_aspect == VK_IMAGE_ASPECT_STENCIL_BIT) {
check_layout = attachment_stencil_initial_layout;
}
// If no layout information for image yet, will be checked at QueueSubmit time
if (check_layout != VK_IMAGE_LAYOUT_UNDEFINED) {
if (!has_queried_map) {
// Cast pCB to const because we don't want to create entries that don't exist here (in case the key changes to something
// in common with the non-const version.)
// The lookup is expensive, so cache it.
subresource_map = const_p_cb->GetImageSubresourceLayoutMap(image);
has_queried_map = true;
}
if (subresource_map) {
auto normalized_range = view_state->normalized_subresource_range;
normalized_range.aspectMask = test_aspect;
auto pos = subresource_map->Find(normalized_range);
LayoutUseCheckAndMessage layout_check(subresource_map, test_aspect);
// IncrementInterval skips over all the subresources that have the same state as we just checked,
// incrementing to the next "constant value" range
for (; !(pos.AtEnd()) && !subres_skip; pos.IncrementInterval()) {
const VkImageSubresource &subres = pos->subresource;
if (!layout_check.Check(subres, check_layout, pos->current_layout, pos->initial_layout)) {
subres_skip |= LogError(
device, kVUID_Core_DrawState_InvalidRenderpass,
"You cannot start a render pass using attachment %u where the render pass initial layout is %s "
"and the %s layout of the attachment is %s. The layouts must match, or the render "
"pass initial layout for the attachment must be VK_IMAGE_LAYOUT_UNDEFINED",
i, string_VkImageLayout(check_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
}
}
}
}
skip |= subres_skip;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_initial_layout, image, image_view,
framebuffer, render_pass, i, "initial layout");
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, final_layout, image, image_view, framebuffer,
render_pass, i, "final layout");
}
for (uint32_t j = 0; j < render_pass_info->subpassCount; ++j) {
auto &subpass = render_pass_info->pSubpasses[j];
for (uint32_t k = 0; k < render_pass_info->pSubpasses[j].inputAttachmentCount; ++k) {
auto &attachment_ref = subpass.pInputAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"input attachment layout");
}
}
}
for (uint32_t k = 0; k < render_pass_info->pSubpasses[j].colorAttachmentCount; ++k) {
auto &attachment_ref = subpass.pColorAttachments[k];
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"color attachment layout");
if (subpass.pResolveAttachments) {
ValidateRenderPassLayoutAgainstFramebufferImageUsage(
rp_version, attachment_ref.layout, image, image_view, framebuffer, render_pass,
attachment_ref.attachment, "resolve attachment layout");
}
}
}
}
if (render_pass_info->pSubpasses[j].pDepthStencilAttachment) {
auto &attachment_ref = *subpass.pDepthStencilAttachment;
if (attachment_ref.attachment != VK_ATTACHMENT_UNUSED) {
auto image_view = attachments[attachment_ref.attachment];
auto view_state = GetImageViewState(image_view);
if (view_state) {
auto image = view_state->create_info.image;
ValidateRenderPassLayoutAgainstFramebufferImageUsage(rp_version, attachment_ref.layout, image, image_view,
framebuffer, render_pass, attachment_ref.attachment,
"input attachment layout");
}
}
}
}
}
return skip;
}
void CoreChecks::TransitionAttachmentRefLayout(CMD_BUFFER_STATE *pCB, FRAMEBUFFER_STATE *pFramebuffer,
const safe_VkAttachmentReference2 &ref) {
if (ref.attachment != VK_ATTACHMENT_UNUSED) {
IMAGE_VIEW_STATE *image_view = pCB->GetActiveAttachmentImageViewState(ref.attachment);
if (image_view) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_reference_stencil_layout = LvlFindInChain<VkAttachmentReferenceStencilLayout>(ref.pNext);
if (attachment_reference_stencil_layout) {
stencil_layout = attachment_reference_stencil_layout->stencilLayout;
}
SetImageViewLayout(pCB, *image_view, ref.layout, stencil_layout);
}
}
}
void CoreChecks::TransitionSubpassLayouts(CMD_BUFFER_STATE *pCB, const RENDER_PASS_STATE *render_pass_state,
const int subpass_index, FRAMEBUFFER_STATE *framebuffer_state) {
assert(render_pass_state);
if (framebuffer_state) {
auto const &subpass = render_pass_state->createInfo.pSubpasses[subpass_index];
for (uint32_t j = 0; j < subpass.inputAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pInputAttachments[j]);
}
for (uint32_t j = 0; j < subpass.colorAttachmentCount; ++j) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, subpass.pColorAttachments[j]);
}
if (subpass.pDepthStencilAttachment) {
TransitionAttachmentRefLayout(pCB, framebuffer_state, *subpass.pDepthStencilAttachment);
}
}
}
// Transition the layout state for renderpass attachments based on the BeginRenderPass() call. This includes:
// 1. Transition into initialLayout state
// 2. Transition from initialLayout to layout used in subpass 0
void CoreChecks::TransitionBeginRenderPassLayouts(CMD_BUFFER_STATE *cb_state, const RENDER_PASS_STATE *render_pass_state,
FRAMEBUFFER_STATE *framebuffer_state) {
// First record expected initialLayout as a potential initial layout usage.
auto const rpci = render_pass_state->createInfo.ptr();
for (uint32_t i = 0; i < rpci->attachmentCount; ++i) {
auto *view_state = cb_state->GetActiveAttachmentImageViewState(i);
if (view_state) {
IMAGE_STATE *image_state = view_state->image_state.get();
const auto initial_layout = rpci->pAttachments[i].initialLayout;
const auto *attachment_description_stencil_layout =
LvlFindInChain<VkAttachmentDescriptionStencilLayout>(rpci->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
const auto stencil_initial_layout = attachment_description_stencil_layout->stencilInitialLayout;
VkImageSubresourceRange sub_range = view_state->normalized_subresource_range;
sub_range.aspectMask = VK_IMAGE_ASPECT_DEPTH_BIT;
SetImageInitialLayout(cb_state, *image_state, sub_range, initial_layout);
sub_range.aspectMask = VK_IMAGE_ASPECT_STENCIL_BIT;
SetImageInitialLayout(cb_state, *image_state, sub_range, stencil_initial_layout);
} else {
SetImageInitialLayout(cb_state, *image_state, view_state->normalized_subresource_range, initial_layout);
}
}
}
// Now transition for first subpass (index 0)
TransitionSubpassLayouts(cb_state, render_pass_state, 0, framebuffer_state);
}
bool VerifyAspectsPresent(VkImageAspectFlags aspect_mask, VkFormat format) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != 0) {
if (!(FormatIsColor(format) || FormatIsMultiplane(format))) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
if (!FormatHasDepth(format)) return false;
}
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
if (!FormatHasStencil(format)) return false;
}
if (0 != (aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT))) {
if (FormatPlaneCount(format) == 1) return false;
}
return true;
}
// Verify an ImageMemoryBarrier's old/new ImageLayouts are compatible with the Image's ImageUsageFlags.
bool CoreChecks::ValidateBarrierLayoutToImageUsage(const Location &loc, VkImage image, VkImageLayout layout,
VkImageUsageFlags usage_flags) const {
bool skip = false;
bool is_error = false;
switch (layout) {
case VK_IMAGE_LAYOUT_COLOR_ATTACHMENT_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_ATTACHMENT_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL:
is_error = ((usage_flags & (VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT)) == 0);
break;
case VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_TRANSFER_SRC_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_SHADING_RATE_OPTIMAL_NV:
is_error = ((usage_flags & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) == 0);
break;
case VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0);
break;
case VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL:
is_error = ((usage_flags & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) == 0);
break;
default:
// Other VkImageLayout values do not have VUs defined in this context.
break;
}
if (is_error) {
const auto &vuid = sync_vuid_maps::GetBadImageLayoutVUID(loc, layout);
skip |=
LogError(image, vuid, "%s Image barrier Layout=%s is not compatible with %s usage flags 0x%" PRIx32 ".",
loc.Message().c_str(), string_VkImageLayout(layout), report_data->FormatHandle(image).c_str(), usage_flags);
}
return skip;
}
// Verify image barriers are compatible with the images they reference.
template <typename ImageBarrier>
bool CoreChecks::ValidateBarriersToImages(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state,
uint32_t imageMemoryBarrierCount, const ImageBarrier *pImageMemoryBarriers) const {
bool skip = false;
using sync_vuid_maps::GetImageBarrierVUID;
using sync_vuid_maps::ImageError;
// Scoreboard for checking for duplicate and inconsistent barriers to images
struct ImageBarrierScoreboardEntry {
uint32_t index;
// This is designed for temporary storage within the scope of the API call. If retained storage of the barriers is
// required, copies should be made and smart or unique pointers used in some other structure (or this one refactored).
const ImageBarrier *barrier;
};
// Necessary to resolve warning C4503 when building with Visual Studio 2015.
// Adding a struct wrapper is their recommended solution for the expanded type name growing too long
// when creating maps full of maps.
struct ImageBarrierScoreboardSubresMap {
layer_data::unordered_map<VkImageSubresourceRange, ImageBarrierScoreboardEntry> map;
};
using ImageBarrierScoreboardImageMap = layer_data::unordered_map<VkImage, ImageBarrierScoreboardSubresMap>;
// Scoreboard for duplicate layout transition barriers within the list
// Pointers retained in the scoreboard only have the lifetime of *this* call (i.e. within the scope of the API call)
ImageBarrierScoreboardImageMap layout_transitions;
for (uint32_t i = 0; i < imageMemoryBarrierCount; ++i) {
auto loc = outer_loc.dot(Field::pImageMemoryBarriers, i);
const auto &img_barrier = pImageMemoryBarriers[i];
// Update the scoreboard of layout transitions and check for barriers affecting the same image and subresource
// TODO: a higher precision could be gained by adapting the command_buffer image_layout_map logic looking for conflicts
// at a per sub-resource level
if (img_barrier.oldLayout != img_barrier.newLayout) {
const ImageBarrierScoreboardEntry new_entry{i, &img_barrier};
const auto image_it = layout_transitions.find(img_barrier.image);
if (image_it != layout_transitions.end()) {
auto &subres_map = image_it->second.map;
auto subres_it = subres_map.find(img_barrier.subresourceRange);
if (subres_it != subres_map.end()) {
auto &entry = subres_it->second;
auto entry_layout =
NormalizeSynchronization2Layout(entry.barrier->subresourceRange.aspectMask, entry.barrier->newLayout);
auto old_layout =
NormalizeSynchronization2Layout(img_barrier.subresourceRange.aspectMask, img_barrier.oldLayout);
if ((entry_layout != old_layout) && (old_layout != VK_IMAGE_LAYOUT_UNDEFINED)) {
const VkImageSubresourceRange &range = img_barrier.subresourceRange;
const auto &vuid = GetImageBarrierVUID(loc, ImageError::kConflictingLayout);
skip = LogError(
cb_state->commandBuffer(), vuid,
"%s conflicts with earlier entry pImageMemoryBarrier[%u]. %s"
" subresourceRange: aspectMask=%u baseMipLevel=%u levelCount=%u, baseArrayLayer=%u, layerCount=%u; "
"conflicting barrier transitions image layout from %s when earlier barrier transitioned to layout %s.",
loc.Message().c_str(), entry.index, report_data->FormatHandle(img_barrier.image).c_str(),
range.aspectMask, range.baseMipLevel, range.levelCount, range.baseArrayLayer, range.layerCount,
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(entry.barrier->newLayout));
}
entry = new_entry;
} else {
subres_map[img_barrier.subresourceRange] = new_entry;
}
} else {
layout_transitions[img_barrier.image].map[img_barrier.subresourceRange] = new_entry;
}
}
auto image_state = GetImageState(img_barrier.image);
if (image_state) {
VkImageUsageFlags usage_flags = image_state->createInfo.usage;
skip |=
ValidateBarrierLayoutToImageUsage(loc.dot(Field::oldLayout), img_barrier.image, img_barrier.oldLayout, usage_flags);
skip |=
ValidateBarrierLayoutToImageUsage(loc.dot(Field::newLayout), img_barrier.image, img_barrier.newLayout, usage_flags);
// Make sure layout is able to be transitioned, currently only presented shared presentable images are locked
if (image_state->layout_locked) {
// TODO: Add unique id for error when available
skip |= LogError(
img_barrier.image, 0,
"%s Attempting to transition shared presentable %s"
" from layout %s to layout %s, but image has already been presented and cannot have its layout transitioned.",
loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout));
}
const VkImageCreateInfo &image_create_info = image_state->createInfo;
const VkFormat image_format = image_create_info.format;
const VkImageAspectFlags aspect_mask = img_barrier.subresourceRange.aspectMask;
// For a Depth/Stencil image both aspects MUST be set
auto image_loc = loc.dot(Field::image);
if (FormatIsDepthAndStencil(image_format)) {
if (enabled_features.core12.separateDepthStencilLayouts) {
if (!(aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
auto vuid = GetImageBarrierVUID(loc, ImageError::kNotDepthOrStencilAspect);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s that must have either the depth or stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
} else {
auto const ds_mask = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if ((aspect_mask & ds_mask) != (ds_mask)) {
auto error = device_extensions.vk_khr_separate_depth_stencil_layouts
? ImageError::kNotSeparateDepthAndStencilAspect
: ImageError::kNotDepthAndStencilAspect;
auto vuid = GetImageBarrierVUID(image_loc, error);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s that must have the depth and stencil "
"aspects set, but its aspectMask is 0x%" PRIx32 ".",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
}
}
const auto *subresource_map = cb_state->GetImageSubresourceLayoutMap(img_barrier.image);
if (img_barrier.oldLayout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
// Not sure if this needs to be in the ForRange traversal, pulling it out as it is currently invariant with
// subresource.
} else if (subresource_map && !QueueFamilyIsExternal(img_barrier.srcQueueFamilyIndex)) {
bool subres_skip = false;
// Validate aspects in isolation.
// This is required when handling separate depth-stencil layouts.
for (uint32_t aspect_index = 0; aspect_index < 32; aspect_index++) {
VkImageAspectFlags test_aspect = 1u << aspect_index;
if ((img_barrier.subresourceRange.aspectMask & test_aspect) == 0) {
continue;
}
LayoutUseCheckAndMessage layout_check(subresource_map, test_aspect);
auto normalized_isr = image_state->NormalizeSubresourceRange(img_barrier.subresourceRange);
normalized_isr.aspectMask = test_aspect;
// IncrementInterval skips over all the subresources that have the same state as we just checked,
// incrementing to the next "constant value" range
for (auto pos = subresource_map->Find(normalized_isr); !(pos.AtEnd()) && !subres_skip; pos.IncrementInterval()) {
const auto &value = *pos;
auto old_layout =
NormalizeSynchronization2Layout(test_aspect, img_barrier.oldLayout);
if (!layout_check.Check(value.subresource, old_layout, value.current_layout, value.initial_layout)) {
const auto &vuid = GetImageBarrierVUID(loc, ImageError::kConflictingLayout);
subres_skip =
LogError(cb_state->commandBuffer(), vuid,
"%s %s cannot transition the layout of aspect=%d level=%d layer=%d from %s when the "
"%s layout is %s.",
loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
value.subresource.aspectMask, value.subresource.mipLevel, value.subresource.arrayLayer,
string_VkImageLayout(img_barrier.oldLayout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
}
}
skip |= subres_skip;
}
// checks color format and (single-plane or non-disjoint)
// if ycbcr extension is not supported then single-plane and non-disjoint are always both true
if ((FormatIsColor(image_format) == true) &&
((FormatIsMultiplane(image_format) == false) || (image_state->disjoint == false))) {
if (aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT) {
auto error = device_extensions.vk_khr_sampler_ycbcr_conversion ? ImageError::kNotColorAspect
: ImageError::kNotColorAspectYcbcr;
const auto &vuid = GetImageBarrierVUID(loc, error);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s that must be only VK_IMAGE_ASPECT_COLOR_BIT, "
"but its aspectMask is 0x%" PRIx32 ".",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
}
VkImageAspectFlags valid_disjoint_mask =
VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT | VK_IMAGE_ASPECT_COLOR_BIT;
if ((FormatIsMultiplane(image_format) == true) && (image_state->disjoint == true) &&
((aspect_mask & valid_disjoint_mask) == 0)) {
const auto &vuid = GetImageBarrierVUID(image_loc, ImageError::kBadMultiplanarAspect);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s has aspectMask (0x%" PRIx32
") but needs to include either an VK_IMAGE_ASPECT_PLANE_*_BIT or VK_IMAGE_ASPECT_COLOR_BIT.",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
if ((FormatPlaneCount(image_format) == 2) && ((aspect_mask & VK_IMAGE_ASPECT_PLANE_2_BIT) != 0)) {
const auto &vuid = GetImageBarrierVUID(image_loc, ImageError::kBadPlaneCount);
skip |= LogError(img_barrier.image, vuid,
"%s references %s of format %s has only two planes but included "
"VK_IMAGE_ASPECT_PLANE_2_BIT in its aspectMask (0x%" PRIx32 ").",
image_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkFormat(image_format), aspect_mask);
}
}
}
return skip;
}
template <typename Barrier, typename TransferBarrier>
bool CoreChecks::ValidateQFOTransferBarrierUniqueness(const Location &loc, const CMD_BUFFER_STATE *cb_state, const Barrier &barrier,
const QFOTransferBarrierSets<TransferBarrier> &barrier_sets) const {
bool skip = false;
const char *handle_name = TransferBarrier::HandleName();
const char *transfer_type = nullptr;
if (!IsTransferOp(barrier)) {
return skip;
}
const TransferBarrier *barrier_record = nullptr;
if (cb_state->IsReleaseOp(barrier) && !QueueFamilyIsExternal(barrier.dstQueueFamilyIndex)) {
const auto found = barrier_sets.release.find(barrier);
if (found != barrier_sets.release.cend()) {
barrier_record = &(*found);
transfer_type = "releasing";
}
} else if (cb_state->IsAcquireOp(barrier) && !QueueFamilyIsExternal(barrier.srcQueueFamilyIndex)) {
const auto found = barrier_sets.acquire.find(barrier);
if (found != barrier_sets.acquire.cend()) {
barrier_record = &(*found);
transfer_type = "acquiring";
}
}
if (barrier_record != nullptr) {
skip |=
LogWarning(cb_state->commandBuffer(), TransferBarrier::ErrMsgDuplicateQFOInCB(),
"%s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32 " to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier recorded in this command buffer.",
loc.Message().c_str(), transfer_type, handle_name, report_data->FormatHandle(barrier_record->handle).c_str(),
barrier_record->srcQueueFamilyIndex, barrier_record->dstQueueFamilyIndex);
}
return skip;
}
VulkanTypedHandle BarrierTypedHandle(const VkImageMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage);
}
VulkanTypedHandle BarrierTypedHandle(const VkImageMemoryBarrier2KHR &barrier) {
return VulkanTypedHandle(barrier.image, kVulkanObjectTypeImage);
}
const IMAGE_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkImageMemoryBarrier &barrier) {
return device_state.GetImageState(barrier.image);
}
const IMAGE_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkImageMemoryBarrier2KHR &barrier) {
return device_state.GetImageState(barrier.image);
}
VulkanTypedHandle BarrierTypedHandle(const VkBufferMemoryBarrier &barrier) {
return VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer);
}
VulkanTypedHandle BarrierTypedHandle(const VkBufferMemoryBarrier2KHR &barrier) {
return VulkanTypedHandle(barrier.buffer, kVulkanObjectTypeBuffer);
}
const BUFFER_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkBufferMemoryBarrier &barrier) {
return device_state.GetBufferState(barrier.buffer);
}
const BUFFER_STATE *BarrierHandleState(const ValidationStateTracker &device_state, const VkBufferMemoryBarrier2KHR &barrier) {
return device_state.GetBufferState(barrier.buffer);
}
template <typename Barrier, typename TransferBarrier>
void CoreChecks::RecordBarrierValidationInfo(const Location &loc, CMD_BUFFER_STATE *cb_state, const Barrier &barrier,
QFOTransferBarrierSets<TransferBarrier> &barrier_sets) {
if (IsTransferOp(barrier)) {
if (cb_state->IsReleaseOp(barrier) && !QueueFamilyIsExternal(barrier.dstQueueFamilyIndex)) {
barrier_sets.release.emplace(barrier);
} else if (cb_state->IsAcquireOp(barrier) && !QueueFamilyIsExternal(barrier.srcQueueFamilyIndex)) {
barrier_sets.acquire.emplace(barrier);
}
}
// 7.7.4: If the values of srcQueueFamilyIndex and dstQueueFamilyIndex are equal, no ownership transfer is performed, and the
// barrier operates as if they were both set to VK_QUEUE_FAMILY_IGNORED.
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
const bool is_ownership_transfer = src_queue_family != dst_queue_family;
if (is_ownership_transfer) {
// Only enqueue submit time check if it is needed. If more submit time checks are added, change the criteria
// TODO create a better named list, or rename the submit time lists to something that matches the broader usage...
auto handle_state = BarrierHandleState(*this, barrier);
bool mode_concurrent = handle_state ? handle_state->createInfo.sharingMode == VK_SHARING_MODE_CONCURRENT : false;
if (!mode_concurrent) {
const auto typed_handle = BarrierTypedHandle(barrier);
core_error::LocationCapture loc_capture(loc);
cb_state->queue_submit_functions.emplace_back(
[loc_capture, cb_state, typed_handle, src_queue_family, dst_queue_family](const ValidationStateTracker *device_data,
const QUEUE_STATE *queue_state) {
return ValidateConcurrentBarrierAtSubmit(loc_capture.Get(), device_data, queue_state, cb_state, typed_handle,
src_queue_family, dst_queue_family);
});
}
}
}
// Verify image barrier image state and that the image is consistent with FB image
template <typename ImgBarrier>
bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
const ImgBarrier &img_barrier, const CMD_BUFFER_STATE *primary_cb_state) const {
using sync_vuid_maps::GetImageBarrierVUID;
using sync_vuid_maps::ImageError;
bool skip = false;
const auto *fb_state = framebuffer;
assert(fb_state);
const auto img_bar_image = img_barrier.image;
bool image_match = false;
bool sub_image_found = false; // Do we find a corresponding subpass description
VkImageLayout sub_image_layout = VK_IMAGE_LAYOUT_UNDEFINED;
uint32_t attach_index = 0;
// Verify that a framebuffer image matches barrier image
const auto attachment_count = fb_state->createInfo.attachmentCount;
for (uint32_t attachment = 0; attachment < attachment_count; ++attachment) {
auto view_state = primary_cb_state ? primary_cb_state->GetActiveAttachmentImageViewState(attachment)
: cb_state->GetActiveAttachmentImageViewState(attachment);
if (view_state && (img_bar_image == view_state->create_info.image)) {
image_match = true;
attach_index = attachment;
break;
}
}
if (image_match) { // Make sure subpass is referring to matching attachment
if (sub_desc.pDepthStencilAttachment && sub_desc.pDepthStencilAttachment->attachment == attach_index) {
sub_image_layout = sub_desc.pDepthStencilAttachment->layout;
sub_image_found = true;
}
if (!sub_image_found && device_extensions.vk_khr_depth_stencil_resolve) {
const auto *resolve = LvlFindInChain<VkSubpassDescriptionDepthStencilResolve>(sub_desc.pNext);
if (resolve && resolve->pDepthStencilResolveAttachment &&
resolve->pDepthStencilResolveAttachment->attachment == attach_index) {
sub_image_layout = resolve->pDepthStencilResolveAttachment->layout;
sub_image_found = true;
}
}
if (!sub_image_found) {
for (uint32_t j = 0; j < sub_desc.colorAttachmentCount; ++j) {
if (sub_desc.pColorAttachments && sub_desc.pColorAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pColorAttachments[j].layout;
sub_image_found = true;
break;
}
if (!sub_image_found && sub_desc.pResolveAttachments &&
sub_desc.pResolveAttachments[j].attachment == attach_index) {
sub_image_layout = sub_desc.pResolveAttachments[j].layout;
sub_image_found = true;
break;
}
}
}
if (!sub_image_found) {
auto img_loc = loc.dot(Field::image);
const auto &vuid = GetImageBarrierVUID(img_loc, ImageError::kRenderPassMismatch);
skip |=
LogError(rp_handle, vuid,
"%s Barrier for %s is not referenced by the VkSubpassDescription for active subpass (%d) of current %s.",
img_loc.Message().c_str(), report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str());
}
} else { // !image_match
auto img_loc = loc.dot(Field::image);
const auto &vuid = GetImageBarrierVUID(img_loc, ImageError::kRenderPassMismatch);
skip |= LogError(fb_state->framebuffer(), vuid, "%s Barrier for %s does not match an image from the current %s.",
img_loc.Message().c_str(), report_data->FormatHandle(img_bar_image).c_str(),
report_data->FormatHandle(fb_state->framebuffer()).c_str());
}
if (img_barrier.oldLayout != img_barrier.newLayout) {
auto layout_loc = loc.dot(Field::oldLayout);
const auto &vuid = GetImageBarrierVUID(layout_loc, ImageError::kRenderPassLayoutChange);
skip |= LogError(cb_state->commandBuffer(), vuid,
"%s As the Image Barrier for %s is being executed within a render pass instance, oldLayout must "
"equal newLayout yet they are %s and %s.",
layout_loc.Message().c_str(), report_data->FormatHandle(img_barrier.image).c_str(),
string_VkImageLayout(img_barrier.oldLayout), string_VkImageLayout(img_barrier.newLayout));
} else {
if (sub_image_found && sub_image_layout != img_barrier.oldLayout) {
LogObjectList objlist(rp_handle);
objlist.add(img_bar_image);
auto layout_loc = loc.dot(Field::oldLayout);
const auto &vuid = GetImageBarrierVUID(layout_loc, ImageError::kRenderPassLayoutChange);
skip |= LogError(objlist, vuid,
"%s Barrier for %s is referenced by the VkSubpassDescription for active "
"subpass (%d) of current %s as having layout %s, but image barrier has layout %s.",
layout_loc.Message().c_str(), report_data->FormatHandle(img_bar_image).c_str(), active_subpass,
report_data->FormatHandle(rp_handle).c_str(), string_VkImageLayout(sub_image_layout),
string_VkImageLayout(img_barrier.oldLayout));
}
}
return skip;
}
// explicitly instantiate so these can be used in core_validation.cpp
template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
const VkImageMemoryBarrier &img_barrier,
const CMD_BUFFER_STATE *primary_cb_state) const;
template bool CoreChecks::ValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE const *cb_state,
const FRAMEBUFFER_STATE *framebuffer, uint32_t active_subpass,
const safe_VkSubpassDescription2 &sub_desc, const VkRenderPass rp_handle,
const VkImageMemoryBarrier2KHR &img_barrier,
const CMD_BUFFER_STATE *primary_cb_state) const;
template <typename ImgBarrier>
void CoreChecks::EnqueueSubmitTimeValidateImageBarrierAttachment(const Location &loc, CMD_BUFFER_STATE *cb_state,
const ImgBarrier &barrier) {
// Secondary CBs can have null framebuffer so queue up validation in that case 'til FB is known
if ((cb_state->activeRenderPass) && (VK_NULL_HANDLE == cb_state->activeFramebuffer) &&
(VK_COMMAND_BUFFER_LEVEL_SECONDARY == cb_state->createInfo.level)) {
const auto active_subpass = cb_state->activeSubpass;
const auto rp_state = cb_state->activeRenderPass;
const auto &sub_desc = rp_state->createInfo.pSubpasses[active_subpass];
        // Secondary CB case without FB specified; delay validation
        auto *this_ptr = this;  // Required for older compilers when building with C++20 compatibility
core_error::LocationCapture loc_capture(loc);
const auto render_pass = rp_state->renderPass();
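        // The lambda captures copies (LocationCapture, safe subpass description, render pass handle, barrier) because
        // it runs later, when vkCmdExecuteCommands supplies the primary CB and the framebuffer that is unknown here.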
cb_state->cmd_execute_commands_functions.emplace_back(
[this_ptr, loc_capture, cb_state, active_subpass, sub_desc, render_pass, barrier](const CMD_BUFFER_STATE *primary_cb,
const FRAMEBUFFER_STATE *fb) {
return this_ptr->ValidateImageBarrierAttachment(loc_capture.Get(), cb_state, fb, active_subpass, sub_desc,
render_pass, barrier, primary_cb);
});
}
}
void CoreChecks::RecordBarriers(Func func_name, CMD_BUFFER_STATE *cb_state, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) {
for (uint32_t i = 0; i < bufferBarrierCount; i++) {
Location loc(func_name, Struct::VkBufferMemoryBarrier, Field::pBufferMemoryBarriers, i);
RecordBarrierValidationInfo(loc, cb_state, pBufferMemBarriers[i], cb_state->qfo_transfer_buffer_barriers);
}
for (uint32_t i = 0; i < imageMemBarrierCount; i++) {
Location loc(func_name, Struct::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i);
const auto &img_barrier = pImageMemBarriers[i];
RecordBarrierValidationInfo(loc, cb_state, img_barrier, cb_state->qfo_transfer_image_barriers);
EnqueueSubmitTimeValidateImageBarrierAttachment(loc, cb_state, img_barrier);
}
}
void CoreChecks::RecordBarriers(Func func_name, CMD_BUFFER_STATE *cb_state, const VkDependencyInfoKHR &dep_info) {
for (uint32_t i = 0; i < dep_info.bufferMemoryBarrierCount; i++) {
Location loc(func_name, Struct::VkBufferMemoryBarrier2KHR, Field::pBufferMemoryBarriers, i);
RecordBarrierValidationInfo(loc, cb_state, dep_info.pBufferMemoryBarriers[i], cb_state->qfo_transfer_buffer_barriers);
}
for (uint32_t i = 0; i < dep_info.imageMemoryBarrierCount; i++) {
Location loc(func_name, Struct::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i);
const auto &img_barrier = dep_info.pImageMemoryBarriers[i];
RecordBarrierValidationInfo(loc, cb_state, img_barrier, cb_state->qfo_transfer_image_barriers);
EnqueueSubmitTimeValidateImageBarrierAttachment(loc, cb_state, img_barrier);
}
}
template <typename TransferBarrier, typename Scoreboard>
bool CoreChecks::ValidateAndUpdateQFOScoreboard(const debug_report_data *report_data, const CMD_BUFFER_STATE *cb_state,
const char *operation, const TransferBarrier &barrier,
Scoreboard *scoreboard) const {
// Record to the scoreboard or report that we have a duplication
bool skip = false;
auto inserted = scoreboard->emplace(barrier, cb_state);
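    // emplace() returns {iterator, inserted}; a failed insert pointing at a different CB means the same barrier
    // was already recorded by another command buffer in this submission batch.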
if (!inserted.second && inserted.first->second != cb_state) {
        // This is a duplication (but don't report duplicates from the same CB, as we do that at record time)
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(barrier.handle);
objlist.add(inserted.first->second->commandBuffer());
skip = LogWarning(objlist, TransferBarrier::ErrMsgDuplicateQFOInSubmit(),
"%s: %s %s queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " duplicates existing barrier submitted in this batch from %s.",
"vkQueueSubmit()", TransferBarrier::BarrierName(), operation, TransferBarrier::HandleName(),
report_data->FormatHandle(barrier.handle).c_str(), barrier.srcQueueFamilyIndex,
barrier.dstQueueFamilyIndex, report_data->FormatHandle(inserted.first->second->commandBuffer()).c_str());
}
return skip;
}
template <typename TransferBarrier>
bool CoreChecks::ValidateQueuedQFOTransferBarriers(
const CMD_BUFFER_STATE *cb_state, QFOTransferCBScoreboards<TransferBarrier> *scoreboards,
const GlobalQFOTransferBarrierMap<TransferBarrier> &global_release_barriers) const {
bool skip = false;
const auto &cb_barriers = cb_state->GetQFOBarrierSets(TransferBarrier());
const char *barrier_name = TransferBarrier::BarrierName();
const char *handle_name = TransferBarrier::HandleName();
// No release should have an extant duplicate (WARNING)
for (const auto &release : cb_barriers.release) {
// Check the global pending release barriers
const auto set_it = global_release_barriers.find(release.handle);
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<TransferBarrier> &set_for_handle = set_it->second;
const auto found = set_for_handle.find(release);
if (found != set_for_handle.cend()) {
skip |= LogWarning(cb_state->commandBuffer(), TransferBarrier::ErrMsgDuplicateQFOSubmitted(),
"%s: %s releasing queue ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32
" duplicates existing barrier queued for execution, without intervening acquire operation.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(found->handle).c_str(),
found->srcQueueFamilyIndex, found->dstQueueFamilyIndex);
}
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "releasing", release, &scoreboards->release);
}
// Each acquire must have a matching release (ERROR)
for (const auto &acquire : cb_barriers.acquire) {
const auto set_it = global_release_barriers.find(acquire.handle);
bool matching_release_found = false;
if (set_it != global_release_barriers.cend()) {
const QFOTransferBarrierSet<TransferBarrier> &set_for_handle = set_it->second;
matching_release_found = set_for_handle.find(acquire) != set_for_handle.cend();
}
if (!matching_release_found) {
skip |= LogError(cb_state->commandBuffer(), TransferBarrier::ErrMsgMissingQFOReleaseInSubmit(),
"%s: in submitted command buffer %s acquiring ownership of %s (%s), from srcQueueFamilyIndex %" PRIu32
" to dstQueueFamilyIndex %" PRIu32 " has no matching release barrier queued for execution.",
"vkQueueSubmit()", barrier_name, handle_name, report_data->FormatHandle(acquire.handle).c_str(),
acquire.srcQueueFamilyIndex, acquire.dstQueueFamilyIndex);
}
skip |= ValidateAndUpdateQFOScoreboard(report_data, cb_state, "acquiring", acquire, &scoreboards->acquire);
}
return skip;
}
bool CoreChecks::ValidateQueuedQFOTransfers(const CMD_BUFFER_STATE *cb_state,
QFOTransferCBScoreboards<QFOImageTransferBarrier> *qfo_image_scoreboards,
QFOTransferCBScoreboards<QFOBufferTransferBarrier> *qfo_buffer_scoreboards) const {
bool skip = false;
skip |=
ValidateQueuedQFOTransferBarriers<QFOImageTransferBarrier>(cb_state, qfo_image_scoreboards, qfo_release_image_barrier_map);
skip |= ValidateQueuedQFOTransferBarriers<QFOBufferTransferBarrier>(cb_state, qfo_buffer_scoreboards,
qfo_release_buffer_barrier_map);
return skip;
}
template <typename TransferBarrier>
void RecordQueuedQFOTransferBarriers(QFOTransferBarrierSets<TransferBarrier> &cb_barriers,
GlobalQFOTransferBarrierMap<TransferBarrier> &global_release_barriers) {
// Add release barriers from this submit to the global map
for (const auto &release : cb_barriers.release) {
// the global barrier list is mapped by resource handle to allow cleanup on resource destruction
// NOTE: We're using [] because creation of a Set is a needed side effect for new handles
global_release_barriers[release.handle].insert(release);
}
// Erase acquired barriers from this submit from the global map -- essentially marking releases as consumed
for (const auto &acquire : cb_barriers.acquire) {
// NOTE: We're not using [] because we don't want to create entries for missing releases
auto set_it = global_release_barriers.find(acquire.handle);
if (set_it != global_release_barriers.end()) {
QFOTransferBarrierSet<TransferBarrier> &set_for_handle = set_it->second;
set_for_handle.erase(acquire);
if (set_for_handle.size() == 0) { // Clean up empty sets
global_release_barriers.erase(set_it);
}
}
}
}
void CoreChecks::RecordQueuedQFOTransfers(CMD_BUFFER_STATE *cb_state) {
RecordQueuedQFOTransferBarriers<QFOImageTransferBarrier>(cb_state->qfo_transfer_image_barriers, qfo_release_image_barrier_map);
RecordQueuedQFOTransferBarriers<QFOBufferTransferBarrier>(cb_state->qfo_transfer_buffer_barriers,
qfo_release_buffer_barrier_map);
}
template <typename ImgBarrier>
void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count, const ImgBarrier *barriers) {
// For ownership transfers, the barrier is specified twice; as a release
// operation on the yielding queue family, and as an acquire operation
// on the acquiring queue family. This barrier may also include a layout
// transition, which occurs 'between' the two operations. For validation
// purposes it doesn't seem important which side performs the layout
// transition, but it must not be performed twice. We'll arbitrarily
// choose to perform it as part of the acquire operation.
//
// However, we still need to record initial layout for the "initial layout" validation
for (uint32_t i = 0; i < barrier_count; i++) {
const auto &mem_barrier = barriers[i];
const bool is_release_op = cb_state->IsReleaseOp(mem_barrier);
auto *image_state = GetImageState(mem_barrier.image);
if (image_state) {
RecordTransitionImageLayout(cb_state, image_state, mem_barrier, is_release_op);
}
}
}
// explicitly instantiate this template so it can be used in core_validation.cpp
template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const VkImageMemoryBarrier *barrier);
template void CoreChecks::TransitionImageLayouts(CMD_BUFFER_STATE *cb_state, uint32_t barrier_count,
const VkImageMemoryBarrier2KHR *barrier);
VkImageLayout NormalizeSynchronization2Layout(const VkImageAspectFlags aspect_mask, VkImageLayout layout);
template <typename ImgBarrier>
void CoreChecks::RecordTransitionImageLayout(CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state,
const ImgBarrier &mem_barrier, bool is_release_op) {
if (enabled_features.synchronization2_features.synchronization2) {
if (mem_barrier.oldLayout == mem_barrier.newLayout) {
return;
}
}
auto normalized_isr = image_state->NormalizeSubresourceRange(mem_barrier.subresourceRange);
const auto &image_create_info = image_state->createInfo;
// Special case for 3D images with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT flag bit, where <extent.depth> and
// <arrayLayers> can potentially alias. When recording layout for the entire image, pre-emptively record layouts
// for all (potential) layer sub_resources.
if (0 != (image_create_info.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT)) {
normalized_isr.baseArrayLayer = 0;
normalized_isr.layerCount = image_create_info.extent.depth; // Treat each depth slice as a layer subresource
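        // e.g. a 3D image with extent.depth == 8 gets layouts recorded for layer subresources [0, 8)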
}
VkImageLayout initial_layout = NormalizeSynchronization2Layout(mem_barrier.subresourceRange.aspectMask, mem_barrier.oldLayout);
VkImageLayout new_layout = NormalizeSynchronization2Layout(mem_barrier.subresourceRange.aspectMask, mem_barrier.newLayout);
// Layout transitions in external instance are not tracked, so don't validate initial layout.
if (QueueFamilyIsExternal(mem_barrier.srcQueueFamilyIndex)) {
initial_layout = VK_IMAGE_LAYOUT_UNDEFINED;
}
if (is_release_op) {
SetImageInitialLayout(cb_state, *image_state, normalized_isr, initial_layout);
} else {
SetImageLayout(cb_state, *image_state, normalized_isr, new_layout, initial_layout);
}
}
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageAspectFlags aspect_mask,
VkImageLayout explicit_layout, VkImageLayout optimal_layout, const char *caller,
const char *layout_invalid_msg_code, const char *layout_mismatch_msg_code, bool *error) const {
if (disabled[image_layout_validation]) return false;
assert(cb_node);
assert(image_state);
const auto image = image_state->image();
bool skip = false;
const auto *subresource_map = cb_node->GetImageSubresourceLayoutMap(image);
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map, aspect_mask);
// IncrementInterval skips over all the subresources that have the same state as we just checked, incrementing to
// the next "constant value" range
for (auto pos = subresource_map->Find(range); !(pos.AtEnd()) && !subres_skip; pos.IncrementInterval()) {
if (!layout_check.Check(pos->subresource, explicit_layout, pos->current_layout, pos->initial_layout)) {
*error = true;
subres_skip |= LogError(cb_node->commandBuffer(), layout_mismatch_msg_code,
"%s: Cannot use %s (layer=%u mip=%u) with specific layout %s that doesn't match the "
"%s layout %s.",
caller, report_data->FormatHandle(image).c_str(), pos->subresource.arrayLayer,
pos->subresource.mipLevel, string_VkImageLayout(explicit_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
// If optimal_layout is not UNDEFINED, check that layout matches optimal for this case
if ((VK_IMAGE_LAYOUT_UNDEFINED != optimal_layout) && (explicit_layout != optimal_layout)) {
if (VK_IMAGE_LAYOUT_GENERAL == explicit_layout) {
if (image_state->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
// LAYOUT_GENERAL is allowed, but may not be performance optimal, flag as perf warning.
skip |= LogPerformanceWarning(cb_node->commandBuffer(), kVUID_Core_DrawState_InvalidImageLayout,
"%s: For optimal performance %s layout should be %s instead of GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(optimal_layout));
}
} else if (device_extensions.vk_khr_shared_presentable_image) {
if (image_state->shared_presentable) {
if (VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR != explicit_layout) {
skip |=
LogError(device, layout_invalid_msg_code,
"%s: Layout for shared presentable image is %s but must be VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR.",
                                 caller, string_VkImageLayout(explicit_layout));
}
}
} else {
*error = true;
skip |= LogError(cb_node->commandBuffer(), layout_invalid_msg_code,
"%s: Layout for %s is %s but can only be %s or VK_IMAGE_LAYOUT_GENERAL.", caller,
report_data->FormatHandle(image).c_str(), string_VkImageLayout(explicit_layout),
string_VkImageLayout(optimal_layout));
}
}
return skip;
}
bool CoreChecks::VerifyImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceLayers &subLayers, VkImageLayout explicit_layout,
VkImageLayout optimal_layout, const char *caller, const char *layout_invalid_msg_code,
const char *layout_mismatch_msg_code, bool *error) const {
return VerifyImageLayout(cb_node, image_state, RangeFromLayers(subLayers), explicit_layout, optimal_layout, caller,
layout_invalid_msg_code, layout_mismatch_msg_code, error);
}
void CoreChecks::TransitionFinalSubpassLayouts(CMD_BUFFER_STATE *pCB, const VkRenderPassBeginInfo *pRenderPassBegin,
FRAMEBUFFER_STATE *framebuffer_state) {
auto render_pass = GetRenderPassState(pRenderPassBegin->renderPass);
if (!render_pass) return;
const VkRenderPassCreateInfo2 *render_pass_info = render_pass->createInfo.ptr();
if (framebuffer_state) {
for (uint32_t i = 0; i < render_pass_info->attachmentCount; ++i) {
auto *view_state = pCB->GetActiveAttachmentImageViewState(i);
if (view_state) {
VkImageLayout stencil_layout = kInvalidLayout;
const auto *attachment_description_stencil_layout =
LvlFindInChain<VkAttachmentDescriptionStencilLayout>(render_pass_info->pAttachments[i].pNext);
if (attachment_description_stencil_layout) {
stencil_layout = attachment_description_stencil_layout->stencilFinalLayout;
}
SetImageViewLayout(pCB, *view_state, render_pass_info->pAttachments[i].finalLayout, stencil_layout);
}
}
}
}
#ifdef VK_USE_PLATFORM_ANDROID_KHR
// Android-specific validation that uses types defined only with VK_USE_PLATFORM_ANDROID_KHR
// This could also move into a separate core_validation_android.cpp file... ?
//
// AHB-specific validation within non-AHB APIs
//
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
bool skip = false;
const VkExternalFormatANDROID *ext_fmt_android = LvlFindInChain<VkExternalFormatANDROID>(create_info->pNext);
if (ext_fmt_android) {
if (0 != ext_fmt_android->externalFormat) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-01974",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with non-zero "
"externalFormat, but the VkImageCreateInfo's format is not VK_FORMAT_UNDEFINED.");
}
if (0 != (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT & create_info->flags)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02396",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but flags include VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
if (0 != (~VK_IMAGE_USAGE_SAMPLED_BIT & create_info->usage)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02397",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but usage includes bits (0x%" PRIx32 ") other than VK_IMAGE_USAGE_SAMPLED_BIT.",
create_info->usage);
}
if (VK_IMAGE_TILING_OPTIMAL != create_info->tiling) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02398",
"vkCreateImage(): VkImageCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"non-zero externalFormat, but layout is not VK_IMAGE_TILING_OPTIMAL.");
}
}
if ((0 != ext_fmt_android->externalFormat) &&
(ahb_ext_formats_map.find(ext_fmt_android->externalFormat) == ahb_ext_formats_map.end())) {
skip |= LogError(device, "VUID-VkExternalFormatANDROID-externalFormat-01894",
"vkCreateImage(): Chained VkExternalFormatANDROID struct contains a non-zero externalFormat (%" PRIu64
") which has "
"not been previously retrieved by vkGetAndroidHardwareBufferPropertiesANDROID().",
ext_fmt_android->externalFormat);
}
}
if ((nullptr == ext_fmt_android) || (0 == ext_fmt_android->externalFormat)) {
if (VK_FORMAT_UNDEFINED == create_info->format) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-01975",
"vkCreateImage(): VkImageCreateInfo struct's format is VK_FORMAT_UNDEFINED, but either does not have a "
"chained VkExternalFormatANDROID struct or the struct exists but has an externalFormat of 0.");
}
}
const VkExternalMemoryImageCreateInfo *emici = LvlFindInChain<VkExternalMemoryImageCreateInfo>(create_info->pNext);
if (emici && (emici->handleTypes & VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID)) {
if (create_info->imageType != VK_IMAGE_TYPE_2D) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-pNext-02393",
"vkCreateImage(): VkImageCreateInfo struct with imageType %s has chained VkExternalMemoryImageCreateInfo "
"struct with handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID.",
string_VkImageType(create_info->imageType));
}
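        // A full mip chain has floor(log2(max(width, height, depth))) + 1 levels (e.g. 5 for a 16x16 image)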
if ((create_info->mipLevels != 1) && (create_info->mipLevels != FullMipChainLevels(create_info->extent))) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-02394",
"vkCreateImage(): VkImageCreateInfo struct with chained VkExternalMemoryImageCreateInfo struct of "
"handleType VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID "
"specifies mipLevels = %" PRId32 " (full chain mipLevels are %" PRId32 ").",
create_info->mipLevels, FullMipChainLevels(create_info->extent));
}
}
return skip;
}
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const {
bool skip = false;
    const IMAGE_STATE *image_state = GetImageState(create_info->image);
    if (image_state && image_state->HasAHBFormat()) {
if (VK_FORMAT_UNDEFINED != create_info->format) {
skip |= LogError(create_info->image, "VUID-VkImageViewCreateInfo-image-02399",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"format member is %s and must be VK_FORMAT_UNDEFINED.",
string_VkFormat(create_info->format));
}
// Chain must include a compatible ycbcr conversion
bool conv_found = false;
uint64_t external_format = 0;
const VkSamplerYcbcrConversionInfo *ycbcr_conv_info = LvlFindInChain<VkSamplerYcbcrConversionInfo>(create_info->pNext);
if (ycbcr_conv_info != nullptr) {
auto ycbcr_state = GetSamplerYcbcrConversionState(ycbcr_conv_info->conversion);
if (ycbcr_state) {
conv_found = true;
external_format = ycbcr_state->external_format;
}
}
if ((!conv_found) || (external_format != image_state->ahb_format)) {
skip |= LogError(create_info->image, "VUID-VkImageViewCreateInfo-image-02400",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct with "
"an externalFormat (%" PRIu64
") but needs a chained VkSamplerYcbcrConversionInfo struct with a VkSamplerYcbcrConversion created "
"with the same external format.",
image_state->ahb_format);
}
// Errors in create_info swizzles
if (IsIdentitySwizzle(create_info->components) == false) {
skip |= LogError(
create_info->image, "VUID-VkImageViewCreateInfo-image-02401",
"vkCreateImageView(): VkImageViewCreateInfo struct has a chained VkExternalFormatANDROID struct, but "
"includes one or more non-identity component swizzles, r swizzle = %s, g swizzle = %s, b swizzle = %s, a swizzle "
"= %s.",
string_VkComponentSwizzle(create_info->components.r), string_VkComponentSwizzle(create_info->components.g),
string_VkComponentSwizzle(create_info->components.b), string_VkComponentSwizzle(create_info->components.a));
}
}
return skip;
}
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
if (image_state != nullptr) {
if (image_state->IsExternalAHB() && (0 == image_state->GetBoundMemory().size())) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-01895",
"vkGetImageSubresourceLayout(): Attempt to query layout from an image created with "
"VK_EXTERNAL_MEMORY_HANDLE_TYPE_ANDROID_HARDWARE_BUFFER_BIT_ANDROID handleType which has not yet been "
"bound to memory.");
}
}
return skip;
}
#else
bool CoreChecks::ValidateCreateImageANDROID(const debug_report_data *report_data, const VkImageCreateInfo *create_info) const {
return false;
}
bool CoreChecks::ValidateCreateImageViewANDROID(const VkImageViewCreateInfo *create_info) const { return false; }
bool CoreChecks::ValidateGetImageSubresourceLayoutANDROID(const VkImage image) const { return false; }
#endif // VK_USE_PLATFORM_ANDROID_KHR
bool CoreChecks::ValidateImageFormatFeatures(const VkImageCreateInfo *pCreateInfo) const {
bool skip = false;
// validates based on imageCreateFormatFeatures from vkspec.html#resources-image-creation-limits
VkFormatFeatureFlags tiling_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
const VkImageTiling image_tiling = pCreateInfo->tiling;
const VkFormat image_format = pCreateInfo->format;
if (image_format == VK_FORMAT_UNDEFINED) {
        // VU 01975 states format can't be undefined unless it is an Android external format
#ifdef VK_USE_PLATFORM_ANDROID_KHR
const VkExternalFormatANDROID *ext_fmt_android = LvlFindInChain<VkExternalFormatANDROID>(pCreateInfo->pNext);
if ((image_tiling == VK_IMAGE_TILING_OPTIMAL) && (ext_fmt_android != nullptr) && (0 != ext_fmt_android->externalFormat)) {
auto it = ahb_ext_formats_map.find(ext_fmt_android->externalFormat);
if (it != ahb_ext_formats_map.end()) {
tiling_features = it->second;
}
}
#endif
} else if (image_tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
uint64_t drm_format_modifier = 0;
const VkImageDrmFormatModifierExplicitCreateInfoEXT *drm_explicit =
LvlFindInChain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext);
const VkImageDrmFormatModifierListCreateInfoEXT *drm_implicit =
LvlFindInChain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext);
if (drm_explicit != nullptr) {
drm_format_modifier = drm_explicit->drmFormatModifier;
} else {
            // VUID 02261 makes sure it is only explicit or implicit; enforced in parameter checking
assert(drm_implicit != nullptr);
for (uint32_t i = 0; i < drm_implicit->drmFormatModifierCount; i++) {
drm_format_modifier |= drm_implicit->pDrmFormatModifiers[i];
}
}
VkFormatProperties2 format_properties_2 = {VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, nullptr};
VkDrmFormatModifierPropertiesListEXT drm_properties_list = {VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
nullptr};
format_properties_2.pNext = (void *)&drm_properties_list;
DispatchGetPhysicalDeviceFormatProperties2(physical_device, image_format, &format_properties_2);
std::vector<VkDrmFormatModifierPropertiesEXT> drm_properties;
drm_properties.resize(drm_properties_list.drmFormatModifierCount);
drm_properties_list.pDrmFormatModifierProperties = &drm_properties[0];
DispatchGetPhysicalDeviceFormatProperties2(physical_device, image_format, &format_properties_2);
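        // NOTE: for an implicit list the driver picks a single modifier, but drm_format_modifier above is the OR of
        // all candidates; matching with '&' below accumulates tiling features from any supported modifier that shares
        // bits with that mask, which can over-approximate the features actually available.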
for (uint32_t i = 0; i < drm_properties_list.drmFormatModifierCount; i++) {
if ((drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifier & drm_format_modifier) != 0) {
tiling_features |= drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifierTilingFeatures;
}
}
} else {
VkFormatProperties format_properties = GetPDFormatProperties(image_format);
tiling_features = (image_tiling == VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
: format_properties.optimalTilingFeatures;
}
// Lack of disjoint format feature support while using the flag
if (FormatIsMultiplane(image_format) && ((pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT) != 0) &&
((tiling_features & VK_FORMAT_FEATURE_DISJOINT_BIT) == 0)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-imageCreateFormatFeatures-02260",
"vkCreateImage(): can't use VK_IMAGE_CREATE_DISJOINT_BIT because %s doesn't support "
"VK_FORMAT_FEATURE_DISJOINT_BIT based on imageCreateFormatFeatures.",
string_VkFormat(pCreateInfo->format));
}
return skip;
}
bool CoreChecks::PreCallValidateCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage) const {
bool skip = false;
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageANDROID(report_data, pCreateInfo);
} else { // These checks are omitted or replaced when Android HW Buffer extension is active
if (pCreateInfo->format == VK_FORMAT_UNDEFINED) {
return LogError(device, "VUID-VkImageCreateInfo-format-00943",
"vkCreateImage(): VkFormat for image must not be VK_FORMAT_UNDEFINED.");
}
}
if (pCreateInfo->flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT) {
if (VK_IMAGE_TYPE_2D != pCreateInfo->imageType) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-00949",
"vkCreateImage(): Image type must be VK_IMAGE_TYPE_2D when VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT "
"flag bit is set");
}
if ((pCreateInfo->extent.width != pCreateInfo->extent.height) || (pCreateInfo->arrayLayers < 6)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-imageType-00954",
"vkCreateImage(): If VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT flag bit is set, width (%d) must equal "
"height (%d) and arrayLayers (%d) must be >= 6.",
pCreateInfo->extent.width, pCreateInfo->extent.height, pCreateInfo->arrayLayers);
}
}
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
VkImageUsageFlags attach_flags = VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT | VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT;
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.width > device_limits->maxFramebufferWidth)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00964",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image width (%u) exceeds "
"device maxFramebufferWidth (%u).",
pCreateInfo->extent.width, device_limits->maxFramebufferWidth);
}
if ((pCreateInfo->usage & attach_flags) && (pCreateInfo->extent.height > device_limits->maxFramebufferHeight)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-usage-00965",
"vkCreateImage(): Image usage flags include a frame buffer attachment bit and image height (%u) exceeds "
"device maxFramebufferHeight (%u).",
pCreateInfo->extent.height, device_limits->maxFramebufferHeight);
}
VkImageCreateFlags sparseFlags =
VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT;
if ((pCreateInfo->flags & sparseFlags) && (pCreateInfo->usage & VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT)) {
skip |= LogError(
device, "VUID-VkImageCreateInfo-None-01925",
"vkCreateImage(): images using sparse memory cannot have VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT set");
}
if (device_extensions.vk_ext_fragment_density_map || device_extensions.vk_ext_fragment_density_map2) {
uint32_t ceiling_width = static_cast<uint32_t>(ceil(
static_cast<float>(device_limits->maxFramebufferWidth) /
std::max(static_cast<float>(phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width), 1.0f)));
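        // e.g. maxFramebufferWidth = 4096 with minFragmentDensityTexelSize.width = 32 gives a ceiling of
        // ceil(4096 / 32) = 128 texels for the density map width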
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.width > ceiling_width)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-usage-02559",
"vkCreateImage(): Image usage flags include a fragment density map bit and image width (%u) exceeds the "
"ceiling of device "
"maxFramebufferWidth (%u) / minFragmentDensityTexelSize.width (%u). The ceiling value: %u",
pCreateInfo->extent.width, device_limits->maxFramebufferWidth,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.width, ceiling_width);
}
uint32_t ceiling_height = static_cast<uint32_t>(ceil(
static_cast<float>(device_limits->maxFramebufferHeight) /
std::max(static_cast<float>(phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height), 1.0f)));
if ((pCreateInfo->usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) && (pCreateInfo->extent.height > ceiling_height)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-usage-02560",
"vkCreateImage(): Image usage flags include a fragment density map bit and image height (%u) exceeds the "
"ceiling of device "
"maxFramebufferHeight (%u) / minFragmentDensityTexelSize.height (%u). The ceiling value: %u",
pCreateInfo->extent.height, device_limits->maxFramebufferHeight,
phys_dev_ext_props.fragment_density_map_props.minFragmentDensityTexelSize.height, ceiling_height);
}
}
VkImageFormatProperties format_limits = {};
VkResult result = VK_SUCCESS;
if (pCreateInfo->tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
result = DispatchGetPhysicalDeviceImageFormatProperties(physical_device, pCreateInfo->format, pCreateInfo->imageType,
pCreateInfo->tiling, pCreateInfo->usage, pCreateInfo->flags,
&format_limits);
} else {
auto modifier_list = LvlFindInChain<VkImageDrmFormatModifierListCreateInfoEXT>(pCreateInfo->pNext);
auto explicit_modifier = LvlFindInChain<VkImageDrmFormatModifierExplicitCreateInfoEXT>(pCreateInfo->pNext);
if (modifier_list) {
for (uint32_t i = 0; i < modifier_list->drmFormatModifierCount; i++) {
auto drm_format_modifier = LvlInitStruct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>();
drm_format_modifier.drmFormatModifier = modifier_list->pDrmFormatModifiers[i];
auto image_format_info = LvlInitStruct<VkPhysicalDeviceImageFormatInfo2>(&drm_format_modifier);
image_format_info.type = pCreateInfo->imageType;
image_format_info.format = pCreateInfo->format;
image_format_info.tiling = pCreateInfo->tiling;
image_format_info.usage = pCreateInfo->usage;
image_format_info.flags = pCreateInfo->flags;
auto image_format_properties = LvlInitStruct<VkImageFormatProperties2>();
result =
DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties);
format_limits = image_format_properties.imageFormatProperties;
                /* The application gives a list of modifiers and the driver
                 * selects one. If any candidate is unsupported, stop there.
                 */
if (result != VK_SUCCESS) break;
}
} else if (explicit_modifier) {
auto drm_format_modifier = LvlInitStruct<VkPhysicalDeviceImageDrmFormatModifierInfoEXT>();
drm_format_modifier.drmFormatModifier = explicit_modifier->drmFormatModifier;
auto image_format_info = LvlInitStruct<VkPhysicalDeviceImageFormatInfo2>(&drm_format_modifier);
image_format_info.type = pCreateInfo->imageType;
image_format_info.format = pCreateInfo->format;
image_format_info.tiling = pCreateInfo->tiling;
image_format_info.usage = pCreateInfo->usage;
image_format_info.flags = pCreateInfo->flags;
auto image_format_properties = LvlInitStruct<VkImageFormatProperties2>();
result = DispatchGetPhysicalDeviceImageFormatProperties2(physical_device, &image_format_info, &image_format_properties);
format_limits = image_format_properties.imageFormatProperties;
}
}
// 1. vkGetPhysicalDeviceImageFormatProperties[2] only success code is VK_SUCCESS
// 2. If call returns an error, then "imageCreateImageFormatPropertiesList" is defined to be the empty list
// 3. All values in 02251 are undefined if "imageCreateImageFormatPropertiesList" is empty.
if (result != VK_SUCCESS) {
        // External memory will always have an "imageCreateImageFormatPropertiesList" so skip
#ifdef VK_USE_PLATFORM_ANDROID_KHR
if (!LvlFindInChain<VkExternalFormatANDROID>(pCreateInfo->pNext)) {
#endif // VK_USE_PLATFORM_ANDROID_KHR
skip |= LogError(device, "VUID-VkImageCreateInfo-imageCreateMaxMipLevels-02251",
"vkCreateImage(): Format %s is not supported for this combination of parameters and "
"VkGetPhysicalDeviceImageFormatProperties returned back %s.",
string_VkFormat(pCreateInfo->format), string_VkResult(result));
#ifdef VK_USE_PLATFORM_ANDROID_KHR
}
#endif // VK_USE_PLATFORM_ANDROID_KHR
} else {
if (pCreateInfo->mipLevels > format_limits.maxMipLevels) {
const char *format_string = string_VkFormat(pCreateInfo->format);
skip |= LogError(device, "VUID-VkImageCreateInfo-mipLevels-02255",
"vkCreateImage(): Image mip levels=%d exceed image format maxMipLevels=%d for format %s.",
pCreateInfo->mipLevels, format_limits.maxMipLevels, format_string);
}
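        // Estimate worst-case resource size as texel count * per-texel size; driver-specific tiling and padding
        // are not modeled, which is why exceeding maxResourceSize is reported as a warning rather than an error.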
uint64_t texel_count = static_cast<uint64_t>(pCreateInfo->extent.width) *
static_cast<uint64_t>(pCreateInfo->extent.height) *
static_cast<uint64_t>(pCreateInfo->extent.depth) * static_cast<uint64_t>(pCreateInfo->arrayLayers) *
static_cast<uint64_t>(pCreateInfo->samples);
uint64_t total_size =
static_cast<uint64_t>(std::ceil(FormatTexelSize(pCreateInfo->format) * static_cast<double>(texel_count)));
// Round up to imageGranularity boundary
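        // e.g. with bufferImageGranularity = 0x400, total_size = 0x1234 becomes (0x1234 + 0x3FF) & ~0x3FF = 0x1400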
VkDeviceSize image_granularity = phys_dev_props.limits.bufferImageGranularity;
uint64_t ig_mask = image_granularity - 1;
total_size = (total_size + ig_mask) & ~ig_mask;
if (total_size > format_limits.maxResourceSize) {
            skip |= LogWarning(device, kVUID_Core_Image_InvalidFormatLimitsViolation,
                               "vkCreateImage(): estimated resource size 0x%" PRIxLEAST64
                               " exceeds the format's maximum resource size 0x%" PRIxLEAST64 ".",
                               total_size, format_limits.maxResourceSize);
}
if (pCreateInfo->arrayLayers > format_limits.maxArrayLayers) {
skip |= LogError(device, "VUID-VkImageCreateInfo-arrayLayers-02256",
"vkCreateImage(): arrayLayers=%d exceeds allowable maximum supported by format of %d.",
pCreateInfo->arrayLayers, format_limits.maxArrayLayers);
}
if ((pCreateInfo->samples & format_limits.sampleCounts) == 0) {
skip |= LogError(device, "VUID-VkImageCreateInfo-samples-02258",
"vkCreateImage(): samples %s is not supported by format 0x%.8X.",
string_VkSampleCountFlagBits(pCreateInfo->samples), format_limits.sampleCounts);
}
if (pCreateInfo->extent.width > format_limits.maxExtent.width) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02252",
"vkCreateImage(): extent.width %u exceeds allowable maximum image extent width %u.",
pCreateInfo->extent.width, format_limits.maxExtent.width);
}
if (pCreateInfo->extent.height > format_limits.maxExtent.height) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02253",
"vkCreateImage(): extent.height %u exceeds allowable maximum image extent height %u.",
pCreateInfo->extent.height, format_limits.maxExtent.height);
}
if (pCreateInfo->extent.depth > format_limits.maxExtent.depth) {
skip |= LogError(device, "VUID-VkImageCreateInfo-extent-02254",
"vkCreateImage(): extent.depth %u exceeds allowable maximum image extent depth %u.",
pCreateInfo->extent.depth, format_limits.maxExtent.depth);
}
}
// Tests for "Formats requiring sampler YCBCR conversion for VK_IMAGE_ASPECT_COLOR_BIT image views"
if (FormatRequiresYcbcrConversion(pCreateInfo->format)) {
if (!enabled_features.ycbcr_image_array_features.ycbcrImageArrays && pCreateInfo->arrayLayers != 1) {
const char *error_vuid = (device_extensions.vk_ext_ycbcr_image_arrays) ? "VUID-VkImageCreateInfo-format-02653"
: "VUID-VkImageCreateInfo-format-02564";
skip |= LogError(device, error_vuid,
"vkCreateImage(): arrayLayers = %d, but when the ycbcrImagesArrays feature is not enabled and using a "
"YCbCr Conversion format, arrayLayers must be 1",
pCreateInfo->arrayLayers);
}
if (pCreateInfo->mipLevels != 1) {
skip |= LogError(device, "VUID-VkImageCreateInfo-format-02561",
"vkCreateImage(): mipLevels = %d, but when using a YCbCr Conversion format, mipLevels must be 1",
                             pCreateInfo->mipLevels);
}
if (pCreateInfo->samples != VK_SAMPLE_COUNT_1_BIT) {
skip |= LogError(
device, "VUID-VkImageCreateInfo-format-02562",
"vkCreateImage(): samples = %s, but when using a YCbCr Conversion format, samples must be VK_SAMPLE_COUNT_1_BIT",
string_VkSampleCountFlagBits(pCreateInfo->samples));
}
if (pCreateInfo->imageType != VK_IMAGE_TYPE_2D) {
skip |= LogError(
device, "VUID-VkImageCreateInfo-format-02563",
"vkCreateImage(): imageType = %s, but when using a YCbCr Conversion format, imageType must be VK_IMAGE_TYPE_2D ",
string_VkImageType(pCreateInfo->imageType));
}
}
if (device_extensions.vk_khr_maintenance2) {
if (pCreateInfo->flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT) {
if (!(FormatIsCompressed_BC(pCreateInfo->format) || FormatIsCompressed_ASTC(pCreateInfo->format) ||
FormatIsCompressed_ETC2_EAC(pCreateInfo->format))) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01572",
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, "
"format must be block, ETC or ASTC compressed, but is %s",
string_VkFormat(pCreateInfo->format));
}
if (!(pCreateInfo->flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT)) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01573",
"vkCreateImage(): If pCreateInfo->flags contains VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT, "
"flags must also contain VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT.");
}
}
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
const char *vuid = device_extensions.vk_khr_get_physical_device_properties2 ? "VUID-VkImageCreateInfo-sharingMode-01420"
: "VUID-VkImageCreateInfo-sharingMode-01392";
skip |= ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateImage", "pCreateInfo->pQueueFamilyIndices", vuid);
}
if (!FormatIsMultiplane(pCreateInfo->format) && !(pCreateInfo->flags & VK_IMAGE_CREATE_ALIAS_BIT) &&
(pCreateInfo->flags & VK_IMAGE_CREATE_DISJOINT_BIT)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-format-01577",
"vkCreateImage(): format is %s and flags are %s. The flags should not include VK_IMAGE_CREATE_DISJOINT_BIT.",
string_VkFormat(pCreateInfo->format), string_VkImageCreateFlags(pCreateInfo->flags).c_str());
}
const auto swapchain_create_info = LvlFindInChain<VkImageSwapchainCreateInfoKHR>(pCreateInfo->pNext);
if (swapchain_create_info != nullptr) {
if (swapchain_create_info->swapchain != VK_NULL_HANDLE) {
const SWAPCHAIN_NODE *swapchain_state = GetSwapchainState(swapchain_create_info->swapchain);
const VkSwapchainCreateFlagsKHR swapchain_flags = swapchain_state->createInfo.flags;
// Validate rest of Swapchain Image create check that require swapchain state
const char *vuid = "VUID-VkImageSwapchainCreateInfoKHR-swapchain-00995";
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR) != 0) &&
((pCreateInfo->flags & VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT) == 0)) {
skip |= LogError(
device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT_KHR flag so "
"all swapchain images must have the VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT flag set.");
}
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR) != 0) &&
((pCreateInfo->flags & VK_IMAGE_CREATE_PROTECTED_BIT) == 0)) {
skip |= LogError(device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_PROTECTED_BIT_KHR flag so all "
"swapchain images must have the VK_IMAGE_CREATE_PROTECTED_BIT flag set.");
}
const VkImageCreateFlags mutable_flags = (VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT | VK_IMAGE_CREATE_EXTENDED_USAGE_BIT);
if (((swapchain_flags & VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR) != 0) &&
((pCreateInfo->flags & mutable_flags) != mutable_flags)) {
skip |= LogError(device, vuid,
"vkCreateImage(): Swapchain was created with VK_SWAPCHAIN_CREATE_MUTABLE_FORMAT_BIT_KHR flag so "
"all swapchain images must have the VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT and "
"VK_IMAGE_CREATE_EXTENDED_USAGE_BIT flags both set.");
}
}
}
if ((pCreateInfo->flags & VK_IMAGE_CREATE_PROTECTED_BIT) != 0) {
if (enabled_features.core11.protectedMemory == VK_FALSE) {
skip |= LogError(device, "VUID-VkImageCreateInfo-flags-01890",
"vkCreateImage(): the protectedMemory device feature is disabled: Images cannot be created with the "
"VK_IMAGE_CREATE_PROTECTED_BIT set.");
}
const VkImageCreateFlags invalid_flags =
VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT;
if ((pCreateInfo->flags & invalid_flags) != 0) {
skip |= LogError(device, "VUID-VkImageCreateInfo-None-01891",
"vkCreateImage(): VK_IMAGE_CREATE_PROTECTED_BIT is set so no sparse create flags can be used at same "
"time (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT).");
}
}
skip |= ValidateImageFormatFeatures(pCreateInfo);
// Check compatibility with VK_KHR_portability_subset
if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
if (VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT & pCreateInfo->flags &&
VK_FALSE == enabled_features.portability_subset_features.imageView2DOn3DImage) {
skip |= LogError(device, "VUID-VkImageCreateInfo-imageView2DOn3DImage-04459",
"vkCreateImage (portability error): VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT is not supported.");
}
if ((VK_SAMPLE_COUNT_1_BIT != pCreateInfo->samples) && (1 != pCreateInfo->arrayLayers) &&
(VK_FALSE == enabled_features.portability_subset_features.multisampleArrayImage)) {
skip |=
LogError(device, "VUID-VkImageCreateInfo-multisampleArrayImage-04460",
"vkCreateImage (portability error): Cannot create an image with samples/texel > 1 && arrayLayers != 1");
}
}
const auto external_memory_create_info_nv = LvlFindInChain<VkExternalMemoryImageCreateInfoNV>(pCreateInfo->pNext);
if (external_memory_create_info_nv != nullptr) {
const auto external_memory_create_info = LvlFindInChain<VkExternalMemoryImageCreateInfo>(pCreateInfo->pNext);
if (external_memory_create_info != nullptr) {
skip |= LogError(device, "VUID-VkImageCreateInfo-pNext-00988",
"vkCreateImage(): VkImageCreateInfo struct has both VkExternalMemoryImageCreateInfoNV and "
"VkExternalMemoryImageCreateInfo chained structs.");
}
}
if (device_group_create_info.physicalDeviceCount == 1) {
if (pCreateInfo->flags & VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT) {
skip |= LogError(device, "VUID-VkImageCreateInfo-physicalDeviceCount-01421",
"vkCreateImage: Device was created with VkDeviceGroupDeviceCreateInfo::physicalDeviceCount 1, but "
"flags contain VK_IMAGE_CREATE_SPLIT_INSTANCE_BIND_REGIONS_BIT bit.");
}
}
return skip;
}
void CoreChecks::PostCallRecordCreateImage(VkDevice device, const VkImageCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImage *pImage, VkResult result) {
if (VK_SUCCESS != result) return;
StateTracker::PostCallRecordCreateImage(device, pCreateInfo, pAllocator, pImage, result);
auto image_state = Get<IMAGE_STATE>(*pImage);
AddInitialLayoutintoImageLayoutMap(*image_state, imageLayoutMap);
}
bool CoreChecks::PreCallValidateDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) const {
const IMAGE_STATE *image_state = GetImageState(image);
bool skip = false;
if (image_state) {
if (image_state->IsSwapchainImage()) {
skip |= LogError(device, "VUID-vkDestroyImage-image-04882",
"vkDestroyImage(): %s is a presentable image and it is controlled by the implementation and is "
"destroyed with vkDestroySwapchainKHR.",
report_data->FormatHandle(image_state->image()).c_str());
}
skip |= ValidateObjectNotInUse(image_state, "vkDestroyImage", "VUID-vkDestroyImage-image-01000");
}
return skip;
}
void CoreChecks::PreCallRecordDestroyImage(VkDevice device, VkImage image, const VkAllocationCallbacks *pAllocator) {
// Clean up validation specific data
qfo_release_image_barrier_map.erase(image);
imageLayoutMap.erase(image);
// Clean up generic image state
StateTracker::PreCallRecordDestroyImage(device, image, pAllocator);
}
bool CoreChecks::ValidateImageAttributes(const IMAGE_STATE *image_state, const VkImageSubresourceRange &range,
const char *param_name) const {
bool skip = false;
const VkImage image = image_state->image();
const VkFormat format = image_state->createInfo.format;
if (range.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-aspectMask-02498",
"vkCmdClearColorImage(): %s.aspectMasks must only be set to VK_IMAGE_ASPECT_COLOR_BIT.", param_name);
}
if (FormatIsDepthOrStencil(format)) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00007",
"vkCmdClearColorImage(): %s called with image %s which has a depth/stencil format (%s).", param_name,
report_data->FormatHandle(image).c_str(), string_VkFormat(format));
} else if (FormatIsCompressed(format)) {
skip |= LogError(image, "VUID-vkCmdClearColorImage-image-00007",
"vkCmdClearColorImage(): %s called with image %s which has a compressed format (%s).", param_name,
report_data->FormatHandle(image).c_str(), string_VkFormat(format));
}
if (!(image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT)) {
skip |=
LogError(image, "VUID-vkCmdClearColorImage-image-00002",
"vkCmdClearColorImage() %s called with image %s which was created without VK_IMAGE_USAGE_TRANSFER_DST_BIT.",
param_name, report_data->FormatHandle(image).c_str());
}
return skip;
}
bool CoreChecks::VerifyClearImageLayout(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &range, VkImageLayout dest_image_layout,
const char *func_name) const {
bool skip = false;
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(image_state->image(), "VUID-vkCmdClearDepthStencilImage-imageLayout-00012",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
if (!device_extensions.vk_khr_shared_presentable_image) {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL)) {
skip |= LogError(image_state->image(), "VUID-vkCmdClearColorImage-imageLayout-00005",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL or GENERAL.", func_name,
string_VkImageLayout(dest_image_layout));
}
} else {
if ((dest_image_layout != VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL) && (dest_image_layout != VK_IMAGE_LAYOUT_GENERAL) &&
(dest_image_layout != VK_IMAGE_LAYOUT_SHARED_PRESENT_KHR)) {
skip |= LogError(
image_state->image(), "VUID-vkCmdClearColorImage-imageLayout-01394",
"%s: Layout for cleared image is %s but can only be TRANSFER_DST_OPTIMAL, SHARED_PRESENT_KHR, or GENERAL.",
func_name, string_VkImageLayout(dest_image_layout));
}
}
}
// Cast to const to prevent creation at validate time.
const auto *subresource_map = cb_node->GetImageSubresourceLayoutMap(image_state->image());
if (subresource_map) {
bool subres_skip = false;
LayoutUseCheckAndMessage layout_check(subresource_map);
auto normalized_isr = image_state->NormalizeSubresourceRange(range);
// IncrementInterval skips over all the subresources that have the same state as we just checked, incrementing to
// the next "constant value" range
for (auto pos = subresource_map->Find(normalized_isr); !(pos.AtEnd()) && !subres_skip; pos.IncrementInterval()) {
if (!layout_check.Check(pos->subresource, dest_image_layout, pos->current_layout, pos->initial_layout)) {
const char *error_code = "VUID-vkCmdClearColorImage-imageLayout-00004";
if (strcmp(func_name, "vkCmdClearDepthStencilImage()") == 0) {
error_code = "VUID-vkCmdClearDepthStencilImage-imageLayout-00011";
} else {
assert(strcmp(func_name, "vkCmdClearColorImage()") == 0);
}
subres_skip |= LogError(cb_node->commandBuffer(), error_code,
"%s: Cannot clear an image whose layout is %s and doesn't match the %s layout %s.",
func_name, string_VkImageLayout(dest_image_layout), layout_check.message,
string_VkImageLayout(layout_check.layout));
}
}
skip |= subres_skip;
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-image-00003");
skip |= ValidateCmd(cb_node, CMD_CLEARCOLORIMAGE, "vkCmdClearColorImage()");
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearColorImage",
"VUID-vkCmdClearColorImage-image-01993");
}
skip |=
ValidateProtectedImage(cb_node, image_state, "vkCmdClearColorImage()", "VUID-vkCmdClearColorImage-commandBuffer-01805");
skip |= ValidateUnprotectedImage(cb_node, image_state, "vkCmdClearColorImage()",
"VUID-vkCmdClearColorImage-commandBuffer-01806");
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearColorSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= ValidateImageAttributes(image_state, pRanges[i], param_name.c_str());
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearColorImage()");
}
// Tests for "Formats requiring sampler Y’CBCR conversion for VK_IMAGE_ASPECT_COLOR_BIT image views"
if (FormatRequiresYcbcrConversion(image_state->createInfo.format)) {
skip |= LogError(device, "VUID-vkCmdClearColorImage-image-01545",
"vkCmdClearColorImage(): format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
string_VkFormat(image_state->createInfo.format));
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearColorImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearColorValue *pColor, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearColorImage(commandBuffer, image, imageLayout, pColor, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
bool CoreChecks::ValidateClearDepthStencilValue(VkCommandBuffer commandBuffer, VkClearDepthStencilValue clearValue,
const char *apiName) const {
bool skip = false;
    // The extension was not created with a feature bit, which prevents distinguishing the 2 variations of the VUIDs
if (!device_extensions.vk_ext_depth_range_unrestricted) {
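        // The negated comparisons below also catch NaN depth values, which would pass a plain range check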
if (!(clearValue.depth >= 0.0) || !(clearValue.depth <= 1.0)) {
// Also VUID-VkClearDepthStencilValue-depth-00022
skip |= LogError(commandBuffer, "VUID-VkClearDepthStencilValue-depth-02506",
"%s: VK_EXT_depth_range_unrestricted extension is not enabled and VkClearDepthStencilValue::depth "
"(=%f) is not within the [0.0, 1.0] range.",
apiName, clearValue.depth);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) const {
bool skip = false;
// TODO : Verify memory is in VK_IMAGE_STATE_CLEAR state
const auto *cb_node = GetCBState(commandBuffer);
const auto *image_state = GetImageState(image);
if (cb_node && image_state) {
const VkFormat image_format = image_state->createInfo.format;
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-image-00010");
skip |= ValidateCmd(cb_node, CMD_CLEARDEPTHSTENCILIMAGE, "vkCmdClearDepthStencilImage()");
if (device_extensions.vk_khr_maintenance1) {
skip |= ValidateImageFormatFeatureFlags(image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, "vkCmdClearDepthStencilImage",
"VUID-vkCmdClearDepthStencilImage-image-01994");
}
skip |= ValidateClearDepthStencilValue(commandBuffer, *pDepthStencil, "vkCmdClearDepthStencilImage()");
skip |= ValidateProtectedImage(cb_node, image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-commandBuffer-01807");
skip |= ValidateUnprotectedImage(cb_node, image_state, "vkCmdClearDepthStencilImage()",
"VUID-vkCmdClearDepthStencilImage-commandBuffer-01808");
bool any_include_aspect_depth_bit = false;
bool any_include_aspect_stencil_bit = false;
for (uint32_t i = 0; i < rangeCount; ++i) {
std::string param_name = "pRanges[" + std::to_string(i) + "]";
skip |= ValidateCmdClearDepthSubresourceRange(image_state, pRanges[i], param_name.c_str());
skip |= VerifyClearImageLayout(cb_node, image_state, pRanges[i], imageLayout, "vkCmdClearDepthStencilImage()");
// Image aspect must be depth or stencil or both
VkImageAspectFlags valid_aspects = VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT;
if (((pRanges[i].aspectMask & valid_aspects) == 0) || ((pRanges[i].aspectMask & ~valid_aspects) != 0)) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-aspectMask-02824",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask can only be VK_IMAGE_ASPECT_DEPTH_BIT "
"and/or VK_IMAGE_ASPECT_STENCIL_BIT.",
i);
}
if ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) != 0) {
any_include_aspect_depth_bit = true;
if (FormatHasDepth(image_format) == false) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-image-02826",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask has a VK_IMAGE_ASPECT_DEPTH_BIT but %s "
"doesn't have a depth component.",
i, string_VkFormat(image_format));
}
}
if ((pRanges[i].aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) != 0) {
any_include_aspect_stencil_bit = true;
if (FormatHasStencil(image_format) == false) {
skip |= LogError(commandBuffer, "VUID-vkCmdClearDepthStencilImage-image-02825",
"vkCmdClearDepthStencilImage(): pRanges[%u].aspectMask has a VK_IMAGE_ASPECT_STENCIL_BIT but "
"%s doesn't have a stencil component.",
i, string_VkFormat(image_format));
}
}
}
if (any_include_aspect_stencil_bit) {
const auto image_stencil_struct = LvlFindInChain<VkImageStencilUsageCreateInfo>(image_state->createInfo.pNext);
if (image_stencil_struct != nullptr) {
if ((image_stencil_struct->stencilUsage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |=
LogError(device, "VUID-vkCmdClearDepthStencilImage-pRanges-02658",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_STENCIL_BIT "
"and image was created with separate stencil usage, VK_IMAGE_USAGE_TRANSFER_DST_BIT must be "
"included in VkImageStencilUsageCreateInfo::stencilUsage used to create image");
}
} else {
if ((image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |= LogError(
device, "VUID-vkCmdClearDepthStencilImage-pRanges-02659",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_STENCIL_BIT and "
"image was not created with separate stencil usage, VK_IMAGE_USAGE_TRANSFER_DST_BIT must be included "
"in VkImageCreateInfo::usage used to create image");
}
}
}
if (any_include_aspect_depth_bit && (image_state->createInfo.usage & VK_IMAGE_USAGE_TRANSFER_DST_BIT) == 0) {
skip |= LogError(device, "VUID-vkCmdClearDepthStencilImage-pRanges-02660",
"vkCmdClearDepthStencilImage(): an element of pRanges.aspect includes VK_IMAGE_ASPECT_DEPTH_BIT, "
"VK_IMAGE_USAGE_TRANSFER_DST_BIT must be included in VkImageCreateInfo::usage used to create image");
}
if (image_state && !FormatIsDepthOrStencil(image_format)) {
skip |= LogError(image, "VUID-vkCmdClearDepthStencilImage-image-00014",
"vkCmdClearDepthStencilImage(): called with image %s which doesn't have a depth/stencil format (%s).",
report_data->FormatHandle(image).c_str(), string_VkFormat(image_format));
}
if (VK_IMAGE_USAGE_TRANSFER_DST_BIT != (VK_IMAGE_USAGE_TRANSFER_DST_BIT & image_state->createInfo.usage)) {
skip |= LogError(image, "VUID-vkCmdClearDepthStencilImage-image-00009",
"vkCmdClearDepthStencilImage(): called with image %s which was not created with the "
"VK_IMAGE_USAGE_TRANSFER_DST_BIT set.",
report_data->FormatHandle(image).c_str());
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearDepthStencilImage(VkCommandBuffer commandBuffer, VkImage image, VkImageLayout imageLayout,
const VkClearDepthStencilValue *pDepthStencil, uint32_t rangeCount,
const VkImageSubresourceRange *pRanges) {
StateTracker::PreCallRecordCmdClearDepthStencilImage(commandBuffer, image, imageLayout, pDepthStencil, rangeCount, pRanges);
auto cb_node = GetCBState(commandBuffer);
auto image_state = GetImageState(image);
if (cb_node && image_state) {
for (uint32_t i = 0; i < rangeCount; ++i) {
SetImageInitialLayout(cb_node, image, pRanges[i], imageLayout);
}
}
}
// Returns true if the half-open ranges [start, start + start_offset) and [end, end + end_offset) overlap.
// Note the signed starts are cast to unsigned, so callers are expected to pass non-negative values.
static bool RangesIntersect(int32_t start, uint32_t start_offset, int32_t end, uint32_t end_offset) {
bool result = false;
uint32_t intersection_min = std::max(static_cast<uint32_t>(start), static_cast<uint32_t>(end));
uint32_t intersection_max = std::min(static_cast<uint32_t>(start) + start_offset, static_cast<uint32_t>(end) + end_offset);
if (intersection_max > intersection_min) {
result = true;
}
return result;
}
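// Worked example (illustrative): RangesIntersect(2, 3, 4, 2) compares [2, 5) against [4, 6);
// intersection_min = max(2, 4) = 4 and intersection_max = min(5, 6) = 5, and since 5 > 4 the ranges
// overlap. Adjacent ranges such as [0, 2) and [2, 4) produce intersection_max == intersection_min
// and therefore do not count as overlapping.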
// Returns true if the source area of the first VkImageCopy/VkImageCopy2KHR region intersects the dest area of the second region
// It is assumed that these are copy regions within a single image (otherwise no possibility of collision)
template <typename RegionType>
static bool RegionIntersects(const RegionType *rgn0, const RegionType *rgn1, VkImageType type, bool is_multiplane) {
bool result = false;
// Separate planes within a multiplane image cannot intersect
if (is_multiplane && (rgn0->srcSubresource.aspectMask != rgn1->dstSubresource.aspectMask)) {
return result;
}
if ((rgn0->srcSubresource.mipLevel == rgn1->dstSubresource.mipLevel) &&
(RangesIntersect(rgn0->srcSubresource.baseArrayLayer, rgn0->srcSubresource.layerCount, rgn1->dstSubresource.baseArrayLayer,
rgn1->dstSubresource.layerCount))) {
result = true;
switch (type) {
case VK_IMAGE_TYPE_3D:
result &= RangesIntersect(rgn0->srcOffset.z, rgn0->extent.depth, rgn1->dstOffset.z, rgn1->extent.depth);
// fall through
case VK_IMAGE_TYPE_2D:
result &= RangesIntersect(rgn0->srcOffset.y, rgn0->extent.height, rgn1->dstOffset.y, rgn1->extent.height);
// fall through
case VK_IMAGE_TYPE_1D:
result &= RangesIntersect(rgn0->srcOffset.x, rgn0->extent.width, rgn1->dstOffset.x, rgn1->extent.width);
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
}
return result;
}
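// Illustrative use of RegionIntersects (values assumed for the example): within a single-plane 2D
// image, a region reading from srcOffset (0, 0) with a 32x32 extent intersects a region writing to
// dstOffset (16, 16) with the same extent on the same mip level and layer range, because the x
// ranges [0, 32) vs [16, 48) and the corresponding y ranges both overlap; moving the dstOffset to
// (32, 32) eliminates the overlap and the helper returns false.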
// Returns non-zero if offset and extent exceed image extents
static const uint32_t kXBit = 1;
static const uint32_t kYBit = 2;
static const uint32_t kZBit = 4;
static uint32_t ExceedsBounds(const VkOffset3D *offset, const VkExtent3D *extent, const VkExtent3D *image_extent) {
uint32_t result = 0;
// Extents/depths cannot be negative but checks left in for clarity
if ((offset->z + extent->depth > image_extent->depth) || (offset->z < 0) ||
((offset->z + static_cast<int32_t>(extent->depth)) < 0)) {
result |= kZBit;
}
if ((offset->y + extent->height > image_extent->height) || (offset->y < 0) ||
((offset->y + static_cast<int32_t>(extent->height)) < 0)) {
result |= kYBit;
}
if ((offset->x + extent->width > image_extent->width) || (offset->x < 0) ||
((offset->x + static_cast<int32_t>(extent->width)) < 0)) {
result |= kXBit;
}
return result;
}
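// Worked example (illustrative): for an image extent of (8, 8, 1), ExceedsBounds with offset
// (6, 0, 0) and copy extent (4, 1, 1) returns kXBit because 6 + 4 > 8, while offset (4, 0, 0) with
// the same extent returns 0 since 4 + 4 == 8 stays within bounds.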
// Test if two VkExtent3D structs are equivalent
static inline bool IsExtentEqual(const VkExtent3D *extent, const VkExtent3D *other_extent) {
bool result = true;
if ((extent->width != other_extent->width) || (extent->height != other_extent->height) ||
(extent->depth != other_extent->depth)) {
result = false;
}
return result;
}
// Test if the extent argument has all dimensions set to 0.
static inline bool IsExtentAllZeroes(const VkExtent3D *extent) {
return ((extent->width == 0) && (extent->height == 0) && (extent->depth == 0));
}
// Returns the image transfer granularity for a specific image scaled by compressed block size if necessary.
VkExtent3D CoreChecks::GetScaledItg(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img) const {
// Default to (0, 0, 0) granularity in case we can't find the real granularity for the physical device.
VkExtent3D granularity = {0, 0, 0};
auto pool = cb_node->command_pool.get();
if (pool) {
granularity = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].minImageTransferGranularity;
if (FormatIsCompressed(img->createInfo.format) || FormatIsSinglePlane_422(img->createInfo.format)) {
auto block_size = FormatTexelBlockExtent(img->createInfo.format);
granularity.width *= block_size.width;
granularity.height *= block_size.height;
}
}
return granularity;
}
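// Example of the scaling above, assuming a queue family granularity of (1, 1, 1): a BC1-compressed
// image has a 4x4x1 texel block, so the returned granularity becomes (4, 4, 1) and the subsequent
// checks effectively validate offsets and extents in units of whole compressed blocks.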
// Test elements of a VkExtent3D structure against alignment constraints contained in another VkExtent3D structure
static inline bool IsExtentAligned(const VkExtent3D *extent, const VkExtent3D *granularity) {
bool valid = true;
if ((SafeModulo(extent->depth, granularity->depth) != 0) || (SafeModulo(extent->width, granularity->width) != 0) ||
(SafeModulo(extent->height, granularity->height) != 0)) {
valid = false;
}
return valid;
}
// Check elements of a VkOffset3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgOffset(const CMD_BUFFER_STATE *cb_node, const VkOffset3D *offset, const VkExtent3D *granularity,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
VkExtent3D offset_extent = {};
offset_extent.width = static_cast<uint32_t>(abs(offset->x));
offset_extent.height = static_cast<uint32_t>(abs(offset->y));
offset_extent.depth = static_cast<uint32_t>(abs(offset->z));
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the offset must always be (0, 0, 0)
if (IsExtentAllZeroes(&offset_extent) == false) {
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) must be (x=0, y=0, z=0) when the command buffer's queue family "
"image transfer granularity is (w=0, h=0, d=0).",
function, i, member, offset->x, offset->y, offset->z);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the offset dimensions must always be even
// integer multiples of the image transfer granularity.
if (IsExtentAligned(&offset_extent, granularity) == false) {
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: pRegion[%d].%s (x=%d, y=%d, z=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d).",
function, i, member, offset->x, offset->y, offset->z, granularity->width, granularity->height,
granularity->depth);
}
}
return skip;
}
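// Worked example (illustrative): with a scaled granularity of (4, 4, 1), an offset of (8, 4, 0)
// passes the check above because every dimension is a multiple of the granularity, whereas
// (2, 4, 0) fails since SafeModulo(2, 4) != 0.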
// Check elements of a VkExtent3D structure against a queue family's Image Transfer Granularity values
bool CoreChecks::CheckItgExtent(const CMD_BUFFER_STATE *cb_node, const VkExtent3D *extent, const VkOffset3D *offset,
const VkExtent3D *granularity, const VkExtent3D *subresource_extent, const VkImageType image_type,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (IsExtentAllZeroes(granularity)) {
// If the queue family image transfer granularity is (0, 0, 0), then the extent must always match the image
// subresource extent.
if (IsExtentEqual(extent, subresource_extent) == false) {
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d) "
"when the command buffer's queue family image transfer granularity is (w=0, h=0, d=0).",
function, i, member, extent->width, extent->height, extent->depth, subresource_extent->width,
subresource_extent->height, subresource_extent->depth);
}
} else {
// If the queue family image transfer granularity is not (0, 0, 0), then the extent dimensions must always be even
// integer multiples of the image transfer granularity or the offset + extent dimensions must always match the image
// subresource extent dimensions.
VkExtent3D offset_extent_sum = {};
offset_extent_sum.width = static_cast<uint32_t>(abs(offset->x)) + extent->width;
offset_extent_sum.height = static_cast<uint32_t>(abs(offset->y)) + extent->height;
offset_extent_sum.depth = static_cast<uint32_t>(abs(offset->z)) + extent->depth;
bool x_ok = true;
bool y_ok = true;
bool z_ok = true;
switch (image_type) {
case VK_IMAGE_TYPE_3D:
z_ok = ((0 == SafeModulo(extent->depth, granularity->depth)) ||
(subresource_extent->depth == offset_extent_sum.depth));
// fall through
case VK_IMAGE_TYPE_2D:
y_ok = ((0 == SafeModulo(extent->height, granularity->height)) ||
(subresource_extent->height == offset_extent_sum.height));
// fall through
case VK_IMAGE_TYPE_1D:
x_ok = ((0 == SafeModulo(extent->width, granularity->width)) ||
(subresource_extent->width == offset_extent_sum.width));
break;
default:
// Unrecognized or new IMAGE_TYPE enums will be caught in parameter_validation
assert(false);
}
if (!(x_ok && y_ok && z_ok)) {
skip |=
LogError(cb_node->commandBuffer(), vuid,
"%s: pRegion[%d].%s (w=%d, h=%d, d=%d) dimensions must be even integer multiples of this command "
"buffer's queue family image transfer granularity (w=%d, h=%d, d=%d) or offset (x=%d, y=%d, z=%d) + "
"extent (w=%d, h=%d, d=%d) must match the image subresource extents (w=%d, h=%d, d=%d).",
function, i, member, extent->width, extent->height, extent->depth, granularity->width, granularity->height,
granularity->depth, offset->x, offset->y, offset->z, extent->width, extent->height, extent->depth,
subresource_extent->width, subresource_extent->height, subresource_extent->depth);
}
}
return skip;
}
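// Worked example (illustrative): with granularity (4, 4, 1) and a subresource extent of
// (10, 10, 1), a region with offset.x = 8 and extent.width = 2 is accepted even though 2 is not a
// multiple of 4, because offset + extent (8 + 2 == 10) reaches the subresource edge; the same
// extent at offset.x = 0 would be rejected.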
bool CoreChecks::ValidateImageMipLevel(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, uint32_t mip_level,
const uint32_t i, const char *function, const char *member, const char *vuid) const {
bool skip = false;
if (mip_level >= img->createInfo.mipLevels) {
skip |= LogError(cb_node->commandBuffer(), vuid, "In %s, pRegions[%u].%s.mipLevel is %u, but provided %s has %u mip levels.",
function, i, member, mip_level, report_data->FormatHandle(img->image()).c_str(), img->createInfo.mipLevels);
}
return skip;
}
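// Example (illustrative): an image created with mipLevels = 5 accepts region mipLevel values 0
// through 4; mipLevel = 5 triggers the error above.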
bool CoreChecks::ValidateImageArrayLayerRange(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img, const uint32_t base_layer,
const uint32_t layer_count, const uint32_t i, const char *function,
const char *member, const char *vuid) const {
bool skip = false;
if (base_layer >= img->createInfo.arrayLayers || layer_count > img->createInfo.arrayLayers ||
(base_layer + layer_count) > img->createInfo.arrayLayers) {
skip |= LogError(cb_node->commandBuffer(), vuid,
"In %s, pRegions[%u].%s.baseArrayLayer is %u and .layerCount is "
"%u, but provided %s has %u array layers.",
function, i, member, base_layer, layer_count, report_data->FormatHandle(img->image()).c_str(),
img->createInfo.arrayLayers);
}
return skip;
}
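// Example (illustrative): for an image created with arrayLayers = 6, a region with
// baseArrayLayer = 4 and layerCount = 2 is valid (4 + 2 == 6), but layerCount = 3 fails because
// 4 + 3 exceeds the image's 6 array layers.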
// Check valid usage Image Transfer Granularity requirements for elements of a VkBufferImageCopy/VkBufferImageCopy2KHR structure
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateCopyBufferImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *img,
const BufferImageCopyRegionType *region, const uint32_t i,
const char *function, const char *vuid) const {
bool skip = false;
VkExtent3D granularity = GetScaledItg(cb_node, img);
skip |= CheckItgOffset(cb_node, ®ion->imageOffset, &granularity, i, function, "imageOffset", vuid);
VkExtent3D subresource_extent = img->GetSubresourceExtent(region->imageSubresource);
skip |= CheckItgExtent(cb_node, ®ion->imageExtent, ®ion->imageOffset, &granularity, &subresource_extent,
img->createInfo.imageType, i, function, "imageExtent", vuid);
return skip;
}
// Check valid usage Image Transfer Granularity requirements for elements of a VkImageCopy/VkImageCopy2KHR structure
template <typename RegionType>
bool CoreChecks::ValidateCopyImageTransferGranularityRequirements(const CMD_BUFFER_STATE *cb_node, const IMAGE_STATE *src_img,
const IMAGE_STATE *dst_img, const RegionType *region,
const uint32_t i, const char *function,
CopyCommandVersion version) const {
bool skip = false;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *vuid;
// Source image checks
VkExtent3D granularity = GetScaledItg(cb_node, src_img);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-01783" : "VUID-vkCmdCopyImage-srcOffset-01783";
skip |= CheckItgOffset(cb_node, ®ion->srcOffset, &granularity, i, function, "srcOffset", vuid);
VkExtent3D subresource_extent = src_img->GetSubresourceExtent(region->srcSubresource);
const VkExtent3D extent = region->extent;
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-01783" : "VUID-vkCmdCopyImage-srcOffset-01783";
skip |= CheckItgExtent(cb_node, &extent, ®ion->srcOffset, &granularity, &subresource_extent, src_img->createInfo.imageType,
i, function, "extent", vuid);
// Destination image checks
granularity = GetScaledItg(cb_node, dst_img);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-01784" : "VUID-vkCmdCopyImage-dstOffset-01784";
skip |= CheckItgOffset(cb_node, ®ion->dstOffset, &granularity, i, function, "dstOffset", vuid);
// Adjust dest extent, if necessary
const VkExtent3D dest_effective_extent =
GetAdjustedDestImageExtent(src_img->createInfo.format, dst_img->createInfo.format, extent);
subresource_extent = dst_img->GetSubresourceExtent(region->dstSubresource);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-01784" : "VUID-vkCmdCopyImage-dstOffset-01784";
skip |= CheckItgExtent(cb_node, &dest_effective_extent, ®ion->dstOffset, &granularity, &subresource_extent,
dst_img->createInfo.imageType, i, function, "extent", vuid);
return skip;
}
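// Note on the destination-extent adjustment above (a sketch of the rule, not normative text): copy
// extents are expressed in texels of the source image, so when copying from an uncompressed image
// into a compressed one with 4x4 blocks, GetAdjustedDestImageExtent() scales a 4x4 source extent up
// to the 16x16 destination texels it actually covers before the granularity and bounds checks run.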
// Validate contents of a VkImageCopy or VkImageCopy2KHR struct
template <typename ImageCopyRegionType>
bool CoreChecks::ValidateImageCopyData(const uint32_t regionCount, const ImageCopyRegionType *ic_regions,
const IMAGE_STATE *src_state, const IMAGE_STATE *dst_state,
CopyCommandVersion version) const {
bool skip = false;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyImage2KHR()" : "vkCmdCopyImage()";
const char *vuid;
for (uint32_t i = 0; i < regionCount; i++) {
const ImageCopyRegionType region = ic_regions[i];
// For comp<->uncomp copies, the copy extent for the dest image must be adjusted
const VkExtent3D src_copy_extent = region.extent;
const VkExtent3D dst_copy_extent =
GetAdjustedDestImageExtent(src_state->createInfo.format, dst_state->createInfo.format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != dst_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_state->createInfo.imageType) && (VK_IMAGE_TYPE_3D != src_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
// Do all checks on source image
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.srcOffset.y) || (1 != src_copy_extent.height)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00146" : "VUID-vkCmdCopyImage-srcImage-00146";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcOffset.y is %d and extent.height is %d. For 1D images these must "
"be 0 and 1, respectively.",
func_name, i, region.srcOffset.y, src_copy_extent.height);
}
}
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.srcOffset.z) || (1 != src_copy_extent.depth))) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01785" : "VUID-vkCmdCopyImage-srcImage-01785";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcOffset.z is %d and extent.depth is %d. For 1D images "
"these must be 0 and 1, respectively.",
func_name, i, region.srcOffset.z, src_copy_extent.depth);
}
if ((src_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.srcOffset.z)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01787" : "VUID-vkCmdCopyImage-srcImage-01787";
skip |= LogError(src_state->image(), vuid, "%s: pRegion[%d] srcOffset.z is %d. For 2D images the z-offset must be 0.",
func_name, i, region.srcOffset.z);
}
// Source checks that apply only to compressed images (or to _422 images if ycbcr enabled)
bool ext_ycbcr = IsExtEnabled(device_extensions.vk_khr_sampler_ycbcr_conversion);
if (FormatIsCompressed(src_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(src_state->createInfo.format))) {
const VkExtent3D block_size = FormatTexelBlockExtent(src_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.srcOffset.x, block_size.width) != 0) ||
(SafeModulo(region.srcOffset.y, block_size.height) != 0) ||
(SafeModulo(region.srcOffset.z, block_size.depth) != 0)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01727" : "VUID-vkCmdCopyImage-srcImage-01727";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
func_name, i, region.srcOffset.x, region.srcOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = src_state->GetSubresourceExtent(region.srcSubresource);
if ((SafeModulo(src_copy_extent.width, block_size.width) != 0) &&
(src_copy_extent.width + region.srcOffset.x != mip_extent.width)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01728" : "VUID-vkCmdCopyImage-srcImage-01728";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"width (%d), or when added to srcOffset.x (%d) must equal the image subresource width (%d).",
func_name, i, src_copy_extent.width, block_size.width, region.srcOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(src_copy_extent.height, block_size.height) != 0) &&
(src_copy_extent.height + region.srcOffset.y != mip_extent.height)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01729" : "VUID-vkCmdCopyImage-srcImage-01729";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] extent height (%d) must be a multiple of the compressed texture block "
"height (%d), or when added to srcOffset.y (%d) must equal the image subresource height (%d).",
func_name, i, src_copy_extent.height, block_size.height, region.srcOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : src_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.srcOffset.z != mip_extent.depth)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01730" : "VUID-vkCmdCopyImage-srcImage-01730";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] extent width (%d) must be a multiple of the compressed texture block "
"depth (%d), or when added to srcOffset.z (%d) must equal the image subresource depth (%d).",
func_name, i, src_copy_extent.depth, block_size.depth, region.srcOffset.z, mip_extent.depth);
}
} // Compressed
// Do all checks on dest image
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((0 != region.dstOffset.y) || (1 != dst_copy_extent.height)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-00152" : "VUID-vkCmdCopyImage-dstImage-00152";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstOffset.y is %d and dst_copy_extent.height is %d. For 1D images "
"these must be 0 and 1, respectively.",
func_name, i, region.dstOffset.y, dst_copy_extent.height);
}
}
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_1D) && ((0 != region.dstOffset.z) || (1 != dst_copy_extent.depth))) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01786" : "VUID-vkCmdCopyImage-dstImage-01786";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstOffset.z is %d and extent.depth is %d. For 1D images these must be 0 "
"and 1, respectively.",
func_name, i, region.dstOffset.z, dst_copy_extent.depth);
}
if ((dst_state->createInfo.imageType == VK_IMAGE_TYPE_2D) && (0 != region.dstOffset.z)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01788" : "VUID-vkCmdCopyImage-dstImage-01788";
skip |= LogError(dst_state->image(), vuid, "%s: pRegion[%d] dstOffset.z is %d. For 2D images the z-offset must be 0.",
func_name, i, region.dstOffset.z);
}
        // Handle the layerCount restrictions, which differ depending on whether VK_KHR_maintenance1 is enabled
if (device_extensions.vk_khr_maintenance1) {
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-04443" : "VUID-vkCmdCopyImage-srcImage-04443";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcSubresource.baseArrayLayer is %d and srcSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
func_name, i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
}
if (dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-04444" : "VUID-vkCmdCopyImage-dstImage-04444";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstSubresource.baseArrayLayer is %d and dstSubresource.layerCount "
"is %d. For VK_IMAGE_TYPE_3D images these must be 0 and 1, respectively.",
func_name, i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
} else { // Pre maint 1
if (src_state->createInfo.imageType == VK_IMAGE_TYPE_3D || dst_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != region.srcSubresource.baseArrayLayer) || (1 != region.srcSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00139" : "VUID-vkCmdCopyImage-srcImage-00139";
skip |= LogError(src_state->image(), vuid,
"%s: pRegion[%d] srcSubresource.baseArrayLayer is %d and "
"srcSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
func_name, i, region.srcSubresource.baseArrayLayer, region.srcSubresource.layerCount);
}
if ((0 != region.dstSubresource.baseArrayLayer) || (1 != region.dstSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00139" : "VUID-vkCmdCopyImage-srcImage-00139";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstSubresource.baseArrayLayer is %d and "
"dstSubresource.layerCount is %d. For copies with either source or dest of type "
"VK_IMAGE_TYPE_3D, these must be 0 and 1, respectively.",
func_name, i, region.dstSubresource.baseArrayLayer, region.dstSubresource.layerCount);
}
}
}
// Dest checks that apply only to compressed images (or to _422 images if ycbcr enabled)
if (FormatIsCompressed(dst_state->createInfo.format) ||
(ext_ycbcr && FormatIsSinglePlane_422(dst_state->createInfo.format))) {
const VkExtent3D block_size = FormatTexelBlockExtent(dst_state->createInfo.format);
// image offsets must be multiples of block dimensions
if ((SafeModulo(region.dstOffset.x, block_size.width) != 0) ||
(SafeModulo(region.dstOffset.y, block_size.height) != 0) ||
(SafeModulo(region.dstOffset.z, block_size.depth) != 0)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01731" : "VUID-vkCmdCopyImage-dstImage-01731";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dstOffset (%d, %d) must be multiples of the compressed image's "
"texel width & height (%d, %d).",
func_name, i, region.dstOffset.x, region.dstOffset.y, block_size.width, block_size.height);
}
const VkExtent3D mip_extent = dst_state->GetSubresourceExtent(region.dstSubresource);
if ((SafeModulo(dst_copy_extent.width, block_size.width) != 0) &&
(dst_copy_extent.width + region.dstOffset.x != mip_extent.width)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01732" : "VUID-vkCmdCopyImage-dstImage-01732";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block width (%d), or when added to dstOffset.x (%d) must equal the image subresource width (%d).",
func_name, i, dst_copy_extent.width, block_size.width, region.dstOffset.x, mip_extent.width);
}
// Extent height must be a multiple of block height, or dst_copy_extent+offset height must equal subresource height
if ((SafeModulo(dst_copy_extent.height, block_size.height) != 0) &&
(dst_copy_extent.height + region.dstOffset.y != mip_extent.height)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01733" : "VUID-vkCmdCopyImage-dstImage-01733";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dst_copy_extent height (%d) must be a multiple of the compressed "
"texture block height (%d), or when added to dstOffset.y (%d) must equal the image subresource "
"height (%d).",
func_name, i, dst_copy_extent.height, block_size.height, region.dstOffset.y, mip_extent.height);
}
// Extent depth must be a multiple of block depth, or dst_copy_extent+offset depth must equal subresource depth
uint32_t copy_depth = (slice_override ? depth_slices : dst_copy_extent.depth);
if ((SafeModulo(copy_depth, block_size.depth) != 0) && (copy_depth + region.dstOffset.z != mip_extent.depth)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01734" : "VUID-vkCmdCopyImage-dstImage-01734";
skip |= LogError(dst_state->image(), vuid,
"%s: pRegion[%d] dst_copy_extent width (%d) must be a multiple of the compressed texture "
"block depth (%d), or when added to dstOffset.z (%d) must equal the image subresource depth (%d).",
func_name, i, dst_copy_extent.depth, block_size.depth, region.dstOffset.z, mip_extent.depth);
}
} // Compressed
}
return skip;
}
template <typename RegionType>
bool CoreChecks::ValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const RegionType *pRegions, CopyCommandVersion version) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
const VkFormat src_format = src_image_state->createInfo.format;
const VkFormat dst_format = dst_image_state->createInfo.format;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
bool skip = false;
const char *func_name = is_2khr ? "vkCmdCopyImage2KHR()" : "vkCmdCopyImage()";
const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGE2KHR : CMD_COPYIMAGE;
const char *vuid;
skip = ValidateImageCopyData(regionCount, pRegions, src_image_state, dst_image_state, version);
VkCommandBuffer command_buffer = cb_node->commandBuffer();
for (uint32_t i = 0; i < regionCount; i++) {
const RegionType region = pRegions[i];
// For comp/uncomp copies, the copy extent for the dest image must be adjusted
VkExtent3D src_copy_extent = region.extent;
VkExtent3D dst_copy_extent = GetAdjustedDestImageExtent(src_format, dst_format, region.extent);
bool slice_override = false;
uint32_t depth_slices = 0;
// Special case for copying between a 1D/2D array and a 3D image
// TBD: This seems like the only way to reconcile 3 mutually-exclusive VU checks for 2D/3D copies. Heads up.
if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != dst_image_state->createInfo.imageType)) {
depth_slices = region.dstSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
} else if ((VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D != src_image_state->createInfo.imageType)) {
depth_slices = region.srcSubresource.layerCount; // Slice count from 2D subresource
slice_override = (depth_slices != 1);
}
skip |= ValidateImageSubresourceLayers(cb_node, ®ion.srcSubresource, func_name, "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, ®ion.dstSubresource, func_name, "dstSubresource", i);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcSubresource-01696" : "VUID-vkCmdCopyImage-srcSubresource-01696";
skip |=
ValidateImageMipLevel(cb_node, src_image_state, region.srcSubresource.mipLevel, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstSubresource-01697" : "VUID-vkCmdCopyImage-dstSubresource-01697";
skip |=
ValidateImageMipLevel(cb_node, dst_image_state, region.dstSubresource.mipLevel, i, func_name, "dstSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcSubresource-01698" : "VUID-vkCmdCopyImage-srcSubresource-01698";
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, region.srcSubresource.baseArrayLayer,
region.srcSubresource.layerCount, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstSubresource-01699" : "VUID-vkCmdCopyImage-dstSubresource-01699";
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, region.dstSubresource.baseArrayLayer,
region.dstSubresource.layerCount, i, func_name, "dstSubresource", vuid);
if (device_extensions.vk_khr_maintenance1) {
// No chance of mismatch if we're overriding depth slice count
if (!slice_override) {
// The number of depth slices in srcSubresource and dstSubresource must match
// Depth comes from layerCount for 1D,2D resources, from extent.depth for 3D
uint32_t src_slices =
(VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType ? src_copy_extent.depth
: region.srcSubresource.layerCount);
uint32_t dst_slices =
(VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType ? dst_copy_extent.depth
: region.dstSubresource.layerCount);
if (src_slices != dst_slices) {
vuid = is_2khr ? "VUID-VkImageCopy2KHR-extent-00140" : "VUID-VkImageCopy-extent-00140";
skip |= LogError(command_buffer, vuid,
"%s: number of depth slices in source (%u) and destination (%u) subresources for pRegions[%u] "
"do not match.",
func_name, src_slices, dst_slices, i);
}
}
} else {
// For each region the layerCount member of srcSubresource and dstSubresource must match
if (region.srcSubresource.layerCount != region.dstSubresource.layerCount) {
vuid = is_2khr ? "VUID-VkImageCopy2KHR-layerCount-00138" : "VUID-VkImageCopy-layerCount-00138";
skip |=
LogError(command_buffer, vuid,
"%s: number of layers in source (%u) and destination (%u) subresources for pRegions[%u] do not match",
func_name, region.srcSubresource.layerCount, region.dstSubresource.layerCount, i);
}
}
// Do multiplane-specific checks, if extension enabled
if (device_extensions.vk_khr_sampler_ycbcr_conversion) {
if ((!FormatIsMultiplane(src_format)) && (!FormatIsMultiplane(dst_format))) {
// If neither image is multi-plane the aspectMask member of src and dst must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01551" : "VUID-vkCmdCopyImage-srcImage-01551";
skip |= LogError(command_buffer, vuid,
"%s: Copy between non-multiplane images with differing aspectMasks in pRegions[%u] with "
"source (0x%x) destination (0x%x).",
func_name, i, region.srcSubresource.aspectMask, region.dstSubresource.aspectMask);
}
} else {
// Source image multiplane checks
uint32_t planes = FormatPlaneCount(src_format);
VkImageAspectFlags aspect = region.srcSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01552" : "VUID-vkCmdCopyImage-srcImage-01552";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].srcSubresource.aspectMask (0x%x) is invalid for 2-plane format.", func_name,
i, aspect);
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01553" : "VUID-vkCmdCopyImage-srcImage-01553";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].srcSubresource.aspectMask (0x%x) is invalid for 3-plane format.", func_name,
i, aspect);
}
// Single-plane to multi-plane
if ((!FormatIsMultiplane(src_format)) && (FormatIsMultiplane(dst_format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01557" : "VUID-vkCmdCopyImage-dstImage-01557";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].srcSubresource.aspectMask (0x%x) is not VK_IMAGE_ASPECT_COLOR_BIT.",
func_name, i, aspect);
}
// Dest image multiplane checks
planes = FormatPlaneCount(dst_format);
aspect = region.dstSubresource.aspectMask;
if ((2 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01554" : "VUID-vkCmdCopyImage-dstImage-01554";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].dstSubresource.aspectMask (0x%x) is invalid for 2-plane format.", func_name,
i, aspect);
}
if ((3 == planes) && (aspect != VK_IMAGE_ASPECT_PLANE_0_BIT) && (aspect != VK_IMAGE_ASPECT_PLANE_1_BIT) &&
(aspect != VK_IMAGE_ASPECT_PLANE_2_BIT)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01555" : "VUID-vkCmdCopyImage-dstImage-01555";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].dstSubresource.aspectMask (0x%x) is invalid for 3-plane format.", func_name,
i, aspect);
}
// Multi-plane to single-plane
if ((FormatIsMultiplane(src_format)) && (!FormatIsMultiplane(dst_format)) &&
(VK_IMAGE_ASPECT_COLOR_BIT != aspect)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01556" : "VUID-vkCmdCopyImage-srcImage-01556";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u].dstSubresource.aspectMask (0x%x) is not VK_IMAGE_ASPECT_COLOR_BIT.",
func_name, i, aspect);
}
}
} else {
            // !vk_khr_sampler_ycbcr_conversion
            // Without the extension, images cannot be multi-plane, so the aspectMask members of srcSubresource and
            // dstSubresource must match
if (region.srcSubresource.aspectMask != region.dstSubresource.aspectMask) {
vuid = is_2khr ? "VUID-VkImageCopy2KHR-aspectMask-00137" : "VUID-VkImageCopy-aspectMask-00137";
skip |= LogError(
command_buffer, vuid,
"%s: Copy between images with differing aspectMasks in pRegions[%u] with source (0x%x) destination (0x%x).",
func_name, i, region.srcSubresource.aspectMask, region.dstSubresource.aspectMask);
}
}
// For each region, the aspectMask member of srcSubresource must be present in the source image
if (!VerifyAspectsPresent(region.srcSubresource.aspectMask, src_format)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-aspectMask-00142" : "VUID-vkCmdCopyImage-aspectMask-00142";
skip |=
LogError(command_buffer, vuid,
"%s: pRegions[%u].srcSubresource.aspectMask (0x%x) cannot specify aspects not present in source image.",
func_name, i, region.srcSubresource.aspectMask);
}
// For each region, the aspectMask member of dstSubresource must be present in the destination image
if (!VerifyAspectsPresent(region.dstSubresource.aspectMask, dst_format)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-aspectMask-00143" : "VUID-vkCmdCopyImage-aspectMask-00143";
skip |= LogError(
command_buffer, vuid,
"%s: pRegions[%u].dstSubresource.aspectMask (0x%x) cannot specify aspects not present in destination image.",
func_name, i, region.dstSubresource.aspectMask);
}
        // Each dimension's offset + extent must fall within the image subresource extent
VkExtent3D subresource_extent = src_image_state->GetSubresourceExtent(region.srcSubresource);
if (slice_override) src_copy_extent.depth = depth_slices;
uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &src_copy_extent, &subresource_extent);
if (extent_check & kXBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-00144" : "VUID-vkCmdCopyImage-srcOffset-00144";
skip |= LogError(command_buffer, vuid,
"%s: Source image pRegion[%u] x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
func_name, i, region.srcOffset.x, src_copy_extent.width, subresource_extent.width);
}
if (extent_check & kYBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-00145" : "VUID-vkCmdCopyImage-srcOffset-00145";
skip |= LogError(command_buffer, vuid,
"%s: Source image pRegion[%u] y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
func_name, i, region.srcOffset.y, src_copy_extent.height, subresource_extent.height);
}
if (extent_check & kZBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcOffset-00147" : "VUID-vkCmdCopyImage-srcOffset-00147";
skip |= LogError(command_buffer, vuid,
"%s: Source image pRegion[%u] z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
func_name, i, region.srcOffset.z, src_copy_extent.depth, subresource_extent.depth);
}
// Adjust dest extent if necessary
subresource_extent = dst_image_state->GetSubresourceExtent(region.dstSubresource);
if (slice_override) dst_copy_extent.depth = depth_slices;
extent_check = ExceedsBounds(&(region.dstOffset), &dst_copy_extent, &subresource_extent);
if (extent_check & kXBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-00150" : "VUID-vkCmdCopyImage-dstOffset-00150";
skip |= LogError(command_buffer, vuid,
"%s: Dest image pRegion[%u] x-dimension offset [%1d] + extent [%1d] exceeds subResource "
"width [%1d].",
func_name, i, region.dstOffset.x, dst_copy_extent.width, subresource_extent.width);
}
if (extent_check & kYBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-00151" : "VUID-vkCmdCopyImage-dstOffset-00151";
skip |= LogError(command_buffer, vuid,
"%s): Dest image pRegion[%u] y-dimension offset [%1d] + extent [%1d] exceeds subResource "
"height [%1d].",
func_name, i, region.dstOffset.y, dst_copy_extent.height, subresource_extent.height);
}
if (extent_check & kZBit) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstOffset-00153" : "VUID-vkCmdCopyImage-dstOffset-00153";
skip |= LogError(command_buffer, vuid,
"%s: Dest image pRegion[%u] z-dimension offset [%1d] + extent [%1d] exceeds subResource "
"depth [%1d].",
func_name, i, region.dstOffset.z, dst_copy_extent.depth, subresource_extent.depth);
}
// The union of all source regions, and the union of all destination regions, specified by the elements of regions,
// must not overlap in memory
if (src_image_state->image() == dst_image_state->image()) {
for (uint32_t j = 0; j < regionCount; j++) {
if (RegionIntersects(®ion, &pRegions[j], src_image_state->createInfo.imageType,
FormatIsMultiplane(src_format))) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-pRegions-00124" : "VUID-vkCmdCopyImage-pRegions-00124";
skip |= LogError(command_buffer, vuid, "%s: pRegion[%u] src overlaps with pRegions[%u].", func_name, i, j);
}
}
}
        // Check depth for 2D images: post-Maintenance1 the restriction applies when both images are 2D, while prior to it the
        // restriction applied when either image was 2D
if (device_extensions.vk_khr_maintenance1) {
if (((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType)) &&
(src_copy_extent.depth != 1)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01790" : "VUID-vkCmdCopyImage-srcImage-01790";
skip |= LogError(command_buffer, vuid,
"%s: pRegion[%u] both srcImage and dstImage are 2D and extent.depth is %u and has to be 1",
func_name, i, src_copy_extent.depth);
}
} else {
if (((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) ||
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType)) &&
(src_copy_extent.depth != 1)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01789" : "VUID-vkCmdCopyImage-srcImage-01789";
skip |= LogError(command_buffer, vuid,
"%s: pRegion[%u] either srcImage or dstImage is 2D and extent.depth is %u and has to be 1",
func_name, i, src_copy_extent.depth);
}
}
        // Check 2D <-> 3D copies, where extent.depth must equal the 2D image's subresource layerCount
if ((VK_IMAGE_TYPE_2D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_3D == dst_image_state->createInfo.imageType) &&
(src_copy_extent.depth != region.srcSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01791" : "VUID-vkCmdCopyImage-srcImage-01791";
skip |= LogError(command_buffer, vuid,
"%s: pRegion[%u] srcImage is 2D, dstImage is 3D and extent.depth is %u and has to be "
"srcSubresource.layerCount (%u)",
func_name, i, src_copy_extent.depth, region.srcSubresource.layerCount);
} else if ((VK_IMAGE_TYPE_3D == src_image_state->createInfo.imageType) &&
(VK_IMAGE_TYPE_2D == dst_image_state->createInfo.imageType) &&
(src_copy_extent.depth != region.dstSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01792" : "VUID-vkCmdCopyImage-dstImage-01792";
skip |= LogError(command_buffer, vuid,
"%s: pRegion[%u] srcImage is 3D, dstImage is 2D and extent.depth is %u and has to be "
"dstSubresource.layerCount (%u)",
func_name, i, src_copy_extent.depth, region.dstSubresource.layerCount);
}
        // Check for multi-plane format compatibility
if (FormatIsMultiplane(src_format) || FormatIsMultiplane(dst_format)) {
size_t src_format_size = 0;
size_t dst_format_size = 0;
if (FormatIsMultiplane(src_format)) {
const VkFormat plane_format = FindMultiplaneCompatibleFormat(src_format, region.srcSubresource.aspectMask);
src_format_size = FormatElementSize(plane_format);
} else {
src_format_size = FormatElementSize(src_format);
}
if (FormatIsMultiplane(dst_format)) {
const VkFormat plane_format = FindMultiplaneCompatibleFormat(dst_format, region.dstSubresource.aspectMask);
dst_format_size = FormatElementSize(plane_format);
} else {
dst_format_size = FormatElementSize(dst_format);
}
// If size is still zero, then format is invalid and will be caught in another VU
if ((src_format_size != dst_format_size) && (src_format_size != 0) && (dst_format_size != 0)) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-None-01549" : "VUID-vkCmdCopyImage-None-01549";
skip |= LogError(command_buffer, vuid,
"%s: pRegions[%u] called with non-compatible image formats. "
"The src format %s with aspectMask %s is not compatible with dst format %s aspectMask %s.",
func_name, i, string_VkFormat(src_format),
string_VkImageAspectFlags(region.srcSubresource.aspectMask).c_str(), string_VkFormat(dst_format),
string_VkImageAspectFlags(region.dstSubresource.aspectMask).c_str());
}
}
}
// The formats of non-multiplane src_image and dst_image must be compatible. Formats are considered compatible if their texel
// size in bytes is the same between both formats. For example, VK_FORMAT_R8G8B8A8_UNORM is compatible with VK_FORMAT_R32_UINT
    // because both texels are 4 bytes in size.
if (!FormatIsMultiplane(src_format) && !FormatIsMultiplane(dst_format)) {
const char *compatible_vuid =
(device_extensions.vk_khr_sampler_ycbcr_conversion)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01548" : "VUID-vkCmdCopyImage-srcImage-01548")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00135" : "VUID-vkCmdCopyImage-srcImage-00135");
// Depth/stencil formats must match exactly.
if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
if (src_format != dst_format) {
skip |= LogError(command_buffer, compatible_vuid,
"%s: Depth/stencil formats must match exactly for src (%s) and dst (%s).", func_name,
string_VkFormat(src_format), string_VkFormat(dst_format));
}
} else {
if (FormatElementSize(src_format) != FormatElementSize(dst_format)) {
skip |= LogError(command_buffer, compatible_vuid,
"%s: Unmatched image format sizes. "
"The src format %s has size of %" PRIu32 " and dst format %s has size of %" PRIu32 ".",
func_name, string_VkFormat(src_format), FormatElementSize(src_format), string_VkFormat(dst_format),
FormatElementSize(dst_format));
}
}
}
// Source and dest image sample counts must match
if (src_image_state->createInfo.samples != dst_image_state->createInfo.samples) {
        vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00136" : "VUID-vkCmdCopyImage-srcImage-00136";
        skip |=
            LogError(command_buffer, vuid, "%s: The src image sample count (%s) does not match the dst image sample count (%s).",
func_name, string_VkSampleCountFlagBits(src_image_state->createInfo.samples),
string_VkSampleCountFlagBits(dst_image_state->createInfo.samples));
}
vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01546" : "VUID-vkCmdCopyImage-srcImage-01546")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00127" : "VUID-vkCmdCopyImage-srcImage-00127");
skip |= ValidateMemoryIsBoundToImage(src_image_state, func_name, vuid);
vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01547" : "VUID-vkCmdCopyImage-dstImage-01547")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-00132" : "VUID-vkCmdCopyImage-dstImage-00132");
skip |= ValidateMemoryIsBoundToImage(dst_image_state, func_name, vuid);
// Validate that SRC & DST images have correct usage flags set
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-00126" : "VUID-vkCmdCopyImage-srcImage-00126";
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-00131" : "VUID-vkCmdCopyImage-dstImage-00131";
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
vuid = is_2khr ? "VUID-vkCmdCopyImage2KHR-commandBuffer-01825" : "VUID-vkCmdCopyImage-commandBuffer-01825";
skip |= ValidateProtectedImage(cb_node, src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyImage2KHR-commandBuffer-01826" : "VUID-vkCmdCopyImage-commandBuffer-01826";
skip |= ValidateProtectedImage(cb_node, dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyImage2KHR-commandBuffer-01827" : "VUID-vkCmdCopyImage-commandBuffer-01827";
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-02542" : "VUID-vkCmdCopyImage-dstImage-02542";
skip |=
LogError(command_buffer, vuid,
"%s: srcImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT", func_name);
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-02542" : "VUID-vkCmdCopyImage-dstImage-02542";
skip |=
LogError(command_buffer, vuid,
"%s: dstImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT", func_name);
}
if (device_extensions.vk_khr_maintenance1) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImage-01995" : "VUID-vkCmdCopyImage-srcImage-01995";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImage-01996" : "VUID-vkCmdCopyImage-dstImage-01996";
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, func_name, vuid);
}
skip |= ValidateCmd(cb_node, cmd_type, func_name);
bool hit_error = false;
const char *invalid_src_layout_vuid =
(src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImageLayout-01917" : "VUID-vkCmdCopyImage-srcImageLayout-01917")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImageLayout-00129" : "VUID-vkCmdCopyImage-srcImageLayout-00129");
const char *invalid_dst_layout_vuid =
(dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? (is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImageLayout-01395" : "VUID-vkCmdCopyImage-dstImageLayout-01395")
: (is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImageLayout-00134" : "VUID-vkCmdCopyImage-dstImageLayout-00134");
for (uint32_t i = 0; i < regionCount; ++i) {
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-srcImageLayout-00128" : "VUID-vkCmdCopyImage-srcImageLayout-00128";
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, func_name, invalid_src_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkCopyImageInfo2KHR-dstImageLayout-00133" : "VUID-vkCmdCopyImage-dstImageLayout-00133";
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, func_name, invalid_dst_layout_vuid, vuid, &hit_error);
skip |= ValidateCopyImageTransferGranularityRequirements(cb_node, src_image_state, dst_image_state, &pRegions[i], i,
func_name, version);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) const {
return ValidateCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) const {
return ValidateCmdCopyImage(commandBuffer, pCopyImageInfo->srcImage, pCopyImageInfo->srcImageLayout, pCopyImageInfo->dstImage,
pCopyImageInfo->dstImageLayout, pCopyImageInfo->regionCount, pCopyImageInfo->pRegions,
COPY_COMMAND_VERSION_2);
}
void CoreChecks::PreCallRecordCmdCopyImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
}
}
void CoreChecks::PreCallRecordCmdCopyImage2KHR(VkCommandBuffer commandBuffer, const VkCopyImageInfo2KHR *pCopyImageInfo) {
StateTracker::PreCallRecordCmdCopyImage2KHR(commandBuffer, pCopyImageInfo);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(pCopyImageInfo->srcImage);
auto dst_image_state = GetImageState(pCopyImageInfo->dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < pCopyImageInfo->regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pCopyImageInfo->pRegions[i].srcSubresource,
pCopyImageInfo->srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pCopyImageInfo->pRegions[i].dstSubresource,
pCopyImageInfo->dstImageLayout);
}
}
// Returns true if sub_rect is entirely contained within rect
static inline bool ContainsRect(VkRect2D rect, VkRect2D sub_rect) {
if ((sub_rect.offset.x < rect.offset.x) || (sub_rect.offset.x + sub_rect.extent.width > rect.offset.x + rect.extent.width) ||
(sub_rect.offset.y < rect.offset.y) || (sub_rect.offset.y + sub_rect.extent.height > rect.offset.y + rect.extent.height)) {
return false;
}
return true;
}
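// Worked example (illustrative): a render area at offset (0, 0) with extent 100x100 contains a
// clear rect at offset (10, 10) with extent 50x50, but not one at offset (80, 80) with the same
// extent, since 80 + 50 exceeds the 100-wide render area.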
bool CoreChecks::ValidateClearAttachmentExtent(VkCommandBuffer command_buffer, uint32_t attachment_index,
const FRAMEBUFFER_STATE *framebuffer, uint32_t fb_attachment,
const VkRect2D &render_area, uint32_t rect_count, const VkClearRect *clear_rects,
const CMD_BUFFER_STATE *primary_cb_state) const {
bool skip = false;
const IMAGE_VIEW_STATE *image_view_state = nullptr;
if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) && (fb_attachment < framebuffer->createInfo.attachmentCount)) {
if (primary_cb_state) {
image_view_state = primary_cb_state->GetActiveAttachmentImageViewState(fb_attachment);
} else {
image_view_state = GetCBState(command_buffer)->GetActiveAttachmentImageViewState(fb_attachment);
}
}
for (uint32_t j = 0; j < rect_count; j++) {
if (!ContainsRect(render_area, clear_rects[j].rect)) {
skip |= LogError(command_buffer, "VUID-vkCmdClearAttachments-pRects-00016",
"vkCmdClearAttachments(): The area defined by pRects[%d] is not contained in the area of "
"the current render pass instance.",
j);
}
if (image_view_state) {
// The layers specified by a given element of pRects must be contained within every attachment that
// pAttachments refers to
const auto attachment_layer_count = image_view_state->normalized_subresource_range.layerCount;
if ((clear_rects[j].baseArrayLayer >= attachment_layer_count) ||
(clear_rects[j].baseArrayLayer + clear_rects[j].layerCount > attachment_layer_count)) {
skip |= LogError(command_buffer, "VUID-vkCmdClearAttachments-pRects-00017",
"vkCmdClearAttachments(): The layers defined in pRects[%d] are not contained in the layers "
"of pAttachment[%d].",
j, attachment_index);
}
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) const {
bool skip = false;
const CMD_BUFFER_STATE *cb_node = GetCBState(commandBuffer); // TODO: Should be const, and never modified during validation
if (!cb_node) return skip;
skip |= ValidateCmd(cb_node, CMD_CLEARATTACHMENTS, "vkCmdClearAttachments()");
// Validate that attachment is in reference list of active subpass
if (cb_node->activeRenderPass) {
const VkRenderPassCreateInfo2 *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const uint32_t renderpass_attachment_count = renderpass_create_info->attachmentCount;
const VkSubpassDescription2 *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
const auto *framebuffer = cb_node->activeFramebuffer.get();
const auto &render_area = cb_node->activeRenderPassBeginInfo.renderArea;
for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
auto clear_desc = &pAttachments[attachment_index];
uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;
const VkImageAspectFlags aspect_mask = clear_desc->aspectMask;
if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-00020",
"vkCmdClearAttachments() pAttachments[%u] mask contains VK_IMAGE_ASPECT_METADATA_BIT",
attachment_index);
} else if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
skip |=
LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-02246",
"vkCmdClearAttachments() pAttachments[%u] mask contains a VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit",
attachment_index);
} else if (aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) {
uint32_t color_attachment = VK_ATTACHMENT_UNUSED;
if (clear_desc->colorAttachment < subpass_desc->colorAttachmentCount) {
color_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
if ((color_attachment != VK_ATTACHMENT_UNUSED) && (color_attachment >= renderpass_attachment_count)) {
skip |= LogError(
commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02501",
"vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u is not VK_ATTACHMENT_UNUSED "
"and not a valid attachment for %s attachmentCount=%u. Subpass %u pColorAttachment[%u]=%u.",
                            attachment_index, clear_desc->colorAttachment,
                            report_data->FormatHandle(cb_node->activeRenderPass->renderPass()).c_str(),
                            renderpass_attachment_count, cb_node->activeSubpass, clear_desc->colorAttachment, color_attachment);
color_attachment = VK_ATTACHMENT_UNUSED; // Defensive, prevent lookup past end of renderpass attachment
}
} else {
skip |= LogError(commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02501",
"vkCmdClearAttachments() pAttachments[%u].colorAttachment=%u out of range for %s"
" subpass %u. colorAttachmentCount=%u",
attachment_index, clear_desc->colorAttachment,
report_data->FormatHandle(cb_node->activeRenderPass->renderPass()).c_str(),
cb_node->activeSubpass, subpass_desc->colorAttachmentCount);
}
fb_attachment = color_attachment;
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) ||
(clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT)) {
skip |= LogError(commandBuffer, "VUID-VkClearAttachment-aspectMask-00019",
"vkCmdClearAttachments() pAttachments[%u] aspectMask must set only VK_IMAGE_ASPECT_COLOR_BIT "
"of a color attachment.",
attachment_index);
}
} else { // Must be depth and/or stencil
bool subpass_depth = false;
bool subpass_stencil = false;
if (subpass_desc->pDepthStencilAttachment &&
(subpass_desc->pDepthStencilAttachment->attachment != VK_ATTACHMENT_UNUSED)) {
auto index = subpass_desc->pDepthStencilAttachment->attachment;
subpass_depth = FormatHasDepth(renderpass_create_info->pAttachments[index].format);
subpass_stencil = FormatHasStencil(renderpass_create_info->pAttachments[index].format);
}
if (!subpass_desc->pDepthStencilAttachment ||
(subpass_desc->pDepthStencilAttachment->attachment == VK_ATTACHMENT_UNUSED)) {
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_DEPTH_BIT) && !subpass_depth) {
skip |= LogError(
commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02502",
"vkCmdClearAttachments() pAttachments[%u] aspectMask has VK_IMAGE_ASPECT_DEPTH_BIT but there is no "
"depth attachment in subpass",
attachment_index);
}
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_STENCIL_BIT) && !subpass_stencil) {
skip |= LogError(
commandBuffer, "VUID-vkCmdClearAttachments-aspectMask-02503",
"vkCmdClearAttachments() pAttachments[%u] aspectMask has VK_IMAGE_ASPECT_STENCIL_BIT but there is no "
"stencil attachment in subpass",
attachment_index);
}
} else {
fb_attachment = subpass_desc->pDepthStencilAttachment->attachment;
}
if (subpass_depth) {
skip |= ValidateClearDepthStencilValue(commandBuffer, clear_desc->clearValue.depthStencil,
"vkCmdClearAttachments()");
}
}
if (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_PRIMARY) {
skip |= ValidateClearAttachmentExtent(commandBuffer, attachment_index, framebuffer, fb_attachment, render_area,
rectCount, pRects);
}
// Once the framebuffer attachment is found, we can get the image view state
if (framebuffer && (fb_attachment != VK_ATTACHMENT_UNUSED) &&
(fb_attachment < framebuffer->createInfo.attachmentCount)) {
const IMAGE_VIEW_STATE *image_view_state = cb_node->GetActiveAttachmentImageViewState(fb_attachment);
if (image_view_state != nullptr) {
skip |= ValidateProtectedImage(cb_node, image_view_state->image_state.get(), "vkCmdClearAttachments()",
"VUID-vkCmdClearAttachments-commandBuffer-02504");
skip |= ValidateUnprotectedImage(cb_node, image_view_state->image_state.get(), "vkCmdClearAttachments()",
"VUID-vkCmdClearAttachments-commandBuffer-02505");
}
}
}
}
return skip;
}
void CoreChecks::PreCallRecordCmdClearAttachments(VkCommandBuffer commandBuffer, uint32_t attachmentCount,
const VkClearAttachment *pAttachments, uint32_t rectCount,
const VkClearRect *pRects) {
auto *cb_node = GetCBState(commandBuffer);
if (cb_node->activeRenderPass && (cb_node->createInfo.level == VK_COMMAND_BUFFER_LEVEL_SECONDARY)) {
const VkRenderPassCreateInfo2 *renderpass_create_info = cb_node->activeRenderPass->createInfo.ptr();
const VkSubpassDescription2 *subpass_desc = &renderpass_create_info->pSubpasses[cb_node->activeSubpass];
std::shared_ptr<std::vector<VkClearRect>> clear_rect_copy;
for (uint32_t attachment_index = 0; attachment_index < attachmentCount; attachment_index++) {
const auto clear_desc = &pAttachments[attachment_index];
uint32_t fb_attachment = VK_ATTACHMENT_UNUSED;
if ((clear_desc->aspectMask & VK_IMAGE_ASPECT_COLOR_BIT) &&
(clear_desc->colorAttachment < subpass_desc->colorAttachmentCount)) {
fb_attachment = subpass_desc->pColorAttachments[clear_desc->colorAttachment].attachment;
} else if ((clear_desc->aspectMask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) &&
subpass_desc->pDepthStencilAttachment) {
fb_attachment = subpass_desc->pDepthStencilAttachment->attachment;
}
if (fb_attachment != VK_ATTACHMENT_UNUSED) {
if (!clear_rect_copy) {
// We need a copy of the clear rectangles that will persist until the last lambda executes
// but we want to create it as lazily as possible
clear_rect_copy.reset(new std::vector<VkClearRect>(pRects, pRects + rectCount));
}
// if a secondary level command buffer inherits the framebuffer from the primary command buffer
// (see VkCommandBufferInheritanceInfo), this validation must be deferred until the secondary is
// recorded into a primary with vkCmdExecuteCommands and the inherited framebuffer is actually known
auto val_fn = [this, commandBuffer, attachment_index, fb_attachment, rectCount, clear_rect_copy](
const CMD_BUFFER_STATE *prim_cb, const FRAMEBUFFER_STATE *fb) {
assert(rectCount == clear_rect_copy->size());
const auto &render_area = prim_cb->activeRenderPassBeginInfo.renderArea;
bool skip = false;
skip = ValidateClearAttachmentExtent(commandBuffer, attachment_index, fb, fb_attachment, render_area, rectCount,
clear_rect_copy->data(), prim_cb);
return skip;
};
cb_node->cmd_execute_commands_functions.emplace_back(val_fn);
}
}
}
}
template <typename RegionType>
bool CoreChecks::ValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const RegionType *pRegions, CopyCommandVersion version) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdResolveImage2KHR()" : "vkCmdResolveImage()";
const CMD_TYPE cmd_type = is_2khr ? CMD_RESOLVEIMAGE2KHR : CMD_RESOLVEIMAGE;
const char *vuid;
bool skip = false;
if (cb_node && src_image_state && dst_image_state) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-00256" : "VUID-vkCmdResolveImage-srcImage-00256";
skip |= ValidateMemoryIsBoundToImage(src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-00258" : "VUID-vkCmdResolveImage-dstImage-00258";
skip |= ValidateMemoryIsBoundToImage(dst_image_state, func_name, vuid);
skip |= ValidateCmd(cb_node, cmd_type, func_name);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-02003" : "VUID-vkCmdResolveImage-dstImage-02003";
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdResolveImage2KHR-commandBuffer-01837" : "VUID-vkCmdResolveImage-commandBuffer-01837";
skip |= ValidateProtectedImage(cb_node, src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdResolveImage2KHR-commandBuffer-01838" : "VUID-vkCmdResolveImage-commandBuffer-01838";
skip |= ValidateProtectedImage(cb_node, dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdResolveImage2KHR-commandBuffer-01839" : "VUID-vkCmdResolveImage-commandBuffer-01839";
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-02546" : "VUID-vkCmdResolveImage-dstImage-02546";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: srcImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-02546" : "VUID-vkCmdResolveImage-dstImage-02546";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: dstImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
bool hit_error = false;
const char *invalid_src_layout_vuid =
is_2khr ? ((src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkResolveImageInfo2KHR-srcImageLayout-01400"
: "VUID-VkResolveImageInfo2KHR-srcImageLayout-00261")
: ((src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-srcImageLayout-01400"
: "VUID-vkCmdResolveImage-srcImageLayout-00261");
const char *invalid_dst_layout_vuid =
is_2khr ? ((dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkResolveImageInfo2KHR-dstImageLayout-01401"
: "VUID-VkResolveImageInfo2KHR-dstImageLayout-00263")
: ((dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdResolveImage-dstImageLayout-01401"
: "VUID-vkCmdResolveImage-dstImageLayout-00263");
// For each region, the number of layers in the image subresource should not be zero
// For each region, src and dest image aspect must be color only
for (uint32_t i = 0; i < regionCount; i++) {
const RegionType region = pRegions[i];
const VkImageSubresourceLayers src_subresource = region.srcSubresource;
const VkImageSubresourceLayers dst_subresource = region.dstSubresource;
skip |= ValidateImageSubresourceLayers(cb_node, &src_subresource, func_name, "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, &dst_subresource, func_name, "dstSubresource", i);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImageLayout-00260" : "VUID-vkCmdResolveImage-srcImageLayout-00260";
skip |= VerifyImageLayout(cb_node, src_image_state, src_subresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, func_name, invalid_src_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImageLayout-00262" : "VUID-vkCmdResolveImage-dstImageLayout-00262";
skip |= VerifyImageLayout(cb_node, dst_image_state, dst_subresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, func_name, invalid_dst_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcSubresource-01709" : "VUID-vkCmdResolveImage-srcSubresource-01709";
skip |= ValidateImageMipLevel(cb_node, src_image_state, src_subresource.mipLevel, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstSubresource-01710" : "VUID-vkCmdResolveImage-dstSubresource-01710";
skip |= ValidateImageMipLevel(cb_node, dst_image_state, dst_subresource.mipLevel, i, func_name, "dstSubresource", vuid);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcSubresource-01711" : "VUID-vkCmdResolveImage-srcSubresource-01711";
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, src_subresource.baseArrayLayer,
src_subresource.layerCount, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstSubresource-01712" : "VUID-vkCmdResolveImage-dstSubresource-01712";
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, dst_subresource.baseArrayLayer,
dst_subresource.layerCount, i, func_name, "srcSubresource", vuid);
// layer counts must match
if (src_subresource.layerCount != dst_subresource.layerCount) {
vuid = is_2khr ? "VUID-VkImageResolve2KHR-layerCount-00267" : "VUID-VkImageResolve-layerCount-00267";
skip |=
LogError(cb_node->commandBuffer(), vuid,
"%s: layerCount in source and destination subresource of pRegions[%u] does not match.", func_name, i);
}
// For each region, src and dest image aspect must be color only
if ((src_subresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT) ||
(dst_subresource.aspectMask != VK_IMAGE_ASPECT_COLOR_BIT)) {
vuid = is_2khr ? "VUID-VkImageResolve2KHR-aspectMask-00266" : "VUID-VkImageResolve-aspectMask-00266";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: src and dest aspectMasks for pRegions[%u] must specify only VK_IMAGE_ASPECT_COLOR_BIT.",
func_name, i);
}
const VkImageType src_image_type = src_image_state->createInfo.imageType;
const VkImageType dst_image_type = dst_image_state->createInfo.imageType;
if ((VK_IMAGE_TYPE_3D == src_image_type) || (VK_IMAGE_TYPE_3D == dst_image_type)) {
if ((0 != src_subresource.baseArrayLayer) || (1 != src_subresource.layerCount)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-04446" : "VUID-vkCmdResolveImage-srcImage-04446";
skip |= LogError(objlist, vuid,
"%s: pRegions[%u] baseArrayLayer must be 0 and layerCount must be 1 for all "
"subresources if the src or dst image is 3D.",
func_name, i);
}
if ((0 != dst_subresource.baseArrayLayer) || (1 != dst_subresource.layerCount)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-04447" : "VUID-vkCmdResolveImage-srcImage-04447";
skip |= LogError(objlist, vuid,
"%s: pRegions[%u] baseArrayLayer must be 0 and layerCount must be 1 for all "
"subresources if the src or dst image is 3D.",
func_name, i);
}
}
if (VK_IMAGE_TYPE_1D == src_image_type) {
if ((pRegions[i].srcOffset.y != 0) || (pRegions[i].extent.height != 1)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-00271" : "VUID-vkCmdResolveImage-srcImage-00271";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) is 1D but pRegions[%u] srcOffset.y (%d) is not 0 or "
"extent.height (%u) is not 1.",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i,
pRegions[i].srcOffset.y, pRegions[i].extent.height);
}
}
if ((VK_IMAGE_TYPE_1D == src_image_type) || (VK_IMAGE_TYPE_2D == src_image_type)) {
if ((pRegions[i].srcOffset.z != 0) || (pRegions[i].extent.depth != 1)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-00273" : "VUID-vkCmdResolveImage-srcImage-00273";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) is 2D but pRegions[%u] srcOffset.z (%d) is not 0 or "
"extent.depth (%u) is not 1.",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i,
pRegions[i].srcOffset.z, pRegions[i].extent.depth);
}
}
if (VK_IMAGE_TYPE_1D == dst_image_type) {
if ((pRegions[i].dstOffset.y != 0) || (pRegions[i].extent.height != 1)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-00276" : "VUID-vkCmdResolveImage-dstImage-00276";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) is 1D but pRegions[%u] dstOffset.y (%d) is not 0 or "
"extent.height (%u) is not 1.",
func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i,
pRegions[i].dstOffset.y, pRegions[i].extent.height);
}
}
if ((VK_IMAGE_TYPE_1D == dst_image_type) || (VK_IMAGE_TYPE_2D == dst_image_type)) {
if ((pRegions[i].dstOffset.z != 0) || (pRegions[i].extent.depth != 1)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-00278" : "VUID-vkCmdResolveImage-dstImage-00278";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) is 2D but pRegions[%u] dstOffset.z (%d) is not 0 or "
"extent.depth (%u) is not 1.",
func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i,
pRegions[i].dstOffset.z, pRegions[i].extent.depth);
}
}
// Each srcImage dimension offset + extent must fall within the image subresource extent
VkExtent3D subresource_extent = src_image_state->GetSubresourceExtent(src_subresource);
// MipLevel bound is checked already; adding extra errors against a "subresource extent of zero" would only
// confuse the developer
if (src_subresource.mipLevel < src_image_state->createInfo.mipLevels) {
uint32_t extent_check = ExceedsBounds(&(region.srcOffset), &(region.extent), &subresource_extent);
if ((extent_check & kXBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcOffset-00269" : "VUID-vkCmdResolveImage-srcOffset-00269";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) pRegions[%u] x-dimension offset [%1d] + extent [%u] "
"exceeds subResource width [%u].",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i, region.srcOffset.x,
region.extent.width, subresource_extent.width);
}
if ((extent_check & kYBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcOffset-00270" : "VUID-vkCmdResolveImage-srcOffset-00270";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) pRegions[%u] y-dimension offset [%1d] + extent [%u] "
"exceeds subResource height [%u].",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i, region.srcOffset.y,
region.extent.height, subresource_extent.height);
}
if ((extent_check & kZBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(src_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcOffset-00272" : "VUID-vkCmdResolveImage-srcOffset-00272";
skip |= LogError(objlist, vuid,
"%s: srcImage (%s) pRegions[%u] z-dimension offset [%1d] + extent [%u] "
"exceeds subResource depth [%u].",
func_name, report_data->FormatHandle(src_image_state->image()).c_str(), i, region.srcOffset.z,
region.extent.depth, subresource_extent.depth);
}
}
// Each dstImage dimension offset + extent must fall within the image subresource extent
subresource_extent = dst_image_state->GetSubresourceExtent(dst_subresource);
// MipLevel bound is checked already; adding extra errors against a "subresource extent of zero" would only
// confuse the developer
if (dst_subresource.mipLevel < dst_image_state->createInfo.mipLevels) {
uint32_t extent_check = ExceedsBounds(&(region.dstOffset), &(region.extent), &subresource_extent);
if ((extent_check & kXBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstOffset-00274" : "VUID-vkCmdResolveImage-dstOffset-00274";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) pRegions[%u] x-dimension offset [%1d] + extent [%u] "
"exceeds subResource width [%u].",
func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i, region.dstOffset.x,
region.extent.width, subresource_extent.width);
}
if ((extent_check & kYBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstOffset-00275" : "VUID-vkCmdResolveImage-dstOffset-00275";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) pRegions[%u] y-dimension offset [%1d] + extent [%u] "
"exceeds subResource height [%u].",
func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i, region.dstOffset.y,
region.extent.height, subresource_extent.height);
}
if ((extent_check & kZBit) != 0) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(dst_image_state->image());
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstOffset-00277" : "VUID-vkCmdResolveImage-dstOffset-00277";
skip |= LogError(objlist, vuid,
"%s: dstImage (%s) pRegions[%u] z-dimension offset [%1d] + extent [%u] "
"exceeds subResource depth [%u].",
func_name, report_data->FormatHandle(dst_image_state->image()).c_str(), i, region.dstOffset.z,
region.extent.depth, subresource_extent.depth);
}
}
}
if (src_image_state->createInfo.format != dst_image_state->createInfo.format) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-01386" : "VUID-vkCmdResolveImage-srcImage-01386";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s: srcImage format (%s) and dstImage format (%s) are not the same.",
func_name, string_VkFormat(src_image_state->createInfo.format),
string_VkFormat(dst_image_state->createInfo.format));
}
if (src_image_state->createInfo.imageType != dst_image_state->createInfo.imageType) {
skip |= LogWarning(cb_node->commandBuffer(), kVUID_Core_DrawState_MismatchedImageType,
"%s: srcImage type (%s) and dstImage type (%s) are not the same.", func_name,
string_VkImageType(src_image_state->createInfo.imageType),
string_VkImageType(dst_image_state->createInfo.imageType));
}
if (src_image_state->createInfo.samples == VK_SAMPLE_COUNT_1_BIT) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-srcImage-00257" : "VUID-vkCmdResolveImage-srcImage-00257";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s: srcImage sample count is VK_SAMPLE_COUNT_1_BIT.", func_name);
}
if (dst_image_state->createInfo.samples != VK_SAMPLE_COUNT_1_BIT) {
vuid = is_2khr ? "VUID-VkResolveImageInfo2KHR-dstImage-00259" : "VUID-vkCmdResolveImage-dstImage-00259";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s: dstImage sample count (%s) is not VK_SAMPLE_COUNT_1_BIT.",
func_name, string_VkSampleCountFlagBits(dst_image_state->createInfo.samples));
}
} else {
assert(0);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdResolveImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageResolve *pRegions) const {
return ValidateCmdResolveImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdResolveImage2KHR(VkCommandBuffer commandBuffer,
const VkResolveImageInfo2KHR *pResolveImageInfo) const {
return ValidateCmdResolveImage(commandBuffer, pResolveImageInfo->srcImage, pResolveImageInfo->srcImageLayout,
pResolveImageInfo->dstImage, pResolveImageInfo->dstImageLayout, pResolveImageInfo->regionCount,
pResolveImageInfo->pRegions, COPY_COMMAND_VERSION_2);
}
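// Both resolve entry points above funnel into the ValidateCmdResolveImage template: 'version' selects between
// the core VUIDs and the VK_KHR_copy_commands2 VUIDs, and RegionType is deduced as VkImageResolve or
// VkImageResolve2KHR accordingly.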
template <typename RegionType>
bool CoreChecks::ValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const RegionType *pRegions, VkFilter filter, CopyCommandVersion version) const {
const auto *cb_node = GetCBState(commandBuffer);
const auto *src_image_state = GetImageState(srcImage);
const auto *dst_image_state = GetImageState(dstImage);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdBlitImage2KHR()" : "vkCmdBlitImage()";
const CMD_TYPE cmd_type = is_2khr ? CMD_BLITIMAGE2KHR : CMD_BLITIMAGE;
bool skip = false;
if (cb_node) {
skip |= ValidateCmd(cb_node, cmd_type, func_name);
}
if (cb_node && src_image_state && dst_image_state) {
const char *vuid;
const char *location;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00233" : "VUID-vkCmdBlitImage-srcImage-00233";
location = is_2khr ? "vkCmdBlitImage2KHR(): pBlitImageInfo->srcImage" : "vkCmdBlitImage(): srcImage";
skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, location, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00234" : "VUID-vkCmdBlitImage-dstImage-00234";
location = is_2khr ? "vkCmdBlitImage2KHR(): pBlitImageInfo->dstImage" : "vkCmdBlitImage(): dstImage";
skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, location, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00220" : "VUID-vkCmdBlitImage-srcImage-00220";
skip |= ValidateMemoryIsBoundToImage(src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00225" : "VUID-vkCmdBlitImage-dstImage-00225";
skip |= ValidateMemoryIsBoundToImage(dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00219" : "VUID-vkCmdBlitImage-srcImage-00219";
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00224" : "VUID-vkCmdBlitImage-dstImage-00224";
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_node, cmd_type, func_name);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-01999" : "VUID-vkCmdBlitImage-srcImage-01999";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_BLIT_SRC_BIT, func_name, vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-02000" : "VUID-vkCmdBlitImage-dstImage-02000";
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_BLIT_DST_BIT, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdBlitImage2KHR-commandBuffer-01834" : "VUID-vkCmdBlitImage-commandBuffer-01834";
skip |= ValidateProtectedImage(cb_node, src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdBlitImage2KHR-commandBuffer-01835" : "VUID-vkCmdBlitImage-commandBuffer-01835";
skip |= ValidateProtectedImage(cb_node, dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdBlitImage2KHR-commandBuffer-01836" : "VUID-vkCmdBlitImage-commandBuffer-01836";
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-02545" : "VUID-vkCmdBlitImage-dstImage-02545";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: srcImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-02545" : "VUID-vkCmdBlitImage-dstImage-02545";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: dstImage must not have been created with flags containing VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
// TODO: Need to validate image layouts, which will include layout validation for shared presentable images
VkFormat src_format = src_image_state->createInfo.format;
VkFormat dst_format = dst_image_state->createInfo.format;
VkImageType src_type = src_image_state->createInfo.imageType;
VkImageType dst_type = dst_image_state->createInfo.imageType;
if (VK_FILTER_LINEAR == filter) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-filter-02001" : "VUID-vkCmdBlitImage-filter-02001";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_LINEAR_BIT, func_name,
vuid);
} else if (VK_FILTER_CUBIC_IMG == filter) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-filter-02002" : "VUID-vkCmdBlitImage-filter-02002";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_SAMPLED_IMAGE_FILTER_CUBIC_BIT_IMG,
func_name, vuid);
}
if (FormatRequiresYcbcrConversion(src_format)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-01561" : "VUID-vkCmdBlitImage-srcImage-01561";
skip |= LogError(device, vuid,
"%s: srcImage format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
func_name, string_VkFormat(src_format));
}
if (FormatRequiresYcbcrConversion(dst_format)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-01562" : "VUID-vkCmdBlitImage-dstImage-01562";
skip |= LogError(device, vuid,
"%s: dstImage format (%s) must not be one of the formats requiring sampler YCBCR "
"conversion for VK_IMAGE_ASPECT_COLOR_BIT image views",
func_name, string_VkFormat(dst_format));
}
if ((VK_FILTER_CUBIC_IMG == filter) && (VK_IMAGE_TYPE_3D != src_type)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-filter-00237" : "VUID-vkCmdBlitImage-filter-00237";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: source image type must be VK_IMAGE_TYPE_3D when cubic filtering is specified.", func_name);
}
// Validate consistency for unsigned formats
if (FormatIsUInt(src_format) != FormatIsUInt(dst_format)) {
std::stringstream ss;
ss << func_name << ": If one of srcImage and dstImage images has unsigned integer format, "
<< "the other one must also have unsigned integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00230" : "VUID-vkCmdBlitImage-srcImage-00230";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s.", ss.str().c_str());
}
// Validate consistency for signed formats
if (FormatIsSInt(src_format) != FormatIsSInt(dst_format)) {
std::stringstream ss;
ss << func_name << ": If one of srcImage and dstImage images has signed integer format, "
<< "the other one must also have signed integer format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is " << string_VkFormat(dst_format);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00229" : "VUID-vkCmdBlitImage-srcImage-00229";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s.", ss.str().c_str());
}
// Validate filter for Depth/Stencil formats
if (FormatIsDepthOrStencil(src_format) && (filter != VK_FILTER_NEAREST)) {
std::stringstream ss;
ss << func_name << ": If the format of srcImage is a depth, stencil, or depth stencil "
<< "then filter must be VK_FILTER_NEAREST.";
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00232" : "VUID-vkCmdBlitImage-srcImage-00232";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s.", ss.str().c_str());
}
// Validate aspect bits and formats for depth/stencil images
if (FormatIsDepthOrStencil(src_format) || FormatIsDepthOrStencil(dst_format)) {
if (src_format != dst_format) {
std::stringstream ss;
ss << func_name << ": If one of srcImage and dstImage images has a format of depth, stencil or depth "
<< "stencil, the other one must have exactly the same format. "
<< "Source format is " << string_VkFormat(src_format) << " Destination format is "
<< string_VkFormat(dst_format);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00231" : "VUID-vkCmdBlitImage-srcImage-00231";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s.", ss.str().c_str());
}
} // Depth or Stencil
// Do per-region checks
const char *invalid_src_layout_vuid =
is_2khr ? ((src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkBlitImageInfo2KHR-srcImageLayout-01398"
: "VUID-VkBlitImageInfo2KHR-srcImageLayout-00222")
: ((src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-srcImageLayout-01398"
: "VUID-vkCmdBlitImage-srcImageLayout-00222");
const char *invalid_dst_layout_vuid =
is_2khr ? ((dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-VkBlitImageInfo2KHR-dstImageLayout-01399"
: "VUID-VkBlitImageInfo2KHR-dstImageLayout-00227")
: ((dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? "VUID-vkCmdBlitImage-dstImageLayout-01399"
: "VUID-vkCmdBlitImage-dstImageLayout-00227");
for (uint32_t i = 0; i < regionCount; i++) {
const RegionType rgn = pRegions[i];
bool hit_error = false;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImageLayout-00221" : "VUID-vkCmdBlitImage-srcImageLayout-00221";
skip |= VerifyImageLayout(cb_node, src_image_state, rgn.srcSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, func_name, invalid_src_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImageLayout-00226" : "VUID-vkCmdBlitImage-dstImageLayout-00226";
skip |= VerifyImageLayout(cb_node, dst_image_state, rgn.dstSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, func_name, invalid_dst_layout_vuid, vuid, &hit_error);
skip |= ValidateImageSubresourceLayers(cb_node, &rgn.srcSubresource, func_name, "srcSubresource", i);
skip |= ValidateImageSubresourceLayers(cb_node, &rgn.dstSubresource, func_name, "dstSubresource", i);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcSubresource-01705" : "VUID-vkCmdBlitImage-srcSubresource-01705";
skip |=
ValidateImageMipLevel(cb_node, src_image_state, rgn.srcSubresource.mipLevel, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstSubresource-01706" : "VUID-vkCmdBlitImage-dstSubresource-01706";
skip |=
ValidateImageMipLevel(cb_node, dst_image_state, rgn.dstSubresource.mipLevel, i, func_name, "dstSubresource", vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcSubresource-01707" : "VUID-vkCmdBlitImage-srcSubresource-01707";
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, rgn.srcSubresource.baseArrayLayer,
rgn.srcSubresource.layerCount, i, func_name, "srcSubresource", vuid);
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstSubresource-01708" : "VUID-vkCmdBlitImage-dstSubresource-01708";
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, rgn.dstSubresource.baseArrayLayer,
rgn.dstSubresource.layerCount, i, func_name, "dstSubresource", vuid);
// Warn for zero-sized regions
if ((rgn.srcOffsets[0].x == rgn.srcOffsets[1].x) || (rgn.srcOffsets[0].y == rgn.srcOffsets[1].y) ||
(rgn.srcOffsets[0].z == rgn.srcOffsets[1].z)) {
std::stringstream ss;
ss << func_name << ": pRegions[" << i << "].srcOffsets specify a zero-volume area.";
skip |= LogWarning(cb_node->commandBuffer(), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
if ((rgn.dstOffsets[0].x == rgn.dstOffsets[1].x) || (rgn.dstOffsets[0].y == rgn.dstOffsets[1].y) ||
(rgn.dstOffsets[0].z == rgn.dstOffsets[1].z)) {
std::stringstream ss;
ss << func_name << ": pRegions[" << i << "].dstOffsets specify a zero-volume area.";
skip |= LogWarning(cb_node->commandBuffer(), kVUID_Core_DrawState_InvalidExtents, "%s", ss.str().c_str());
}
// Check that src/dst layer counts match
if (rgn.srcSubresource.layerCount != rgn.dstSubresource.layerCount) {
vuid = is_2khr ? "VUID-VkImageBlit2KHR-layerCount-00239" : "VUID-VkImageBlit-layerCount-00239";
skip |=
LogError(cb_node->commandBuffer(), vuid,
"%s: layerCount in source and destination subresource of pRegions[%d] does not match.", func_name, i);
}
if (rgn.srcSubresource.aspectMask != rgn.dstSubresource.aspectMask) {
vuid = is_2khr ? "VUID-VkImageBlit2KHR-aspectMask-00238" : "VUID-VkImageBlit-aspectMask-00238";
skip |=
LogError(cb_node->commandBuffer(), vuid, "%s: aspectMask members for pRegion[%d] do not match.", func_name, i);
}
if (!VerifyAspectsPresent(rgn.srcSubresource.aspectMask, src_format)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-aspectMask-00241" : "VUID-vkCmdBlitImage-aspectMask-00241";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] source aspectMask (0x%x) specifies aspects not present in source "
"image format %s.",
func_name, i, rgn.srcSubresource.aspectMask, string_VkFormat(src_format));
}
if (!VerifyAspectsPresent(rgn.dstSubresource.aspectMask, dst_format)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-aspectMask-00242" : "VUID-vkCmdBlitImage-aspectMask-00242";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] dest aspectMask (0x%x) specifies aspects not present in dest image format %s.",
func_name, i, rgn.dstSubresource.aspectMask, string_VkFormat(dst_format));
}
// Validate source image offsets
VkExtent3D src_extent = src_image_state->GetSubresourceExtent(rgn.srcSubresource);
if (VK_IMAGE_TYPE_1D == src_type) {
if ((0 != rgn.srcOffsets[0].y) || (1 != rgn.srcOffsets[1].y)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00245" : "VUID-vkCmdBlitImage-srcImage-00245";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d], source image of type VK_IMAGE_TYPE_1D with srcOffset[].y values "
"of (%1d, %1d). These must be (0, 1).",
func_name, i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == src_type) || (VK_IMAGE_TYPE_2D == src_type)) {
if ((0 != rgn.srcOffsets[0].z) || (1 != rgn.srcOffsets[1].z)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00247" : "VUID-vkCmdBlitImage-srcImage-00247";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d], source image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"srcOffset[].z values of (%1d, %1d). These must be (0, 1).",
func_name, i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z);
}
}
bool oob = false;
if ((rgn.srcOffsets[0].x < 0) || (rgn.srcOffsets[0].x > static_cast<int32_t>(src_extent.width)) ||
(rgn.srcOffsets[1].x < 0) || (rgn.srcOffsets[1].x > static_cast<int32_t>(src_extent.width))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcOffset-00243" : "VUID-vkCmdBlitImage-srcOffset-00243";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] srcOffset[].x values (%1d, %1d) exceed srcSubresource width extent (%1d).",
func_name, i, rgn.srcOffsets[0].x, rgn.srcOffsets[1].x, src_extent.width);
}
if ((rgn.srcOffsets[0].y < 0) || (rgn.srcOffsets[0].y > static_cast<int32_t>(src_extent.height)) ||
(rgn.srcOffsets[1].y < 0) || (rgn.srcOffsets[1].y > static_cast<int32_t>(src_extent.height))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcOffset-00244" : "VUID-vkCmdBlitImage-srcOffset-00244";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] srcOffset[].y values (%1d, %1d) exceed srcSubresource height extent (%1d).",
func_name, i, rgn.srcOffsets[0].y, rgn.srcOffsets[1].y, src_extent.height);
}
if ((rgn.srcOffsets[0].z < 0) || (rgn.srcOffsets[0].z > static_cast<int32_t>(src_extent.depth)) ||
(rgn.srcOffsets[1].z < 0) || (rgn.srcOffsets[1].z > static_cast<int32_t>(src_extent.depth))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcOffset-00246" : "VUID-vkCmdBlitImage-srcOffset-00246";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] srcOffset[].z values (%1d, %1d) exceed srcSubresource depth extent (%1d).",
func_name, i, rgn.srcOffsets[0].z, rgn.srcOffsets[1].z, src_extent.depth);
}
if (oob) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-pRegions-00215" : "VUID-vkCmdBlitImage-pRegions-00215";
skip |= LogError(cb_node->commandBuffer(), vuid, "%s: region [%d] source image blit region exceeds image dimensions.",
func_name, i);
}
// Validate dest image offsets
VkExtent3D dst_extent = dst_image_state->GetSubresourceExtent(rgn.dstSubresource);
if (VK_IMAGE_TYPE_1D == dst_type) {
if ((0 != rgn.dstOffsets[0].y) || (1 != rgn.dstOffsets[1].y)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00250" : "VUID-vkCmdBlitImage-dstImage-00250";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d], dest image of type VK_IMAGE_TYPE_1D with dstOffset[].y values of "
"(%1d, %1d). These must be (0, 1).",
func_name, i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y);
}
}
if ((VK_IMAGE_TYPE_1D == dst_type) || (VK_IMAGE_TYPE_2D == dst_type)) {
if ((0 != rgn.dstOffsets[0].z) || (1 != rgn.dstOffsets[1].z)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstImage-00252" : "VUID-vkCmdBlitImage-dstImage-00252";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d], dest image of type VK_IMAGE_TYPE_1D or VK_IMAGE_TYPE_2D with "
"dstOffset[].z values of (%1d, %1d). These must be (0, 1).",
func_name, i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z);
}
}
oob = false;
if ((rgn.dstOffsets[0].x < 0) || (rgn.dstOffsets[0].x > static_cast<int32_t>(dst_extent.width)) ||
(rgn.dstOffsets[1].x < 0) || (rgn.dstOffsets[1].x > static_cast<int32_t>(dst_extent.width))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstOffset-00248" : "VUID-vkCmdBlitImage-dstOffset-00248";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] dstOffset[].x values (%1d, %1d) exceed dstSubresource width extent (%1d).",
func_name, i, rgn.dstOffsets[0].x, rgn.dstOffsets[1].x, dst_extent.width);
}
if ((rgn.dstOffsets[0].y < 0) || (rgn.dstOffsets[0].y > static_cast<int32_t>(dst_extent.height)) ||
(rgn.dstOffsets[1].y < 0) || (rgn.dstOffsets[1].y > static_cast<int32_t>(dst_extent.height))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstOffset-00249" : "VUID-vkCmdBlitImage-dstOffset-00249";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] dstOffset[].y values (%1d, %1d) exceed dstSubresource height extent (%1d).",
func_name, i, rgn.dstOffsets[0].y, rgn.dstOffsets[1].y, dst_extent.height);
}
if ((rgn.dstOffsets[0].z < 0) || (rgn.dstOffsets[0].z > static_cast<int32_t>(dst_extent.depth)) ||
(rgn.dstOffsets[1].z < 0) || (rgn.dstOffsets[1].z > static_cast<int32_t>(dst_extent.depth))) {
oob = true;
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-dstOffset-00251" : "VUID-vkCmdBlitImage-dstOffset-00251";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] dstOffset[].z values (%1d, %1d) exceed dstSubresource depth extent (%1d).",
func_name, i, rgn.dstOffsets[0].z, rgn.dstOffsets[1].z, dst_extent.depth);
}
if (oob) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-pRegions-00216" : "VUID-vkCmdBlitImage-pRegions-00216";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] destination image blit region exceeds image dimensions.", func_name, i);
}
if ((VK_IMAGE_TYPE_3D == src_type) || (VK_IMAGE_TYPE_3D == dst_type)) {
if ((0 != rgn.srcSubresource.baseArrayLayer) || (1 != rgn.srcSubresource.layerCount) ||
(0 != rgn.dstSubresource.baseArrayLayer) || (1 != rgn.dstSubresource.layerCount)) {
vuid = is_2khr ? "VUID-VkBlitImageInfo2KHR-srcImage-00240" : "VUID-vkCmdBlitImage-srcImage-00240";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: region [%d] blit to/from a 3D image type with a non-zero baseArrayLayer, or a "
"layerCount other than 1.",
func_name, i);
}
}
} // per-region checks
} else {
assert(0);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) const {
return ValidateCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) const {
return ValidateCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
pBlitImageInfo->filter, COPY_COMMAND_VERSION_2);
}
template <typename RegionType>
void CoreChecks::RecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
VkFilter filter) {
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
auto dst_image_state = GetImageState(dstImage);
// Make sure that all image slices are updated to correct layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].srcSubresource, srcImageLayout);
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].dstSubresource, dstImageLayout);
}
}
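// Recording only seeds the expected initial layout of each blitted subresource; the actual transitions remain
// the application's responsibility (via barriers), and any mismatch is reported later by
// ValidateCmdBufImageLayouts at submit time.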
void CoreChecks::PreCallRecordCmdBlitImage(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkImage dstImage, VkImageLayout dstImageLayout, uint32_t regionCount,
const VkImageBlit *pRegions, VkFilter filter) {
StateTracker::PreCallRecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount,
pRegions, filter);
RecordCmdBlitImage(commandBuffer, srcImage, srcImageLayout, dstImage, dstImageLayout, regionCount, pRegions, filter);
}
void CoreChecks::PreCallRecordCmdBlitImage2KHR(VkCommandBuffer commandBuffer, const VkBlitImageInfo2KHR *pBlitImageInfo) {
StateTracker::PreCallRecordCmdBlitImage2KHR(commandBuffer, pBlitImageInfo);
RecordCmdBlitImage(commandBuffer, pBlitImageInfo->srcImage, pBlitImageInfo->srcImageLayout, pBlitImageInfo->dstImage,
pBlitImageInfo->dstImageLayout, pBlitImageInfo->regionCount, pBlitImageInfo->pRegions,
pBlitImageInfo->filter);
}
GlobalImageLayoutRangeMap *GetLayoutRangeMap(GlobalImageLayoutMap &map, const IMAGE_STATE &image_state) {
// This approach allows a find-or-create of the entry with a single hash lookup
auto &layout_map = map[image_state.image()];
if (!layout_map) {
layout_map.emplace(image_state.subresource_encoder.SubresourceCount());
}
return &layout_map;
}
const GlobalImageLayoutRangeMap *GetLayoutRangeMap(const GlobalImageLayoutMap &map, VkImage image) {
auto it = map.find(image);
if (it != map.end()) {
return &it->second;
}
return nullptr;
}
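// The two overloads intentionally differ: the non-const form find-or-creates an entry (writers always need
// one), while the const form returns nullptr on a miss so readers can substitute an empty map (see
// ValidateCmdBufImageLayouts below).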
// Helper to update the Global or Overlay layout map
struct GlobalLayoutUpdater {
bool update(VkImageLayout &dst, const image_layout_map::ImageSubresourceLayoutMap::LayoutEntry &src) const {
if (src.current_layout != image_layout_map::kInvalidLayout && dst != src.current_layout) {
dst = src.current_layout;
return true;
}
return false;
}
layer_data::optional<VkImageLayout> insert(const image_layout_map::ImageSubresourceLayoutMap::LayoutEntry &src) const {
layer_data::optional<VkImageLayout> result;
if (src.current_layout != image_layout_map::kInvalidLayout) {
result.emplace(src.current_layout);
}
return result;
}
};
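// Sketch of how sparse_container::splice consumes this policy object (illustrative): for each source range it
// calls update() where the destination already has an entry, overwriting a stale layout with
// src.current_layout, and insert() where it does not, creating an entry only when the source layout is valid.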
// This validates that the initial layout specified in the command buffer for the IMAGE is the same as the global IMAGE layout
bool CoreChecks::ValidateCmdBufImageLayouts(const Location &loc, const CMD_BUFFER_STATE *pCB,
const GlobalImageLayoutMap &globalImageLayoutMap,
GlobalImageLayoutMap &overlayLayoutMap) const {
if (disabled[image_layout_validation]) return false;
bool skip = false;
// Iterate over the layout maps for each referenced image
GlobalImageLayoutRangeMap empty_map(1);
for (const auto &layout_map_entry : pCB->image_layout_map) {
const auto image = layout_map_entry.first;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't check layouts of a dead image
const auto &subres_map = layout_map_entry.second;
const auto &layout_map = subres_map->GetLayoutMap();
// Validate the initial_uses for each subresource referenced
if (layout_map.empty()) continue;
auto *overlay_map = GetLayoutRangeMap(overlayLayoutMap, *image_state);
const auto *global_map = GetLayoutRangeMap(globalImageLayoutMap, image);
if (global_map == nullptr) {
global_map = &empty_map;
}
// Note: don't know if it would matter
// if (global_map->empty() && overlay_map->empty()) // skip this next loop...;
auto pos = layout_map.begin();
const auto end = layout_map.end();
sparse_container::parallel_iterator<const GlobalImageLayoutRangeMap> current_layout(*overlay_map, *global_map,
pos->first.begin);
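// Walk the recorded initial-layout ranges against the overlay map first (layouts settled by earlier command
// buffers in this validation pass) and fall back to the global map; overlay entries win where both have data.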
while (pos != end) {
VkImageLayout initial_layout = pos->second.initial_layout;
assert(initial_layout != image_layout_map::kInvalidLayout);
if (initial_layout == image_layout_map::kInvalidLayout) {
// Defensive: unreachable per the assert above; advance instead of spinning forever in release builds
++pos;
if (pos != end) current_layout.seek(pos->first.begin);
continue;
}
VkImageLayout image_layout = kInvalidLayout;
if (current_layout->range.empty()) break; // When we are past the end of data in overlay and global... stop looking
if (current_layout->pos_A->valid) { // pos_A denotes the overlay map in the parallel iterator
image_layout = current_layout->pos_A->lower_bound->second;
} else if (current_layout->pos_B->valid) { // pos_B denotes the global map in the parallel iterator
image_layout = current_layout->pos_B->lower_bound->second;
}
const auto intersected_range = pos->first & current_layout->range;
if (initial_layout == VK_IMAGE_LAYOUT_UNDEFINED) {
// TODO: Set memory invalid which is in mem_tracker currently
} else if (image_layout != initial_layout) {
const auto aspect_mask = image_state->subresource_encoder.Decode(intersected_range.begin).aspectMask;
bool matches = ImageLayoutMatches(aspect_mask, image_layout, initial_layout);
if (!matches) {
// We can report all the errors for the intersected range directly
for (auto index : sparse_container::range_view<decltype(intersected_range)>(intersected_range)) {
const auto subresource = image_state->subresource_encoder.Decode(index);
skip |= LogError(
pCB->commandBuffer(), kVUID_Core_DrawState_InvalidImageLayout,
"%s command buffer %s expects %s (subresource: aspectMask 0x%X array layer %u, mip level %u) "
"to be in layout %s--instead, current layout is %s.",
loc.Message().c_str(), report_data->FormatHandle(pCB->commandBuffer()).c_str(),
report_data->FormatHandle(image).c_str(), subresource.aspectMask, subresource.arrayLayer,
subresource.mipLevel, string_VkImageLayout(initial_layout), string_VkImageLayout(image_layout));
}
}
}
if (pos->first.includes(intersected_range.end)) {
current_layout.seek(intersected_range.end);
} else {
++pos;
if (pos != end) {
current_layout.seek(pos->first.begin);
}
}
}
// Update all layout set operations (which will be a subset of the initial_layouts)
sparse_container::splice(*overlay_map, subres_map->GetLayoutMap(), GlobalLayoutUpdater());
}
return skip;
}
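// Note: overlayLayoutMap accumulates across the command buffers of a submission (via the splice above), so
// each command buffer is checked against the layouts produced by the ones validated before it, not just the
// device-global state.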
void CoreChecks::UpdateCmdBufImageLayouts(CMD_BUFFER_STATE *pCB) {
for (const auto &layout_map_entry : pCB->image_layout_map) {
const auto image = layout_map_entry.first;
const auto &subres_map = layout_map_entry.second;
const auto *image_state = GetImageState(image);
if (!image_state) continue; // Can't set layouts of a dead image
auto *global_map = GetLayoutRangeMap(imageLayoutMap, *image_state);
sparse_container::splice(*global_map, subres_map->GetLayoutMap(), GlobalLayoutUpdater());
}
}
// ValidateLayoutVsAttachmentDescription is a general function where we can validate various state associated with the
// VkAttachmentDescription structs that are used by the sub-passes of a renderpass. Initial check is to make sure that READ_ONLY
// layout attachments don't have CLEAR as their loadOp.
bool CoreChecks::ValidateLayoutVsAttachmentDescription(const debug_report_data *report_data, RenderPassCreateVersion rp_version,
const VkImageLayout first_layout, const uint32_t attachment,
const VkAttachmentDescription2 &attachment_description) const {
bool skip = false;
const bool use_rp2 = (rp_version == RENDER_PASS_VERSION_2);
// Verify that initial loadOp on READ_ONLY attachments is not CLEAR
// for both loadOp and stencilLoadOp, rp2 has it in 1 VU while rp1 has it in 2 VUs, with half behind the Maintenance2 extension
// The VUIDs are listed below in the following order: rp2 -> rp1 with Maintenance2 -> rp1 with no extension
if (attachment_description.loadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-02522",
"vkCreateRenderPass2(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && (device_extensions.vk_khr_maintenance2) &&
(first_layout == VK_IMAGE_LAYOUT_DEPTH_READ_ONLY_STENCIL_ATTACHMENT_OPTIMAL)) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-01566",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-00836",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
}
}
// Same as above for loadOp, but for stencilLoadOp
if (attachment_description.stencilLoadOp == VK_ATTACHMENT_LOAD_OP_CLEAR) {
if (use_rp2 && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo2-pAttachments-02523",
"vkCreateRenderPass2(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && (device_extensions.vk_khr_maintenance2) &&
(first_layout == VK_IMAGE_LAYOUT_DEPTH_ATTACHMENT_STENCIL_READ_ONLY_OPTIMAL)) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-01567",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
} else if ((use_rp2 == false) && ((first_layout == VK_IMAGE_LAYOUT_DEPTH_STENCIL_READ_ONLY_OPTIMAL) ||
(first_layout == VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL))) {
skip |= LogError(device, "VUID-VkRenderPassCreateInfo-pAttachments-02511",
"vkCreateRenderPass(): Cannot clear attachment %d with invalid first layout %s.", attachment,
string_VkImageLayout(first_layout));
}
}
return skip;
}
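// Example of a rejected combination (illustrative): a render pass whose attachment 0 uses
// loadOp = VK_ATTACHMENT_LOAD_OP_CLEAR while its first referenced layout is
// VK_IMAGE_LAYOUT_SHADER_READ_ONLY_OPTIMAL trips "VUID-VkRenderPassCreateInfo-pAttachments-00836" under
// vkCreateRenderPass (or 02522 under vkCreateRenderPass2).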
// Helper function to validate correct usage bits set for buffers or images. Verify that (actual & desired) flags != 0 or, if strict
// is true, verify that (actual & desired) flags == desired
template <typename T1>
bool CoreChecks::ValidateUsageFlags(VkFlags actual, VkFlags desired, VkBool32 strict, const T1 object,
const VulkanTypedHandle &typed_handle, const char *msgCode, char const *func_name,
char const *usage_str) const {
bool correct_usage = false;
bool skip = false;
const char *type_str = object_string[typed_handle.type];
if (strict) {
correct_usage = ((actual & desired) == desired);
} else {
correct_usage = ((actual & desired) != 0);
}
if (!correct_usage) {
// All callers should have a valid VUID
assert(msgCode != kVUIDUndefined);
skip =
LogError(object, msgCode, "Invalid usage flag for %s used by %s. In this case, %s should have %s set during creation.",
report_data->FormatHandle(typed_handle).c_str(), func_name, type_str, usage_str);
}
return skip;
}
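// Example (illustrative): with strict == true and desired == (VK_IMAGE_USAGE_TRANSFER_SRC_BIT |
// VK_IMAGE_USAGE_SAMPLED_BIT), 'actual' must contain both bits; with strict == false, either bit alone
// satisfies the check.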
// Helper function to validate usage flags for images. For the given image_state, send actual vs. desired usage off to the
// helper above, where an error will be flagged if usage is not correct
bool CoreChecks::ValidateImageUsageFlags(IMAGE_STATE const *image_state, VkFlags desired, bool strict, const char *msgCode,
char const *func_name, char const *usage_string) const {
return ValidateUsageFlags(image_state->createInfo.usage, desired, strict, image_state->image(),
image_state->Handle(), msgCode, func_name, usage_string);
}
bool CoreChecks::ValidateImageFormatFeatureFlags(IMAGE_STATE const *image_state, VkFormatFeatureFlags desired,
char const *func_name, const char *vuid) const {
bool skip = false;
const VkFormatFeatureFlags image_format_features = image_state->format_features;
if ((image_format_features & desired) != desired) {
// Same error, but more details if it was an AHB external format
if (image_state->HasAHBFormat()) {
skip |= LogError(image_state->image(), vuid,
"In %s, VkFormatFeatureFlags (0x%08X) does not support required feature %s for the external format "
"found in VkAndroidHardwareBufferFormatPropertiesANDROID::formatFeatures used by %s.",
func_name, image_format_features, string_VkFormatFeatureFlags(desired).c_str(),
report_data->FormatHandle(image_state->image()).c_str());
} else {
skip |= LogError(image_state->image(), vuid,
"In %s, VkFormatFeatureFlags (0x%08X) does not support required feature %s for format %u used by %s "
"with tiling %s.",
func_name, image_format_features, string_VkFormatFeatureFlags(desired).c_str(),
image_state->createInfo.format, report_data->FormatHandle(image_state->image()).c_str(),
string_VkImageTiling(image_state->createInfo.tiling));
}
}
return skip;
}
bool CoreChecks::ValidateImageSubresourceLayers(const CMD_BUFFER_STATE *cb_node, const VkImageSubresourceLayers *subresource_layers,
char const *func_name, char const *member, uint32_t i) const {
bool skip = false;
const VkImageAspectFlags aspect_mask = subresource_layers->aspectMask;
// layerCount must not be zero
if (subresource_layers->layerCount == 0) {
skip |= LogError(cb_node->commandBuffer(), "VUID-VkImageSubresourceLayers-layerCount-01700",
"In %s, pRegions[%u].%s.layerCount must not be zero.", func_name, i, member);
}
// aspectMask must not contain VK_IMAGE_ASPECT_METADATA_BIT
if (aspect_mask & VK_IMAGE_ASPECT_METADATA_BIT) {
skip |= LogError(cb_node->commandBuffer(), "VUID-VkImageSubresourceLayers-aspectMask-00168",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_METADATA_BIT set.", func_name, i, member);
}
// if aspectMask contains COLOR, it must not contain either DEPTH or STENCIL
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) && (aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT))) {
skip |= LogError(cb_node->commandBuffer(), "VUID-VkImageSubresourceLayers-aspectMask-00167",
"In %s, pRegions[%u].%s.aspectMask has VK_IMAGE_ASPECT_COLOR_BIT and either VK_IMAGE_ASPECT_DEPTH_BIT or "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, i, member);
}
// aspectMask must not contain VK_IMAGE_ASPECT_MEMORY_PLANE_i_BIT_EXT
if (aspect_mask & (VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT |
VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT | VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
skip |= LogError(cb_node->commandBuffer(), "VUID-VkImageSubresourceLayers-aspectMask-02247",
"In %s, pRegions[%u].%s.aspectMask has a VK_IMAGE_ASPECT_MEMORY_PLANE_*_BIT_EXT bit set.", func_name, i,
member);
}
return skip;
}
// Helper function to validate usage flags for buffers. For given buffer_state send actual vs. desired usage off to helper above
// where an error will be flagged if usage is not correct
bool CoreChecks::ValidateBufferUsageFlags(BUFFER_STATE const *buffer_state, VkFlags desired, bool strict, const char *msgCode,
char const *func_name, char const *usage_string) const {
return ValidateUsageFlags(buffer_state->createInfo.usage, desired, strict, buffer_state->buffer(),
buffer_state->Handle(), msgCode, func_name, usage_string);
}
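// Illustrative example (hypothetical values) of the range math validated below: for a view format of
// VK_FORMAT_R32G32B32A32_SFLOAT the element size is 16 bytes, so a range of 64 describes 4 texels and passes the
// multiple-of-element-size check, while a range of 60 fails it; the resulting texel count (64 / 16 = 4) must also not
// exceed VkPhysicalDeviceLimits::maxTexelBufferElements, and offset + range must fit within the buffer's size.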
bool CoreChecks::ValidateBufferViewRange(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo,
const VkPhysicalDeviceLimits *device_limits) const {
bool skip = false;
const VkDeviceSize &range = pCreateInfo->range;
if (range != VK_WHOLE_SIZE) {
// Range must be greater than 0
if (range == 0) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-range-00928",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be greater than 0.",
range);
}
// Range must be a multiple of the element size of format
const uint32_t format_size = FormatElementSize(pCreateInfo->format);
if (SafeModulo(range, format_size) != 0) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-range-00929",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range must be a multiple of the element size of the format "
"(%" PRIu32 ").",
range, format_size);
}
// Range divided by the element size of format must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements
if (SafeDivision(range, format_size) > device_limits->maxTexelBufferElements) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-range-00930",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, range divided by the element size of the format (%" PRIu32
") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
range, format_size, device_limits->maxTexelBufferElements);
}
// The sum of range and offset must be less than or equal to the size of buffer
if (range + pCreateInfo->offset > buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-offset-00931",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") does not equal VK_WHOLE_SIZE, the sum of offset (%" PRIuLEAST64
") and range must be less than or equal to the size of the buffer (%" PRIuLEAST64 ").",
range, pCreateInfo->offset, buffer_state->createInfo.size);
}
} else {
const uint32_t format_size = FormatElementSize(pCreateInfo->format);
// Size of buffer - offset, divided by the element size of format must be less than or equal to
// VkPhysicalDeviceLimits::maxTexelBufferElements
if (SafeDivision(buffer_state->createInfo.size - pCreateInfo->offset, format_size) >
device_limits->maxTexelBufferElements) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-range-04059",
"vkCreateBufferView(): If VkBufferViewCreateInfo range (%" PRIuLEAST64
") equals VK_WHOLE_SIZE, the buffer's size (%" PRIuLEAST64 ") minus the offset (%" PRIuLEAST64
"), divided by the element size of the format (%" PRIu32
") must be less than or equal to VkPhysicalDeviceLimits::maxTexelBufferElements (%" PRIuLEAST32 ").",
range, buffer_state->createInfo.size, pCreateInfo->offset, format_size,
device_limits->maxTexelBufferElements);
}
}
return skip;
}
bool CoreChecks::ValidateBufferViewBuffer(const BUFFER_STATE *buffer_state, const VkBufferViewCreateInfo *pCreateInfo) const {
bool skip = false;
const VkFormatProperties format_properties = GetPDFormatProperties(pCreateInfo->format);
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_UNIFORM_TEXEL_BUFFER_BIT)) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-buffer-00933",
"vkCreateBufferView(): If buffer was created with `usage` containing "
"VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, format (%s) must "
"be supported for uniform texel buffers",
string_VkFormat(pCreateInfo->format));
}
if ((buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) &&
!(format_properties.bufferFeatures & VK_FORMAT_FEATURE_STORAGE_TEXEL_BUFFER_BIT)) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-buffer-00934",
"vkCreateBufferView(): If buffer was created with `usage` containing "
"VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, format (%s) must "
"be supported for storage texel buffers",
string_VkFormat(pCreateInfo->format));
}
return skip;
}
bool CoreChecks::PreCallValidateCreateBuffer(VkDevice device, const VkBufferCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBuffer *pBuffer) const {
bool skip = false;
// TODO: Add check for "VUID-vkCreateBuffer-flags-00911" (sparse address space accounting)
auto chained_devaddr_struct = LvlFindInChain<VkBufferDeviceAddressCreateInfoEXT>(pCreateInfo->pNext);
if (chained_devaddr_struct) {
if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) &&
chained_devaddr_struct->deviceAddress != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-deviceAddress-02604",
"vkCreateBuffer(): Non-zero VkBufferDeviceAddressCreateInfoEXT::deviceAddress "
"requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT.");
}
}
auto chained_opaqueaddr_struct = LvlFindInChain<VkBufferOpaqueCaptureAddressCreateInfo>(pCreateInfo->pNext);
if (chained_opaqueaddr_struct) {
if (!(pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) &&
chained_opaqueaddr_struct->opaqueCaptureAddress != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-opaqueCaptureAddress-03337",
"vkCreateBuffer(): Non-zero VkBufferOpaqueCaptureAddressCreateInfo::opaqueCaptureAddress"
"requires VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT.");
}
}
auto dedicated_allocation_buffer = LvlFindInChain<VkDedicatedAllocationBufferCreateInfoNV>(pCreateInfo->pNext);
if (dedicated_allocation_buffer && dedicated_allocation_buffer->dedicatedAllocation == VK_TRUE) {
if (pCreateInfo->flags &
(VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_ALIASED_BIT)) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-pNext-01571",
"vkCreateBuffer(): pCreateInfos->flags must not include VK_BUFFER_CREATE_SPARSE_BINDING_BIT, "
"VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT, or VK_BUFFER_CREATE_SPARSE_ALIASED_BIT when "
"VkDedicatedAllocationBufferCreateInfoNV is in pNext chain with dedicatedAllocation VK_TRUE.");
}
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT) &&
!enabled_features.core12.bufferDeviceAddressCaptureReplay &&
!enabled_features.buffer_device_address_ext.bufferDeviceAddressCaptureReplay) {
skip |= LogError(
device, "VUID-VkBufferCreateInfo-flags-03338",
"vkCreateBuffer(): the bufferDeviceAddressCaptureReplay device feature is disabled: Buffers cannot be created with "
"the VK_BUFFER_CREATE_DEVICE_ADDRESS_CAPTURE_REPLAY_BIT set.");
}
if (pCreateInfo->sharingMode == VK_SHARING_MODE_CONCURRENT && pCreateInfo->pQueueFamilyIndices) {
const char *vuid = device_extensions.vk_khr_get_physical_device_properties2 ? "VUID-VkBufferCreateInfo-sharingMode-01419"
: "VUID-VkBufferCreateInfo-sharingMode-01391";
skip |= ValidatePhysicalDeviceQueueFamilies(pCreateInfo->queueFamilyIndexCount, pCreateInfo->pQueueFamilyIndices,
"vkCreateBuffer", "pCreateInfo->pQueueFamilyIndices", vuid);
}
if ((pCreateInfo->flags & VK_BUFFER_CREATE_PROTECTED_BIT) != 0) {
if (enabled_features.core11.protectedMemory == VK_FALSE) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-flags-01887",
"vkCreateBuffer(): the protectedMemory device feature is disabled: Buffers cannot be created with the "
"VK_BUFFER_CREATE_PROTECTED_BIT set.");
}
const VkBufferCreateFlags invalid_flags =
VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | VK_BUFFER_CREATE_SPARSE_ALIASED_BIT;
if ((pCreateInfo->flags & invalid_flags) != 0) {
skip |= LogError(device, "VUID-VkBufferCreateInfo-None-01888",
"vkCreateBuffer(): VK_BUFFER_CREATE_PROTECTED_BIT is set so no sparse create flags can be used at "
"same time (VK_BUFFER_CREATE_SPARSE_BINDING_BIT | VK_BUFFER_CREATE_SPARSE_RESIDENCY_BIT | "
"VK_BUFFER_CREATE_SPARSE_ALIASED_BIT).");
}
}
return skip;
}
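// A minimal sketch (hypothetical values) of a VkBufferViewCreateInfo that satisfies the checks below, assuming
// `buffer` was created with VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT and has memory bound:
//     VkBufferViewCreateInfo info = {VK_STRUCTURE_TYPE_BUFFER_VIEW_CREATE_INFO};
//     info.buffer = buffer;
//     info.format = VK_FORMAT_R32G32B32A32_SFLOAT;
//     info.offset = 0;              // must satisfy the texel buffer offset alignment rules validated below
//     info.range  = VK_WHOLE_SIZE;  // or an explicit, element-size-aligned size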
bool CoreChecks::PreCallValidateCreateBufferView(VkDevice device, const VkBufferViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkBufferView *pView) const {
bool skip = false;
const BUFFER_STATE *buffer_state = GetBufferState(pCreateInfo->buffer);
// If this isn't a sparse buffer, it needs to have memory backing it at CreateBufferView time
if (buffer_state) {
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCreateBufferView()", "VUID-VkBufferViewCreateInfo-buffer-00935");
// In order to create a valid buffer view, the buffer must have been created with at least one of the following flags:
// UNIFORM_TEXEL_BUFFER_BIT or STORAGE_TEXEL_BUFFER_BIT
skip |= ValidateBufferUsageFlags(buffer_state,
VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT | VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, false,
"VUID-VkBufferViewCreateInfo-buffer-00932", "vkCreateBufferView()",
"VK_BUFFER_USAGE_[STORAGE|UNIFORM]_TEXEL_BUFFER_BIT");
// Buffer view offset must be less than the size of buffer
if (pCreateInfo->offset >= buffer_state->createInfo.size) {
skip |= LogError(buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-offset-00925",
"vkCreateBufferView(): VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be less than the size of the buffer (%" PRIuLEAST64 ").",
pCreateInfo->offset, buffer_state->createInfo.size);
}
const VkPhysicalDeviceLimits *device_limits = &phys_dev_props.limits;
// Buffer view offset must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment
if ((pCreateInfo->offset % device_limits->minTexelBufferOffsetAlignment) != 0 &&
!enabled_features.texel_buffer_alignment_features.texelBufferAlignment) {
const char *vuid = device_extensions.vk_ext_texel_buffer_alignment ? "VUID-VkBufferViewCreateInfo-offset-02749"
: "VUID-VkBufferViewCreateInfo-offset-00926";
skip |= LogError(buffer_state->buffer(), vuid,
"vkCreateBufferView(): VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of VkPhysicalDeviceLimits::minTexelBufferOffsetAlignment (%" PRIuLEAST64 ").",
pCreateInfo->offset, device_limits->minTexelBufferOffsetAlignment);
}
if (enabled_features.texel_buffer_alignment_features.texelBufferAlignment) {
VkDeviceSize element_size = FormatElementSize(pCreateInfo->format);
if ((element_size % 3) == 0) {
element_size /= 3;
}
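// Per the texel buffer alignment rules, when the texel size is a multiple of three bytes the single-texel
// alignment is based on a single component rather than the whole texel. For example, VK_FORMAT_R8G8B8_UNORM
// has a 3-byte texel, so the division above yields its 1-byte component size.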
if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT) {
VkDeviceSize alignment_requirement =
phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes;
if (phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment) {
alignment_requirement = std::min(alignment_requirement, element_size);
}
if (SafeModulo(pCreateInfo->offset, alignment_requirement) != 0) {
skip |= LogError(
buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-buffer-02750",
"vkCreateBufferView(): If buffer was created with usage containing "
"VK_BUFFER_USAGE_STORAGE_TEXEL_BUFFER_BIT, "
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of the lesser of "
"VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64
") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::storageTexelBufferOffsetSingleTexelAlignment "
"(%" PRId32
") is VK_TRUE, the size of a texel of the requested format. "
"If the size of a texel is a multiple of three bytes, then the size of a "
"single component of format is used instead",
pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetAlignmentBytes,
phys_dev_ext_props.texel_buffer_alignment_props.storageTexelBufferOffsetSingleTexelAlignment);
}
}
if (buffer_state->createInfo.usage & VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT) {
VkDeviceSize alignment_requirement =
phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes;
if (phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment) {
alignment_requirement = std::min(alignment_requirement, element_size);
}
if (SafeModulo(pCreateInfo->offset, alignment_requirement) != 0) {
skip |= LogError(
buffer_state->buffer(), "VUID-VkBufferViewCreateInfo-buffer-02751",
"vkCreateBufferView(): If buffer was created with usage containing "
"VK_BUFFER_USAGE_UNIFORM_TEXEL_BUFFER_BIT, "
"VkBufferViewCreateInfo offset (%" PRIuLEAST64
") must be a multiple of the lesser of "
"VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetAlignmentBytes (%" PRIuLEAST64
") or, if VkPhysicalDeviceTexelBufferAlignmentPropertiesEXT::uniformTexelBufferOffsetSingleTexelAlignment "
"(%" PRId32
") is VK_TRUE, the size of a texel of the requested format. "
"If the size of a texel is a multiple of three bytes, then the size of a "
"single component of format is used instead",
pCreateInfo->offset, phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetAlignmentBytes,
phys_dev_ext_props.texel_buffer_alignment_props.uniformTexelBufferOffsetSingleTexelAlignment);
}
}
}
skip |= ValidateBufferViewRange(buffer_state, pCreateInfo, device_limits);
skip |= ValidateBufferViewBuffer(buffer_state, pCreateInfo);
}
return skip;
}
// For the given format verify that the aspect masks make sense
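// For example, VK_FORMAT_D24_UNORM_S8_UINT accepts VK_IMAGE_ASPECT_DEPTH_BIT, VK_IMAGE_ASPECT_STENCIL_BIT, or both,
// while a color format such as VK_FORMAT_R8G8B8A8_UNORM accepts only VK_IMAGE_ASPECT_COLOR_BIT.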
bool CoreChecks::ValidateImageAspectMask(VkImage image, VkFormat format, VkImageAspectFlags aspect_mask, const char *func_name,
const char *vuid) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(image);
// Checks color formats that are either single-plane or non-disjoint
// (if the ycbcr extension is not supported, single-plane and non-disjoint are always both true)
if ((FormatIsColor(format)) && ((FormatIsMultiplane(format) == false) || (image_state->disjoint == false))) {
if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= LogError(
image, vuid,
"%s: Using format (%s) with aspect flags (%s) but color image formats must have the VK_IMAGE_ASPECT_COLOR_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
} else if ((aspect_mask & VK_IMAGE_ASPECT_COLOR_BIT) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but color image formats must have ONLY the "
"VK_IMAGE_ASPECT_COLOR_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
} else if (FormatIsDepthAndStencil(format)) {
if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) == 0) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but depth/stencil image formats must have at least one "
"of VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
} else if ((aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but combination depth/stencil image formats can have "
"only the VK_IMAGE_ASPECT_DEPTH_BIT and VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
} else if (FormatIsDepthOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != VK_IMAGE_ASPECT_DEPTH_BIT) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but depth-only image formats must have the "
"VK_IMAGE_ASPECT_DEPTH_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
} else if ((aspect_mask & VK_IMAGE_ASPECT_DEPTH_BIT) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but depth-only image formats can have only the "
"VK_IMAGE_ASPECT_DEPTH_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
} else if (FormatIsStencilOnly(format)) {
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != VK_IMAGE_ASPECT_STENCIL_BIT) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but stencil-only image formats must have the "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
} else if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but stencil-only image formats can have only the "
"VK_IMAGE_ASPECT_STENCIL_BIT set.",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
} else if (FormatIsMultiplane(format)) {
VkImageAspectFlags valid_flags = VK_IMAGE_ASPECT_COLOR_BIT | VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT;
if (3 == FormatPlaneCount(format)) {
valid_flags = valid_flags | VK_IMAGE_ASPECT_PLANE_2_BIT;
}
if ((aspect_mask & valid_flags) != aspect_mask) {
skip |= LogError(image, vuid,
"%s: Using format (%s) with aspect flags (%s) but multi-plane image formats may have only "
"VK_IMAGE_ASPECT_COLOR_BIT or VK_IMAGE_ASPECT_PLANE_n_BITs set, where n = [0, 1, 2].",
func_name, string_VkFormat(format), string_VkImageAspectFlags(aspect_mask).c_str());
}
}
return skip;
}
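// Note on the VK_REMAINING_* sentinels handled below: a levelCount of VK_REMAINING_MIP_LEVELS (or a layerCount of
// VK_REMAINING_ARRAY_LAYERS) means "from the base index through the last one", so only the base index itself is
// range-checked; explicit counts must be non-zero and base + count must stay within the image's bounds.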
bool CoreChecks::ValidateImageSubresourceRange(const uint32_t image_mip_count, const uint32_t image_layer_count,
const VkImageSubresourceRange &subresourceRange, const char *cmd_name,
const char *param_name, const char *image_layer_count_var_name, const VkImage image,
const SubresourceRangeErrorCodes &errorCodes) const {
bool skip = false;
// Validate mip levels
if (subresourceRange.baseMipLevel >= image_mip_count) {
skip |= LogError(image, errorCodes.base_mip_err,
"%s: %s.baseMipLevel (= %" PRIu32
") is greater or equal to the mip level count of the image (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, image_mip_count);
}
if (subresourceRange.levelCount != VK_REMAINING_MIP_LEVELS) {
if (subresourceRange.levelCount == 0) {
skip |=
LogError(image, "VUID-VkImageSubresourceRange-levelCount-01720", "%s: %s.levelCount is 0.", cmd_name, param_name);
} else {
const uint64_t necessary_mip_count = uint64_t{subresourceRange.baseMipLevel} + uint64_t{subresourceRange.levelCount};
if (necessary_mip_count > image_mip_count) {
skip |= LogError(image, errorCodes.mip_count_err,
"%s: %s.baseMipLevel + .levelCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the mip level count of the image (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseMipLevel, subresourceRange.levelCount,
necessary_mip_count, image_mip_count);
}
}
}
// Validate array layers
if (subresourceRange.baseArrayLayer >= image_layer_count) {
skip |= LogError(image, errorCodes.base_layer_err,
"%s: %s.baseArrayLayer (= %" PRIu32
") is greater or equal to the %s of the image when it was created (i.e. greater or equal to %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, image_layer_count_var_name, image_layer_count);
}
if (subresourceRange.layerCount != VK_REMAINING_ARRAY_LAYERS) {
if (subresourceRange.layerCount == 0) {
skip |=
LogError(image, "VUID-VkImageSubresourceRange-layerCount-01721", "%s: %s.layerCount is 0.", cmd_name, param_name);
} else {
const uint64_t necessary_layer_count =
uint64_t{subresourceRange.baseArrayLayer} + uint64_t{subresourceRange.layerCount};
if (necessary_layer_count > image_layer_count) {
skip |= LogError(image, errorCodes.layer_count_err,
"%s: %s.baseArrayLayer + .layerCount (= %" PRIu32 " + %" PRIu32 " = %" PRIu64
") is greater than the %s of the image when it was created (i.e. greater than %" PRIu32 ").",
cmd_name, param_name, subresourceRange.baseArrayLayer, subresourceRange.layerCount,
necessary_layer_count, image_layer_count_var_name, image_layer_count);
}
}
}
return skip;
}
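// With VK_KHR_maintenance1, a 3D image created with VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT may be viewed as
// 2D/2D_ARRAY; in that case the "layers" available to the view come from the subresource's extent.depth rather than
// arrayLayers (e.g. a 3D image with depth 8 can back a 2D array view of up to 8 layers).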
bool CoreChecks::ValidateCreateImageViewSubresourceRange(const IMAGE_STATE *image_state, bool is_imageview_2d_type,
const VkImageSubresourceRange &subresourceRange) const {
bool is_khr_maintenance1 = IsExtEnabled(device_extensions.vk_khr_maintenance1);
bool is_image_slicable = image_state->createInfo.imageType == VK_IMAGE_TYPE_3D &&
(image_state->createInfo.flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT);
bool is_3_d_to_2_d_map = is_khr_maintenance1 && is_image_slicable && is_imageview_2d_type;
uint32_t image_layer_count;
if (is_3_d_to_2_d_map) {
const auto layers = LayersFromRange(subresourceRange);
const auto extent = image_state->GetSubresourceExtent(layers);
image_layer_count = extent.depth;
} else {
image_layer_count = image_state->createInfo.arrayLayers;
}
const auto image_layer_count_var_name = is_3_d_to_2_d_map ? "extent.depth" : "arrayLayers";
SubresourceRangeErrorCodes subresource_range_error_codes = {};
subresource_range_error_codes.base_mip_err = "VUID-VkImageViewCreateInfo-subresourceRange-01478";
subresource_range_error_codes.mip_count_err = "VUID-VkImageViewCreateInfo-subresourceRange-01718";
subresource_range_error_codes.base_layer_err =
is_khr_maintenance1
? (is_3_d_to_2_d_map ? "VUID-VkImageViewCreateInfo-image-02724" : "VUID-VkImageViewCreateInfo-image-01482")
: "VUID-VkImageViewCreateInfo-subresourceRange-01480";
subresource_range_error_codes.layer_count_err = is_khr_maintenance1
? (is_3_d_to_2_d_map ? "VUID-VkImageViewCreateInfo-subresourceRange-02725"
: "VUID-VkImageViewCreateInfo-subresourceRange-01483")
: "VUID-VkImageViewCreateInfo-subresourceRange-01719";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_layer_count, subresourceRange,
"vkCreateImageView", "pCreateInfo->subresourceRange", image_layer_count_var_name,
image_state->image(), subresource_range_error_codes);
}
bool CoreChecks::ValidateCmdClearColorSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange,
const char *param_name) const {
SubresourceRangeErrorCodes subresource_range_error_codes = {};
subresource_range_error_codes.base_mip_err = "VUID-vkCmdClearColorImage-baseMipLevel-01470";
subresource_range_error_codes.mip_count_err = "VUID-vkCmdClearColorImage-pRanges-01692";
subresource_range_error_codes.base_layer_err = "VUID-vkCmdClearColorImage-baseArrayLayer-01472";
subresource_range_error_codes.layer_count_err = "VUID-vkCmdClearColorImage-pRanges-01693";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
"vkCmdClearColorImage", param_name, "arrayLayers", image_state->image(),
subresource_range_error_codes);
}
bool CoreChecks::ValidateCmdClearDepthSubresourceRange(const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange,
const char *param_name) const {
SubresourceRangeErrorCodes subresource_range_error_codes = {};
subresource_range_error_codes.base_mip_err = "VUID-vkCmdClearDepthStencilImage-baseMipLevel-01474";
subresource_range_error_codes.mip_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01694";
subresource_range_error_codes.base_layer_err = "VUID-vkCmdClearDepthStencilImage-baseArrayLayer-01476";
subresource_range_error_codes.layer_count_err = "VUID-vkCmdClearDepthStencilImage-pRanges-01695";
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
"vkCmdClearDepthStencilImage", param_name, "arrayLayers", image_state->image(),
subresource_range_error_codes);
}
bool CoreChecks::ValidateImageBarrierSubresourceRange(const Location &loc, const IMAGE_STATE *image_state,
const VkImageSubresourceRange &subresourceRange) const {
return ValidateImageSubresourceRange(image_state->createInfo.mipLevels, image_state->createInfo.arrayLayers, subresourceRange,
loc.StringFunc().c_str(), loc.StringField().c_str(), "arrayLayers", image_state->image(),
sync_vuid_maps::GetSubResourceVUIDs(loc));
}
namespace barrier_queue_families {
using sync_vuid_maps::GetBarrierQueueVUID;
using sync_vuid_maps::kQueueErrorSummary;
using sync_vuid_maps::QueueError;
class ValidatorState {
public:
ValidatorState(const ValidationStateTracker *device_data, LogObjectList &&obj, const core_error::Location &location,
const VulkanTypedHandle &barrier_handle, const VkSharingMode sharing_mode)
: device_data_(device_data),
objects_(std::move(obj)),
loc_(location),
barrier_handle_(barrier_handle),
sharing_mode_(sharing_mode),
limit_(static_cast<uint32_t>(device_data->physical_device_state->queue_family_properties.size())),
mem_ext_(IsExtEnabled(device_data->device_extensions.vk_khr_external_memory)) {}
// Log the messages using boilerplate from object state, and VU-specific information from the template arg.
// There are one- and two-family versions; in the single-family version, the VU holds the name of the passed parameter.
bool LogMsg(QueueError vu_index, uint32_t family, const char *param_name) const {
const std::string val_code = GetBarrierQueueVUID(loc_, vu_index);
const char *annotation = GetFamilyAnnotation(family);
return device_data_->LogError(objects_, val_code, "%s Barrier using %s %s created with sharingMode %s, has %s %u%s. %s",
loc_.Message().c_str(), GetTypeString(),
device_data_->report_data->FormatHandle(barrier_handle_).c_str(), GetModeString(), param_name,
family, annotation, kQueueErrorSummary.at(vu_index).c_str());
}
bool LogMsg(QueueError vu_index, uint32_t src_family, uint32_t dst_family) const {
const std::string val_code = GetBarrierQueueVUID(loc_, vu_index);
const char *src_annotation = GetFamilyAnnotation(src_family);
const char *dst_annotation = GetFamilyAnnotation(dst_family);
return device_data_->LogError(
objects_, val_code,
"%s Barrier using %s %s created with sharingMode %s, has srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
loc_.Message().c_str(), GetTypeString(), device_data_->report_data->FormatHandle(barrier_handle_).c_str(),
GetModeString(), src_family, src_annotation, dst_family, dst_annotation, kQueueErrorSummary.at(vu_index).c_str());
}
// This abstract VU can only be tested at submit time, so we need a callback from the closure containing the needed
// data. Note that the mem_barrier is copied into the closure, as the lambda's lifespan exceeds the validity
// guarantees for the application's input.
static bool ValidateAtQueueSubmit(const QUEUE_STATE *queue_state, const ValidationStateTracker *device_data,
uint32_t src_family, uint32_t dst_family, const ValidatorState &val) {
auto error_code = QueueError::kSubmitQueueMustMatchSrcOrDst;
uint32_t queue_family = queue_state->queueFamilyIndex;
if ((src_family != queue_family) && (dst_family != queue_family)) {
const std::string val_code = GetBarrierQueueVUID(val.loc_, error_code);
const char *src_annotation = val.GetFamilyAnnotation(src_family);
const char *dst_annotation = val.GetFamilyAnnotation(dst_family);
return device_data->LogError(
queue_state->queue, val_code,
"%s Barrier submitted to queue with family index %u, using %s %s created with sharingMode %s, has "
"srcQueueFamilyIndex %u%s and dstQueueFamilyIndex %u%s. %s",
val.loc_.Message().c_str(), queue_family, val.GetTypeString(),
device_data->report_data->FormatHandle(val.barrier_handle_).c_str(), val.GetModeString(), src_family,
src_annotation, dst_family, dst_annotation, kQueueErrorSummary.at(error_code).c_str());
}
return false;
}
// Logical helpers for semantic clarity
inline bool KhrExternalMem() const { return mem_ext_; }
inline bool IsValid(uint32_t queue_family) const { return (queue_family < limit_); }
inline bool IsValidOrSpecial(uint32_t queue_family) const {
return IsValid(queue_family) || (mem_ext_ && QueueFamilyIsExternal(queue_family));
}
// Helpers for LogMsg
const char *GetModeString() const { return string_VkSharingMode(sharing_mode_); }
// Descriptive text for the various types of queue family index
const char *GetFamilyAnnotation(uint32_t family) const {
const char *external = " (VK_QUEUE_FAMILY_EXTERNAL)";
const char *foreign = " (VK_QUEUE_FAMILY_FOREIGN_EXT)";
const char *ignored = " (VK_QUEUE_FAMILY_IGNORED)";
const char *valid = " (VALID)";
const char *invalid = " (INVALID)";
switch (family) {
case VK_QUEUE_FAMILY_EXTERNAL:
return external;
case VK_QUEUE_FAMILY_FOREIGN_EXT:
return foreign;
case VK_QUEUE_FAMILY_IGNORED:
return ignored;
default:
if (IsValid(family)) {
return valid;
}
return invalid;
}
}
const char *GetTypeString() const { return object_string[barrier_handle_.type]; }
VkSharingMode GetSharingMode() const { return sharing_mode_; }
protected:
const ValidationStateTracker *device_data_;
const LogObjectList objects_;
const core_error::Location loc_;
const VulkanTypedHandle barrier_handle_;
const VkSharingMode sharing_mode_;
const uint32_t limit_;
const bool mem_ext_;
};
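// Summary of the queue family rules checked below (derived from the branches that follow):
// - VK_KHR_external_memory + CONCURRENT: pre-sync2, at least one of src/dst must be IGNORED; if one is IGNORED,
//   the other must be IGNORED or an external/foreign family.
// - VK_KHR_external_memory + EXCLUSIVE: if src != dst, each must be a valid family or external/foreign.
// - no extension + CONCURRENT: pre-sync2, both src and dst must be IGNORED.
// - no extension + EXCLUSIVE: if src != dst, both must be valid queue families.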
bool Validate(const CoreChecks *device_data, const CMD_BUFFER_STATE *cb_state, const ValidatorState &val,
const uint32_t src_queue_family, const uint32_t dst_queue_family) {
bool skip = false;
const bool mode_concurrent = val.GetSharingMode() == VK_SHARING_MODE_CONCURRENT;
const bool src_ignored = QueueFamilyIsIgnored(src_queue_family);
const bool dst_ignored = QueueFamilyIsIgnored(dst_queue_family);
if (val.KhrExternalMem()) {
if (mode_concurrent) {
bool sync2 = device_data->enabled_features.synchronization2_features.synchronization2 != 0;
// this requirement is removed by VK_KHR_synchronization2
if (!(src_ignored || dst_ignored) && !sync2) {
skip |= val.LogMsg(QueueError::kSrcOrDstMustBeIgnore, src_queue_family, dst_queue_family);
}
if ((src_ignored && !(dst_ignored || QueueFamilyIsExternal(dst_queue_family))) ||
(dst_ignored && !(src_ignored || QueueFamilyIsExternal(src_queue_family)))) {
skip |= val.LogMsg(QueueError::kSpecialOrIgnoreOnly, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if (src_queue_family != dst_queue_family) {
if (!val.IsValidOrSpecial(dst_queue_family)) {
skip |= val.LogMsg(QueueError::kSrcAndDstValidOrSpecial, dst_queue_family, "dstQueueFamilyIndex");
}
if (!val.IsValidOrSpecial(src_queue_family)) {
skip |= val.LogMsg(QueueError::kSrcAndDstValidOrSpecial, src_queue_family, "srcQueueFamilyIndex");
}
}
}
} else {
// No memory extension
if (mode_concurrent) {
bool sync2 = device_data->enabled_features.synchronization2_features.synchronization2 != 0;
// this requirement is removed by VK_KHR_synchronization2
if ((!src_ignored || !dst_ignored) && !sync2) {
skip |= val.LogMsg(QueueError::kSrcAndDestMustBeIgnore, src_queue_family, dst_queue_family);
}
} else {
// VK_SHARING_MODE_EXCLUSIVE
if ((src_queue_family != dst_queue_family) && !(val.IsValid(src_queue_family) && val.IsValid(dst_queue_family))) {
skip |= val.LogMsg(QueueError::kSrcAndDstBothValid, src_queue_family, dst_queue_family);
}
}
}
return skip;
}
} // namespace barrier_queue_families
bool CoreChecks::ValidateConcurrentBarrierAtSubmit(const Location &loc, const ValidationStateTracker *state_data,
const QUEUE_STATE *queue_state, const CMD_BUFFER_STATE *cb_state,
const VulkanTypedHandle &typed_handle, uint32_t src_queue_family,
uint32_t dst_queue_family) {
using barrier_queue_families::ValidatorState;
ValidatorState val(state_data, LogObjectList(cb_state->commandBuffer()), loc, typed_handle, VK_SHARING_MODE_CONCURRENT);
return ValidatorState::ValidateAtQueueSubmit(queue_state, state_data, src_queue_family, dst_queue_family, val);
}
// Type specific wrapper for image barriers
template <typename ImgBarrier>
bool CoreChecks::ValidateBarrierQueueFamilies(const Location &loc, const CMD_BUFFER_STATE *cb_state, const ImgBarrier &barrier,
const IMAGE_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the image state
barrier_queue_families::ValidatorState val(this, LogObjectList(cb_state->commandBuffer()), loc,
state_data->Handle(), state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, cb_state, val, src_queue_family, dst_queue_family);
}
// Type specific wrapper for buffer barriers
template <typename BufBarrier>
bool CoreChecks::ValidateBarrierQueueFamilies(const Location &loc, const CMD_BUFFER_STATE *cb_state, const BufBarrier &barrier,
const BUFFER_STATE *state_data) const {
// State data is required
if (!state_data) {
return false;
}
// Create the validator state from the buffer state
barrier_queue_families::ValidatorState val(this, LogObjectList(cb_state->commandBuffer()), loc,
state_data->Handle(), state_data->createInfo.sharingMode);
const uint32_t src_queue_family = barrier.srcQueueFamilyIndex;
const uint32_t dst_queue_family = barrier.dstQueueFamilyIndex;
return barrier_queue_families::Validate(this, cb_state, val, src_queue_family, dst_queue_family);
}
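// Validate a VkBufferMemoryBarrier/VkBufferMemoryBarrier2KHR: QFO transfer uniqueness, queue family indices, and
// that offset/size stay within the bound buffer.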
template <typename Barrier>
bool CoreChecks::ValidateBufferBarrier(const LogObjectList &objects, const Location &loc, const CMD_BUFFER_STATE *cb_state,
const Barrier &mem_barrier) const {
using sync_vuid_maps::BufferError;
using sync_vuid_maps::GetBufferBarrierVUID;
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(loc, cb_state, mem_barrier, cb_state->qfo_transfer_buffer_barriers);
// Validate buffer barrier queue family indices
auto buffer_state = GetBufferState(mem_barrier.buffer);
if (buffer_state) {
auto buf_loc = loc.dot(Field::buffer);
const auto &mem_vuid = GetBufferBarrierVUID(buf_loc, BufferError::kNoMemory);
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, loc.StringFunc().c_str(), mem_vuid.c_str());
skip |= ValidateBarrierQueueFamilies(buf_loc, cb_state, mem_barrier, buffer_state);
auto buffer_size = buffer_state->createInfo.size;
if (mem_barrier.offset >= buffer_size) {
auto offset_loc = loc.dot(Field::offset);
const auto &vuid = GetBufferBarrierVUID(offset_loc, BufferError::kOffsetTooBig);
skip |= LogError(objects, vuid, "%s %s has offset 0x%" PRIx64 " which is not less than total size 0x%" PRIx64 ".",
offset_loc.Message().c_str(), report_data->FormatHandle(mem_barrier.buffer).c_str(),
HandleToUint64(mem_barrier.offset), HandleToUint64(buffer_size));
} else if (mem_barrier.size != VK_WHOLE_SIZE && (mem_barrier.offset + mem_barrier.size > buffer_size)) {
auto size_loc = loc.dot(Field::size);
const auto &vuid = GetBufferBarrierVUID(size_loc, BufferError::kSizeOutOfRange);
skip |= LogError(objects, vuid,
"%s %s has offset 0x%" PRIx64 " and size 0x%" PRIx64 " whose sum is greater than total size 0x%" PRIx64
".",
size_loc.Message().c_str(), report_data->FormatHandle(mem_barrier.buffer).c_str(),
HandleToUint64(mem_barrier.offset), HandleToUint64(mem_barrier.size), HandleToUint64(buffer_size));
}
if (mem_barrier.size == 0) {
auto size_loc = loc.dot(Field::size);
const auto &vuid = GetBufferBarrierVUID(size_loc, BufferError::kSizeZero);
skip |= LogError(objects, vuid, "%s %s has a size of 0.", loc.Message().c_str(),
report_data->FormatHandle(mem_barrier.buffer).c_str());
}
}
return skip;
}
template <typename Barrier>
bool CoreChecks::ValidateImageBarrier(const LogObjectList &objects, const Location &loc, const CMD_BUFFER_STATE *cb_state,
const Barrier &mem_barrier) const {
bool skip = false;
skip |= ValidateQFOTransferBarrierUniqueness(loc, cb_state, mem_barrier, cb_state->qfo_transfer_image_barriers);
bool is_ilt = true;
if (enabled_features.synchronization2_features.synchronization2) {
is_ilt = mem_barrier.oldLayout != mem_barrier.newLayout;
}
if (is_ilt) {
if (mem_barrier.newLayout == VK_IMAGE_LAYOUT_UNDEFINED || mem_barrier.newLayout == VK_IMAGE_LAYOUT_PREINITIALIZED) {
auto layout_loc = loc.dot(Field::newLayout);
const auto &vuid = sync_vuid_maps::GetImageBarrierVUID(loc, sync_vuid_maps::ImageError::kBadLayout);
skip |=
LogError(cb_state->commandBuffer(), vuid, "%s Image Layout cannot be transitioned to UNDEFINED or PREINITIALIZED.",
layout_loc.Message().c_str());
}
}
auto image_data = GetImageState(mem_barrier.image);
if (image_data) {
auto image_loc = loc.dot(Field::image);
skip |= ValidateMemoryIsBoundToImage(image_data, loc);
skip |= ValidateBarrierQueueFamilies(image_loc, cb_state, mem_barrier, image_data);
skip |= ValidateImageAspectMask(image_data->image(), image_data->createInfo.format, mem_barrier.subresourceRange.aspectMask,
loc.StringFunc().c_str());
skip |= ValidateImageBarrierSubresourceRange(loc.dot(Field::subresourceRange), image_data, mem_barrier.subresourceRange);
}
return skip;
}
bool CoreChecks::ValidateBarriers(const Location &outer_loc, const CMD_BUFFER_STATE *cb_state, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask, uint32_t memBarrierCount,
const VkMemoryBarrier *pMemBarriers, uint32_t bufferBarrierCount,
const VkBufferMemoryBarrier *pBufferMemBarriers, uint32_t imageMemBarrierCount,
const VkImageMemoryBarrier *pImageMemBarriers) const {
bool skip = false;
LogObjectList objects(cb_state->commandBuffer());
for (uint32_t i = 0; i < memBarrierCount; ++i) {
const auto &mem_barrier = pMemBarriers[i];
auto loc = outer_loc.dot(Struct::VkMemoryBarrier, Field::pMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, cb_state, mem_barrier, src_stage_mask, dst_stage_mask);
}
for (uint32_t i = 0; i < imageMemBarrierCount; ++i) {
const auto &mem_barrier = pImageMemBarriers[i];
auto loc = outer_loc.dot(Struct::VkImageMemoryBarrier, Field::pImageMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, cb_state, mem_barrier, src_stage_mask, dst_stage_mask);
skip |= ValidateImageBarrier(objects, loc, cb_state, mem_barrier);
}
{
Location loc(outer_loc.function, Struct::VkImageMemoryBarrier);
skip |= ValidateBarriersToImages(loc, cb_state, imageMemBarrierCount, pImageMemBarriers);
}
for (uint32_t i = 0; i < bufferBarrierCount; ++i) {
const auto &mem_barrier = pBufferMemBarriers[i];
auto loc = outer_loc.dot(Struct::VkBufferMemoryBarrier, Field::pBufferMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, cb_state, mem_barrier, src_stage_mask, dst_stage_mask);
skip |= ValidateBufferBarrier(objects, loc, cb_state, mem_barrier);
}
return skip;
}
bool CoreChecks::ValidateDependencyInfo(const LogObjectList &objects, const Location &outer_loc, const CMD_BUFFER_STATE *cb_state,
const VkDependencyInfoKHR *dep_info) const {
bool skip = false;
if (cb_state->activeRenderPass) {
skip |= ValidateRenderPassPipelineBarriers(outer_loc, cb_state, dep_info);
if (skip) return true; // Early return to avoid redundant errors from below calls
}
for (uint32_t i = 0; i < dep_info->memoryBarrierCount; ++i) {
const auto &mem_barrier = dep_info->pMemoryBarriers[i];
auto loc = outer_loc.dot(Struct::VkMemoryBarrier2KHR, Field::pMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, cb_state, mem_barrier);
}
for (uint32_t i = 0; i < dep_info->imageMemoryBarrierCount; ++i) {
const auto &mem_barrier = dep_info->pImageMemoryBarriers[i];
auto loc = outer_loc.dot(Struct::VkImageMemoryBarrier2KHR, Field::pImageMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, cb_state, mem_barrier);
skip |= ValidateImageBarrier(objects, loc, cb_state, mem_barrier);
}
{
Location loc(outer_loc.function, Struct::VkImageMemoryBarrier2KHR);
skip |= ValidateBarriersToImages(loc, cb_state, dep_info->imageMemoryBarrierCount, dep_info->pImageMemoryBarriers);
}
for (uint32_t i = 0; i < dep_info->bufferMemoryBarrierCount; ++i) {
const auto &mem_barrier = dep_info->pBufferMemoryBarriers[i];
auto loc = outer_loc.dot(Struct::VkBufferMemoryBarrier2KHR, Field::pBufferMemoryBarriers, i);
skip |= ValidateMemoryBarrier(objects, loc, cb_state, mem_barrier);
skip |= ValidateBufferBarrier(objects, loc, cb_state, mem_barrier);
}
return skip;
}
// template to check all original barrier structures
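// The IsAcquireOp/IsReleaseOp exemptions below reflect the queue family ownership transfer rules: for the acquiring
// half of a QFO transfer srcAccessMask is ignored, and for the releasing half dstAccessMask is ignored, so neither is
// validated against the corresponding stage mask.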
template <typename Barrier>
bool CoreChecks::ValidateMemoryBarrier(const LogObjectList &objects, const Location &loc, const CMD_BUFFER_STATE *cb_state,
const Barrier &barrier, VkPipelineStageFlags src_stage_mask,
VkPipelineStageFlags dst_stage_mask) const {
bool skip = false;
assert(cb_state);
auto queue_flags = cb_state->GetQueueFlags();
if (!cb_state->IsAcquireOp(barrier)) {
skip |= ValidateAccessMask(objects, loc.dot(Field::srcAccessMask), queue_flags, barrier.srcAccessMask, src_stage_mask);
}
if (!cb_state->IsReleaseOp(barrier)) {
skip |= ValidateAccessMask(objects, loc.dot(Field::dstAccessMask), queue_flags, barrier.dstAccessMask, dst_stage_mask);
}
return skip;
}
// template to check all synchronization2 barrier structures
template <typename Barrier>
bool CoreChecks::ValidateMemoryBarrier(const LogObjectList &objects, const Location &loc, const CMD_BUFFER_STATE *cb_state,
const Barrier &barrier) const {
bool skip = false;
assert(cb_state);
auto queue_flags = cb_state->GetQueueFlags();
skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), queue_flags, barrier.srcStageMask);
if (!cb_state->IsAcquireOp(barrier)) {
skip |=
ValidateAccessMask(objects, loc.dot(Field::srcAccessMask), queue_flags, barrier.srcAccessMask, barrier.srcStageMask);
}
skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), queue_flags, barrier.dstStageMask);
if (!cb_state->IsReleaseOp(barrier)) {
skip |=
ValidateAccessMask(objects, loc.dot(Field::dstAccessMask), queue_flags, barrier.dstAccessMask, barrier.dstStageMask);
}
return skip;
}
// VkSubpassDependency validation happens when vkCreateRenderPass() is called.
// Dependencies between subpasses can only use pipeline stages compatible with VK_QUEUE_GRAPHICS_BIT,
// for dependencies involving VK_SUBPASS_EXTERNAL we don't yet have a command buffer, so all queue types are assumed valid.
static inline VkQueueFlags SubpassToQueueFlags(uint32_t subpass) {
return subpass == VK_SUBPASS_EXTERNAL ? sync_utils::kAllQueueTypes : static_cast<VkQueueFlags>(VK_QUEUE_GRAPHICS_BIT);
}
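// With synchronization2, an application can attach a VkMemoryBarrier2KHR to VkSubpassDependency2::pNext and leave the
// four legacy mask fields zero; the validation below enforces that split and then checks whichever set of masks is in
// effect. Illustrative example (hypothetical values):
//     VkMemoryBarrier2KHR barrier2 = {VK_STRUCTURE_TYPE_MEMORY_BARRIER_2_KHR};
//     barrier2.srcStageMask  = VK_PIPELINE_STAGE_2_COLOR_ATTACHMENT_OUTPUT_BIT_KHR;
//     barrier2.srcAccessMask = VK_ACCESS_2_COLOR_ATTACHMENT_WRITE_BIT_KHR;
//     barrier2.dstStageMask  = VK_PIPELINE_STAGE_2_FRAGMENT_SHADER_BIT_KHR;
//     barrier2.dstAccessMask = VK_ACCESS_2_SHADER_READ_BIT_KHR;
//     VkSubpassDependency2 dep = {VK_STRUCTURE_TYPE_SUBPASS_DEPENDENCY_2};
//     dep.pNext = &barrier2;  // the src/dst stage and access masks in `dep` itself stay zero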
bool CoreChecks::ValidateSubpassDependency(const LogObjectList &objects, const Location &in_loc,
const VkSubpassDependency2 &dependency) const {
bool skip = false;
Location loc = in_loc;
VkMemoryBarrier2KHR converted_barrier;
const auto *mem_barrier = LvlFindInChain<VkMemoryBarrier2KHR>(dependency.pNext);
if (mem_barrier && enabled_features.synchronization2_features.synchronization2) {
if (dependency.srcAccessMask != 0) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-srcAccessMask",
"%s is non-zero when a VkMemoryBarrier2KHR is present in pNext.",
loc.dot(Field::srcAccessMask).Message().c_str());
}
if (dependency.dstAccessMask != 0) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-dstAccessMask",
"%s dstAccessMask is non-zero when a VkMemoryBarrier2KHR is present in pNext.",
loc.dot(Field::dstAccessMask).Message().c_str());
}
if (dependency.srcStageMask != 0) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-srcStageMask",
"%s srcStageMask is non-zero when a VkMemoryBarrier2KHR is present in pNext.",
loc.dot(Field::srcStageMask).Message().c_str());
}
if (dependency.dstStageMask != 0) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-dstStageMask",
"%s dstStageMask is non-zero when a VkMemoryBarrier2KHR is present in pNext.",
loc.dot(Field::dstStageMask).Message().c_str());
}
loc = in_loc.dot(Field::pNext);
converted_barrier = *mem_barrier;
} else {
if (mem_barrier) {
skip |= LogError(objects, "UNASSIGNED-CoreChecks-VkSubpassDependency2-pNext",
"%s a VkMemoryBarrier2KHR is present in pNext but synchronization2 is not enabled.",
loc.Message().c_str());
}
// use the subpass dependency flags, upconverted into wider synchronization2 fields.
converted_barrier.srcStageMask = dependency.srcStageMask;
converted_barrier.dstStageMask = dependency.dstStageMask;
converted_barrier.srcAccessMask = dependency.srcAccessMask;
converted_barrier.dstAccessMask = dependency.dstAccessMask;
}
auto src_queue_flags = SubpassToQueueFlags(dependency.srcSubpass);
skip |= ValidatePipelineStage(objects, loc.dot(Field::srcStageMask), src_queue_flags, converted_barrier.srcStageMask);
skip |= ValidateAccessMask(objects, loc.dot(Field::srcAccessMask), src_queue_flags, converted_barrier.srcAccessMask,
converted_barrier.srcStageMask);
auto dst_queue_flags = SubpassToQueueFlags(dependency.dstSubpass);
skip |= ValidatePipelineStage(objects, loc.dot(Field::dstStageMask), dst_queue_flags, converted_barrier.dstStageMask);
skip |= ValidateAccessMask(objects, loc.dot(Field::dstAccessMask), dst_queue_flags, converted_barrier.dstAccessMask,
converted_barrier.dstStageMask);
return skip;
}
bool CoreChecks::ValidateImageViewFormatFeatures(const IMAGE_STATE *image_state, const VkFormat view_format,
const VkImageUsageFlags image_usage) const {
// Pass in image_usage here instead of extracting it from image_state in case there's a chained VkImageViewUsageCreateInfo
bool skip = false;
VkFormatFeatureFlags tiling_features = VK_FORMAT_FEATURE_FLAG_BITS_MAX_ENUM;
const VkImageTiling image_tiling = image_state->createInfo.tiling;
if (image_state->HasAHBFormat()) {
// AHB image view and image share same feature sets
tiling_features = image_state->format_features;
} else if (image_tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
// Parameter validation should catch if this is used without VK_EXT_image_drm_format_modifier
assert(device_extensions.vk_ext_image_drm_format_modifier);
VkImageDrmFormatModifierPropertiesEXT drm_format_properties = {VK_STRUCTURE_TYPE_IMAGE_DRM_FORMAT_MODIFIER_PROPERTIES_EXT,
nullptr};
DispatchGetImageDrmFormatModifierPropertiesEXT(device, image_state->image(), &drm_format_properties);
VkFormatProperties2 format_properties_2 = {VK_STRUCTURE_TYPE_FORMAT_PROPERTIES_2, nullptr};
VkDrmFormatModifierPropertiesListEXT drm_properties_list = {VK_STRUCTURE_TYPE_DRM_FORMAT_MODIFIER_PROPERTIES_LIST_EXT,
nullptr};
format_properties_2.pNext = (void *)&drm_properties_list;
DispatchGetPhysicalDeviceFormatProperties2(physical_device, view_format, &format_properties_2);
std::vector<VkDrmFormatModifierPropertiesEXT> drm_properties;
drm_properties.resize(drm_properties_list.drmFormatModifierCount);
drm_properties_list.pDrmFormatModifierProperties = drm_properties.data();
DispatchGetPhysicalDeviceFormatProperties2(physical_device, view_format, &format_properties_2);
for (uint32_t i = 0; i < drm_properties_list.drmFormatModifierCount; i++) {
if (drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifier == drm_format_properties.drmFormatModifier) {
tiling_features |= drm_properties_list.pDrmFormatModifierProperties[i].drmFormatModifierTilingFeatures;
}
}
} else {
VkFormatProperties format_properties = GetPDFormatProperties(view_format);
tiling_features = (image_tiling == VK_IMAGE_TILING_LINEAR) ? format_properties.linearTilingFeatures
: format_properties.optimalTilingFeatures;
}
if (tiling_features == 0) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-None-02273",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s has no supported format features on this "
"physical device.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) && !(tiling_features & VK_FORMAT_FEATURE_SAMPLED_IMAGE_BIT)) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02274",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_SAMPLED_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_STORAGE_BIT) && !(tiling_features & VK_FORMAT_FEATURE_STORAGE_IMAGE_BIT)) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02275",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_STORAGE_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT) && !(tiling_features & VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT)) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02276",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT) &&
!(tiling_features & VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT)) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02277",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT) &&
!(tiling_features & (VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT | VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT))) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-02652",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_FORMAT_FEATURE_COLOR_ATTACHMENT_BIT or VK_FORMAT_FEATURE_DEPTH_STENCIL_ATTACHMENT_BIT.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
} else if ((image_usage & VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) &&
!(tiling_features & VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR)) {
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
skip |= LogError(image_state->image(), "VUID-VkImageViewCreateInfo-usage-04550",
"vkCreateImageView(): pCreateInfo->format %s with tiling %s does not support usage that includes "
"VK_FORMAT_FEATURE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR.",
string_VkFormat(view_format), string_VkImageTiling(image_tiling));
}
}
return skip;
}
bool CoreChecks::PreCallValidateCreateImageView(VkDevice device, const VkImageViewCreateInfo *pCreateInfo,
const VkAllocationCallbacks *pAllocator, VkImageView *pView) const {
bool skip = false;
const IMAGE_STATE *image_state = GetImageState(pCreateInfo->image);
if (image_state) {
skip |=
ValidateImageUsageFlags(image_state,
VK_IMAGE_USAGE_SAMPLED_BIT | VK_IMAGE_USAGE_STORAGE_BIT | VK_IMAGE_USAGE_INPUT_ATTACHMENT_BIT |
VK_IMAGE_USAGE_DEPTH_STENCIL_ATTACHMENT_BIT | VK_IMAGE_USAGE_COLOR_ATTACHMENT_BIT |
VK_IMAGE_USAGE_TRANSIENT_ATTACHMENT_BIT | VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV |
VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT,
false, "VUID-VkImageViewCreateInfo-image-04441", "vkCreateImageView()",
"VK_IMAGE_USAGE_[SAMPLED|STORAGE|COLOR_ATTACHMENT|DEPTH_STENCIL_ATTACHMENT|INPUT_ATTACHMENT|"
"TRANSIENT_ATTACHMENT|SHADING_RATE_IMAGE|FRAGMENT_DENSITY_MAP]_BIT");
// If this isn't a sparse image, it needs to have memory backing it at CreateImageView time
skip |= ValidateMemoryIsBoundToImage(image_state, "vkCreateImageView()", "VUID-VkImageViewCreateInfo-image-01020");
// Checks imported from image layer
skip |= ValidateCreateImageViewSubresourceRange(
image_state, pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D || pCreateInfo->viewType == VK_IMAGE_VIEW_TYPE_2D_ARRAY,
pCreateInfo->subresourceRange);
VkImageCreateFlags image_flags = image_state->createInfo.flags;
VkFormat image_format = image_state->createInfo.format;
VkImageUsageFlags image_usage = image_state->createInfo.usage;
VkFormat view_format = pCreateInfo->format;
VkImageAspectFlags aspect_mask = pCreateInfo->subresourceRange.aspectMask;
VkImageType image_type = image_state->createInfo.imageType;
VkImageViewType view_type = pCreateInfo->viewType;
uint32_t layer_count = pCreateInfo->subresourceRange.layerCount;
// If there's a chained VkImageViewUsageCreateInfo struct, modify image_usage to match
auto chained_ivuci_struct = LvlFindInChain<VkImageViewUsageCreateInfo>(pCreateInfo->pNext);
if (chained_ivuci_struct) {
if (device_extensions.vk_khr_maintenance2) {
if (!device_extensions.vk_ext_separate_stencil_usage) {
if ((image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02661",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, usage must not "
"include any bits that were not set in VkImageCreateInfo::usage used to create image");
}
} else {
const auto image_stencil_struct = LvlFindInChain<VkImageStencilUsageCreateInfo>(image_state->createInfo.pNext);
if (image_stencil_struct == nullptr) {
if ((image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02662",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo and image was not created "
"with a VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, usage must not include "
"any bits that were not set in VkImageCreateInfo::usage used to create image");
}
} else {
if ((aspect_mask & VK_IMAGE_ASPECT_STENCIL_BIT) == VK_IMAGE_ASPECT_STENCIL_BIT &&
(image_stencil_struct->stencilUsage | chained_ivuci_struct->usage) !=
image_stencil_struct->stencilUsage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02663",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, image was created with a "
"VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, and subResourceRange.aspectMask "
"includes VK_IMAGE_ASPECT_STENCIL_BIT, VkImageViewUsageCreateInfo::usage must not include any "
"bits that were not set in VkImageStencilUsageCreateInfo::stencilUsage used to create image");
}
if ((aspect_mask & ~VK_IMAGE_ASPECT_STENCIL_BIT) != 0 &&
(image_usage | chained_ivuci_struct->usage) != image_usage) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-02664",
"vkCreateImageView(): pNext chain includes VkImageViewUsageCreateInfo, image was created with a "
"VkImageStencilUsageCreateInfo in pNext of vkImageCreateInfo, and subResourceRange.aspectMask "
"includes bits other than VK_IMAGE_ASPECT_STENCIL_BIT, VkImageViewUsageCreateInfo::usage must not "
"include any bits that were not set in VkImageCreateInfo::usage used to create image");
}
}
}
}
image_usage = chained_ivuci_struct->usage;
}
// If image used VkImageFormatListCreateInfo need to make sure a format from list is used
const auto format_list_info = LvlFindInChain<VkImageFormatListCreateInfo>(image_state->createInfo.pNext);
if (format_list_info && (format_list_info->viewFormatCount > 0)) {
bool found_format = false;
for (uint32_t i = 0; i < format_list_info->viewFormatCount; i++) {
if (format_list_info->pViewFormats[i] == view_format) {
found_format = true;
break;
}
}
if (!found_format) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-pNext-01585",
"vkCreateImageView(): image was created with a VkImageFormatListCreateInfo in pNext of "
"vkImageCreateInfo, but none of the formats match the VkImageViewCreateInfo::format (%s).",
string_VkFormat(view_format));
}
}
// Validate VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT state, if view/image formats differ
if ((image_flags & VK_IMAGE_CREATE_MUTABLE_FORMAT_BIT) && (image_format != view_format)) {
if (FormatIsMultiplane(image_format)) {
VkFormat compat_format = FindMultiplaneCompatibleFormat(image_format, aspect_mask);
if (view_format != compat_format) {
// View format must match the multiplane compatible format
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not compatible with plane " << GetPlaneIndex(aspect_mask) << " of underlying image format "
<< string_VkFormat(image_format) << ", must be " << string_VkFormat(compat_format) << ".";
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01586", "%s", ss.str().c_str());
}
} else {
if (!(image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT)) {
// Format MUST be compatible (in the same format compatibility class) as the format the image was created with
const VkFormatCompatibilityClass image_class = FormatCompatibilityClass(image_format);
const VkFormatCompatibilityClass view_class = FormatCompatibilityClass(view_format);
// Only need to check whether one is NONE_BIT, to handle the edge case where both are NONE_BIT
if ((image_class != view_class) || (image_class == VK_FORMAT_COMPATIBILITY_CLASS_NONE_BIT)) {
const char *error_vuid;
if ((!device_extensions.vk_khr_maintenance2) && (!device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01018";
} else if ((device_extensions.vk_khr_maintenance2) &&
(!device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01759";
} else if ((!device_extensions.vk_khr_maintenance2) &&
(device_extensions.vk_khr_sampler_ycbcr_conversion)) {
error_vuid = "VUID-VkImageViewCreateInfo-image-01760";
} else {
// both enabled
error_vuid = "VUID-VkImageViewCreateInfo-image-01761";
}
std::stringstream ss;
ss << "vkCreateImageView(): ImageView format " << string_VkFormat(view_format)
<< " is not in the same format compatibility class as "
<< report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
<< ". Images created with the VK_IMAGE_CREATE_MUTABLE_FORMAT BIT "
<< "can support ImageViews with differing formats but they must be in the same compatibility class.";
skip |= LogError(pCreateInfo->image, error_vuid, "%s", ss.str().c_str());
}
}
}
} else {
// Format MUST be IDENTICAL to the format the image was created with
// Unless it is a multi-planar color bit aspect
if ((image_format != view_format) &&
(!FormatIsMultiplane(image_format) || (aspect_mask != VK_IMAGE_ASPECT_COLOR_BIT))) {
const char *vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion) ? "VUID-VkImageViewCreateInfo-image-01762"
: "VUID-VkImageViewCreateInfo-image-01019";
std::stringstream ss;
ss << "vkCreateImageView() format " << string_VkFormat(view_format) << " differs from "
<< report_data->FormatHandle(pCreateInfo->image).c_str() << " format " << string_VkFormat(image_format)
<< ". Formats MUST be IDENTICAL unless VK_IMAGE_CREATE_MUTABLE_FORMAT BIT was set on image creation.";
skip |= LogError(pCreateInfo->image, vuid, "%s", ss.str().c_str());
}
}
// Validate correct image aspect bits for desired formats and format consistency
skip |= ValidateImageAspectMask(image_state->image(), image_format, aspect_mask, "vkCreateImageView()");
// Validate Image/ImageView type compatibility #resources-image-views-compatibility
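// Compatibility summary:
//   1D image -> 1D, 1D_ARRAY views
//   2D image -> 2D, 2D_ARRAY views (CUBE/CUBE_ARRAY additionally require VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)
//   3D image -> 3D views (2D/2D_ARRAY additionally require VK_KHR_maintenance1 and
//               VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT)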
switch (image_type) {
case VK_IMAGE_TYPE_1D:
if (view_type != VK_IMAGE_VIEW_TYPE_1D && view_type != VK_IMAGE_VIEW_TYPE_1D_ARRAY) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
break;
case VK_IMAGE_TYPE_2D:
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
if ((view_type == VK_IMAGE_VIEW_TYPE_CUBE || view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) &&
!(image_flags & VK_IMAGE_CREATE_CUBE_COMPATIBLE_BIT)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01003",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if (view_type != VK_IMAGE_VIEW_TYPE_CUBE && view_type != VK_IMAGE_VIEW_TYPE_CUBE_ARRAY) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
break;
case VK_IMAGE_TYPE_3D:
if (device_extensions.vk_khr_maintenance1) {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
if ((view_type == VK_IMAGE_VIEW_TYPE_2D || view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
if (!(image_flags & VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT)) {
skip |=
LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01005",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type "
"%s since the image doesn't have VK_IMAGE_CREATE_2D_ARRAY_COMPATIBLE_BIT flag set.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if ((image_flags & (VK_IMAGE_CREATE_SPARSE_BINDING_BIT | VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT |
VK_IMAGE_CREATE_SPARSE_ALIASED_BIT))) {
skip |= LogError(
pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-04971",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s "
"when the VK_IMAGE_CREATE_SPARSE_BINDING_BIT, VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or "
"VK_IMAGE_CREATE_SPARSE_ALIASED_BIT flags are enabled.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else if (pCreateInfo->subresourceRange.levelCount != 1) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-04970",
"vkCreateImageView(): pCreateInfo->viewType %s is with image type %s must have a "
"levelCount of 1 but it is %u.",
string_VkImageViewType(view_type), string_VkImageType(image_type),
pCreateInfo->subresourceRange.levelCount);
}
} else {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
} else {
if (view_type != VK_IMAGE_VIEW_TYPE_3D) {
// Help point to VK_KHR_maintenance1
if ((view_type == VK_IMAGE_VIEW_TYPE_2D || view_type == VK_IMAGE_VIEW_TYPE_2D_ARRAY)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s "
"without VK_KHR_maintenance1 enabled which was promoted in Vulkan 1.0.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
} else {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-subResourceRange-01021",
"vkCreateImageView(): pCreateInfo->viewType %s is not compatible with image type %s.",
string_VkImageViewType(view_type), string_VkImageType(image_type));
}
}
}
break;
default:
break;
}
// External format checks needed when VK_ANDROID_external_memory_android_hardware_buffer enabled
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateCreateImageViewANDROID(pCreateInfo);
}
skip |= ValidateImageViewFormatFeatures(image_state, view_format, image_usage);
if (enabled_features.shading_rate_image.shadingRateImage) {
if (image_usage & VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV) {
if (view_format != VK_FORMAT_R8_UINT) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02087",
"vkCreateImageView() If image was created with usage containing "
"VK_IMAGE_USAGE_SHADING_RATE_IMAGE_BIT_NV, format must be VK_FORMAT_R8_UINT.");
}
}
}
if (enabled_features.shading_rate_image.shadingRateImage ||
enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate) {
if (image_usage & VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR) {
if (view_type != VK_IMAGE_VIEW_TYPE_2D && view_type != VK_IMAGE_VIEW_TYPE_2D_ARRAY) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02086",
"vkCreateImageView() If image was created with usage containing "
"VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR, viewType must be "
"VK_IMAGE_VIEW_TYPE_2D or VK_IMAGE_VIEW_TYPE_2D_ARRAY.");
}
}
}
if (enabled_features.fragment_shading_rate_features.attachmentFragmentShadingRate &&
!phys_dev_ext_props.fragment_shading_rate_props.layeredShadingRateAttachments &&
image_usage & VK_IMAGE_USAGE_FRAGMENT_SHADING_RATE_ATTACHMENT_BIT_KHR && layer_count != 1) {
skip |= LogError(device, "VUID-VkImageViewCreateInfo-usage-04551",
"vkCreateImageView(): subresourceRange.layerCount is %u for a shading rate attachment image view.",
layer_count);
}
if (layer_count == VK_REMAINING_ARRAY_LAYERS) {
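// VK_REMAINING_ARRAY_LAYERS means "all layers from baseArrayLayer to the end of the image", so the
// view-type layer-count rules are validated against the resolved count rather than the sentinel value.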
const uint32_t remaining_layers = image_state->createInfo.arrayLayers - pCreateInfo->subresourceRange.baseArrayLayer;
if (view_type == VK_IMAGE_VIEW_TYPE_CUBE && remaining_layers != 6) {
skip |= LogError(device, "VUID-VkImageViewCreateInfo-viewType-02962",
"vkCreateImageView(): subresourceRange.layerCount VK_REMAINING_ARRAY_LAYERS=(%d) must be 6",
remaining_layers);
}
if (view_type == VK_IMAGE_VIEW_TYPE_CUBE_ARRAY && ((remaining_layers) % 6) != 0) {
skip |= LogError(
device, "VUID-VkImageViewCreateInfo-viewType-02963",
"vkCreateImageView(): subresourceRange.layerCount VK_REMAINING_ARRAY_LAYERS=(%d) must be a multiple of 6",
remaining_layers);
}
if ((remaining_layers != 1) && ((view_type == VK_IMAGE_VIEW_TYPE_1D) || (view_type == VK_IMAGE_VIEW_TYPE_2D) ||
(view_type == VK_IMAGE_VIEW_TYPE_3D))) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-imageViewType-04974",
"vkCreateImageView(): Using pCreateInfo->viewType %s and the subresourceRange.layerCount "
"VK_REMAINING_ARRAY_LAYERS=(%d) and must 1 (try looking into VK_IMAGE_VIEW_TYPE_*_ARRAY).",
string_VkImageViewType(view_type), remaining_layers);
}
} else {
if ((layer_count != 1) && ((view_type == VK_IMAGE_VIEW_TYPE_1D) || (view_type == VK_IMAGE_VIEW_TYPE_2D) ||
(view_type == VK_IMAGE_VIEW_TYPE_3D))) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-imageViewType-04973",
"vkCreateImageView(): Using pCreateInfo->viewType %s and the subresourceRange.layerCount is %d "
"and must 1 (try looking into VK_IMAGE_VIEW_TYPE_*_ARRAY).",
string_VkImageViewType(view_type), layer_count);
}
}
if (image_usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) {
if (pCreateInfo->subresourceRange.levelCount != 1) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-02571",
"vkCreateImageView(): If image was created with usage containing "
"VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT, subresourceRange.levelCount (%d) must: be 1",
pCreateInfo->subresourceRange.levelCount);
}
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT) {
if (!enabled_features.fragment_density_map_features.fragmentDensityMapDynamic) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-02572",
"vkCreateImageView(): If the fragmentDensityMapDynamic feature is not enabled, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT");
}
} else {
if (image_usage & VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT) {
if (image_flags & (VK_IMAGE_CREATE_PROTECTED_BIT | VK_IMAGE_CREATE_SPARSE_BINDING_BIT |
VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT | VK_IMAGE_CREATE_SPARSE_ALIASED_BIT)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-04116",
"vkCreateImageView(): If image was created with usage containing "
"VK_IMAGE_USAGE_FRAGMENT_DENSITY_MAP_BIT_EXT flags must not contain any of "
"VK_IMAGE_CREATE_PROTECTED_BIT, VK_IMAGE_CREATE_SPARSE_BINDING_BIT, "
"VK_IMAGE_CREATE_SPARSE_RESIDENCY_BIT, or VK_IMAGE_CREATE_SPARSE_ALIASED_BIT");
}
}
}
if (image_flags & VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT) {
if (pCreateInfo->subresourceRange.levelCount != 1) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01584",
"vkCreateImageView(): Image was created with VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT bit, "
"but subresourcesRange.levelCount (%" PRIu32 ") is not 1.",
pCreateInfo->subresourceRange.levelCount);
}
if (pCreateInfo->subresourceRange.layerCount != 1) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-01584",
"vkCreateImageView(): Image was created with VK_IMAGE_CREATE_BLOCK_TEXEL_VIEW_COMPATIBLE_BIT bit, "
"but subresourcesRange.layerCount (%" PRIu32 ") is not 1.",
pCreateInfo->subresourceRange.layerCount);
}
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT) {
if (!enabled_features.fragment_density_map2_features.fragmentDensityMapDeferred) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-03567",
"vkCreateImageView(): If the fragmentDensityMapDeferred feature is not enabled, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT");
}
if (pCreateInfo->flags & VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT) {
skip |=
LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-flags-03568",
"vkCreateImageView(): If flags contains VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DEFERRED_BIT_EXT, "
"flags must not contain VK_IMAGE_VIEW_CREATE_FRAGMENT_DENSITY_MAP_DYNAMIC_BIT_EXT");
}
}
if (device_extensions.vk_ext_fragment_density_map2) {
if ((image_flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) && (image_usage & VK_IMAGE_USAGE_SAMPLED_BIT) &&
(layer_count > phys_dev_ext_props.fragment_density_map2_props.maxSubsampledArrayLayers)) {
skip |= LogError(pCreateInfo->image, "VUID-VkImageViewCreateInfo-image-03569",
"vkCreateImageView(): If image was created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT and usage containing VK_IMAGE_USAGE_SAMPLED_BIT "
"subresourceRange.layerCount (%d) must: be less than or equal to maxSubsampledArrayLayers (%d)",
layer_count, phys_dev_ext_props.fragment_density_map2_props.maxSubsampledArrayLayers);
}
}
auto astc_decode_mode = LvlFindInChain<VkImageViewASTCDecodeModeEXT>(pCreateInfo->pNext);
if ((device_extensions.vk_ext_astc_decode_mode) && (astc_decode_mode != nullptr)) {
if ((enabled_features.astc_decode_features.decodeModeSharedExponent == VK_FALSE) &&
(astc_decode_mode->decodeMode == VK_FORMAT_E5B9G9R9_UFLOAT_PACK32)) {
skip |= LogError(device, "VUID-VkImageViewASTCDecodeModeEXT-decodeMode-02231",
"vkCreateImageView(): decodeModeSharedExponent is not enabled but "
"VkImageViewASTCDecodeModeEXT::decodeMode is VK_FORMAT_E5B9G9R9_UFLOAT_PACK32.");
}
}
if (ExtEnabled::kNotEnabled != device_extensions.vk_khr_portability_subset) {
// If swizzling is disabled, make sure it isn't used
// NOTE: as of spec version 1.2.183, VUID 04465 states: "all elements of components _must_ be
// VK_COMPONENT_SWIZZLE_IDENTITY."
// However, issue https://github.com/KhronosGroup/Vulkan-Portability/issues/27 points out that the identity can
// also be defined via R, G, B, A enums in the correct order.
// Spec change is at https://gitlab.khronos.org/vulkan/vulkan/-/merge_requests/4600
if ((VK_FALSE == enabled_features.portability_subset_features.imageViewFormatSwizzle) &&
!IsIdentitySwizzle(pCreateInfo->components)) {
skip |= LogError(device, "VUID-VkImageViewCreateInfo-imageViewFormatSwizzle-04465",
"vkCreateImageView (portability error): swizzle is disabled for this device.");
}
// Ensure ImageView's format has the same number of bits and components as Image's format if format reinterpretation is
// disabled
// TODO (ncesario): This is not correct for some cases (e.g., VK_FORMAT_B10G11R11_UFLOAT_PACK32 and
// VK_FORMAT_E5B9G9R9_UFLOAT_PACK32), but requires additional information that should probably be generated from the
// spec. See Github issue #2361.
if ((VK_FALSE == enabled_features.portability_subset_features.imageViewFormatReinterpretation) &&
((FormatElementSize(pCreateInfo->format, VK_IMAGE_ASPECT_COLOR_BIT) !=
FormatElementSize(image_state->createInfo.format, VK_IMAGE_ASPECT_COLOR_BIT)) ||
(FormatChannelCount(pCreateInfo->format) != FormatChannelCount(image_state->createInfo.format)))) {
skip |= LogError(device, "VUID-VkImageViewCreateInfo-imageViewFormatReinterpretation-04466",
"vkCreateImageView (portability error): ImageView format must have"
" the same number of components and bits per component as the Image's format");
}
}
}
return skip;
}
template <typename RegionType>
bool CoreChecks::ValidateCmdCopyBufferBounds(const BUFFER_STATE *src_buffer_state, const BUFFER_STATE *dst_buffer_state,
uint32_t regionCount, const RegionType *pRegions, CopyCommandVersion version) const {
bool skip = false;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyBuffer2KHR()" : "vkCmdCopyBuffer()";
const char *vuid;
VkDeviceSize src_buffer_size = src_buffer_state->createInfo.size;
VkDeviceSize dst_buffer_size = dst_buffer_state->createInfo.size;
VkDeviceSize src_min = UINT64_MAX;
VkDeviceSize src_max = 0;
VkDeviceSize dst_min = UINT64_MAX;
VkDeviceSize dst_max = 0;
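// Accumulate the bounding interval of all source and destination regions; used after the loop for the
// conservative same-buffer overlap check.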
for (uint32_t i = 0; i < regionCount; i++) {
src_min = std::min(src_min, pRegions[i].srcOffset);
src_max = std::max(src_max, (pRegions[i].srcOffset + pRegions[i].size));
dst_min = std::min(dst_min, pRegions[i].dstOffset);
dst_max = std::max(dst_max, (pRegions[i].dstOffset + pRegions[i].size));
// The srcOffset member of each element of pRegions must be less than the size of srcBuffer
if (pRegions[i].srcOffset >= src_buffer_size) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-srcOffset-00113" : "VUID-vkCmdCopyBuffer-srcOffset-00113";
skip |= LogError(src_buffer_state->buffer(), vuid,
"%s: pRegions[%d].srcOffset (%" PRIuLEAST64 ") is greater than pRegions[%d].size (%" PRIuLEAST64 ").",
func_name, i, pRegions[i].srcOffset, i, pRegions[i].size);
}
// The dstOffset member of each element of pRegions must be less than the size of dstBuffer
if (pRegions[i].dstOffset >= dst_buffer_size) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-dstOffset-00114" : "VUID-vkCmdCopyBuffer-dstOffset-00114";
skip |= LogError(dst_buffer_state->buffer(), vuid,
"%s: pRegions[%d].dstOffset (%" PRIuLEAST64 ") is greater than pRegions[%d].size (%" PRIuLEAST64 ").",
func_name, i, pRegions[i].dstOffset, i, pRegions[i].size);
}
// The size member of each element of pRegions must be less than or equal to the size of srcBuffer minus srcOffset
if (pRegions[i].size > (src_buffer_size - pRegions[i].srcOffset)) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-size-00115" : "VUID-vkCmdCopyBuffer-size-00115";
skip |= LogError(src_buffer_state->buffer(), vuid,
"%s: pRegions[%d].size (%" PRIuLEAST64 ") is greater than the source buffer size (%" PRIuLEAST64
") minus pRegions[%d].srcOffset (%" PRIuLEAST64 ").",
func_name, i, pRegions[i].size, src_buffer_size, i, pRegions[i].srcOffset);
}
// The size member of each element of pRegions must be less than or equal to the size of dstBuffer minus dstOffset
if (pRegions[i].size > (dst_buffer_size - pRegions[i].dstOffset)) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-size-00116" : "VUID-vkCmdCopyBuffer-size-00116";
skip |= LogError(dst_buffer_state->buffer(), vuid,
"%s: pRegions[%d].size (%" PRIuLEAST64 ") is greater than the destination buffer size (%" PRIuLEAST64
") minus pRegions[%d].dstOffset (%" PRIuLEAST64 ").",
func_name, i, pRegions[i].size, dst_buffer_size, i, pRegions[i].dstOffset);
}
}
// The union of the source regions, and the union of the destination regions, must not overlap in memory
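// Conservative check: tests the bounding intervals [src_min, src_max) and [dst_min, dst_max) rather than
// each region pair. Two half-open intervals overlap exactly when each one starts before the other ends.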
if (src_buffer_state->buffer() == dst_buffer_state->buffer()) {
if ((src_min < dst_max) && (dst_min < src_max)) {
vuid = is_2khr ? "VUID-VkCopyBufferInfo2KHR-pRegions-00117" : "VUID-vkCmdCopyBuffer-pRegions-00117";
skip |= LogError(src_buffer_state->buffer(), vuid, "%s: Detected overlap between source and dest regions in memory.",
func_name);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyBuffer(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkBuffer dstBuffer,
uint32_t regionCount, const VkBufferCopy *pRegions) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(srcBuffer);
const auto dst_buffer_state = GetBufferState(dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-srcBuffer-00119");
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-dstBuffer-00121");
// Validate that SRC & DST buffers have correct usage flags set
skip |=
ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, "VUID-vkCmdCopyBuffer-srcBuffer-00118",
"vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |=
ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdCopyBuffer-dstBuffer-00120",
"vkCmdCopyBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_node, CMD_COPYBUFFER, "vkCmdCopyBuffer()");
skip |= ValidateCmdCopyBufferBounds(src_buffer_state, dst_buffer_state, regionCount, pRegions, COPY_COMMAND_VERSION_1);
skip |= ValidateProtectedBuffer(cb_node, src_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01822");
skip |= ValidateProtectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01823");
skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer()", "VUID-vkCmdCopyBuffer-commandBuffer-01824");
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferInfo2KHR *pCopyBufferInfos) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(pCopyBufferInfos->srcBuffer);
const auto dst_buffer_state = GetBufferState(pCopyBufferInfos->dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, "vkCmdCopyBuffer2KHR()", "VUID-VkCopyBufferInfo2KHR-srcBuffer-00119");
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, "vkCmdCopyBuffer2KHR()", "VUID-VkCopyBufferInfo2KHR-dstBuffer-00121");
// Validate that SRC & DST buffers have correct usage flags set
skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true,
"VUID-VkCopyBufferInfo2KHR-srcBuffer-00118", "vkCmdCopyBuffer2KHR()",
"VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true,
"VUID-VkCopyBufferInfo2KHR-dstBuffer-00120", "vkCmdCopyBuffer2KHR()",
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateCmd(cb_node, CMD_COPYBUFFER2KHR, "vkCmdCopyBuffer2KHR()");
skip |= ValidateCmdCopyBufferBounds(src_buffer_state, dst_buffer_state, pCopyBufferInfos->regionCount,
pCopyBufferInfos->pRegions, COPY_COMMAND_VERSION_2);
skip |=
ValidateProtectedBuffer(cb_node, src_buffer_state, "vkCmdCopyBuffer2KHR()", "VUID-vkCmdCopyBuffer2KHR-commandBuffer-01822");
skip |=
ValidateProtectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer2KHR()", "VUID-vkCmdCopyBuffer2KHR-commandBuffer-01823");
skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, "vkCmdCopyBuffer2KHR()",
"VUID-vkCmdCopyBuffer2KHR-commandBuffer-01824");
return skip;
}
bool CoreChecks::ValidateIdleBuffer(VkBuffer buffer) const {
bool skip = false;
auto buffer_state = GetBufferState(buffer);
if (buffer_state) {
if (buffer_state->InUse()) {
skip |= LogError(buffer, "VUID-vkDestroyBuffer-buffer-00922", "Cannot free %s that is in use by a command buffer.",
report_data->FormatHandle(buffer).c_str());
}
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyImageView(VkDevice device, VkImageView imageView,
const VkAllocationCallbacks *pAllocator) const {
const IMAGE_VIEW_STATE *image_view_state = GetImageViewState(imageView);
bool skip = false;
if (image_view_state) {
skip |=
ValidateObjectNotInUse(image_view_state, "vkDestroyImageView", "VUID-vkDestroyImageView-imageView-01026");
}
return skip;
}
bool CoreChecks::PreCallValidateDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) const {
auto buffer_state = GetBufferState(buffer);
bool skip = false;
if (buffer_state) {
skip |= ValidateIdleBuffer(buffer);
}
return skip;
}
void CoreChecks::PreCallRecordDestroyBuffer(VkDevice device, VkBuffer buffer, const VkAllocationCallbacks *pAllocator) {
const auto buffer_state = Get<BUFFER_STATE>(buffer);
if (buffer_state) {
auto itr = buffer_address_map_.find(buffer_state->deviceAddress);
if (itr != buffer_address_map_.end()) {
buffer_address_map_.erase(itr);
}
}
StateTracker::PreCallRecordDestroyBuffer(device, buffer, pAllocator);
}
bool CoreChecks::PreCallValidateDestroyBufferView(VkDevice device, VkBufferView bufferView,
const VkAllocationCallbacks *pAllocator) const {
auto buffer_view_state = GetBufferViewState(bufferView);
bool skip = false;
if (buffer_view_state) {
skip |= ValidateObjectNotInUse(buffer_view_state, "vkDestroyBufferView",
"VUID-vkDestroyBufferView-bufferView-00936");
}
return skip;
}
bool CoreChecks::PreCallValidateCmdFillBuffer(VkCommandBuffer commandBuffer, VkBuffer dstBuffer, VkDeviceSize dstOffset,
VkDeviceSize size, uint32_t data) const {
auto cb_node = GetCBState(commandBuffer);
auto buffer_state = GetBufferState(dstBuffer);
bool skip = false;
skip |= ValidateMemoryIsBoundToBuffer(buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-dstBuffer-00031");
skip |= ValidateCmd(cb_node, CMD_FILLBUFFER, "vkCmdFillBuffer()");
// Validate that DST buffer has correct usage flags set
skip |= ValidateBufferUsageFlags(buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, "VUID-vkCmdFillBuffer-dstBuffer-00029",
"vkCmdFillBuffer()", "VK_BUFFER_USAGE_TRANSFER_DST_BIT");
skip |= ValidateProtectedBuffer(cb_node, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-commandBuffer-01811");
skip |= ValidateUnprotectedBuffer(cb_node, buffer_state, "vkCmdFillBuffer()", "VUID-vkCmdFillBuffer-commandBuffer-01812");
if (dstOffset >= buffer_state->createInfo.size) {
skip |= LogError(dstBuffer, "VUID-vkCmdFillBuffer-dstOffset-00024",
"vkCmdFillBuffer(): dstOffset (0x%" PRIxLEAST64
") is not less than destination buffer (%s) size (0x%" PRIxLEAST64 ").",
dstOffset, report_data->FormatHandle(dstBuffer).c_str(), buffer_state->createInfo.size);
}
if ((size != VK_WHOLE_SIZE) && (size > (buffer_state->createInfo.size - dstOffset))) {
skip |= LogError(dstBuffer, "VUID-vkCmdFillBuffer-size-00027",
"vkCmdFillBuffer(): size (0x%" PRIxLEAST64 ") is greater than dstBuffer (%s) size (0x%" PRIxLEAST64
") minus dstOffset (0x%" PRIxLEAST64 ").",
size, report_data->FormatHandle(dstBuffer).c_str(), buffer_state->createInfo.size, dstOffset);
}
return skip;
}
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateBufferImageCopyData(const CMD_BUFFER_STATE *cb_node, uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, const IMAGE_STATE *image_state,
const char *function, CopyCommandVersion version, bool image_to_buffer) const {
bool skip = false;
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *vuid;
assert(image_state != nullptr);
const VkFormat image_format = image_state->createInfo.format;
for (uint32_t i = 0; i < regionCount; i++) {
const VkImageAspectFlags region_aspect_mask = pRegions[i].imageSubresource.aspectMask;
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) {
if ((pRegions[i].imageOffset.y != 0) || (pRegions[i].imageExtent.height != 1)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00199", image_to_buffer, is_2khr),
"%s: pRegion[%d] imageOffset.y is %d and imageExtent.height is %d. For 1D images these must be 0 "
"and 1, respectively.",
function, i, pRegions[i].imageOffset.y, pRegions[i].imageExtent.height);
}
}
if ((image_state->createInfo.imageType == VK_IMAGE_TYPE_1D) || (image_state->createInfo.imageType == VK_IMAGE_TYPE_2D)) {
if ((pRegions[i].imageOffset.z != 0) || (pRegions[i].imageExtent.depth != 1)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00201", image_to_buffer, is_2khr),
"%s: pRegion[%d] imageOffset.z is %d and imageExtent.depth is %d. For 1D and 2D images these "
"must be 0 and 1, respectively.",
function, i, pRegions[i].imageOffset.z, pRegions[i].imageExtent.depth);
}
}
if (image_state->createInfo.imageType == VK_IMAGE_TYPE_3D) {
if ((0 != pRegions[i].imageSubresource.baseArrayLayer) || (1 != pRegions[i].imageSubresource.layerCount)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00213", image_to_buffer, is_2khr),
"%s: pRegion[%d] imageSubresource.baseArrayLayer is %d and imageSubresource.layerCount is %d. "
"For 3D images these must be 0 and 1, respectively.",
function, i, pRegions[i].imageSubresource.baseArrayLayer, pRegions[i].imageSubresource.layerCount);
}
}
// If the calling command's VkImage parameter's format is not a depth/stencil format,
// then bufferOffset must be a multiple of the calling command's VkImage parameter's element size
const uint32_t element_size = FormatElementSize(image_format, region_aspect_mask);
const VkDeviceSize bufferOffset = pRegions[i].bufferOffset;
if (FormatIsDepthOrStencil(image_format)) {
if (SafeModulo(bufferOffset, 4) != 0) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("04053", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferOffset 0x%" PRIxLEAST64
" must be a multiple 4 if using a depth/stencil format (%s).",
function, i, bufferOffset, string_VkFormat(image_format));
}
} else {
// If not depth/stencil and not multi-plane
if (!FormatIsMultiplane(image_format) && (SafeModulo(bufferOffset, element_size) != 0)) {
vuid = (device_extensions.vk_khr_sampler_ycbcr_conversion)
? GetBufferImageCopyCommandVUID("01558", image_to_buffer, is_2khr)
: GetBufferImageCopyCommandVUID("00193", image_to_buffer, is_2khr);
skip |= LogError(image_state->image(), vuid,
"%s: pRegion[%d] bufferOffset 0x%" PRIxLEAST64
" must be a multiple of this format's texel size (%" PRIu32 ").",
function, i, bufferOffset, element_size);
}
}
// BufferRowLength must be 0, or greater than or equal to the width member of imageExtent
if ((pRegions[i].bufferRowLength != 0) && (pRegions[i].bufferRowLength < pRegions[i].imageExtent.width)) {
vuid = (is_2khr) ? "VUID-VkBufferImageCopy2KHR-bufferRowLength-00195" : "VUID-VkBufferImageCopy-bufferRowLength-00195";
skip |=
LogError(image_state->image(), vuid,
"%s: pRegion[%d] bufferRowLength (%d) must be zero or greater-than-or-equal-to imageExtent.width (%d).",
function, i, pRegions[i].bufferRowLength, pRegions[i].imageExtent.width);
}
// BufferImageHeight must be 0, or greater than or equal to the height member of imageExtent
if ((pRegions[i].bufferImageHeight != 0) && (pRegions[i].bufferImageHeight < pRegions[i].imageExtent.height)) {
vuid =
(is_2khr) ? "VUID-VkBufferImageCopy2KHR-bufferImageHeight-00196" : "VUID-VkBufferImageCopy-bufferImageHeight-00196";
skip |=
LogError(image_state->image(), vuid,
"%s: pRegion[%d] bufferImageHeight (%d) must be zero or greater-than-or-equal-to imageExtent.height (%d).",
function, i, pRegions[i].bufferImageHeight, pRegions[i].imageExtent.height);
}
// Calculate adjusted image extent, accounting for multiplane image factors
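// Note: for multi-planar formats, GetSubresourceExtent() is expected to return the extent of the plane
// selected by aspectMask, which may be smaller than the image extent (e.g., half-size chroma planes).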
VkExtent3D adjusted_image_extent = image_state->GetSubresourceExtent(pRegions[i].imageSubresource);
// imageOffset.x and (imageExtent.width + imageOffset.x) must both be >= 0 and <= image subresource width
if ((pRegions[i].imageOffset.x < 0) || (pRegions[i].imageOffset.x > static_cast<int32_t>(adjusted_image_extent.width)) ||
((pRegions[i].imageOffset.x + static_cast<int32_t>(pRegions[i].imageExtent.width)) >
static_cast<int32_t>(adjusted_image_extent.width))) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00197", image_to_buffer, is_2khr),
"%s: Both pRegion[%d] imageoffset.x (%d) and (imageExtent.width + imageOffset.x) (%d) must be >= "
"zero or <= image subresource width (%d).",
function, i, pRegions[i].imageOffset.x, (pRegions[i].imageOffset.x + pRegions[i].imageExtent.width),
adjusted_image_extent.width);
}
// imageOffset.y and (imageExtent.height + imageOffset.y) must both be >= 0 and <= image subresource height
if ((pRegions[i].imageOffset.y < 0) || (pRegions[i].imageOffset.y > static_cast<int32_t>(adjusted_image_extent.height)) ||
((pRegions[i].imageOffset.y + static_cast<int32_t>(pRegions[i].imageExtent.height)) >
static_cast<int32_t>(adjusted_image_extent.height))) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00198", image_to_buffer, is_2khr),
"%s: Both pRegion[%d] imageoffset.y (%d) and (imageExtent.height + imageOffset.y) (%d) must be >= "
"zero or <= image subresource height (%d).",
function, i, pRegions[i].imageOffset.y, (pRegions[i].imageOffset.y + pRegions[i].imageExtent.height),
adjusted_image_extent.height);
}
// imageOffset.z and (imageExtent.depth + imageOffset.z) must both be >= 0 and <= image subresource depth
if ((pRegions[i].imageOffset.z < 0) || (pRegions[i].imageOffset.z > static_cast<int32_t>(adjusted_image_extent.depth)) ||
((pRegions[i].imageOffset.z + static_cast<int32_t>(pRegions[i].imageExtent.depth)) >
static_cast<int32_t>(adjusted_image_extent.depth))) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00200", image_to_buffer, is_2khr),
"%s: Both pRegion[%d] imageoffset.z (%d) and (imageExtent.depth + imageOffset.z) (%d) must be >= "
"zero or <= image subresource depth (%d).",
function, i, pRegions[i].imageOffset.z, (pRegions[i].imageOffset.z + pRegions[i].imageExtent.depth),
adjusted_image_extent.depth);
}
// subresource aspectMask must have exactly 1 bit set
const int num_bits = sizeof(VkFlags) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(region_aspect_mask);
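// std::bitset::count() is the population count, so a valid single-aspect mask yields exactly 1.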
if (aspect_mask_bits.count() != 1) {
vuid = (is_2khr) ? "VUID-VkBufferImageCopy2KHR-aspectMask-00212" : "VUID-VkBufferImageCopy-aspectMask-00212";
skip |= LogError(image_state->image(), vuid,
"%s: aspectMasks for imageSubresource in pRegion[%d] must have only a single bit set.", function, i);
}
// image subresource aspect bit must match format
if (!VerifyAspectsPresent(region_aspect_mask, image_format)) {
skip |=
LogError(image_state->image(), GetBufferImageCopyCommandVUID("00211", image_to_buffer, is_2khr),
"%s: pRegion[%d] subresource aspectMask 0x%x specifies aspects that are not present in image format 0x%x.",
function, i, region_aspect_mask, image_format);
}
// Checks that apply only to compressed images
if (FormatIsCompressed(image_format) || FormatIsSinglePlane_422(image_format)) {
auto block_size = FormatTexelBlockExtent(image_format);
// BufferRowLength must be a multiple of block width
if (SafeModulo(pRegions[i].bufferRowLength, block_size.width) != 0) {
skip |=
LogError(image_state->image(), GetBufferImageCopyCommandVUID("00203", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferRowLength (%d) must be a multiple of the compressed image's texel width (%d).",
function, i, pRegions[i].bufferRowLength, block_size.width);
}
// BufferImageHeight must be a multiple of block height
if (SafeModulo(pRegions[i].bufferImageHeight, block_size.height) != 0) {
skip |= LogError(
image_state->image(), GetBufferImageCopyCommandVUID("00204", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferImageHeight (%d) must be a multiple of the compressed image's texel height (%d).",
function, i, pRegions[i].bufferImageHeight, block_size.height);
}
// image offsets must be multiples of block dimensions
if ((SafeModulo(pRegions[i].imageOffset.x, block_size.width) != 0) ||
(SafeModulo(pRegions[i].imageOffset.y, block_size.height) != 0) ||
(SafeModulo(pRegions[i].imageOffset.z, block_size.depth) != 0)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00205", image_to_buffer, is_2khr),
"%s: pRegion[%d] imageOffset(x,y) (%d, %d) must be multiples of the compressed image's texel "
"width & height (%d, %d).",
function, i, pRegions[i].imageOffset.x, pRegions[i].imageOffset.y, block_size.width,
block_size.height);
}
// bufferOffset must be a multiple of block size (linear bytes)
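// e.g., BC1-compressed images use 8-byte texel blocks, so bufferOffset must be 8-byte aligned for them.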
uint32_t block_size_in_bytes = FormatElementSize(image_format);
if (SafeModulo(bufferOffset, block_size_in_bytes) != 0) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00206", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferOffset (0x%" PRIxLEAST64
") must be a multiple of the compressed image's texel block size (%" PRIu32 ").",
function, i, bufferOffset, block_size_in_bytes);
}
// imageExtent width must be a multiple of block width, or extent+offset width must equal subresource width
VkExtent3D mip_extent = image_state->GetSubresourceExtent(pRegions[i].imageSubresource);
if ((SafeModulo(pRegions[i].imageExtent.width, block_size.width) != 0) &&
(pRegions[i].imageExtent.width + pRegions[i].imageOffset.x != mip_extent.width)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00207", image_to_buffer, is_2khr),
"%s: pRegion[%d] extent width (%d) must be a multiple of the compressed texture block width "
"(%d), or when added to offset.x (%d) must equal the image subresource width (%d).",
function, i, pRegions[i].imageExtent.width, block_size.width, pRegions[i].imageOffset.x,
mip_extent.width);
}
// imageExtent height must be a multiple of block height, or extent+offset height must equal subresource height
if ((SafeModulo(pRegions[i].imageExtent.height, block_size.height) != 0) &&
(pRegions[i].imageExtent.height + pRegions[i].imageOffset.y != mip_extent.height)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00208", image_to_buffer, is_2khr),
"%s: pRegion[%d] extent height (%d) must be a multiple of the compressed texture block height "
"(%d), or when added to offset.y (%d) must equal the image subresource height (%d).",
function, i, pRegions[i].imageExtent.height, block_size.height, pRegions[i].imageOffset.y,
mip_extent.height);
}
// imageExtent depth must be a multiple of block depth, or extent+offset depth must equal subresource depth
if ((SafeModulo(pRegions[i].imageExtent.depth, block_size.depth) != 0) &&
(pRegions[i].imageExtent.depth + pRegions[i].imageOffset.z != mip_extent.depth)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("00209", image_to_buffer, is_2khr),
"%s: pRegion[%d] extent width (%d) must be a multiple of the compressed texture block depth "
"(%d), or when added to offset.z (%d) must equal the image subresource depth (%d).",
function, i, pRegions[i].imageExtent.depth, block_size.depth, pRegions[i].imageOffset.z,
mip_extent.depth);
}
}
// Checks that apply only to multi-planar format images
if (FormatIsMultiplane(image_format)) {
// VK_IMAGE_ASPECT_PLANE_2_BIT valid only for image formats with three planes
if ((FormatPlaneCount(image_format) < 3) && (region_aspect_mask == VK_IMAGE_ASPECT_PLANE_2_BIT)) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("01560", image_to_buffer, is_2khr),
"%s: pRegion[%d] subresource aspectMask cannot be VK_IMAGE_ASPECT_PLANE_2_BIT unless image "
"format has three planes.",
function, i);
}
// image subresource aspectMask must be VK_IMAGE_ASPECT_PLANE_*_BIT
if (0 ==
(region_aspect_mask & (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT | VK_IMAGE_ASPECT_PLANE_2_BIT))) {
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("01560", image_to_buffer, is_2khr),
"%s: pRegion[%d] subresource aspectMask for multi-plane image formats must have a "
"VK_IMAGE_ASPECT_PLANE_*_BIT when copying to or from.",
function, i);
} else {
// Know aspect mask is valid
const VkFormat compatible_format = FindMultiplaneCompatibleFormat(image_format, region_aspect_mask);
const uint32_t compatible_size = FormatElementSize(compatible_format);
if (SafeModulo(bufferOffset, compatible_size) != 0) {
skip |= LogError(
image_state->image(), GetBufferImageCopyCommandVUID("01559", image_to_buffer, is_2khr),
"%s: pRegion[%d]->bufferOffset is 0x%" PRIxLEAST64
" but must be a multiple of the multi-plane compatible format's texel size (%u) for plane %u (%s).",
function, i, bufferOffset, compatible_size, GetPlaneIndex(region_aspect_mask),
string_VkFormat(compatible_format));
}
}
}
// TODO - Don't use ValidateCmdQueueFlags here; it currently has no way to produce a more descriptive message
const COMMAND_POOL_STATE *command_pool = cb_node->command_pool.get();
assert(command_pool != nullptr);
const uint32_t queue_family_index = command_pool->queueFamilyIndex;
const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
if (((queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT)) == 0) && (SafeModulo(bufferOffset, 4) != 0)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(command_pool->commandPool());
skip |= LogError(image_state->image(), GetBufferImageCopyCommandVUID("04052", image_to_buffer, is_2khr),
"%s: pRegion[%d] bufferOffset 0x%" PRIxLEAST64
" must be a multiple 4 because the command buffer %s was allocated from the command pool %s "
"which was created with queueFamilyIndex %u, which doesn't contain the VK_QUEUE_GRAPHICS_BIT or "
"VK_QUEUE_COMPUTE_BIT flag.",
function, i, bufferOffset, report_data->FormatHandle(cb_node->commandBuffer()).c_str(),
report_data->FormatHandle(command_pool->commandPool()).c_str(), queue_family_index);
}
}
return skip;
}
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateImageBounds(const IMAGE_STATE *image_state, const uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, const char *func_name, const char *msg_code) const {
bool skip = false;
const VkImageCreateInfo *image_info = &(image_state->createInfo);
for (uint32_t i = 0; i < regionCount; i++) {
VkExtent3D extent = pRegions[i].imageExtent;
VkOffset3D offset = pRegions[i].imageOffset;
if (IsExtentSizeZero(&extent)) {  // Warn on zero-area subresource
skip |= LogWarning(image_state->image(), kVUID_Core_Image_ZeroAreaSubregion,
"%s: pRegion[%d] imageExtent of {%1d, %1d, %1d} has zero area", func_name, i, extent.width,
extent.height, extent.depth);
}
VkExtent3D image_extent = image_state->GetSubresourceExtent(pRegions[i].imageSubresource);
// If we're using a compressed format, valid extent is rounded up to multiple of block size (per 18.1)
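// e.g., a 13x13 mip of a format with 4x4 texel blocks is addressable as 16x16 for copy-bounds purposes.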
if (FormatIsCompressed(image_info->format) || FormatIsSinglePlane_422(image_info->format)) {
auto block_extent = FormatTexelBlockExtent(image_info->format);
if (image_extent.width % block_extent.width) {
image_extent.width += (block_extent.width - (image_extent.width % block_extent.width));
}
if (image_extent.height % block_extent.height) {
image_extent.height += (block_extent.height - (image_extent.height % block_extent.height));
}
if (image_extent.depth % block_extent.depth) {
image_extent.depth += (block_extent.depth - (image_extent.depth % block_extent.depth));
}
}
if (0 != ExceedsBounds(&offset, &extent, &image_extent)) {
skip |= LogError(image_state->image(), msg_code, "%s: pRegion[%d] exceeds image bounds.", func_name, i);
}
}
return skip;
}
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateBufferBounds(const IMAGE_STATE *image_state, const BUFFER_STATE *buff_state, uint32_t regionCount,
const BufferImageCopyRegionType *pRegions, const char *func_name,
const char *msg_code) const {
bool skip = false;
VkDeviceSize buffer_size = buff_state->createInfo.size;
for (uint32_t i = 0; i < regionCount; i++) {
VkDeviceSize max_buffer_offset =
GetBufferSizeFromCopyImage(pRegions[i], image_state->createInfo.format) + pRegions[i].bufferOffset;
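// max_buffer_offset is the smallest buffer size this region requires: the bytes the copy addresses
// (honoring bufferRowLength/bufferImageHeight padding, as computed by GetBufferSizeFromCopyImage)
// plus the region's starting bufferOffset.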
if (buffer_size < max_buffer_offset) {
skip |=
LogError(device, msg_code, "%s: pRegion[%d] exceeds buffer size of %" PRIu64 " bytes.", func_name, i, buffer_size);
}
}
return skip;
}
template <typename BufferImageCopyRegionType>
bool CoreChecks::ValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const BufferImageCopyRegionType *pRegions,
CopyCommandVersion version) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_image_state = GetImageState(srcImage);
const auto dst_buffer_state = GetBufferState(dstBuffer);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyImageToBuffer2KHR()" : "vkCmdCopyImageToBuffer()";
const CMD_TYPE cmd_type = is_2khr ? CMD_COPYIMAGETOBUFFER2KHR : CMD_COPYIMAGETOBUFFER;
const char *vuid;
bool skip = ValidateBufferImageCopyData(cb_node, regionCount, pRegions, src_image_state, func_name, version, true);
// Validate command buffer state
skip |= ValidateCmd(cb_node, cmd_type, func_name);
// Command pool must support graphics, compute, or transfer operations
const auto pool = cb_node->command_pool.get();
VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[pool->queueFamilyIndex].queueFlags;
if (0 == (queue_flags & (VK_QUEUE_GRAPHICS_BIT | VK_QUEUE_COMPUTE_BIT | VK_QUEUE_TRANSFER_BIT))) {
vuid =
is_2khr ? "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-cmdpool" : "VUID-vkCmdCopyImageToBuffer-commandBuffer-cmdpool";
skip |= LogError(cb_node->createInfo.commandPool, vuid,
"Cannot call %s on a command buffer allocated from a pool without graphics, compute, "
"or transfer capabilities.",
func_name);
}
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-pRegions-00182" : "VUID-vkCmdCopyImageToBuffer-pRegions-00182";
skip |= ValidateImageBounds(src_image_state, regionCount, pRegions, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-pRegions-00183" : "VUID-vkCmdCopyImageToBuffer-pRegions-00183";
skip |= ValidateBufferBounds(src_image_state, dst_buffer_state, regionCount, pRegions, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-00188" : "VUID-vkCmdCopyImageToBuffer-srcImage-00188";
const char *location = is_2khr ? "vkCmdCopyImageToBuffer2KHR(): srcImage" : "vkCmdCopyImageToBuffer(): srcImage";
skip |= ValidateImageSampleCount(src_image_state, VK_SAMPLE_COUNT_1_BIT, location, vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-00187" : "VUID-vkCmdCopyImageToBuffer-srcImage-00187";
skip |= ValidateMemoryIsBoundToImage(src_image_state, func_name, vuid);
vuid = is_2khr ? "vkCmdCopyImageToBuffer-dstBuffer2KHR-00192" : "vkCmdCopyImageToBuffer dstBuffer-00192";
skip |= ValidateMemoryIsBoundToBuffer(dst_buffer_state, func_name, vuid);
// Validate that SRC image & DST buffer have correct usage flags set
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-00186" : "VUID-vkCmdCopyImageToBuffer-srcImage-00186";
skip |= ValidateImageUsageFlags(src_image_state, VK_IMAGE_USAGE_TRANSFER_SRC_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_SRC_BIT");
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-dstBuffer-00191" : "VUID-vkCmdCopyImageToBuffer-dstBuffer-00191";
skip |= ValidateBufferUsageFlags(dst_buffer_state, VK_BUFFER_USAGE_TRANSFER_DST_BIT, true, vuid, func_name,
"VK_BUFFER_USAGE_TRANSFER_DST_BIT");
vuid = is_2khr ? "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-01831" : "VUID-vkCmdCopyImageToBuffer-commandBuffer-01831";
skip |= ValidateProtectedImage(cb_node, src_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-01832" : "VUID-vkCmdCopyImageToBuffer-commandBuffer-01832";
skip |= ValidateProtectedBuffer(cb_node, dst_buffer_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyImageToBuffer2KHR-commandBuffer-01833" : "VUID-vkCmdCopyImageToBuffer-commandBuffer-01833";
skip |= ValidateUnprotectedBuffer(cb_node, dst_buffer_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (src_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-02544" : "VUID-vkCmdCopyImageToBuffer-srcImage-02544";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: srcImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
if (device_extensions.vk_khr_maintenance1) {
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImage-01998" : "VUID-vkCmdCopyImageToBuffer-srcImage-01998";
skip |= ValidateImageFormatFeatureFlags(src_image_state, VK_FORMAT_FEATURE_TRANSFER_SRC_BIT, func_name, vuid);
}
bool hit_error = false;
const char *src_invalid_layout_vuid = (src_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? (is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImageLayout-01397"
: "VUID-vkCmdCopyImageToBuffer-srcImageLayout-01397")
: (is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImageLayout-00190"
: "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00190");
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, func_name, "imageSubresource", i);
vuid =
is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-srcImageLayout-00189" : "VUID-vkCmdCopyImageToBuffer-srcImageLayout-00189";
skip |= VerifyImageLayout(cb_node, src_image_state, pRegions[i].imageSubresource, srcImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_SRC_OPTIMAL, func_name, src_invalid_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-imageOffset-01794" : "VUID-vkCmdCopyImageToBuffer-imageOffset-01794";
skip |= ValidateCopyBufferImageTransferGranularityRequirements(cb_node, src_image_state, &pRegions[i], i, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-imageSubresource-01703"
: "VUID-vkCmdCopyImageToBuffer-imageSubresource-01703";
skip |= ValidateImageMipLevel(cb_node, src_image_state, pRegions[i].imageSubresource.mipLevel, i, func_name,
"imageSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyImageToBufferInfo2KHR-imageSubresource-01704"
: "VUID-vkCmdCopyImageToBuffer-imageSubresource-01704";
skip |= ValidateImageArrayLayerRange(cb_node, src_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, func_name, "imageSubresource", vuid);
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount,
const VkBufferImageCopy *pRegions) const {
return ValidateCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) const {
return ValidateCmdCopyImageToBuffer(commandBuffer, pCopyImageToBufferInfo->srcImage, pCopyImageToBufferInfo->srcImageLayout,
pCopyImageToBufferInfo->dstBuffer, pCopyImageToBufferInfo->regionCount,
pCopyImageToBufferInfo->pRegions, COPY_COMMAND_VERSION_2);
}
void CoreChecks::PreCallRecordCmdCopyImageToBuffer(VkCommandBuffer commandBuffer, VkImage srcImage, VkImageLayout srcImageLayout,
VkBuffer dstBuffer, uint32_t regionCount, const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyImageToBuffer(commandBuffer, srcImage, srcImageLayout, dstBuffer, regionCount, pRegions);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(srcImage);
// Make sure that all image slices record the referenced layout
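// (SetImageInitialLayout seeds the command buffer's per-subresource layout map, giving later layout
// validation a baseline to check transitions against.)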
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pRegions[i].imageSubresource, srcImageLayout);
}
}
void CoreChecks::PreCallRecordCmdCopyImageToBuffer2KHR(VkCommandBuffer commandBuffer,
const VkCopyImageToBufferInfo2KHR *pCopyImageToBufferInfo) {
StateTracker::PreCallRecordCmdCopyImageToBuffer2KHR(commandBuffer, pCopyImageToBufferInfo);
auto cb_node = GetCBState(commandBuffer);
auto src_image_state = GetImageState(pCopyImageToBufferInfo->srcImage);
// Make sure that all image slices record the referenced layout
for (uint32_t i = 0; i < pCopyImageToBufferInfo->regionCount; ++i) {
SetImageInitialLayout(cb_node, *src_image_state, pCopyImageToBufferInfo->pRegions[i].imageSubresource,
pCopyImageToBufferInfo->srcImageLayout);
}
}
template <typename RegionType>
bool CoreChecks::ValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount, const RegionType *pRegions,
CopyCommandVersion version) const {
const auto cb_node = GetCBState(commandBuffer);
const auto src_buffer_state = GetBufferState(srcBuffer);
const auto dst_image_state = GetImageState(dstImage);
const bool is_2khr = (version == COPY_COMMAND_VERSION_2);
const char *func_name = is_2khr ? "vkCmdCopyBufferToImage2KHR()" : "vkCmdCopyBufferToImage()";
const CMD_TYPE cmd_type = is_2khr ? CMD_COPYBUFFERTOIMAGE2KHR : CMD_COPYBUFFERTOIMAGE;
const char *vuid;
bool skip = ValidateBufferImageCopyData(cb_node, regionCount, pRegions, dst_image_state, func_name, version, false);
// Validate command buffer state
skip |= ValidateCmd(cb_node, cmd_type, func_name);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-pRegions-00172" : "VUID-vkCmdCopyBufferToImage-pRegions-00172";
skip |= ValidateImageBounds(dst_image_state, regionCount, pRegions, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-pRegions-00171" : "VUID-vkCmdCopyBufferToImage-pRegions-00171";
skip |= ValidateBufferBounds(dst_image_state, src_buffer_state, regionCount, pRegions, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-00179" : "VUID-vkCmdCopyBufferToImage-dstImage-00179";
const char *location = is_2khr ? "vkCmdCopyBufferToImage2KHR(): dstImage" : "vkCmdCopyBufferToImage(): dstImage";
skip |= ValidateImageSampleCount(dst_image_state, VK_SAMPLE_COUNT_1_BIT, location, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-srcBuffer-00176" : "VUID-vkCmdCopyBufferToImage-srcBuffer-00176";
skip |= ValidateMemoryIsBoundToBuffer(src_buffer_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-00178" : "VUID-vkCmdCopyBufferToImage-dstImage-00178";
skip |= ValidateMemoryIsBoundToImage(dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-srcBuffer-00174" : "VUID-vkCmdCopyBufferToImage-srcBuffer-00174";
skip |= ValidateBufferUsageFlags(src_buffer_state, VK_BUFFER_USAGE_TRANSFER_SRC_BIT, true, vuid, func_name,
"VK_BUFFER_USAGE_TRANSFER_SRC_BIT");
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-00177" : "VUID-vkCmdCopyBufferToImage-dstImage-00177";
skip |= ValidateImageUsageFlags(dst_image_state, VK_IMAGE_USAGE_TRANSFER_DST_BIT, true, vuid, func_name,
"VK_IMAGE_USAGE_TRANSFER_DST_BIT");
vuid = is_2khr ? "VUID-vkCmdCopyBufferToImage2KHR-commandBuffer-01828" : "VUID-vkCmdCopyBufferToImage-commandBuffer-01828";
skip |= ValidateProtectedBuffer(cb_node, src_buffer_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyBufferToImage2KHR-commandBuffer-01829" : "VUID-vkCmdCopyBufferToImage-commandBuffer-01829";
skip |= ValidateProtectedImage(cb_node, dst_image_state, func_name, vuid);
vuid = is_2khr ? "VUID-vkCmdCopyBufferToImage-commandBuffer-01830" : "VUID-vkCmdCopyBufferToImage-commandBuffer-01830";
skip |= ValidateUnprotectedImage(cb_node, dst_image_state, func_name, vuid);
// Validation for VK_EXT_fragment_density_map
if (dst_image_state->createInfo.flags & VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT) {
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-02543" : "VUID-vkCmdCopyBufferToImage-dstImage-02543";
skip |= LogError(cb_node->commandBuffer(), vuid,
"%s: dstImage must not have been created with flags containing "
"VK_IMAGE_CREATE_SUBSAMPLED_BIT_EXT",
func_name);
}
if (device_extensions.vk_khr_maintenance1) {
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImage-01997" : "VUID-vkCmdCopyBufferToImage-dstImage-01997";
skip |= ValidateImageFormatFeatureFlags(dst_image_state, VK_FORMAT_FEATURE_TRANSFER_DST_BIT, func_name, vuid);
}
bool hit_error = false;
const char *dst_invalid_layout_vuid = (dst_image_state->shared_presentable && device_extensions.vk_khr_shared_presentable_image)
? (is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImageLayout-01396"
: "VUID-vkCmdCopyBufferToImage-dstImageLayout-01396")
: (is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImageLayout-00181"
: "VUID-vkCmdCopyBufferToImage-dstImageLayout-00181");
for (uint32_t i = 0; i < regionCount; ++i) {
skip |= ValidateImageSubresourceLayers(cb_node, &pRegions[i].imageSubresource, func_name, "imageSubresource", i);
vuid =
is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-dstImageLayout-00180" : "VUID-vkCmdCopyBufferToImage-dstImageLayout-00180";
skip |= VerifyImageLayout(cb_node, dst_image_state, pRegions[i].imageSubresource, dstImageLayout,
VK_IMAGE_LAYOUT_TRANSFER_DST_OPTIMAL, func_name, dst_invalid_layout_vuid, vuid, &hit_error);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-imageOffset-01793" : "VUID-vkCmdCopyBufferToImage-imageOffset-01793";
skip |= ValidateCopyBufferImageTransferGranularityRequirements(cb_node, dst_image_state, &pRegions[i], i, func_name, vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-imageSubresource-01701"
: "VUID-vkCmdCopyBufferToImage-imageSubresource-01701";
skip |= ValidateImageMipLevel(cb_node, dst_image_state, pRegions[i].imageSubresource.mipLevel, i, func_name,
"imageSubresource", vuid);
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-imageSubresource-01702"
: "VUID-vkCmdCopyBufferToImage-imageSubresource-01702";
skip |= ValidateImageArrayLayerRange(cb_node, dst_image_state, pRegions[i].imageSubresource.baseArrayLayer,
pRegions[i].imageSubresource.layerCount, i, func_name, "imageSubresource", vuid);
// TODO - Don't use ValidateCmdQueueFlags here; it currently has no way to produce a more descriptive message
const COMMAND_POOL_STATE *command_pool = cb_node->command_pool.get();
assert(command_pool != nullptr);
const uint32_t queue_family_index = command_pool->queueFamilyIndex;
const VkQueueFlags queue_flags = GetPhysicalDeviceState()->queue_family_properties[queue_family_index].queueFlags;
const VkImageAspectFlags region_aspect_mask = pRegions[i].imageSubresource.aspectMask;
if (((queue_flags & VK_QUEUE_GRAPHICS_BIT) == 0) &&
((region_aspect_mask & (VK_IMAGE_ASPECT_DEPTH_BIT | VK_IMAGE_ASPECT_STENCIL_BIT)) != 0)) {
LogObjectList objlist(cb_node->commandBuffer());
objlist.add(command_pool->commandPool());
vuid = is_2khr ? "VUID-VkCopyBufferToImageInfo2KHR-commandBuffer-04477"
: "VUID-vkCmdCopyBufferToImage-commandBuffer-04477";
skip |= LogError(dst_image_state->image(), vuid,
"%s(): pRegion[%d] subresource aspectMask 0x%x specifies VK_IMAGE_ASPECT_DEPTH_BIT or "
"VK_IMAGE_ASPECT_STENCIL_BIT but the command buffer %s was allocated from the command pool %s "
"which was created with queueFamilyIndex %u, which doesn't contain the VK_QUEUE_GRAPHICS_BIT flag.",
func_name, i, region_aspect_mask, report_data->FormatHandle(cb_node->commandBuffer()).c_str(),
report_data->FormatHandle(command_pool->commandPool()).c_str(), queue_family_index);
}
}
return skip;
}
bool CoreChecks::PreCallValidateCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) const {
return ValidateCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions,
COPY_COMMAND_VERSION_1);
}
bool CoreChecks::PreCallValidateCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo) const {
return ValidateCmdCopyBufferToImage(commandBuffer, pCopyBufferToImageInfo->srcBuffer, pCopyBufferToImageInfo->dstImage,
pCopyBufferToImageInfo->dstImageLayout, pCopyBufferToImageInfo->regionCount,
pCopyBufferToImageInfo->pRegions, COPY_COMMAND_VERSION_2);
}
void CoreChecks::PreCallRecordCmdCopyBufferToImage(VkCommandBuffer commandBuffer, VkBuffer srcBuffer, VkImage dstImage,
VkImageLayout dstImageLayout, uint32_t regionCount,
const VkBufferImageCopy *pRegions) {
StateTracker::PreCallRecordCmdCopyBufferToImage(commandBuffer, srcBuffer, dstImage, dstImageLayout, regionCount, pRegions);
auto cb_node = GetCBState(commandBuffer);
auto dst_image_state = GetImageState(dstImage);
    // Make sure that all image slices are recorded in the referenced layout
for (uint32_t i = 0; i < regionCount; ++i) {
SetImageInitialLayout(cb_node, *dst_image_state, pRegions[i].imageSubresource, dstImageLayout);
}
}
void CoreChecks::PreCallRecordCmdCopyBufferToImage2KHR(VkCommandBuffer commandBuffer,
const VkCopyBufferToImageInfo2KHR *pCopyBufferToImageInfo2KHR) {
StateTracker::PreCallRecordCmdCopyBufferToImage2KHR(commandBuffer, pCopyBufferToImageInfo2KHR);
auto cb_node = GetCBState(commandBuffer);
auto dst_image_state = GetImageState(pCopyBufferToImageInfo2KHR->dstImage);
    // Make sure that all image slices are recorded in the referenced layout
for (uint32_t i = 0; i < pCopyBufferToImageInfo2KHR->regionCount; ++i) {
SetImageInitialLayout(cb_node, *dst_image_state, pCopyBufferToImageInfo2KHR->pRegions[i].imageSubresource,
pCopyBufferToImageInfo2KHR->dstImageLayout);
}
}
bool CoreChecks::PreCallValidateGetImageSubresourceLayout(VkDevice device, VkImage image, const VkImageSubresource *pSubresource,
VkSubresourceLayout *pLayout) const {
bool skip = false;
const VkImageAspectFlags sub_aspect = pSubresource->aspectMask;
// The aspectMask member of pSubresource must only have a single bit set
const int num_bits = sizeof(sub_aspect) * CHAR_BIT;
std::bitset<num_bits> aspect_mask_bits(sub_aspect);
if (aspect_mask_bits.count() != 1) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-aspectMask-00997",
"vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must have exactly 1 bit set.");
}
const IMAGE_STATE *image_entry = GetImageState(image);
if (!image_entry) {
return skip;
}
// Image must have been created with tiling equal to VK_IMAGE_TILING_LINEAR
if (device_extensions.vk_ext_image_drm_format_modifier) {
if ((image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) &&
(image_entry->createInfo.tiling != VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT)) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-02270",
"vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR or "
"VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT.");
}
} else {
if (image_entry->createInfo.tiling != VK_IMAGE_TILING_LINEAR) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-image-00996",
"vkGetImageSubresourceLayout(): Image must have tiling of VK_IMAGE_TILING_LINEAR.");
}
}
// mipLevel must be less than the mipLevels specified in VkImageCreateInfo when the image was created
if (pSubresource->mipLevel >= image_entry->createInfo.mipLevels) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-mipLevel-01716",
"vkGetImageSubresourceLayout(): pSubresource.mipLevel (%d) must be less than %d.", pSubresource->mipLevel,
image_entry->createInfo.mipLevels);
}
// arrayLayer must be less than the arrayLayers specified in VkImageCreateInfo when the image was created
if (pSubresource->arrayLayer >= image_entry->createInfo.arrayLayers) {
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-arrayLayer-01717",
"vkGetImageSubresourceLayout(): pSubresource.arrayLayer (%d) must be less than %d.",
pSubresource->arrayLayer, image_entry->createInfo.arrayLayers);
}
// subresource's aspect must be compatible with image's format.
const VkFormat img_format = image_entry->createInfo.format;
if (image_entry->createInfo.tiling == VK_IMAGE_TILING_LINEAR) {
if (FormatIsMultiplane(img_format)) {
VkImageAspectFlags allowed_flags = (VK_IMAGE_ASPECT_PLANE_0_BIT | VK_IMAGE_ASPECT_PLANE_1_BIT);
const char *vuid = "VUID-vkGetImageSubresourceLayout-format-01581"; // 2-plane version
if (FormatPlaneCount(img_format) > 2u) {
allowed_flags |= VK_IMAGE_ASPECT_PLANE_2_BIT;
vuid = "VUID-vkGetImageSubresourceLayout-format-01582"; // 3-plane version
}
if (sub_aspect != (sub_aspect & allowed_flags)) {
skip |= LogError(image, vuid,
"vkGetImageSubresourceLayout(): For multi-planar images, VkImageSubresource.aspectMask (0x%" PRIx32
") must be a single-plane specifier flag.",
sub_aspect);
}
} else if (FormatIsColor(img_format)) {
if (sub_aspect != VK_IMAGE_ASPECT_COLOR_BIT) {
skip |= LogError(image, kVUID_Core_DrawState_InvalidImageAspect,
"vkGetImageSubresourceLayout(): For color formats, VkImageSubresource.aspectMask must be "
"VK_IMAGE_ASPECT_COLOR.");
}
} else if (FormatIsDepthOrStencil(img_format)) {
if ((sub_aspect != VK_IMAGE_ASPECT_DEPTH_BIT) && (sub_aspect != VK_IMAGE_ASPECT_STENCIL_BIT)) {
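                // This branch is left empty in this revision; a sketch (with
                // hypothetical message text) of the check it presumably intends:
                //   skip |= LogError(image, kVUID_Core_DrawState_InvalidImageAspect,
                //                    "vkGetImageSubresourceLayout(): For depth/stencil formats, "
                //                    "VkImageSubresource.aspectMask must be VK_IMAGE_ASPECT_DEPTH_BIT "
                //                    "or VK_IMAGE_ASPECT_STENCIL_BIT.");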
}
}
} else if (image_entry->createInfo.tiling == VK_IMAGE_TILING_DRM_FORMAT_MODIFIER_EXT) {
if ((sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_0_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_1_BIT_EXT) &&
(sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_2_BIT_EXT) && (sub_aspect != VK_IMAGE_ASPECT_MEMORY_PLANE_3_BIT_EXT)) {
// TODO: This VU also needs to ensure that the DRM index is in range and valid.
skip |= LogError(image, "VUID-vkGetImageSubresourceLayout-tiling-02271",
"vkGetImageSubresourceLayout(): VkImageSubresource.aspectMask must be "
"VK_IMAGE_ASPECT_MEMORY_PLANE_i_BIT_EXT.");
}
}
if (device_extensions.vk_android_external_memory_android_hardware_buffer) {
skip |= ValidateGetImageSubresourceLayoutANDROID(image);
}
return skip;
}
// Validates the image is allowed to be protected
bool CoreChecks::ValidateProtectedImage(const CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const char *cmd_name,
const char *vuid, const char *more_message) const {
bool skip = false;
if ((cb_state->unprotected == true) && (image_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(image_state->image());
skip |= LogError(objlist, vuid, "%s: command buffer %s is unprotected while image %s is a protected image.%s", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(image_state->image()).c_str(), more_message);
}
return skip;
}
// Validates the image is allowed to be unprotected
bool CoreChecks::ValidateUnprotectedImage(const CMD_BUFFER_STATE *cb_state, const IMAGE_STATE *image_state, const char *cmd_name,
const char *vuid, const char *more_message) const {
bool skip = false;
if ((cb_state->unprotected == false) && (image_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(image_state->image());
skip |= LogError(objlist, vuid, "%s: command buffer %s is protected while image %s is an unprotected image.%s", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(image_state->image()).c_str(), more_message);
}
return skip;
}
// Validates the buffer is allowed to be protected
bool CoreChecks::ValidateProtectedBuffer(const CMD_BUFFER_STATE *cb_state, const BUFFER_STATE *buffer_state, const char *cmd_name,
const char *vuid, const char *more_message) const {
bool skip = false;
if ((cb_state->unprotected == true) && (buffer_state->unprotected == false)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(buffer_state->buffer());
skip |= LogError(objlist, vuid, "%s: command buffer %s is unprotected while buffer %s is a protected buffer.%s", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(buffer_state->buffer()).c_str(), more_message);
}
return skip;
}
// Validates the buffer is allowed to be unprotected
bool CoreChecks::ValidateUnprotectedBuffer(const CMD_BUFFER_STATE *cb_state, const BUFFER_STATE *buffer_state, const char *cmd_name,
const char *vuid, const char *more_message) const {
bool skip = false;
if ((cb_state->unprotected == false) && (buffer_state->unprotected == true)) {
LogObjectList objlist(cb_state->commandBuffer());
objlist.add(buffer_state->buffer());
skip |= LogError(objlist, vuid, "%s: command buffer %s is protected while buffer %s is an unprotected buffer.%s", cmd_name,
report_data->FormatHandle(cb_state->commandBuffer()).c_str(),
report_data->FormatHandle(buffer_state->buffer()).c_str(), more_message);
}
return skip;
}
 | 1 | 18,833 | So I tried adding this VU a long time ago and thought it would be this simple a check, but it turns out other validation in `ValidateImageAspectMask` gets in the way of reaching here; I assume that is what is failing CI. It would require some more refactoring. | KhronosGroup-Vulkan-ValidationLayers | cpp
@@ -0,0 +1,6 @@
+class Arel::Table
+ def coalesce_and_sum(column, value)
+ coalesce_result = Arel::Nodes::NamedFunction.new('COALESCE', [self[column], value])
+ Arel::Nodes::NamedFunction.new('SUM', [coalesce_result], column.to_s)
+ end
+end | 1 | 1 | 7,133 | This method is not needed for postgresql, by default sum function omits null values so we can remove this file | blackducksoftware-ohloh-ui | rb |
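To illustrate the review comment on this patch: PostgreSQL's SUM() already skips NULL rows, so Arel's built-in aggregate is enough without the COALESCE wrapper (a sketch with a hypothetical payments table):

require 'arel'

# SELECT SUM("payments"."amount") FROM "payments"
# On PostgreSQL, SUM() ignores rows where amount IS NULL, so wrapping the
# column in COALESCE before aggregating changes nothing there.
payments = Arel::Table.new(:payments)  # hypothetical table name
total_amount = payments[:amount].sum   # Arel's built-in aggregate node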
|
@@ -378,7 +378,10 @@ dnl We have separate checks for libsystemd and the unit dir for historical reaso
PKG_CHECK_MODULES([LIBSYSTEMD], [libsystemd], [have_libsystemd=yes], [have_libsystemd=no])
AM_CONDITIONAL(BUILDOPT_LIBSYSTEMD, test x$have_libsystemd = xyes)
AM_COND_IF(BUILDOPT_LIBSYSTEMD,
- AC_DEFINE([HAVE_LIBSYSTEMD], 1, [Define if we have libsystemd]))
+ AC_DEFINE([HAVE_LIBSYSTEMD], 1, [Define if we have libsystemd])
+ systemdsystemgeneratordir=$($PKG_CONFIG --variable=systemdsystemgeneratordir systemd)
+ AC_SUBST(systemdsystemgeneratordir)
+ )
AS_IF([test "x$have_libsystemd" = "xyes"], [
with_systemd=yes | 1 | AC_PREREQ([2.63])
dnl If incrementing the version here, remember to update libostree.sym too
m4_define([year_version], [2017])
m4_define([release_version], [5])
m4_define([package_version], [year_version.release_version])
AC_INIT([libostree], [package_version], [[email protected]])
AC_CONFIG_HEADER([config.h])
AC_CONFIG_MACRO_DIR([buildutil])
AC_CONFIG_AUX_DIR([build-aux])
AM_INIT_AUTOMAKE([1.13 -Wno-portability foreign no-define tar-ustar no-dist-gzip dist-xz
color-tests subdir-objects])
AM_MAINTAINER_MODE([enable])
AM_SILENT_RULES([yes])
AC_USE_SYSTEM_EXTENSIONS
AC_SYS_LARGEFILE
AC_PROG_CC
AM_PROG_CC_C_O
AC_PROG_YACC
dnl Versioning information
AC_SUBST([YEAR_VERSION], [year_version])
AC_SUBST([RELEASE_VERSION], [release_version])
AC_SUBST([PACKAGE_VERSION], [package_version])
CC_CHECK_FLAGS_APPEND([WARN_CFLAGS], [CFLAGS], [\
-pipe \
-Wall \
-Werror=empty-body \
-Werror=strict-prototypes \
-Werror=missing-prototypes \
-Werror=implicit-function-declaration \
"-Werror=format=2 -Werror=format-security -Werror=format-nonliteral" \
-Werror=pointer-arith -Werror=init-self \
-Werror=missing-declarations \
-Werror=return-type \
-Werror=overflow \
-Werror=int-conversion \
   -Werror=parentheses \
-Werror=incompatible-pointer-types \
-Werror=misleading-indentation \
-Werror=missing-include-dirs -Werror=aggregate-return \
-Werror=unused-result \
])
AC_SUBST(WARN_CFLAGS)
AC_MSG_CHECKING([for -fsanitize=address in CFLAGS])
if echo $CFLAGS | grep -q -e -fsanitize=address; then
AC_MSG_RESULT([yes])
using_asan=yes
else
AC_MSG_RESULT([no])
fi
AM_CONDITIONAL(BUILDOPT_ASAN, [test x$using_asan = xyes])
# Initialize libtool
LT_PREREQ([2.2.4])
LT_INIT([disable-static])
OSTREE_FEATURES=""
AC_SUBST([OSTREE_FEATURES])
GLIB_TESTS
LIBGLNX_CONFIGURE
AC_CHECK_HEADER([sys/xattr.h],,[AC_MSG_ERROR([You must have sys/xattr.h from glibc])])
AS_IF([test "$YACC" != "bison -y"], [AC_MSG_ERROR([bison not found but required])])
PKG_PROG_PKG_CONFIG
AM_PATH_GLIB_2_0(,,AC_MSG_ERROR([GLib not found]))
dnl When bumping the gio-unix-2.0 dependency (or glib-2.0 in general),
dnl remember to bump GLIB_VERSION_MIN_REQUIRED and
dnl GLIB_VERSION_MAX_ALLOWED in Makefile.am
GIO_DEPENDENCY="gio-unix-2.0 >= 2.40.0"
PKG_CHECK_MODULES(OT_DEP_GIO_UNIX, $GIO_DEPENDENCY)
dnl 5.0.5 is an arbitrary version here
PKG_CHECK_MODULES(OT_DEP_LZMA, liblzma >= 5.0.5)
dnl Needed for rollsum
PKG_CHECK_MODULES(OT_DEP_ZLIB, zlib)
dnl We're not actually linking to this, just using the header
PKG_CHECK_MODULES(OT_DEP_E2P, e2p)
dnl Arbitrary version that's in CentOS7.2 now
CURL_DEPENDENCY=7.29.0
AC_ARG_WITH(curl,
AS_HELP_STRING([--with-curl], [Use libcurl @<:@default=no@:>@]),
[], [with_curl=no])
AS_IF([test x$with_curl != xno ], [
PKG_CHECK_MODULES(OT_DEP_CURL, libcurl >= $CURL_DEPENDENCY)
with_curl=yes
AC_DEFINE([HAVE_LIBCURL], 1, [Define if we have libcurl.pc])
dnl Currently using libcurl requires soup for trivial-httpd for tests
with_soup_default=yes
], [with_soup_default=check])
AM_CONDITIONAL(USE_CURL, test x$with_curl != xno)
if test x$with_curl = xyes; then OSTREE_FEATURES="$OSTREE_FEATURES libcurl"; fi
dnl When bumping the libsoup-2.4 dependency, remember to bump
dnl SOUP_VERSION_MIN_REQUIRED and SOUP_VERSION_MAX_ALLOWED in
dnl Makefile.am
SOUP_DEPENDENCY="libsoup-2.4 >= 2.39.1"
AC_ARG_WITH(soup,
AS_HELP_STRING([--with-soup], [Use libsoup @<:@default=yes@:>@]),
[], [with_soup=$with_soup_default])
AS_IF([test x$with_soup != xno], [
AC_ARG_ENABLE(libsoup_client_certs,
AS_HELP_STRING([--enable-libsoup-client-certs],
[Require availability of new enough libsoup TLS client cert API (default: auto)]),,
[enable_libsoup_client_certs=auto])
AC_MSG_CHECKING([for $SOUP_DEPENDENCY])
PKG_CHECK_EXISTS($SOUP_DEPENDENCY, have_soup=yes, have_soup=no)
AC_MSG_RESULT([$have_soup])
AS_IF([ test x$have_soup = xno && test x$with_soup != xcheck], [
AC_MSG_ERROR([libsoup is enabled but could not be found])
])
AS_IF([test x$have_soup = xyes], [
PKG_CHECK_MODULES(OT_DEP_SOUP, $SOUP_DEPENDENCY)
AC_DEFINE([HAVE_LIBSOUP], 1, [Define if we have libsoup.pc])
with_soup=yes
save_CFLAGS=$CFLAGS
CFLAGS=$OT_DEP_SOUP_CFLAGS
have_libsoup_client_certs=no
AC_CHECK_DECL([SOUP_SESSION_TLS_INTERACTION], [
AC_DEFINE([HAVE_LIBSOUP_CLIENT_CERTS], 1, [Define if we have libsoup client certs])
have_libsoup_client_certs=yes
], [], [#include <libsoup/soup.h>])
AS_IF([test x$enable_libsoup_client_certs = xyes && test x$have_libsoup_client_certs != xyes], [
AC_MSG_ERROR([libsoup client certs explicitly requested but not found])
])
CFLAGS=$save_CFLAGS
], [
with_soup=no
])
], [ with_soup=no ])
if test x$with_soup != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libsoup"; fi
AM_CONDITIONAL(USE_LIBSOUP, test x$with_soup != xno)
AM_CONDITIONAL(HAVE_LIBSOUP_CLIENT_CERTS, test x$have_libsoup_client_certs = xyes)
AC_ARG_ENABLE(trivial-httpd-cmdline,
[AS_HELP_STRING([--enable-trivial-httpd-cmdline],
[Continue to support "ostree trivial-httpd" [default=no]])],,
enable_trivial_httpd_cmdline=no)
AS_IF([test x$enable_trivial_httpd_cmdline = xyes],
[AC_DEFINE([BUILDOPT_ENABLE_TRIVIAL_HTTPD_CMDLINE], 1, [Define if we are enabling ostree trivial-httpd entrypoint])]
)
AS_IF([test x$with_curl = xyes && test x$with_soup = xno], [
AC_MSG_ERROR([Curl enabled, but libsoup is not; libsoup is needed for tests])
])
AM_CONDITIONAL(USE_CURL_OR_SOUP, test x$with_curl != xno || test x$with_soup != xno)
AS_IF([test x$with_curl != xno || test x$with_soup != xno],
[AC_DEFINE([HAVE_LIBCURL_OR_LIBSOUP], 1, [Define if we have soup or curl])])
AS_IF([test x$with_curl = xyes], [fetcher_backend=curl], [test x$with_soup = xyes], [fetcher_backend=libsoup], [fetcher_backend=none])
m4_ifdef([GOBJECT_INTROSPECTION_CHECK], [
GOBJECT_INTROSPECTION_CHECK([1.34.0])
])
AM_CONDITIONAL(BUILDOPT_INTROSPECTION, test "x$found_introspection" = xyes)
LIBGPGME_DEPENDENCY="1.1.8"
PKG_CHECK_MODULES(OT_DEP_GPGME, gpgme-pthread >= $LIBGPGME_DEPENDENCY, have_gpgme=yes, [
m4_ifdef([AM_PATH_GPGME_PTHREAD], [
AM_PATH_GPGME_PTHREAD($LIBGPGME_DEPENDENCY, have_gpgme=yes, have_gpgme=no)
],[ have_gpgme=no ])
])
AS_IF([ test x$have_gpgme = xno ], [
AC_MSG_ERROR([Need GPGME_PTHREAD version $LIBGPGME_DEPENDENCY or later])
])
OSTREE_FEATURES="$OSTREE_FEATURES gpgme"
LIBARCHIVE_DEPENDENCY="libarchive >= 2.8.0"
# What's in RHEL7.2.
FUSE_DEPENDENCY="fuse >= 2.9.2"
# check for gtk-doc
m4_ifdef([GTK_DOC_CHECK], [
GTK_DOC_CHECK([1.15], [--flavour no-tmpl])
],[
enable_gtk_doc=no
AM_CONDITIONAL([ENABLE_GTK_DOC], false)
])
AC_ARG_ENABLE(man,
[AS_HELP_STRING([--enable-man],
[generate man pages [default=auto]])],,
enable_man=maybe)
AS_IF([test "$enable_man" != no], [
AC_PATH_PROG([XSLTPROC], [xsltproc])
AS_IF([test -z "$XSLTPROC"], [
AS_IF([test "$enable_man" = yes], [
AC_MSG_ERROR([xsltproc is required for --enable-man])
])
enable_man=no
],[
enable_man=yes
])
])
AM_CONDITIONAL(ENABLE_MAN, test "$enable_man" != no)
AC_ARG_ENABLE(rust,
[AS_HELP_STRING([--enable-rust],
[Compile Rust code instead of C [default=no]])],,
[enable_rust=no; rust_debug_release=no])
AS_IF([test "$enable_rust" = yes], [
AC_PATH_PROG([cargo], [cargo])
AS_IF([test -z "$cargo"], [AC_MSG_ERROR([cargo is required for --enable-rust])])
AC_PATH_PROG([rustc], [rustc])
AS_IF([test -z "$rustc"], [AC_MSG_ERROR([rustc is required for --enable-rust])])
dnl These bits based on gnome:librsvg/configure.ac
dnl By default, we build in public release mode.
AC_ARG_ENABLE(rust-debug,
                  AS_HELP_STRING([--enable-rust-debug],
[Build Rust code with debugging information [default=no]]),
[rust_debug_release=$enableval],
[rust_debug_release=release])
AC_MSG_CHECKING(whether to build Rust code with debugging information)
if test "x$rust_debug_release" = "xyes" ; then
rust_debug_release=debug
AC_MSG_RESULT(yes)
else
AC_MSG_RESULT(no)
fi
RUST_TARGET_SUBDIR=${rust_debug_release}
AC_SUBST([RUST_TARGET_SUBDIR])
])
AM_CONDITIONAL(RUST_DEBUG, [test "x$rust_debug_release" = "xdebug"])
AM_CONDITIONAL(ENABLE_RUST, [test "$enable_rust" != no])
AC_ARG_WITH(libarchive,
AS_HELP_STRING([--without-libarchive], [Do not use libarchive]),
:, with_libarchive=maybe)
AS_IF([ test x$with_libarchive != xno ], [
AC_MSG_CHECKING([for $LIBARCHIVE_DEPENDENCY])
PKG_CHECK_EXISTS($LIBARCHIVE_DEPENDENCY, have_libarchive=yes, have_libarchive=no)
AC_MSG_RESULT([$have_libarchive])
AS_IF([ test x$have_libarchive = xno && test x$with_libarchive != xmaybe ], [
AC_MSG_ERROR([libarchive is enabled but could not be found])
])
AS_IF([ test x$have_libarchive = xyes], [
AC_DEFINE([HAVE_LIBARCHIVE], 1, [Define if we have libarchive.pc])
PKG_CHECK_MODULES(OT_DEP_LIBARCHIVE, $LIBARCHIVE_DEPENDENCY)
save_LIBS=$LIBS
LIBS=$OT_DEP_LIBARCHIVE_LIBS
AC_CHECK_FUNCS(archive_read_support_filter_all)
LIBS=$save_LIBS
with_libarchive=yes
], [
with_libarchive=no
])
], [ with_libarchive=no ])
if test x$with_libarchive != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libarchive"; fi
AM_CONDITIONAL(USE_LIBARCHIVE, test $with_libarchive != no)
dnl This is what is in RHEL7 anyways
SELINUX_DEPENDENCY="libselinux >= 2.1.13"
AC_ARG_WITH(selinux,
AS_HELP_STRING([--without-selinux], [Do not use SELinux]),
:, with_selinux=maybe)
AS_IF([ test x$with_selinux != xno ], [
AC_MSG_CHECKING([for $SELINUX_DEPENDENCY])
PKG_CHECK_EXISTS($SELINUX_DEPENDENCY, have_selinux=yes, have_selinux=no)
AC_MSG_RESULT([$have_selinux])
AS_IF([ test x$have_selinux = xno && test x$with_selinux != xmaybe ], [
AC_MSG_ERROR([SELinux is enabled but could not be found])
])
AS_IF([ test x$have_selinux = xyes], [
AC_DEFINE([HAVE_SELINUX], 1, [Define if we have libselinux.pc])
PKG_CHECK_MODULES(OT_DEP_SELINUX, $SELINUX_DEPENDENCY)
with_selinux=yes
], [
with_selinux=no
])
], [ with_selinux=no ])
if test x$with_selinux != xno; then OSTREE_FEATURES="$OSTREE_FEATURES selinux"; fi
AM_CONDITIONAL(USE_SELINUX, test $with_selinux != no)
AC_ARG_WITH(smack,
AS_HELP_STRING([--with-smack], [Enable smack]),
:, with_smack=no)
AS_IF([ test x$with_smack = xyes], [
AC_DEFINE([WITH_SMACK], 1, [Define if we have smack.pc])
])
AM_CONDITIONAL(USE_SMACK, test $with_smack != no)
dnl begin openssl (really just libcrypto right now)
OPENSSL_DEPENDENCY="libcrypto >= 1.0.1"
AC_ARG_WITH(openssl,
AS_HELP_STRING([--with-openssl], [Enable use of OpenSSL libcrypto (checksums)]),
:, with_openssl=no)
AS_IF([ test x$with_openssl != xno ], [
PKG_CHECK_MODULES(OT_DEP_OPENSSL, $OPENSSL_DEPENDENCY)
AC_DEFINE([HAVE_OPENSSL], 1, [Define if we have openssl])
with_openssl=yes
], [
with_openssl=no
])
if test x$with_openssl != xno; then OSTREE_FEATURES="$OSTREE_FEATURES openssl"; fi
AM_CONDITIONAL(USE_OPENSSL, test $with_openssl != no)
dnl end openssl
dnl This is what is in RHEL7.2 right now, picking it arbitrarily
LIBMOUNT_DEPENDENCY="mount >= 2.23.0"
AC_ARG_WITH(libmount,
AS_HELP_STRING([--without-libmount], [Do not use libmount]),
:, with_libmount=maybe)
AS_IF([ test x$with_libmount != xno ], [
AC_MSG_CHECKING([for $LIBMOUNT_DEPENDENCY])
PKG_CHECK_EXISTS($LIBMOUNT_DEPENDENCY, have_libmount=yes, have_libmount=no)
AC_MSG_RESULT([$have_libmount])
AS_IF([ test x$have_libmount = xno && test x$with_libmount != xmaybe ], [
AC_MSG_ERROR([libmount is enabled but could not be found])
])
AS_IF([ test x$have_libmount = xyes], [
AC_DEFINE([HAVE_LIBMOUNT], 1, [Define if we have libmount.pc])
PKG_CHECK_MODULES(OT_DEP_LIBMOUNT, $LIBMOUNT_DEPENDENCY)
with_libmount=yes
save_LIBS=$LIBS
LIBS=$OT_DEP_LIBMOUNT_LIBS
AC_CHECK_FUNCS(mnt_unref_cache)
LIBS=$save_LIBS
], [
with_libmount=no
])
], [ with_libmount=no ])
if test x$with_libmount != xno; then OSTREE_FEATURES="$OSTREE_FEATURES libmount"; fi
AM_CONDITIONAL(USE_LIBMOUNT, test $with_libmount != no)
# Enabled by default because I think people should use it.
AC_ARG_ENABLE(rofiles-fuse,
[AS_HELP_STRING([--enable-rofiles-fuse],
[generate rofiles-fuse helper [default=yes]])],,
enable_rofiles_fuse=yes)
AS_IF([ test x$enable_rofiles_fuse != xno ], [
PKG_CHECK_MODULES(BUILDOPT_FUSE, $FUSE_DEPENDENCY)
], [enable_rofiles_fuse=no])
AM_CONDITIONAL(BUILDOPT_FUSE, test x$enable_rofiles_fuse = xyes)
AC_ARG_WITH(dracut,
AS_HELP_STRING([--with-dracut],
[Install dracut module (default: no)]),,
[with_dracut=no])
case x$with_dracut in
xno) ;;
xyes) ;;
xyesbutnoconf) ;;
*) AC_MSG_ERROR([Unknown --with-dracut value $with_dracut])
esac
AM_CONDITIONAL(BUILDOPT_DRACUT, test x$with_dracut = xyes || test x$with_dracut = xyesbutnoconf)
AM_CONDITIONAL(BUILDOPT_DRACUT_CONF, test x$with_dracut = xyes)
AC_ARG_WITH(mkinitcpio,
AS_HELP_STRING([--with-mkinitcpio],
[Install mkinitcpio module (default: no)]),,
[with_mkinitcpio=no])
AM_CONDITIONAL(BUILDOPT_MKINITCPIO, test x$with_mkinitcpio = xyes)
dnl We have separate checks for libsystemd and the unit dir for historical reasons
PKG_CHECK_MODULES([LIBSYSTEMD], [libsystemd], [have_libsystemd=yes], [have_libsystemd=no])
AM_CONDITIONAL(BUILDOPT_LIBSYSTEMD, test x$have_libsystemd = xyes)
AM_COND_IF(BUILDOPT_LIBSYSTEMD,
AC_DEFINE([HAVE_LIBSYSTEMD], 1, [Define if we have libsystemd]))
AS_IF([test "x$have_libsystemd" = "xyes"], [
with_systemd=yes
AC_ARG_WITH([systemdsystemunitdir],
AS_HELP_STRING([--with-systemdsystemunitdir=DIR], [Directory for systemd service files]),
[],
[with_systemdsystemunitdir=$($PKG_CONFIG --variable=systemdsystemunitdir systemd)])
AS_IF([test "x$with_systemdsystemunitdir" != "xno"], [
AC_SUBST([systemdsystemunitdir], [$with_systemdsystemunitdir])
])
])
AM_CONDITIONAL(BUILDOPT_SYSTEMD, test x$with_systemd = xyes)
AC_ARG_WITH(builtin-grub2-mkconfig,
AS_HELP_STRING([--with-builtin-grub2-mkconfig],
[Use a builtin minimal grub2-mkconfig to generate a GRUB2 configuration file (default: no)]),,
[with_builtin_grub2_mkconfig=no])
AM_CONDITIONAL(BUILDOPT_BUILTIN_GRUB2_MKCONFIG, test x$with_builtin_grub2_mkconfig = xyes)
AM_COND_IF(BUILDOPT_BUILTIN_GRUB2_MKCONFIG,
AC_DEFINE([USE_BUILTIN_GRUB2_MKCONFIG], 1, [Define if using internal ostree-grub-generator]))
AC_ARG_WITH(grub2-mkconfig-path,
AS_HELP_STRING([--with-grub2-mkconfig-path],
[Path to grub2-mkconfig]))
AS_IF([test x$with_grub2_mkconfig_path = x], [
dnl Otherwise, look for the path to the system generator. On some
dnl distributions GRUB2 *-mkconfig executable has 'grub2' prefix and
dnl on some 'grub'. We default to grub2-mkconfig.
AC_CHECK_PROGS(GRUB2_MKCONFIG, [grub2-mkconfig grub-mkconfig], [grub2-mkconfig])
],[GRUB2_MKCONFIG=$with_grub2_mkconfig_path])
AC_DEFINE_UNQUOTED([GRUB2_MKCONFIG_PATH], ["$GRUB2_MKCONFIG"], [The system grub2-mkconfig executable name])
AC_ARG_WITH(static-compiler,
AS_HELP_STRING([--with-static-compiler],
[Use the given compiler to build ostree-prepare-root statically linked (default: no)]),,
[with_static_compiler=no])
AM_CONDITIONAL(BUILDOPT_USE_STATIC_COMPILER, test x$with_static_compiler != xno)
AC_SUBST(STATIC_COMPILER, $with_static_compiler)
dnl for tests (but we can't use asan with gjs or any introspection,
dnl see https://github.com/google/sanitizers/wiki/AddressSanitizerAsDso for more info)
AS_IF([test "x$found_introspection" = xyes && test x$using_asan != xyes], [
AC_PATH_PROG(GJS, [gjs])
if test -n "$GJS"; then
have_gjs=yes
else
have_gjs=no
fi
], [have_gjs=no])
AM_CONDITIONAL(BUILDOPT_GJS, test x$have_gjs = xyes)
# Do we enable building experimental (non-stable) API?
# The OSTREE_ENABLE_EXPERIMENTAL_API #define is used internally and in public
# headers, so any consumer of libostree who wants to use experimental API must
# #define OSTREE_ENABLE_EXPERIMENTAL_API 1
# before including libostree headers. This means the name in the AC_DEFINE below
# is public API.
AC_ARG_ENABLE([experimental-api],
[AS_HELP_STRING([--enable-experimental-api],
[Enable unstable experimental API in libostree [default=no]])],,
[enable_experimental_api=no])
AS_IF([test x$enable_experimental_api = xyes],
[AC_DEFINE([OSTREE_ENABLE_EXPERIMENTAL_API],[1],[Define if experimental API should be enabled])
OSTREE_FEATURES="$OSTREE_FEATURES experimental"]
)
AM_CONDITIONAL([ENABLE_EXPERIMENTAL_API],[test x$enable_experimental_api = xyes])
AC_CONFIG_FILES([
Makefile
apidoc/Makefile
src/libostree/ostree-1.pc
src/libostree/ostree-version.h
])
AC_OUTPUT
echo "
libOSTree $VERSION
===============
introspection: $found_introspection
Rust (internal oxidation): $rust_debug_release
rofiles-fuse: $enable_rofiles_fuse
HTTP backend: $fetcher_backend
\"ostree trivial-httpd\": $enable_trivial_httpd_cmdline
SELinux: $with_selinux
OpenSSL libcrypto (checksums): $with_openssl
systemd: $have_libsystemd
libmount: $with_libmount
libarchive (parse tar files directly): $with_libarchive
static deltas: yes (always enabled now)
O_TMPFILE: $enable_otmpfile
wrpseudo-compat: $enable_wrpseudo_compat
man pages (xsltproc): $enable_man
api docs (gtk-doc): $enable_gtk_doc
installed tests: $enable_installed_tests
gjs-based tests: $have_gjs
dracut: $with_dracut
mkinitcpio: $with_mkinitcpio
Static compiler for ostree-prepare-root: $with_static_compiler
Experimental API $enable_experimental_api"
AS_IF([test x$with_builtin_grub2_mkconfig = xyes], [
echo " builtin grub2-mkconfig (instead of system): $with_builtin_grub2_mkconfig"
], [
echo " grub2-mkconfig path: $GRUB2_MKCONFIG"
])
echo ""
| 1 | 10,566 | Can you provide a `--with-systemdsystemgeneratordir` option here? So that I can do unprivileged installs without completely turning off systemd. I can add it as a follow-up PR too. | ostreedev-ostree | c |
@@ -396,11 +396,10 @@ class CLAModel(Model):
inferences = self._multiStepCompute(rawInput=inputRecord)
# For temporal classification. Not used, and might not work anymore
elif self._isClassificationModel():
- inferences = self._classificationCompute()
-
- results.inferences.update(inferences)
-
- inferences = self._anomalyCompute()
+      inferences = self._classificationCompute()
+ else:
+ inferences = self._anomalyCompute()
+
results.inferences.update(inferences)
# ----------------------------------------------------------------------- | 1 | # ----------------------------------------------------------------------
# Numenta Platform for Intelligent Computing (NuPIC)
# Copyright (C) 2013, Numenta, Inc. Unless you have an agreement
# with Numenta, Inc., for a separate license for this software code, the
# following terms and conditions apply:
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 3 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
# See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see http://www.gnu.org/licenses.
#
# http://numenta.org/licenses/
# ----------------------------------------------------------------------
""" @file clamodel.py
Encapsulation of the CLA network that implements ModelBase.
"""
import copy
import math
import os
import json
import itertools
import logging
import traceback
from collections import deque
from operator import itemgetter
import numpy
from nupic.frameworks.opf.model import Model
from nupic.algorithms.anomaly import Anomaly
from nupic.data import SENTINEL_VALUE_FOR_MISSING_DATA
from nupic.data.fieldmeta import FieldMetaSpecial, FieldMetaInfo
from nupic.encoders import MultiEncoder, DeltaEncoder
from nupic.engine import Network
from nupic.support.fshelpers import makeDirectoryFromAbsolutePath
from nupic.frameworks.opf.opfutils import (InferenceType,
InferenceElement,
SensorInput,
initLogger)
DEFAULT_LIKELIHOOD_THRESHOLD = 0.0001
DEFAULT_MAX_PREDICTIONS_PER_STEP = 8
DEFAULT_ANOMALY_TRAINRECORDS = 4000
DEFAULT_ANOMALY_THRESHOLD = 1.1
DEFAULT_ANOMALY_CACHESIZE = 10000
def requireAnomalyModel(func):
"""
Decorator for functions that require anomaly models.
"""
def _decorator(self, *args, **kwargs):
    if self.getInferenceType() != InferenceType.TemporalAnomaly:
raise RuntimeError("Method required a TemporalAnomaly model.")
if self._getAnomalyClassifier() is None:
raise RuntimeError("Model does not support this command. Model must"
"be an active anomalyDetector model.")
return func(self, *args, **kwargs)
return _decorator
###############################################################
class NetworkInfo(object):
""" Data type used as return value type by
CLAModel.__createCLANetwork()
"""
def __init__(self, net, statsCollectors):
"""
net: The CLA Network instance
statsCollectors:
Sequence of 0 or more CLAStatistic-based instances
"""
self.net = net
self.statsCollectors = statsCollectors
return
def __repr__(self):
return "NetworkInfo(net=%r, statsCollectors=%r)" % (
self.net, self.statsCollectors)
###############################################################
class CLAModel(Model):
__supportedInferenceKindSet = set((InferenceType.TemporalNextStep,
InferenceType.TemporalClassification,
InferenceType.NontemporalClassification,
InferenceType.NontemporalAnomaly,
InferenceType.TemporalAnomaly,
InferenceType.TemporalMultiStep,
InferenceType.NontemporalMultiStep))
__myClassName = "CLAModel"
#############################################################################
def __init__(self,
sensorParams,
inferenceType=InferenceType.TemporalNextStep,
predictedField=None,
spEnable=True,
spParams={},
# TODO: We can't figure out what this is. Remove?
trainSPNetOnlyIfRequested=False,
tpEnable=True,
tpParams={},
clEnable=True,
clParams={},
anomalyParams={},
minLikelihoodThreshold=DEFAULT_LIKELIHOOD_THRESHOLD,
maxPredictionsPerStep=DEFAULT_MAX_PREDICTIONS_PER_STEP):
"""CLAModel constructor.
Args:
inferenceType: A value from the InferenceType enum class.
predictedField: The field to predict for multistep prediction.
sensorParams: A dictionary specifying the sensor parameters.
spEnable: Whether or not to use a spatial pooler.
spParams: A dictionary specifying the spatial pooler parameters. These
are passed to the spatial pooler.
trainSPNetOnlyIfRequested: If set, don't create an SP network unless the
user requests SP metrics.
tpEnable: Whether to use a temporal pooler.
tpParams: A dictionary specifying the temporal pooler parameters. These
are passed to the temporal pooler.
clEnable: Whether to use the classifier. If false, the classifier will
not be created and no predictions will be generated.
clParams: A dictionary specifying the classifier parameters. These are
are passed to the classifier.
anomalyParams: Anomaly detection parameters
minLikelihoodThreshold: The minimum likelihood value to include in
inferences. Currently only applies to multistep inferences.
maxPredictionsPerStep: Maximum number of predictions to include for
each step in inferences. The predictions with highest likelihood are
included.
"""
    if inferenceType not in self.__supportedInferenceKindSet:
raise ValueError("{0} received incompatible inference type: {1}"\
.format(self.__class__, inferenceType))
# Call super class constructor
super(CLAModel, self).__init__(inferenceType)
# self.__restoringFromState is set to True by our __setstate__ method
# and back to False at completion of our _deSerializeExtraData() method.
self.__restoringFromState = False
self.__restoringFromV1 = False
    # Initialize logging
self.__logger = initLogger(self)
self.__logger.debug("Instantiating %s." % self.__myClassName)
self._minLikelihoodThreshold = minLikelihoodThreshold
self._maxPredictionsPerStep = maxPredictionsPerStep
# set up learning parameters (note: these may be replaced via
# enable/disable//SP/TP//Learning methods)
self.__spLearningEnabled = bool(spEnable)
self.__tpLearningEnabled = bool(tpEnable)
# Explicitly exclude the TP if this type of inference doesn't require it
if not InferenceType.isTemporal(self.getInferenceType()) \
or self.getInferenceType() == InferenceType.NontemporalMultiStep:
tpEnable = False
self._netInfo = None
self._hasSP = spEnable
self._hasTP = tpEnable
self._hasCL = clEnable
self._classifierInputEncoder = None
self._predictedFieldIdx = None
self._predictedFieldName = None
self._numFields = None
# init anomaly
windowSize = anomalyParams.get("slidingWindowSize", None)
mode = anomalyParams.get("mode", "pure")
anomalyThreshold = anomalyParams.get("autoDetectThreshold", None)
self._anomalyInst = Anomaly(slidingWindowSize=windowSize, mode=mode,
binaryAnomalyThreshold=anomalyThreshold)
# -----------------------------------------------------------------------
# Create the network
self._netInfo = self.__createCLANetwork(
sensorParams, spEnable, spParams, tpEnable, tpParams, clEnable,
clParams, anomalyParams)
# Initialize Spatial Anomaly detection parameters
if self.getInferenceType() == InferenceType.NontemporalAnomaly:
self._getSPRegion().setParameter("anomalyMode", True)
# Initialize Temporal Anomaly detection parameters
if self.getInferenceType() == InferenceType.TemporalAnomaly:
self._getTPRegion().setParameter("anomalyMode", True)
self._prevPredictedColumns = numpy.array([])
# -----------------------------------------------------------------------
# This flag, if present tells us not to train the SP network unless
# the user specifically asks for the SP inference metric
self.__trainSPNetOnlyIfRequested = trainSPNetOnlyIfRequested
self.__numRunCalls = 0
# Tracks whether finishedLearning() has been called
self.__finishedLearning = False
self.__logger.debug("Instantiated %s" % self.__class__.__name__)
self._input = None
return
def getParameter(self, paramName):
if paramName == '__numRunCalls':
return self.__numRunCalls
else:
raise RuntimeError("'%s' parameter is not exposed by clamodel." % \
(paramName))
#############################################################################
def resetSequenceStates(self):
""" [virtual method override] Resets the model's sequence states. Normally
called to force the delineation of a sequence, such as between OPF tasks.
"""
if self._hasTP:
# Reset TP's sequence states
self._getTPRegion().executeCommand(['resetSequenceStates'])
self.__logger.debug("CLAModel.resetSequenceStates(): reset temporal "
"pooler's sequence states")
return
#############################################################################
def finishLearning(self):
""" [virtual method override] Places the model in a permanent "finished
learning" mode where it will not be able to learn from subsequent input
records.
NOTE: Upon completion of this command, learning may not be resumed on
the given instance of the model (e.g., the implementation may optimize
itself by pruning data structures that are necessary for learning)
"""
assert not self.__finishedLearning
if self._hasSP:
# Finish SP learning
self._getSPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"CLAModel.finishLearning(): finished SP learning")
if self._hasTP:
# Finish temporal network's TP learning
self._getTPRegion().executeCommand(['finishLearning'])
self.__logger.debug(
"CLAModel.finishLearning(): finished TP learning")
self.__spLearningEnabled = self.__tpLearningEnabled = False
self.__finishedLearning = True
return
def setFieldStatistics(self,fieldStats):
encoder = self._getEncoder()
# Set the stats for the encoders. The first argument to setFieldStats
# is the field name of the encoder. Since we are using a multiencoder
# we leave it blank, the multiencoder will propagate the field names to the
# underlying encoders
encoder.setFieldStats('',fieldStats)
def enableLearning(self):
"""[override] Turn Learning on for the current model """
super(CLAModel, self).enableLearning()
self.setEncoderLearning(True)
def disableLearning(self):
"""[override] Turn Learning off for the current model """
super(CLAModel, self).disableLearning()
self.setEncoderLearning(False)
def setEncoderLearning(self,learningEnabled):
self._getEncoder().setLearning(learningEnabled)
# Anomaly Accessor Methods
@requireAnomalyModel
def setAnomalyParameter(self, param, value):
"""
Set a parameter of the anomaly classifier within this model.
"""
self._getAnomalyClassifier().setParameter(param, value)
@requireAnomalyModel
def getAnomalyParameter(self, param):
"""
Get a parameter of the anomaly classifier within this model.
"""
return self._getAnomalyClassifier().getParameter(param)
@requireAnomalyModel
def anomalyRemoveLabels(self, start, end, labelFilter):
"""
Remove labels from the anomaly classifier within this model.
"""
self._getAnomalyClassifier().getSelf().removeLabels(start, end, labelFilter)
@requireAnomalyModel
def anomalyAddLabel(self, start, end, labelName):
"""
Add labels from the anomaly classifier within this model.
"""
self._getAnomalyClassifier().getSelf().addLabel(start, end, labelName)
@requireAnomalyModel
def anomalyGetLabels(self, start, end):
"""
Get labels from the anomaly classifier within this model.
"""
return self._getAnomalyClassifier().getSelf().getLabels(start, end)
def run(self, inputRecord):
""" run one iteration of this model.
args:
inputRecord is a record object formatted according to
nupic.data.RecordStream.getNextRecordDict() result format.
return:
An ModelResult namedtuple (see opfutils.py) The contents of
ModelResult.inferences depends on the the specific inference
type of this model, which can be queried by getInferenceType()
"""
assert not self.__restoringFromState
assert inputRecord
results = super(CLAModel, self).run(inputRecord)
self.__numRunCalls += 1
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug("CLAModel.run() inputRecord=%s", (inputRecord))
results.inferences = {}
self._input = inputRecord
# -------------------------------------------------------------------------
# Turn learning on or off?
if '_learning' in inputRecord:
if inputRecord['_learning']:
self.enableLearning()
else:
self.disableLearning()
###########################################################################
# Predictions and Learning
###########################################################################
self._sensorCompute(inputRecord)
self._spCompute()
self._tpCompute()
results.sensorInput = self._getSensorInputRecord(inputRecord)
inferences = {}
# TODO: Reconstruction and temporal classification not used. Remove
if self._isReconstructionModel():
inferences = self._reconstructionCompute()
elif self._isMultiStepModel():
inferences = self._multiStepCompute(rawInput=inputRecord)
# For temporal classification. Not used, and might not work anymore
elif self._isClassificationModel():
inferences = self._classificationCompute()
results.inferences.update(inferences)
inferences = self._anomalyCompute()
results.inferences.update(inferences)
# -----------------------------------------------------------------------
# Store the index and name of the predictedField
results.predictedFieldIdx = self._predictedFieldIdx
results.predictedFieldName = self._predictedFieldName
# =========================================================================
# output
assert (not self.isInferenceEnabled() or results.inferences is not None), \
"unexpected inferences: %r" % results.inferences
#self.__logger.setLevel(logging.DEBUG)
if self.__logger.isEnabledFor(logging.DEBUG):
self.__logger.debug("inputRecord: %r, results: %r" % (inputRecord,
results))
return results
def _getSensorInputRecord(self, inputRecord):
"""
inputRecord - dict containing the input to the sensor
Return a 'SensorInput' object, which represents the 'parsed'
representation of the input record
"""
sensor = self._getSensorRegion()
dataRow = copy.deepcopy(sensor.getSelf().getOutputValues('sourceOut'))
dataDict = copy.deepcopy(inputRecord)
inputRecordEncodings = sensor.getSelf().getOutputValues('sourceEncodings')
inputRecordCategory = int(sensor.getOutputData('categoryOut')[0])
resetOut = sensor.getOutputData('resetOut')[0]
return SensorInput(dataRow=dataRow,
dataDict=dataDict,
dataEncodings=inputRecordEncodings,
sequenceReset=resetOut,
category=inputRecordCategory)
def _sensorCompute(self, inputRecord):
sensor = self._getSensorRegion()
self._getDataSource().push(inputRecord)
sensor.setParameter('topDownMode', False)
sensor.prepareInputs()
try:
sensor.compute()
except StopIteration as e:
raise Exception("Unexpected StopIteration", e,
"ACTUAL TRACEBACK: %s" % traceback.format_exc())
def _spCompute(self):
sp = self._getSPRegion()
if sp is None:
return
sp.setParameter('topDownMode', False)
sp.setParameter('inferenceMode', self.isInferenceEnabled())
sp.setParameter('learningMode', self.isLearningEnabled())
sp.prepareInputs()
sp.compute()
def _tpCompute(self):
tp = self._getTPRegion()
if tp is None:
return
if (self.getInferenceType() == InferenceType.TemporalAnomaly or
self._isReconstructionModel()):
topDownCompute = True
else:
topDownCompute = False
tp = self._getTPRegion()
tp.setParameter('topDownMode', topDownCompute)
tp.setParameter('inferenceMode', self.isInferenceEnabled())
tp.setParameter('learningMode', self.isLearningEnabled())
tp.prepareInputs()
tp.compute()
def _isReconstructionModel(self):
inferenceType = self.getInferenceType()
inferenceArgs = self.getInferenceArgs()
if inferenceType == InferenceType.TemporalNextStep:
return True
if inferenceArgs:
return inferenceArgs.get('useReconstruction', False)
return False
def _isMultiStepModel(self):
return self.getInferenceType() in (InferenceType.NontemporalMultiStep,
InferenceType.NontemporalClassification,
InferenceType.TemporalMultiStep,
InferenceType.TemporalAnomaly)
def _isClassificationModel(self):
    return self.getInferenceType() == InferenceType.TemporalClassification
def _multiStepCompute(self, rawInput):
patternNZ = None
if self._getTPRegion() is not None:
tp = self._getTPRegion()
tpOutput = tp.getSelf()._tfdr.infActiveState['t']
patternNZ = tpOutput.reshape(-1).nonzero()[0]
elif self._getSPRegion() is not None:
sp = self._getSPRegion()
spOutput = sp.getOutputData('bottomUpOut')
patternNZ = spOutput.nonzero()[0]
elif self._getSensorRegion() is not None:
sensor = self._getSensorRegion()
sensorOutput = sensor.getOutputData('dataOut')
patternNZ = sensorOutput.nonzero()[0]
else:
raise RuntimeError("Attempted to make multistep prediction without"
"TP, SP, or Sensor regions")
inputTSRecordIdx = rawInput.get('_timestampRecordIdx')
return self._handleCLAClassifierMultiStep(
patternNZ=patternNZ,
inputTSRecordIdx=inputTSRecordIdx,
rawInput=rawInput)
def _classificationCompute(self):
inference = {}
classifier = self._getClassifierRegion()
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', self.isLearningEnabled())
classifier.prepareInputs()
classifier.compute()
# What we get out is the score for each category. The argmax is
# then the index of the winning category
classificationDist = classifier.getOutputData('categoriesOut')
classification = classificationDist.argmax()
probabilities = classifier.getOutputData('categoryProbabilitiesOut')
numCategories = classifier.getParameter('activeOutputCount')
classConfidences = dict(zip(xrange(numCategories), probabilities))
inference[InferenceElement.classification] = classification
inference[InferenceElement.classConfidences] = {0: classConfidences}
return inference
def _reconstructionCompute(self):
if not self.isInferenceEnabled():
return {}
sp = self._getSPRegion()
sensor = self._getSensorRegion()
#--------------------------------------------------
# SP Top-down flow
sp.setParameter('topDownMode', True)
sp.prepareInputs()
sp.compute()
#--------------------------------------------------
# Sensor Top-down flow
sensor.setParameter('topDownMode', True)
sensor.prepareInputs()
sensor.compute()
# Need to call getOutputValues() instead of going through getOutputData()
# because the return values may contain strings, which cannot be passed
# through the Region.cpp code.
# predictionRow is a list of values, one for each field. The value is
# in the same type as the original input to the encoder and may be a
# string for category fields for example.
predictionRow = copy.copy(sensor.getSelf().getOutputValues('temporalTopDownOut'))
predictionFieldEncodings = sensor.getSelf().getOutputValues('temporalTopDownEncodings')
inferences = {}
inferences[InferenceElement.prediction] = tuple(predictionRow)
inferences[InferenceElement.encodings] = tuple(predictionFieldEncodings)
return inferences
def _anomalyCompute(self):
"""
Compute Anomaly score, if required
"""
inferenceType = self.getInferenceType()
inferences = {}
sp = self._getSPRegion()
score = None
if inferenceType == InferenceType.NontemporalAnomaly:
score = sp.getOutputData("anomalyScore")[0] #TODO move from SP to Anomaly ?
elif inferenceType == InferenceType.TemporalAnomaly:
tp = self._getTPRegion()
if sp is not None:
activeColumns = sp.getOutputData("bottomUpOut").nonzero()[0]
else:
sensor = self._getSensorRegion()
activeColumns = sensor.getOutputData('dataOut').nonzero()[0]
      if self._predictedFieldName not in self._input:
raise ValueError(
"Expected predicted field '%s' in input row, but was not found!"
% self._predictedFieldName
)
# Calculate the anomaly score using the active columns
# and previous predicted columns.
score = self._anomalyInst.compute(
activeColumns,
self._prevPredictedColumns,
inputValue=self._input[self._predictedFieldName])
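      # The raw score is the fraction of currently active columns that were not
      # among the previously predicted columns; e.g. (hypothetical numbers) 8
      # unpredicted out of 40 active columns yields 0.2, before any
      # sliding-window or likelihood post-processing configured on the Anomaly
      # instance is applied.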
# Store the predicted columns for the next timestep.
predictedColumns = tp.getOutputData("topDownOut").nonzero()[0]
self._prevPredictedColumns = copy.deepcopy(predictedColumns)
# Calculate the classifier's output and use the result as the anomaly
# label. Stores as string of results.
# TODO: make labels work with non-SP models
if sp is not None:
self._getAnomalyClassifier().setParameter(
"activeColumnCount", len(activeColumns))
self._getAnomalyClassifier().prepareInputs()
self._getAnomalyClassifier().compute()
labels = self._getAnomalyClassifier().getSelf().getLabelResults()
inferences[InferenceElement.anomalyLabel] = "%s" % labels
inferences[InferenceElement.anomalyScore] = score
return inferences
def _handleCLAClassifierMultiStep(self, patternNZ,
inputTSRecordIdx,
rawInput):
""" Handle the CLA Classifier compute logic when implementing multi-step
prediction. This is where the patternNZ is associated with one of the
other fields from the dataset 0 to N steps in the future. This method is
used by each type of network (encoder only, SP only, SP +TP) to handle the
compute logic through the CLA Classifier. It fills in the inference dict with
the results of the compute.
Parameters:
-------------------------------------------------------------------
patternNZ: The input the CLA Classifier as a list of active input indices
inputTSRecordIdx: The index of the record as computed from the timestamp
and aggregation interval. This normally increments by 1
each time unless there are missing records. If there is no
aggregation interval or timestamp in the data, this will be
None.
rawInput: The raw input to the sensor, as a dict.
"""
inferenceArgs = self.getInferenceArgs()
predictedFieldName = inferenceArgs.get('predictedField', None)
if predictedFieldName is None:
raise ValueError(
"No predicted field was enabled! Did you call enableInference()?"
)
self._predictedFieldName = predictedFieldName
classifier = self._getClassifierRegion()
if not self._hasCL or classifier is None:
# No classifier so return an empty dict for inferences.
return {}
sensor = self._getSensorRegion()
minLikelihoodThreshold = self._minLikelihoodThreshold
maxPredictionsPerStep = self._maxPredictionsPerStep
needLearning = self.isLearningEnabled()
inferences = {}
# Get the classifier input encoder, if we don't have it already
if self._classifierInputEncoder is None:
if predictedFieldName is None:
raise RuntimeError("This experiment description is missing "
"the 'predictedField' in its config, which is required "
"for multi-step prediction inference.")
encoderList = sensor.getSelf().encoder.getEncoderList()
self._numFields = len(encoderList)
# This is getting index of predicted field if being fed to CLA.
fieldNames = sensor.getSelf().encoder.getScalarNames()
if predictedFieldName in fieldNames:
self._predictedFieldIdx = fieldNames.index(predictedFieldName)
else:
# Predicted field was not fed into the network, only to the classifier
self._predictedFieldIdx = None
# In a multi-step model, the classifier input encoder is separate from
# the other encoders and always disabled from going into the bottom of
# the network.
if sensor.getSelf().disabledEncoder is not None:
encoderList = sensor.getSelf().disabledEncoder.getEncoderList()
else:
encoderList = []
if len(encoderList) >= 1:
fieldNames = sensor.getSelf().disabledEncoder.getScalarNames()
self._classifierInputEncoder = encoderList[fieldNames.index(
predictedFieldName)]
else:
# Legacy multi-step networks don't have a separate encoder for the
# classifier, so use the one that goes into the bottom of the network
encoderList = sensor.getSelf().encoder.getEncoderList()
self._classifierInputEncoder = encoderList[self._predictedFieldIdx]
# Get the actual value and the bucket index for this sample. The
# predicted field may not be enabled for input to the network, so we
# explicitly encode it outside of the sensor
# TODO: All this logic could be simpler if in the encoder itself
    if predictedFieldName not in rawInput:
raise ValueError("Input row does not contain a value for the predicted "
"field configured for this model. Missing value for '%s'"
% predictedFieldName)
absoluteValue = rawInput[predictedFieldName]
bucketIdx = self._classifierInputEncoder.getBucketIndices(absoluteValue)[0]
# Convert the absolute values to deltas if necessary
# The bucket index should be handled correctly by the underlying delta encoder
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Make the delta before any values have been seen 0 so that we do not mess up the
# range for the adaptive scalar encoder.
if not hasattr(self,"_ms_prevVal"):
self._ms_prevVal = absoluteValue
prevValue = self._ms_prevVal
self._ms_prevVal = absoluteValue
actualValue = absoluteValue - prevValue
else:
actualValue = absoluteValue
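    # e.g. (hypothetical readings): consecutive absolute values 10.0 then 12.5
    # feed the classifier a delta of 2.5 when a DeltaEncoder is configured (the
    # very first record yields a delta of 0), and 12.5 as-is otherwise.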
if isinstance(actualValue, float) and math.isnan(actualValue):
actualValue = SENTINEL_VALUE_FOR_MISSING_DATA
# Pass this information to the classifier's custom compute method
# so that it can assign the current classification to possibly
# multiple patterns from the past and current, and also provide
# the expected classification for some time step(s) in the future.
classifier.setParameter('inferenceMode', True)
classifier.setParameter('learningMode', needLearning)
classificationIn = {'bucketIdx': bucketIdx,
'actValue': actualValue}
# Handle missing records
if inputTSRecordIdx is not None:
recordNum = inputTSRecordIdx
else:
recordNum = self.__numRunCalls
clResults = classifier.getSelf().customCompute(recordNum=recordNum,
patternNZ=patternNZ,
classification=classificationIn)
# ---------------------------------------------------------------
# Get the prediction for every step ahead learned by the classifier
predictionSteps = classifier.getParameter('steps')
predictionSteps = [int(x) for x in predictionSteps.split(',')]
# We will return the results in this dict. The top level keys
# are the step number, the values are the relative likelihoods for
# each classification value in that time step, represented as
# another dict where the keys are the classification values and
# the values are the relative likelihoods.
inferences[InferenceElement.multiStepPredictions] = dict()
inferences[InferenceElement.multiStepBestPredictions] = dict()
# ======================================================================
# Plug in the predictions for each requested time step.
for steps in predictionSteps:
# From the clResults, compute the predicted actual value. The
# CLAClassifier classifies the bucket index and returns a list of
# relative likelihoods for each bucket. Let's find the max one
# and then look up the actual value from that bucket index
likelihoodsVec = clResults[steps]
bucketValues = clResults['actualValues']
# Create a dict of value:likelihood pairs. We can't simply use
# dict(zip(bucketValues, likelihoodsVec)) because there might be
# duplicate bucketValues (this happens early on in the model when
# it doesn't have actual values for each bucket so it returns
# multiple buckets with the same default actual value).
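      # For example (hypothetical numbers): bucketValues = [1.5, 1.5, 2.0] with
      # likelihoodsVec = [0.2, 0.3, 0.5] merges to {1.5: 0.5, 2.0: 0.5}, whereas
      # dict(zip(...)) would silently keep only {1.5: 0.3, 2.0: 0.5}.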
likelihoodsDict = dict()
bestActValue = None
bestProb = None
for (actValue, prob) in zip(bucketValues, likelihoodsVec):
if actValue in likelihoodsDict:
likelihoodsDict[actValue] += prob
else:
likelihoodsDict[actValue] = prob
# Keep track of best
if bestProb is None or likelihoodsDict[actValue] > bestProb:
bestProb = likelihoodsDict[actValue]
bestActValue = actValue
# Remove entries with 0 likelihood or likelihood less than
# minLikelihoodThreshold, but don't leave an empty dict.
likelihoodsDict = CLAModel._removeUnlikelyPredictions(
likelihoodsDict, minLikelihoodThreshold, maxPredictionsPerStep)
# ---------------------------------------------------------------------
# If we have a delta encoder, we have to shift our predicted output value
# by the sum of the deltas
if isinstance(self._classifierInputEncoder, DeltaEncoder):
# Get the prediction history for this number of timesteps.
# The prediction history is a store of the previous best predicted values.
# This is used to get the final shift from the current absolute value.
if not hasattr(self, '_ms_predHistories'):
self._ms_predHistories = dict()
predHistories = self._ms_predHistories
        if steps not in predHistories:
predHistories[steps] = deque()
predHistory = predHistories[steps]
# Find the sum of the deltas for the steps and use this to generate
# an offset from the current absolute value
sumDelta = sum(predHistory)
offsetDict = dict()
for (k, v) in likelihoodsDict.iteritems():
if k is not None:
# Reconstruct the absolute value based on the current actual value,
# the best predicted values from the previous iterations,
# and the current predicted delta
offsetDict[absoluteValue+float(k)+sumDelta] = v
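            # e.g. (hypothetical numbers): with absoluteValue=10.0, earlier best
            # deltas summing to sumDelta=3.0, and a predicted delta k=2.0, the
            # likelihood v is assigned to the absolute prediction
            # 10.0 + 2.0 + 3.0 = 15.0.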
# Push the current best delta to the history buffer for reconstructing the final delta
if bestActValue is not None:
predHistory.append(bestActValue)
# If we don't need any more values in the predictionHistory, pop off
# the earliest one.
if len(predHistory) >= steps:
predHistory.popleft()
# Provide the offsetDict as the return value
if len(offsetDict)>0:
inferences[InferenceElement.multiStepPredictions][steps] = \
offsetDict
else:
inferences[InferenceElement.multiStepPredictions][steps] = \
likelihoodsDict
if bestActValue is None:
inferences[InferenceElement.multiStepBestPredictions][steps] = \
None
else:
inferences[InferenceElement.multiStepBestPredictions][steps] = \
absoluteValue + sumDelta + bestActValue
# ---------------------------------------------------------------------
# Normal case, no delta encoder. Just plug in all our multi-step predictions
# with likelihoods as well as our best prediction
else:
# The multiStepPredictions element holds the probabilities for each
# bucket
inferences[InferenceElement.multiStepPredictions][steps] = \
likelihoodsDict
inferences[InferenceElement.multiStepBestPredictions][steps] = \
bestActValue
return inferences
#############################################################################
@classmethod
def _removeUnlikelyPredictions(cls, likelihoodsDict, minLikelihoodThreshold,
maxPredictionsPerStep):
"""Remove entries with 0 likelihood or likelihood less than
minLikelihoodThreshold, but don't leave an empty dict.
"""
maxVal = (None, None)
for (k, v) in likelihoodsDict.items():
if len(likelihoodsDict) <= 1:
break
if maxVal[0] is None or v >= maxVal[1]:
if maxVal[0] is not None and maxVal[1] < minLikelihoodThreshold:
del likelihoodsDict[maxVal[0]]
maxVal = (k, v)
elif v < minLikelihoodThreshold:
del likelihoodsDict[k]
# Limit the number of predictions to include.
likelihoodsDict = dict(sorted(likelihoodsDict.iteritems(),
key=itemgetter(1),
reverse=True)[:maxPredictionsPerStep])
return likelihoodsDict
def getRuntimeStats(self):
""" [virtual method override] get runtime statistics specific to this
model, i.e. activeCellOverlapAvg
return:
a dict where keys are statistic names and values are the stats
"""
ret = {"numRunCalls" : self.__numRunCalls}
#--------------------------------------------------
# Query temporal network stats
temporalStats = dict()
if self._hasTP:
for stat in self._netInfo.statsCollectors:
sdict = stat.getStats()
temporalStats.update(sdict)
ret[InferenceType.getLabel(InferenceType.TemporalNextStep)] = temporalStats
return ret
def getFieldInfo(self, includeClassifierOnlyField=False):
""" [virtual method override]
Returns the sequence of FieldMetaInfo objects specifying this
Model's output; note that this may be different than the list of
FieldMetaInfo objects supplied at initialization (e.g., due to the
transcoding of some input fields into meta-fields, such as datetime
-> dayOfWeek, timeOfDay, etc.)
Returns: List of FieldMetaInfo objects (see description above)
"""
encoder = self._getEncoder()
fieldNames = encoder.getScalarNames()
fieldTypes = encoder.getDecoderOutputFieldTypes()
assert len(fieldNames) == len(fieldTypes)
# Also include the classifierOnly field?
encoder = self._getClassifierOnlyEncoder()
if includeClassifierOnlyField and encoder is not None:
addFieldNames = encoder.getScalarNames()
addFieldTypes = encoder.getDecoderOutputFieldTypes()
assert len(addFieldNames) == len(addFieldTypes)
fieldNames = list(fieldNames) + addFieldNames
fieldTypes = list(fieldTypes) + addFieldTypes
fieldMetaList = map(FieldMetaInfo._make,
zip(fieldNames,
fieldTypes,
itertools.repeat(FieldMetaSpecial.none)))
return tuple(fieldMetaList)
def _getLogger(self):
""" Get the logger for this object. This is a protected method that is used
by the Model to access the logger created by the subclass
return:
A logging.Logger object. Should not be None
"""
return self.__logger
def _getSPRegion(self):
"""
Returns reference to the network's SP region
"""
return self._netInfo.net.regions.get('SP', None)
def _getTPRegion(self):
"""
Returns reference to the network's TP region
"""
return self._netInfo.net.regions.get('TP', None)
def _getSensorRegion(self):
"""
Returns reference to the network's Sensor region
"""
return self._netInfo.net.regions['sensor']
def _getClassifierRegion(self):
"""
Returns reference to the network's Classifier region
"""
if (self._netInfo.net is not None and
"Classifier" in self._netInfo.net.regions):
return self._netInfo.net.regions["Classifier"]
else:
return None
def _getAnomalyClassifier(self):
return self._netInfo.net.regions.get("AnomalyClassifier", None)
def _getEncoder(self):
"""
Returns: sensor region's encoder for the given network
"""
return self._getSensorRegion().getSelf().encoder
def _getClassifierOnlyEncoder(self):
"""
Returns: sensor region's encoder that is sent only to the classifier,
not to the bottom of the network
"""
return self._getSensorRegion().getSelf().disabledEncoder
def _getDataSource(self):
"""
Returns: data source that we installed in sensor region
"""
return self._getSensorRegion().getSelf().dataSource
#############################################################################
def __createCLANetwork(self, sensorParams, spEnable, spParams, tpEnable,
tpParams, clEnable, clParams, anomalyParams):
""" Create a CLA network and return it.
description: CLA Model description dictionary (TODO: define schema)
Returns: NetworkInfo instance;
"""
#--------------------------------------------------
# Create the network
n = Network()
#--------------------------------------------------
# Add the Sensor
n.addRegion("sensor", "py.RecordSensor", json.dumps(dict(verbosity=sensorParams['verbosity'])))
sensor = n.regions['sensor'].getSelf()
enabledEncoders = copy.deepcopy(sensorParams['encoders'])
for name, params in enabledEncoders.items():
if params is not None:
classifierOnly = params.pop('classifierOnly', False)
if classifierOnly:
enabledEncoders.pop(name)
# Disabled encoders are encoders that are fed to CLAClassifierRegion but not
# SP or TP Regions. This is to handle the case where the predicted field
# is not fed through the SP/TP. We typically just have one of these now.
disabledEncoders = copy.deepcopy(sensorParams['encoders'])
for name, params in disabledEncoders.items():
if params is None:
disabledEncoders.pop(name)
else:
classifierOnly = params.pop('classifierOnly', False)
if not classifierOnly:
disabledEncoders.pop(name)
encoder = MultiEncoder(enabledEncoders)
sensor.encoder = encoder
sensor.disabledEncoder = MultiEncoder(disabledEncoders)
sensor.dataSource = DataBuffer()
prevRegion = "sensor"
prevRegionWidth = encoder.getWidth()
# SP is not enabled for spatial classification network
if spEnable:
spParams = spParams.copy()
spParams['inputWidth'] = prevRegionWidth
self.__logger.debug("Adding SPRegion; spParams: %r" % spParams)
n.addRegion("SP", "py.SPRegion", json.dumps(spParams))
# Link SP region
n.link("sensor", "SP", "UniformLink", "")
n.link("sensor", "SP", "UniformLink", "", srcOutput="resetOut",
destInput="resetIn")
n.link("SP", "sensor", "UniformLink", "", srcOutput="spatialTopDownOut",
destInput="spatialTopDownIn")
n.link("SP", "sensor", "UniformLink", "", srcOutput="temporalTopDownOut",
destInput="temporalTopDownIn")
prevRegion = "SP"
prevRegionWidth = spParams['columnCount']
if tpEnable:
tpParams = tpParams.copy()
if prevRegion == 'sensor':
tpParams['inputWidth'] = tpParams['columnCount'] = prevRegionWidth
else:
assert tpParams['columnCount'] == prevRegionWidth
tpParams['inputWidth'] = tpParams['columnCount']
self.__logger.debug("Adding TPRegion; tpParams: %r" % tpParams)
n.addRegion("TP", "py.TPRegion", json.dumps(tpParams))
# Link TP region
n.link(prevRegion, "TP", "UniformLink", "")
if prevRegion != "sensor":
n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut",
destInput="topDownIn")
else:
n.link("TP", prevRegion, "UniformLink", "", srcOutput="topDownOut",
destInput="temporalTopDownIn")
n.link("sensor", "TP", "UniformLink", "", srcOutput="resetOut",
destInput="resetIn")
prevRegion = "TP"
prevRegionWidth = tpParams['inputWidth']
if clEnable and clParams is not None:
clParams = clParams.copy()
clRegionName = clParams.pop('regionName')
self.__logger.debug("Adding %s; clParams: %r" % (clRegionName,
clParams))
n.addRegion("Classifier", "py.%s" % str(clRegionName), json.dumps(clParams))
n.link("sensor", "Classifier", "UniformLink", "", srcOutput="categoryOut",
destInput="categoryIn")
n.link(prevRegion, "Classifier", "UniformLink", "")
if self.getInferenceType() == InferenceType.TemporalAnomaly:
anomalyClParams = dict(
trainRecords=anomalyParams.get('autoDetectWaitRecords', None),
cacheSize=anomalyParams.get('anomalyCacheRecords', None)
)
self._addAnomalyClassifierRegion(n, anomalyClParams, spEnable, tpEnable)
#--------------------------------------------------
# NuPIC doesn't initialize the network until you try to run it
# but users may want to access components in a setup callback
n.initialize()
return NetworkInfo(net=n, statsCollectors=[])
#############################################################################
#
# CLAModel Methods to support serialization
#
#############################################################################
def __getstate__(self):
"""
Return serializable state. This function will return a version of the
__dict__ with data that shouldn't be pickled stripped out. In particular,
the CLA Network is stripped out because it has its own serialization
mechanism.
See also: _serializeExtraData()
"""
# Remove ephemeral member variables from state
state = self.__dict__.copy()
state["_netInfo"] = NetworkInfo(net=None,
statsCollectors=self._netInfo.statsCollectors)
for ephemeral in [self.__manglePrivateMemberName("__restoringFromState"),
self.__manglePrivateMemberName("__logger")]:
state.pop(ephemeral)
return state
def __setstate__(self, state):
"""
Set the state of ourself from a serialized state.
See also: _deSerializeExtraData
"""
self.__dict__.update(state)
# Mark beginning of restoration.
#
# self.__restoringFromState will be reset to False upon completion of
# object restoration in _deSerializeExtraData()
self.__restoringFromState = True
# set up logging
self.__logger = initLogger(self)
# =========================================================================
# TODO: Temporary migration solution
if not hasattr(self, "_Model__inferenceType"):
self.__restoringFromV1 = True
self._hasSP = True
if self.__temporalNetInfo is not None:
self._Model__inferenceType = InferenceType.TemporalNextStep
self._netInfo = self.__temporalNetInfo
self._hasTP = True
else:
raise RuntimeError("The Nontemporal inference type is not supported")
self._Model__inferenceArgs = {}
self._Model__learningEnabled = True
self._Model__inferenceEnabled = True
# Remove obsolete members
self.__dict__.pop("_CLAModel__encoderNetInfo", None)
self.__dict__.pop("_CLAModel__nonTemporalNetInfo", None)
self.__dict__.pop("_CLAModel__temporalNetInfo", None)
# -----------------------------------------------------------------------
# Migrate from v2
if not hasattr(self, "_netInfo"):
self._hasSP = False
self._hasTP = False
if self.__encoderNetInfo is not None:
self._netInfo = self.__encoderNetInfo
elif self.__nonTemporalNetInfo is not None:
self._netInfo = self.__nonTemporalNetInfo
self._hasSP = True
else:
self._netInfo = self.__temporalNetInfo
self._hasSP = True
self._hasTP = True
# Remove obsolete members
self.__dict__.pop("_CLAModel__encoderNetInfo", None)
self.__dict__.pop("_CLAModel__nonTemporalNetInfo", None)
self.__dict__.pop("_CLAModel__temporalNetInfo", None)
# -----------------------------------------------------------------------
# Migrate from when Anomaly was not separate class
if not hasattr(self, "_anomalyInst"):
self._anomalyInst = Anomaly()
# This gets filled in during the first infer because it can only be
# determined at run-time
self._classifierInputEncoder = None
if not hasattr(self, '_minLikelihoodThreshold'):
self._minLikelihoodThreshold = DEFAULT_LIKELIHOOD_THRESHOLD
if not hasattr(self, '_maxPredictionsPerStep'):
self._maxPredictionsPerStep = DEFAULT_MAX_PREDICTIONS_PER_STEP
if not hasattr(self, '_hasCL'):
self._hasCL = (self._getClassifierRegion() is not None)
self.__logger.debug("Restoring %s from state..." % self.__class__.__name__)
def _serializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during serialization
with an external directory path that can be used to bypass pickle for saving
large binary states.
extraDataDir:
Model's extra data directory path
"""
makeDirectoryFromAbsolutePath(extraDataDir)
#--------------------------------------------------
# Save the network
outputDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug("Serializing network...")
self._netInfo.net.save(outputDir)
self.__logger.debug("Finished serializing network")
return
def _deSerializeExtraData(self, extraDataDir):
""" [virtual method override] This method is called during deserialization
(after __setstate__) with an external directory path that can be used to
bypass pickle for loading large binary states.
extraDataDir:
Model's extra data directory path
"""
assert self.__restoringFromState
#--------------------------------------------------
# Check to make sure that our Network member wasn't restored from
# serialized data
assert (self._netInfo.net is None), "Network was already unpickled"
#--------------------------------------------------
# Restore the network
stateDir = self.__getNetworkStateDirectory(extraDataDir=extraDataDir)
self.__logger.debug(
"(%s) De-serializing network...", self)
self._netInfo.net = Network(stateDir)
self.__logger.debug(
"(%s) Finished de-serializing network", self)
# NuPIC doesn't initialize the network until you try to run it
# but users may want to access components in a setup callback
self._netInfo.net.initialize()
# Used for backwards compatibility for anomaly classification models.
# Previous versions used the CLAModelClassifierHelper class for utilizing
# the KNN classifier. Current version uses KNNAnomalyClassifierRegion to
# encapsulate all the classifier functionality.
if self.getInferenceType() == InferenceType.TemporalAnomaly:
classifierType = self._getAnomalyClassifier().getSelf().__class__.__name__
if classifierType == 'KNNClassifierRegion':
anomalyClParams = dict(
trainRecords=self._classifier_helper._autoDetectWaitRecords,
cacheSize=self._classifier_helper._history_length,
)
spEnable = (self._getSPRegion() is not None)
tpEnable = True
# Store original KNN region
knnRegion = self._getAnomalyClassifier().getSelf()
# Add new KNNAnomalyClassifierRegion
self._addAnomalyClassifierRegion(self._netInfo.net, anomalyClParams,
spEnable, tpEnable)
# Restore state
self._getAnomalyClassifier().getSelf()._iteration = self.__numRunCalls
self._getAnomalyClassifier().getSelf()._recordsCache = (
self._classifier_helper.saved_states)
self._getAnomalyClassifier().getSelf().saved_categories = (
self._classifier_helper.saved_categories)
self._getAnomalyClassifier().getSelf()._knnclassifier = knnRegion
# Set TP to output necessary information
self._getTPRegion().setParameter('anomalyMode', True)
# Remove old classifier_helper
del self._classifier_helper
self._netInfo.net.initialize()
#--------------------------------------------------
# Mark end of restoration from state
self.__restoringFromState = False
self.__logger.debug("(%s) Finished restoring from state", self)
return
def _addAnomalyClassifierRegion(self, network, params, spEnable, tpEnable):
"""
Attaches an 'AnomalyClassifier' region to the network. Will remove current
'AnomalyClassifier' region if it exists.
Parameters
-----------
network - network to add the AnomalyClassifier region
params - parameters to pass to the region
spEnable - True if network has an SP region
tpEnable - True if network has a TP region; Currently requires True
"""
allParams = copy.deepcopy(params)
knnParams = dict(k=1,
distanceMethod='rawOverlap',
distanceNorm=1,
doBinarization=1,
replaceDuplicates=0,
maxStoredPatterns=1000)
allParams.update(knnParams)
# Set defaults if not set
if allParams['trainRecords'] is None:
allParams['trainRecords'] = DEFAULT_ANOMALY_TRAINRECORDS
if allParams['cacheSize'] is None:
allParams['cacheSize'] = DEFAULT_ANOMALY_CACHESIZE
# Remove current instance if already created (used for deserializing)
if self._netInfo is not None and self._netInfo.net is not None \
and self._getAnomalyClassifier() is not None:
self._netInfo.net.removeRegion('AnomalyClassifier')
network.addRegion("AnomalyClassifier",
"py.KNNAnomalyClassifierRegion",
json.dumps(allParams))
# Attach link to SP
if spEnable:
network.link("SP", "AnomalyClassifier", "UniformLink", "",
srcOutput="bottomUpOut", destInput="spBottomUpOut")
else:
network.link("sensor", "AnomalyClassifier", "UniformLink", "",
srcOutput="dataOut", destInput="spBottomUpOut")
# Attach link to TP
if tpEnable:
network.link("TP", "AnomalyClassifier", "UniformLink", "",
srcOutput="topDownOut", destInput="tpTopDownOut")
network.link("TP", "AnomalyClassifier", "UniformLink", "",
srcOutput="lrnActiveStateT", destInput="tpLrnActiveStateT")
else:
raise RuntimeError("TemporalAnomaly models require a TP region.")
def __getNetworkStateDirectory(self, extraDataDir):
"""
extraDataDir:
Model's extra data directory path
Returns: Absolute directory path for saving CLA Network
"""
if self.__restoringFromV1:
if self.getInferenceType() == InferenceType.TemporalNextStep:
leafName = 'temporal'+ "-network.nta"
else:
leafName = 'nonTemporal'+ "-network.nta"
else:
leafName = InferenceType.getLabel(self.getInferenceType()) + "-network.nta"
path = os.path.join(extraDataDir, leafName)
path = os.path.abspath(path)
return path
def __manglePrivateMemberName(self, privateMemberName, skipCheck=False):
""" Mangles the given mangled (private) member name; a mangled member name
is one whose name begins with two or more underscores and ends with one
or zero underscores.
privateMemberName:
The private member name (e.g., "__logger")
skipCheck: Pass True to skip test for presence of the mangled member
in our instance.
Returns: The mangled member name (e.g., "_CLAModel__logger")
"""
assert privateMemberName.startswith("__"), \
"%r doesn't start with __" % privateMemberName
assert not privateMemberName.startswith("___"), \
"%r starts with ___" % privateMemberName
assert not privateMemberName.endswith("__"), \
"%r ends with more than one underscore" % privateMemberName
realName = "_" + (self.__myClassName).lstrip("_") + privateMemberName
if not skipCheck:
# This will throw an exception if the member is missing
getattr(self, realName)
return realName
###############################################################################
class DataBuffer(object):
"""
A simple FIFO stack. Add data when it's available, and
implement getNextRecordDict() so DataBuffer can be used as a DataSource
in a CLA Network.
Currently, DataBuffer requires the stack to contain 0 or 1 records.
This requirement may change in the future, and is trivially supported
by removing the assertions.
"""
def __init__(self):
self.stack = []
def push(self, data):
assert len(self.stack) == 0
# Copy the data, because sensor's pre-encoding filters (e.g.,
# AutoResetFilter) may modify it. Our caller relies on the input record
# remaining unmodified.
data = data.__class__(data)
self.stack.append(data)
def getNextRecordDict(self):
assert len(self.stack) > 0
return self.stack.pop()
| 1 | 17,112 | `classification` is misspelled | numenta-nupic | py |
@@ -152,7 +152,7 @@ public class SampleTransformer {
// to
// use is the first one on the list, since we set it in the overload of generateSamples.
if (methodSampleViews.size() > 0) {
- methodViewBuilder.initCode(methodSampleViews.get(0).initCode());
+ methodViewBuilder.initCode(methodSampleViews.get(0).methodInitCode());
// Don't emit samples that were specifically generated for backward-compatibility. We only
// wanted them for initCode above. | 1 | /* Copyright 2018 Google LLC
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* https://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.google.api.codegen.transformer;
import com.google.api.codegen.OutputSpec;
import com.google.api.codegen.SampleParameters;
import com.google.api.codegen.SampleValueSet;
import com.google.api.codegen.config.FieldConfig;
import com.google.api.codegen.config.MethodConfig;
import com.google.api.codegen.config.SampleParameterConfig;
import com.google.api.codegen.config.SampleSpec.SampleType;
import com.google.api.codegen.config.SampleSpec.ValueSetAndTags;
import com.google.api.codegen.metacode.InitCodeContext;
import com.google.api.codegen.metacode.InitCodeContext.InitCodeOutputType;
import com.google.api.codegen.util.Name;
import com.google.api.codegen.viewmodel.ApiMethodView;
import com.google.api.codegen.viewmodel.CallingForm;
import com.google.api.codegen.viewmodel.InitCodeView;
import com.google.api.codegen.viewmodel.MethodSampleView;
import com.google.api.codegen.viewmodel.OutputView;
import com.google.api.codegen.viewmodel.SampleValueSetView;
import com.google.common.base.CaseFormat;
import com.google.common.base.Strings;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import java.util.ArrayList;
import java.util.Collection;
import java.util.Collections;
import java.util.List;
/**
* A class that performs the transformations needed to generate the MethodSampleView for the
* applicable sample types.
*/
public class SampleTransformer {
private final SampleType sampleType;
private final OutputTransformer outputTransformer;
/**
* A functional interface provided by clients to generate an InitCodeView given an
* InitCodeContext.
*/
@FunctionalInterface
public interface Generator {
InitCodeView generate(InitCodeContext initCodeContext);
}
/**
* Constructs the SampleTransformer
*
* @param sampleType The SampleType this transformer should concern itself with. Other SampleTypes
* configured on the methods are ignored.
*/
public SampleTransformer(SampleType sampleType) {
this(sampleType, new OutputTransformer());
}
public SampleTransformer(SampleType sampleType, OutputTransformer outputTransformer) {
this.sampleType = sampleType;
this.outputTransformer = outputTransformer;
}
/**
* A placeholder id for the ValueSet we synthesize in generateSamples() to accommodate legacy
* configurations that still rely on sample_code_init_fields. The format of this string is
* purposefully set to something that will cause errors if it makes it to an artifact.
*/
private static final String INIT_CODE_SHIM = "[ shim for initcode ]";
/**
* Populates {@code methodViewBuilder.samples} with the appropriate {@code MethodSampleView}s.
* This method provides backward compatibility for configuring in-code samples via {@code
* MethodView.initCode}.
*
* @param methodViewBuilder
* @param context
* @param fieldConfigs
* @param initCodeOutputType
* @param generator The function (typically a lambda) to generate the InitCode for a sample given
* an InitCodeContext
* @param callingForms The list of calling forms applicable to this method, for which we will
* generate samples if so configured via {@code context.getMethodConfig()}
*/
public void generateSamples(
ApiMethodView.Builder methodViewBuilder,
MethodContext context,
Collection<FieldConfig> fieldConfigs,
InitCodeOutputType initCodeOutputType,
Generator generator,
List<CallingForm> callingForms) {
generateSamples(
methodViewBuilder,
context,
null,
fieldConfigs,
initCodeOutputType,
generator,
callingForms);
}
/**
* Populates {@code methodViewBuilder.samples} with the appropriate {@code MethodSampleView}s.
* This method provides backward compatibility for configuring in-code samples via {@code
* MethodView.initCode}.
*
* <p>TODO: Once MethodView.initCode is removed, this function becomes a one-liner (calling the
* overloaded form of generateSamples) that should then be inlined at the call sites:
* methodViewBuilder.samples( generateSamples(context, initContext, fieldConfigs,
* initCodeOutputType, generator, callingForms));
*
* @param methodViewBuilder
* @param context
* @param initContext If null, an {@code InitCodeContext} will be created for each sample based on
* the {@code fieldConfigs} and {@code initCodeOutputType}.
* @param fieldConfigs Only used if {@code initContext} is null to create an {@code
* InitCodeContext}. Can be null otherwise.
* @param initCodeOutputType Only used if {@code initContext} is null to create an {@code
* InitCodeContext}. Can be null otherwise.
* @param generator The function (typically a lambda) to generate the InitCode for a sample given
* an InitCodeContext
* @param callingForms The list of calling forms applicable to this method, for which we will
* generate samples if so configured via {@code context.getMethodConfig()}
*/
public void generateSamples(
ApiMethodView.Builder methodViewBuilder,
MethodContext context,
InitCodeContext initContext,
Collection<FieldConfig> fieldConfigs,
InitCodeOutputType initCodeOutputType,
Generator generator,
List<CallingForm> callingForms) {
List<MethodSampleView> methodSampleViews =
generateSamples(
context, initContext, fieldConfigs, initCodeOutputType, generator, callingForms);
// Until we get rid of the non-nullable StaticLangApiMethodView.initCode field, set it to a
// non-null value. For an in-code sample not specified through the SampleSpec, the correct value
// to
// use is the first one on the list, since we set it in the overload of generateSamples.
if (methodSampleViews.size() > 0) {
methodViewBuilder.initCode(methodSampleViews.get(0).initCode());
// Don't emit samples that were specifically generated for backward-compatibility. We only
// wanted them for initCode above.
if (methodSampleViews.get(0).valueSet().id().equals(INIT_CODE_SHIM)) {
methodSampleViews = Collections.emptyList();
}
}
methodViewBuilder.samples(methodSampleViews);
}
/**
* Returns a list of MethodSampleViews for the given MethodContext.
*
* @param initContext
* @param methodContext
* @param fieldConfigs
* @param initCodeOutputType
* @param sampleGenerator The function (typically a lambda) to generate the InitCode for a sample
* given an InitCodeContext
* @param callingForms The list of calling forms applicable to this method, for which we will
* generate samples if so configured via context.getMethodConfig()
* @return A list of the MethodSampleViews, each of which corresponds to a specific sample
* for the method
*/
public List<MethodSampleView> generateSamples(
MethodContext methodContext,
InitCodeContext initContext,
Collection<FieldConfig> fieldConfigs,
InitCodeOutputType initCodeOutputType,
Generator sampleGenerator,
List<CallingForm> callingForms) {
List<MethodSampleView> methodSampleViews = new ArrayList<>();
for (CallingForm form : callingForms) {
MethodConfig methodConfig = methodContext.getMethodConfig();
List<ValueSetAndTags> matchingValueSets =
methodConfig.getSampleSpec().getMatchingValueSets(form, sampleType);
// For backwards compatibility in the configs, we need to use sample_code_init_fields instead
// to generate the samples in various scenarios. Once all the configs have been migrated to
// use the SampleSpec, we can delete the code below as well as sample_code_init_fields.
String id = null;
if (sampleType == SampleType.IN_CODE) {
// for IN_CODE, have the source of truth be sample_code_init_fields for
// now even if otherwise configured
id = "sample_code_init_field";
} else if (!methodConfig.getSampleSpec().isConfigured()
// if not configured, make the sample_code_init_fields available to
// all sample types
|| matchingValueSets.isEmpty()) { // ApiMethodView.initCode still needs to be set for now
id = INIT_CODE_SHIM;
}
if (id != null) {
SampleValueSet valueSet =
SampleValueSet.newBuilder()
.setParameters(
SampleParameters.newBuilder()
.addAllDefaults(methodConfig.getSampleCodeInitFields())
.build())
.setId(id)
.setDescription("value set imported from sample_code_init_fields")
.setTitle("Sample Values")
.build();
matchingValueSets =
ImmutableList.of(ValueSetAndTags.newBuilder().values(valueSet).regionTag("").build());
}
for (ValueSetAndTags setAndTag : matchingValueSets) {
// Don't overwrite the initContext in outer scope.
InitCodeContext thisContext = initContext;
SampleValueSet valueSet = setAndTag.values();
if (thisContext == null) {
thisContext =
createInitCodeContext(
methodContext,
fieldConfigs,
initCodeOutputType,
valueSet.getParameters().getDefaultsList(),
valueSet
.getParameters()
.getAttributesList()
.stream()
.collect(
ImmutableMap.toImmutableMap(
attr -> attr.getParameter(),
attr ->
SampleParameterConfig.newBuilder()
.identifier(attr.getParameter())
.readFromFile(attr.getReadFile())
.sampleArgumentName(attr.getSampleArgumentName())
.build())));
}
InitCodeView initCodeView = sampleGenerator.generate(thisContext);
List<OutputSpec> outputs = valueSet.getOnSuccessList();
if (outputs.isEmpty()) {
outputs = OutputTransformer.defaultOutputSpecs(methodContext.getMethodModel());
}
ImmutableList<OutputView> outputViews =
outputTransformer.toViews(outputs, methodContext, valueSet);
methodSampleViews.add(
MethodSampleView.newBuilder()
.callingForm(form)
.valueSet(SampleValueSetView.of(valueSet))
.initCode(initCodeView)
.outputs(outputViews)
.outputImports(
outputTransformer
.getOutputImportTransformer()
.generateOutputImports(methodContext, outputViews))
.regionTag(
regionTagFromSpec(
setAndTag.regionTag(),
methodContext.getMethodModel().getSimpleName(),
form,
valueSet.getId()))
.sampleFunctionName(
methodContext.getNamer().getSampleFunctionName(methodContext.getMethodModel()))
.build());
}
}
return methodSampleViews;
}
/**
* Creates a region tag from the spec, replacing any {@code %m}, {@code %c}, and {@code %v}
* placeholders with the method name, calling form name, and value set id, respectively.
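*
* <p>For example (hypothetical inputs): spec {@code "%m_%c_%v"} with method {@code GetBook}, a
* calling form whose lower-camel name is {@code request}, and value set id {@code basic} would
* yield {@code get_book_request_basic}.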
*/
private static String regionTagFromSpec(
String spec, String methodName, CallingForm callingForm, String valueSetId) {
final String DEFAULT_REGION_SPEC = "sample";
if (Strings.isNullOrEmpty(spec)) {
spec = DEFAULT_REGION_SPEC;
}
return spec.replace("%m", CaseFormat.UPPER_CAMEL.to(CaseFormat.LOWER_UNDERSCORE, methodName))
.replace("%c", callingForm.toLowerCamel())
.replace("%v", valueSetId);
}
private InitCodeContext createInitCodeContext(
MethodContext context,
Collection<FieldConfig> fieldConfigs,
InitCodeOutputType initCodeOutputType,
List<String> sampleCodeDefaultValues,
ImmutableMap<String, SampleParameterConfig> sampleParamConfigMap) {
return InitCodeContext.newBuilder()
.initObjectType(context.getMethodModel().getInputType())
.suggestedName(Name.from("request"))
.initFieldConfigStrings(sampleCodeDefaultValues)
.sampleParamConfigMap(sampleParamConfigMap)
.initValueConfigMap(InitCodeTransformer.createCollectionMap(context))
.initFields(FieldConfig.toFieldTypeIterable(fieldConfigs))
.outputType(initCodeOutputType)
.fieldConfigMap(FieldConfig.toFieldConfigMap(fieldConfigs))
.build();
}
}
| 1 | 27,962 | Can you change this to `sampleInitCode`? | googleapis-gapic-generator | java |
@@ -823,6 +823,7 @@ public class TiDAGRequest implements Serializable {
sb.append(", Limit: ");
sb.append("[").append(limit).append("]");
}
+ sb.append(", startTs: ").append(startTs.getVersion());
return sb.toString();
}
| 1 | /*
* Copyright 2017 PingCAP, Inc.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
*/
package com.pingcap.tikv.meta;
import static com.google.common.base.Preconditions.checkArgument;
import static com.google.common.base.Preconditions.checkNotNull;
import static com.pingcap.tikv.predicates.PredicateUtils.mergeCNFExpressions;
import static java.util.Objects.requireNonNull;
import com.google.common.annotations.VisibleForTesting;
import com.google.common.base.Joiner;
import com.google.common.collect.ImmutableList;
import com.google.common.collect.ImmutableMap;
import com.pingcap.tidb.tipb.*;
import com.pingcap.tikv.codec.KeyUtils;
import com.pingcap.tikv.exception.DAGRequestException;
import com.pingcap.tikv.exception.TiClientInternalException;
import com.pingcap.tikv.expression.ByItem;
import com.pingcap.tikv.expression.ColumnRef;
import com.pingcap.tikv.expression.Expression;
import com.pingcap.tikv.expression.visitor.ExpressionTypeCoercer;
import com.pingcap.tikv.expression.visitor.MetaResolver;
import com.pingcap.tikv.expression.visitor.ProtoConverter;
import com.pingcap.tikv.key.RowKey;
import com.pingcap.tikv.types.DataType;
import com.pingcap.tikv.util.KeyRangeUtils;
import com.pingcap.tikv.util.Pair;
import java.io.*;
import java.util.*;
import java.util.stream.Collectors;
import javax.annotation.Nonnull;
import org.tikv.kvproto.Coprocessor;
/**
* Type TiDAGRequest.
*
* <p>Used for constructing a new DAG request to TiKV
*/
public class TiDAGRequest implements Serializable {
public List<TiPartitionDef> getPrunedParts() {
return prunedParts;
}
public void setPrunedParts(List<TiPartitionDef> prunedParts) {
this.prunedParts = prunedParts;
}
public static class Builder {
private List<String> requiredCols = new ArrayList<>();
private List<Expression> filters = new ArrayList<>();
private List<ByItem> orderBys = new ArrayList<>();
private List<Coprocessor.KeyRange> ranges = new ArrayList<>();
private TiTableInfo tableInfo;
private int limit;
private TiTimestamp startTs;
public static Builder newBuilder() {
return new Builder();
}
public Builder setFullTableScan(TiTableInfo tableInfo) {
requireNonNull(tableInfo);
setTableInfo(tableInfo);
if (!tableInfo.isPartitionEnabled()) {
RowKey start = RowKey.createMin(tableInfo.getId());
RowKey end = RowKey.createBeyondMax(tableInfo.getId());
ranges.add(KeyRangeUtils.makeCoprocRange(start.toByteString(), end.toByteString()));
} else {
for (TiPartitionDef pDef : tableInfo.getPartitionInfo().getDefs()) {
RowKey start = RowKey.createMin(pDef.getId());
RowKey end = RowKey.createBeyondMax(pDef.getId());
ranges.add(KeyRangeUtils.makeCoprocRange(start.toByteString(), end.toByteString()));
}
}
return this;
}
public Builder setLimit(int limit) {
this.limit = limit;
return this;
}
public Builder setTableInfo(TiTableInfo tableInfo) {
this.tableInfo = tableInfo;
return this;
}
public Builder addRequiredCols(String... cols) {
this.requiredCols.addAll(Arrays.asList(cols));
return this;
}
public Builder addRequiredCols(List<String> cols) {
this.requiredCols.addAll(cols);
return this;
}
public Builder addFilter(Expression filter) {
this.filters.add(filter);
return this;
}
public Builder addOrderBy(ByItem item) {
this.orderBys.add(item);
return this;
}
public Builder setStartTs(@Nonnull TiTimestamp ts) {
this.startTs = ts;
return this;
}
public TiDAGRequest build(PushDownType pushDownType) {
TiDAGRequest req = new TiDAGRequest(pushDownType);
req.setTableInfo(tableInfo);
req.addRanges(ranges);
filters.forEach(req::addFilter);
if (!orderBys.isEmpty()) {
orderBys.forEach(req::addOrderByItem);
}
if (limit != 0) {
req.setLimit(limit);
}
requiredCols.forEach(c -> req.addRequiredColumn(ColumnRef.create(c)));
req.setStartTs(startTs);
req.resolve();
return req;
}
}
public TiDAGRequest(PushDownType pushDownType) {
this.pushDownType = pushDownType;
}
public TiDAGRequest(PushDownType pushDownType, int timeZoneOffset) {
this(pushDownType);
this.timeZoneOffset = timeZoneOffset;
}
public enum TruncateMode {
IgnoreTruncation(0x1),
TruncationAsWarning(0x2);
private final long mask;
TruncateMode(long mask) {
this.mask = mask;
}
public long mask(long flags) {
return flags | mask;
}
}
/** Whether we use streaming to push down the request */
public enum PushDownType {
STREAMING,
NORMAL
}
/** Predefined executor priority map. */
private static final Map<ExecType, Integer> EXEC_TYPE_PRIORITY_MAP =
ImmutableMap.<ExecType, Integer>builder()
.put(ExecType.TypeTableScan, 0)
.put(ExecType.TypeIndexScan, 0)
.put(ExecType.TypeSelection, 1)
.put(ExecType.TypeAggregation, 2)
.put(ExecType.TypeTopN, 3)
.put(ExecType.TypeLimit, 4)
.build();
private TiTableInfo tableInfo;
private List<TiPartitionDef> prunedParts;
private TiIndexInfo indexInfo;
private final List<ColumnRef> fields = new ArrayList<>();
private final List<Expression> filters = new ArrayList<>();
private final List<ByItem> groupByItems = new ArrayList<>();
private final List<ByItem> orderByItems = new ArrayList<>();
private List<Expression> residualFilters = null;
// System like Spark has different type promotion rules
// we need a cast to target when given
private final List<Pair<Expression, DataType>> aggregates = new ArrayList<>();
private final List<Coprocessor.KeyRange> keyRanges = new ArrayList<>();
// If index scanning of this request is not possible in some scenario, we downgrade it to a
// table scan: instead of the index scan ranges stored in keyRanges, we use downgrade ranges
// together with downgradeFilters to perform the table scan.
private List<Expression> downgradeFilters = new ArrayList<>();
private int limit;
private int timeZoneOffset;
private long flags;
private TiTimestamp startTs;
private Expression having;
private boolean distinct;
private boolean isDoubleRead;
private final PushDownType pushDownType;
private IdentityHashMap<Expression, DataType> typeMap;
private double estimatedCount = -1;
private static ColumnInfo handleColumn =
ColumnInfo.newBuilder()
.setColumnId(-1)
.setPkHandle(true)
// We haven't changed the field name in the protobuf file, but
// we need to set this to true in order to retrieve the handle,
// so the name 'setPkHandle' may sound strange.
.build();
private List<Expression> getAllExpressions() {
ImmutableList.Builder<Expression> builder = ImmutableList.builder();
builder.addAll(getFields());
builder.addAll(getFilters());
builder.addAll(getAggregates());
getGroupByItems().forEach(item -> builder.add(item.getExpr()));
getOrderByItems().forEach(item -> builder.add(item.getExpr()));
if (having != null) {
builder.add(having);
}
return builder.build();
}
public DataType getExpressionType(Expression expression) {
requireNonNull(typeMap, "request is not resolved");
return typeMap.get(expression);
}
public void resolve() {
MetaResolver resolver = new MetaResolver(tableInfo);
ExpressionTypeCoercer inferrer = new ExpressionTypeCoercer();
resolver.resolve(getAllExpressions());
inferrer.infer(getAllExpressions());
typeMap = inferrer.getTypeMap();
}
/**
* Unifies indexScan and tableScan building logic since they are very much alike. A DAGRequest
* for IndexScan should also contain filters and aggregation, so we can reuse this part of the
* logic.
*
* <p>A DAGRequest is made up of a chain of executors with a strict order: TableScan/IndexScan >
* Selection > Aggregation > TopN/Limit. A DAGRequest must contain one and only one TableScan or
* IndexScan.
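*
* <p>Illustrative shape (hypothetical query): a request with one filter, an order-by and a
* limit yields the executor chain TableScan/IndexScan > Selection > TopN, with the TopN
* executor carrying the limit.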
*
* @param buildIndexScan whether the dagRequest to build should be an {@link
* com.pingcap.tidb.tipb.IndexScan}
* @return final DAGRequest built
*/
public DAGRequest buildScan(boolean buildIndexScan) {
long id = tableInfo.getId();
checkNotNull(startTs, "startTs is null");
checkArgument(startTs.getVersion() != 0, "timestamp is 0");
DAGRequest.Builder dagRequestBuilder = DAGRequest.newBuilder();
Executor.Builder executorBuilder = Executor.newBuilder();
IndexScan.Builder indexScanBuilder = IndexScan.newBuilder();
TableScan.Builder tblScanBuilder = TableScan.newBuilder();
// find a column's offset in fields
Map<ColumnRef, Integer> colOffsetInFieldMap = new HashMap<>();
// find a column's position in index
Map<TiColumnInfo, Integer> colPosInIndexMap = new HashMap<>();
if (buildIndexScan) {
// IndexScan
if (indexInfo == null) {
throw new TiClientInternalException("Index is empty for index scan");
}
List<TiColumnInfo> columnInfoList = tableInfo.getColumns();
boolean hasPk = false;
// We extract index column info
List<Integer> indexColOffsets =
indexInfo
.getIndexColumns()
.stream()
.map(TiIndexColumn::getOffset)
.collect(Collectors.toList());
int idxPos = 0;
// for index scan builder, columns are added by its order in index
for (Integer idx : indexColOffsets) {
TiColumnInfo tiColumnInfo = columnInfoList.get(idx);
ColumnInfo columnInfo = tiColumnInfo.toProto(tableInfo);
colPosInIndexMap.put(tiColumnInfo, idxPos++);
ColumnInfo.Builder colBuilder = ColumnInfo.newBuilder(columnInfo);
if (columnInfo.getColumnId() == -1) {
hasPk = true;
colBuilder.setPkHandle(true);
}
indexScanBuilder.addColumns(colBuilder);
}
if (isDoubleRead()) {
// double read case
if (!hasPk) {
indexScanBuilder.addColumns(handleColumn);
}
int colCount = indexScanBuilder.getColumnsCount();
// double read case: need to retrieve handle
dagRequestBuilder.addOutputOffsets(colCount != 0 ? colCount - 1 : 0);
} else {
int colCount = indexScanBuilder.getColumnsCount();
boolean pkIsNeeded = false;
// =================== IMPORTANT ======================
// offset for dagRequest should be in accordance with fields
for (ColumnRef col : getFields()) {
Integer pos = colPosInIndexMap.get(col.getColumnInfo());
if (pos != null) {
TiColumnInfo columnInfo = columnInfoList.get(indexColOffsets.get(pos));
if (col.getColumnInfo().equals(columnInfo)) {
dagRequestBuilder.addOutputOffsets(pos);
colOffsetInFieldMap.put(col, pos);
}
}
// if a column of field is not contained in index selected,
// logically it must be the pk column and
// the pkIsHandle must be true. Extra check here.
else if (col.getColumnInfo().isPrimaryKey() && tableInfo.isPkHandle()) {
pkIsNeeded = true;
// offset should be processed for each primary key encountered
dagRequestBuilder.addOutputOffsets(colCount);
// for index scan, column offset must be in the order of index->handle
colOffsetInFieldMap.put(col, indexColOffsets.size());
} else {
throw new DAGRequestException(
"columns other than primary key and index key exist in fields while index single read: "
+ col.getName());
}
}
// pk is not included in index but still needed
if (pkIsNeeded) {
indexScanBuilder.addColumns(handleColumn);
}
}
executorBuilder.setTp(ExecType.TypeIndexScan);
indexScanBuilder.setTableId(id).setIndexId(indexInfo.getId());
dagRequestBuilder.addExecutors(executorBuilder.setIdxScan(indexScanBuilder).build());
} else {
// TableScan
executorBuilder.setTp(ExecType.TypeTableScan);
tblScanBuilder.setTableId(id);
// Step1. Add columns to first executor
int lastOffset = 0;
for (ColumnRef col : getFields()) {
// can't allow duplicated col added into executor.
if (!colOffsetInFieldMap.containsKey(col)) {
tblScanBuilder.addColumns(col.getColumnInfo().toProto(tableInfo));
colOffsetInFieldMap.put(col, lastOffset);
lastOffset++;
}
// column offset should be in accordance with fields
dagRequestBuilder.addOutputOffsets(colOffsetInFieldMap.get(col));
}
dagRequestBuilder.addExecutors(executorBuilder.setTblScan(tblScanBuilder));
}
// Should build these executors when performing CoveringIndexScan/TableScan
if (!buildIndexScan || !isDoubleRead()) {
// clear executorBuilder
executorBuilder.clear();
// Step2. Add others
// DO NOT EDIT EXPRESSION CONSTRUCTION ORDER
// Or make sure the construction order is below:
// TableScan/IndexScan > Selection > Aggregation > TopN/Limit
Expression whereExpr = mergeCNFExpressions(getFilters());
if (whereExpr != null) {
executorBuilder.setTp(ExecType.TypeSelection);
dagRequestBuilder.addExecutors(
executorBuilder.setSelection(
Selection.newBuilder()
.addConditions(ProtoConverter.toProto(whereExpr, colOffsetInFieldMap))));
executorBuilder.clear();
}
if (!getGroupByItems().isEmpty() || !getAggregates().isEmpty()) {
Aggregation.Builder aggregationBuilder = Aggregation.newBuilder();
getGroupByItems()
.forEach(
tiByItem ->
aggregationBuilder.addGroupBy(
ProtoConverter.toProto(tiByItem.getExpr(), colOffsetInFieldMap)));
getAggregates()
.forEach(
tiExpr ->
aggregationBuilder.addAggFunc(
ProtoConverter.toProto(tiExpr, colOffsetInFieldMap)));
executorBuilder.setTp(ExecType.TypeAggregation);
dagRequestBuilder.addExecutors(executorBuilder.setAggregation(aggregationBuilder));
executorBuilder.clear();
}
if (!getOrderByItems().isEmpty()) {
TopN.Builder topNBuilder = TopN.newBuilder();
getOrderByItems()
.forEach(
tiByItem ->
topNBuilder.addOrderBy(
com.pingcap.tidb.tipb.ByItem.newBuilder()
.setExpr(
ProtoConverter.toProto(tiByItem.getExpr(), colOffsetInFieldMap))
.setDesc(tiByItem.isDesc())));
executorBuilder.setTp(ExecType.TypeTopN);
topNBuilder.setLimit(getLimit());
dagRequestBuilder.addExecutors(executorBuilder.setTopN(topNBuilder));
executorBuilder.clear();
} else if (getLimit() != 0) {
Limit.Builder limitBuilder = Limit.newBuilder();
limitBuilder.setLimit(getLimit());
executorBuilder.setTp(ExecType.TypeLimit);
dagRequestBuilder.addExecutors(executorBuilder.setLimit(limitBuilder));
executorBuilder.clear();
}
}
DAGRequest request =
dagRequestBuilder
.setTimeZoneOffset(timeZoneOffset)
.setFlags(flags)
.setStartTs(startTs.getVersion())
.build();
validateRequest(request);
return request;
}
/**
* Check if a DAG request is valid.
*
* <p>Note: When constructing a DAG request, an executor with an ExecType of higher priority should
* always be placed before those lower ones.
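* For example (illustrative): an executor list [TypeTableScan, TypeTopN, TypeSelection] fails
* validation, because the EXEC_TYPE_PRIORITY_MAP value of Selection (1) is smaller than that of
* the preceding TopN (3); the values must be non-decreasing along the chain.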
*
* @param dagRequest Request DAG.
*/
private void validateRequest(DAGRequest dagRequest) {
requireNonNull(dagRequest);
// A DAG request must have at least one executor.
if (dagRequest.getExecutorsCount() < 1) {
throw new DAGRequestException("Invalid executors count:" + dagRequest.getExecutorsCount());
}
ExecType formerType = dagRequest.getExecutors(0).getTp();
if (formerType != ExecType.TypeTableScan && formerType != ExecType.TypeIndexScan) {
throw new DAGRequestException(
"Invalid first executor type:"
+ formerType
+ ", must one of TypeTableScan or TypeIndexScan");
}
for (int i = 1; i < dagRequest.getExecutorsCount(); i++) {
ExecType currentType = dagRequest.getExecutors(i).getTp();
if (EXEC_TYPE_PRIORITY_MAP.get(currentType) < EXEC_TYPE_PRIORITY_MAP.get(formerType)) {
throw new DAGRequestException("Invalid executor priority.");
}
formerType = currentType;
}
}
public TiDAGRequest setTableInfo(TiTableInfo tableInfo) {
this.tableInfo = requireNonNull(tableInfo, "tableInfo is null");
return this;
}
public TiTableInfo getTableInfo() {
return this.tableInfo;
}
public List<Long> getIds() {
if (!this.tableInfo.isPartitionEnabled()) {
return ImmutableList.of(this.tableInfo.getId());
}
List<Long> ids = new ArrayList<>();
for (TiPartitionDef pDef : this.getPrunedParts()) {
ids.add(pDef.getId());
}
return ids;
}
public TiDAGRequest setIndexInfo(TiIndexInfo indexInfo) {
this.indexInfo = requireNonNull(indexInfo, "indexInfo is null");
return this;
}
public TiIndexInfo getIndexInfo() {
return indexInfo;
}
public void clearIndexInfo() {
indexInfo = null;
}
public int getLimit() {
return limit;
}
/**
* add limit clause to select query.
*
* @param limit is just an integer.
* @return a SelectBuilder
*/
public TiDAGRequest setLimit(int limit) {
this.limit = limit;
return this;
}
/**
* set timezone offset
*
* @param timeZoneOffset timezone offset
* @return a TiDAGRequest
*/
public TiDAGRequest setTimeZoneOffset(int timeZoneOffset) {
this.timeZoneOffset = timeZoneOffset;
return this;
}
int getTimeZoneOffset() {
return timeZoneOffset;
}
/**
* set truncate mode
*
* @param mode truncate mode
* @return a TiDAGRequest
*/
public TiDAGRequest setTruncateMode(TiDAGRequest.TruncateMode mode) {
flags = requireNonNull(mode, "mode is null").mask(flags);
return this;
}
@VisibleForTesting
long getFlags() {
return flags;
}
/**
* set start timestamp for the transaction
*
* @param startTs timestamp
* @return a TiDAGRequest
*/
public TiDAGRequest setStartTs(@Nonnull TiTimestamp startTs) {
this.startTs = startTs;
return this;
}
@VisibleForTesting
public TiTimestamp getStartTs() {
return startTs;
}
/**
* set having clause to select query
*
* @param having is a expression represents Having
* @return a TiDAGRequest
*/
public TiDAGRequest setHaving(Expression having) {
this.having = requireNonNull(having, "having is null");
return this;
}
public TiDAGRequest setDistinct(boolean distinct) {
this.distinct = distinct;
return this;
}
public boolean isDistinct() {
return distinct;
}
public TiDAGRequest addAggregate(Expression expr, DataType targetType) {
requireNonNull(expr, "aggregation expr is null");
aggregates.add(Pair.create(expr, targetType));
return this;
}
public List<Expression> getAggregates() {
return aggregates.stream().map(p -> p.first).collect(Collectors.toList());
}
public List<Pair<Expression, DataType>> getAggregatePairs() {
return aggregates;
}
/**
* add a order by clause to select query.
*
* @param byItem is a TiByItem.
* @return a SelectBuilder
*/
public TiDAGRequest addOrderByItem(ByItem byItem) {
orderByItems.add(requireNonNull(byItem, "byItem is null"));
return this;
}
List<ByItem> getOrderByItems() {
return orderByItems;
}
/**
* add a group by clause to select query
*
* @param byItem is a TiByItem
* @return a SelectBuilder
*/
public TiDAGRequest addGroupByItem(ByItem byItem) {
groupByItems.add(requireNonNull(byItem, "byItem is null"));
return this;
}
public List<ByItem> getGroupByItems() {
return groupByItems;
}
/**
* Field is not supported in TiDB yet, so here we simply allow TiColumnRef instead of TiExpr as
* in the SelectRequest proto
*
* <p>
*
* <p>This interface allows duplicate columns, and it's the user's responsibility to dedup, since
* we need to ensure exact order and items are preserved during decoding
*
* @param column is column referred during selectReq
*/
public TiDAGRequest addRequiredColumn(ColumnRef column) {
fields.add(requireNonNull(column, "columnRef is null"));
return this;
}
public List<ColumnRef> getFields() {
return fields;
}
/**
* set key range of scan
*
* @param ranges key range of scan
*/
public TiDAGRequest addRanges(List<Coprocessor.KeyRange> ranges) {
keyRanges.addAll(requireNonNull(ranges, "KeyRange is null"));
return this;
}
public void resetFilters(List<Expression> filters) {
this.filters.clear();
this.filters.addAll(filters);
}
public List<Coprocessor.KeyRange> getRanges() {
return keyRanges;
}
public TiDAGRequest addFilter(Expression filter) {
this.filters.add(requireNonNull(filter, "filters expr is null"));
return this;
}
public List<Expression> getDowngradeFilters() {
return downgradeFilters;
}
public TiDAGRequest addDowngradeFilter(Expression filter) {
this.downgradeFilters.add(requireNonNull(filter, "downgrade filter is null"));
return this;
}
/**
* Check whether the DAG request has any aggregate expression.
*
* @return the boolean
*/
public boolean hasAggregate() {
return !getAggregates().isEmpty();
}
/**
* Check whether the DAG request has any group by expression.
*
* @return the boolean
*/
public boolean hasGroupBy() {
return !getGroupByItems().isEmpty();
}
public List<Expression> getFilters() {
return filters;
}
/**
* Returns whether we need to read the handle from the index first and then find its
* corresponding row, i.e., a "double read"
*
* @return boolean
*/
public boolean isDoubleRead() {
return isDoubleRead;
}
/**
* Sets isDoubleRead
*
* @param isDoubleRead if is double read
*/
public void setIsDoubleRead(boolean isDoubleRead) {
this.isDoubleRead = isDoubleRead;
}
/**
* Returns whether the request is a covering index scan
*
* @return boolean
*/
public boolean isCoveringIndexScan() {
return hasIndex() && !isDoubleRead();
}
/**
* Returns whether this request is of indexScanType
*
* @return true iff indexInfo is provided, false otherwise
*/
public boolean hasIndex() {
return indexInfo != null;
}
/**
* Whether we use streaming processing to retrieve data
*
* @return push down type.
*/
public PushDownType getPushDownType() {
return pushDownType;
}
/** Set the estimated row count that will be fetched from this request. */
public void setEstimatedCount(double estimatedCount) {
this.estimatedCount = estimatedCount;
}
/** Get the estimated row count that will be fetched from this request. */
public double getEstimatedCount() {
return estimatedCount;
}
@Override
public String toString() {
StringBuilder sb = new StringBuilder();
if (tableInfo != null) {
sb.append(String.format("[table: %s] ", tableInfo.getName()));
}
if (indexInfo != null) {
sb.append(String.format("[Index: %s] ", indexInfo.getName()));
}
if (!getFields().isEmpty()) {
sb.append(", Columns: ");
Joiner.on(", ").skipNulls().appendTo(sb, getFields());
}
if (!getDowngradeFilters().isEmpty()) {
// should be called after all parameters are set
if (residualFilters == null) {
residualFilters = new ArrayList<>(getDowngradeFilters());
residualFilters.removeAll(new HashSet<>(getFilters()));
}
if (!residualFilters.isEmpty()) {
sb.append(", Residual Filter: ");
Joiner.on(", ").skipNulls().appendTo(sb, residualFilters);
}
}
if (!getFilters().isEmpty()) {
sb.append(", PushDown Filter: ");
Joiner.on(", ").skipNulls().appendTo(sb, getFilters());
}
// Key ranges might be also useful
if (!getRanges().isEmpty()) {
sb.append(", KeyRange: ");
getRanges().forEach(x -> sb.append(KeyUtils.formatBytes(x)));
}
if (!getAggregates().isEmpty()) {
sb.append(", Aggregates: ");
Joiner.on(", ").skipNulls().appendTo(sb, getAggregates());
}
if (!getGroupByItems().isEmpty()) {
sb.append(", Group By: ");
Joiner.on(", ").skipNulls().appendTo(sb, getGroupByItems());
}
if (!getOrderByItems().isEmpty()) {
sb.append(", Order By: ");
Joiner.on(", ").skipNulls().appendTo(sb, getOrderByItems());
}
if (getLimit() != 0) {
sb.append(", Limit: ");
sb.append("[").append(limit).append("]");
}
return sb.toString();
}
public TiDAGRequest copy() {
try {
ByteArrayOutputStream baos = new ByteArrayOutputStream();
ObjectOutputStream oos = new ObjectOutputStream(baos);
oos.writeObject(this);
ByteArrayInputStream bais = new ByteArrayInputStream(baos.toByteArray());
ObjectInputStream ois = new ObjectInputStream(bais);
return ((TiDAGRequest) ois.readObject());
} catch (Exception e) {
throw new RuntimeException(e);
}
}
}
 | 1 | 9,631 | I added startTs information to the `TiDagRequest` output. Not really sure if it is useful? @zhexuany | pingcap-tispark | java |
@@ -27,7 +27,12 @@ function collectResultsFromFrames(
frames.forEach(frame => {
const tabindex = parseInt(frame.node.getAttribute('tabindex'), 10);
const focusable = isNaN(tabindex) || tabindex >= 0;
+
const rect = frame.node.getBoundingClientRect();
+ let width = parseInt(frame.node.getAttribute('width'), 10);
+ let height = parseInt(frame.node.getAttribute('height'), 10);
+ width = isNaN(width) ? rect.width : width;
+ height = isNaN(height) ? rect.height : height;
var params = {
options: options, | 1 | import queue from './queue';
import sendCommandToFrame from './send-command-to-frame';
import mergeResults from './merge-results';
import getSelector from './get-selector';
/**
* Sends a message to axe running in frames to start analysis and collate results (via `mergeResults`)
* @private
* @param {Context} parentContent The resolved Context object
* @param {Object} options Options object (as passed to `runRules`)
* @param {string} command Command sent to all frames
* @param {Array} parameter Array of values to be passed alongside the command
* @param {Function} callback Function to call when results from all frames have returned
*/
function collectResultsFromFrames(
parentContent,
options,
command,
parameter,
resolve,
reject
) {
var q = queue();
var frames = parentContent.frames;
// Tell each axe running in each frame to collect results
frames.forEach(frame => {
const tabindex = parseInt(frame.node.getAttribute('tabindex'), 10);
const focusable = isNaN(tabindex) || tabindex >= 0;
const rect = frame.node.getBoundingClientRect();
var params = {
options: options,
command: command,
parameter: parameter,
context: {
initiator: false,
// if any parent iframe is not focusable then the entire
// iframe stack is not focusable (even if the descendant
// iframe has tabindex=0 on it)
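// e.g. (illustrative): a parent frame with focusable=false forces this
// frame's context to focusable=false even if its own tabindex is "0"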
focusable: parentContent.focusable === false ? false : focusable,
boundingClientRect: {
width: rect.width,
height: rect.height
},
page: parentContent.page,
include: frame.include || [],
exclude: frame.exclude || []
}
};
q.defer((res, rej) => {
var node = frame.node;
sendCommandToFrame(
node,
params,
data => {
if (data) {
return res({
results: data,
frameElement: node,
frame: getSelector(node)
});
}
res(null);
},
rej
);
});
});
// Combine results from all frames and give it back
q.then(data => {
resolve(mergeResults(data, options));
}).catch(reject);
}
export default collectResultsFromFrames;
| 1 | 16,303 | This prevents margin/border/padding CSS from affecting the width/height (otherwise in Chrome a width and height of 1 would result in a rect size of 5x5). | dequelabs-axe-core | js |
@@ -22,10 +22,11 @@ class PublicPagesController < ApplicationController
@template = Template.live(params[:id])
# covers authorization for this action.
# Pundit doesn't support passing objects into scoped policies
+ skip_authorization
unless PublicPagePolicy.new(@template).template_export?
- raise Pundit::NotAuthorizedError
+ redirect_to public_templates_path, notice: "You are not authorized to export that template" and return
+ #raise Pundit::NotAuthorizedError
end
- skip_authorization
# now with prefetching (if guidance is added, prefetch annotations/guidance)
@template = Template.includes(
:org, | 1 | # frozen_string_literal: true
class PublicPagesController < ApplicationController
after_action :verify_authorized, except: [:template_index, :plan_index]
# GET template_index
# -----------------------------------------------------
def template_index
templates = Template.live(Template.families(Org.funder.pluck(:id)).pluck(:family_id))
.publicly_visible.pluck(:id) <<
Template.where(is_default: true).unarchived.published.pluck(:id)
@templates = Template.includes(:org)
.where(id: templates.uniq.flatten)
.unarchived.published.order(title: :asc).page(1)
end
# GET template_export/:id
# -----------------------------------------------------
def template_export
# only export live templates, id passed is family_id
@template = Template.live(params[:id])
# covers authorization for this action.
# Pundit dosent support passing objects into scoped policies
unless PublicPagePolicy.new(@template).template_export?
raise Pundit::NotAuthorizedError
end
skip_authorization
# now with prefetching (if guidance is added, prefetch annotations/guidance)
@template = Template.includes(
:org,
phases: {
sections: {
questions: [
:question_options,
:question_format,
:annotations
]
}
}
).find(@template.id)
@formatting = Settings::Template::DEFAULT_SETTINGS[:formatting]
begin
file_name = @template.title.gsub(/[^a-zA-Z\d\s]/, "").gsub(/ /, "_")
respond_to do |format|
format.docx do
render docx: "template_export", filename: "#{file_name}.docx"
end
format.pdf do
# rubocop:disable LineLength
render pdf: file_name,
margin: @formatting[:margin],
footer: {
center: _("Template created using the %{application_name} service. Last modified %{date}") % {
application_name: Rails.configuration.branding[:application][:name],
date: l(@template.updated_at.to_date, formats: :short)
},
font_size: 8,
spacing: (@formatting[:margin][:bottom] / 2) - 4,
right: "[page] of [topage]",
encoding: "utf8"
}
# rubocop:enable LineLength
end
end
rescue ActiveRecord::RecordInvalid => e
# What scenario is this triggered in? It's common to our export pages
redirect_to public_templates_path,
alert: _("Unable to download the DMP Template at this time.")
end
end
# GET /plans_index
# ------------------------------------------------------------------------------------
def plan_index
@plans = Plan.publicly_visible.includes(:template).page(1)
render "plan_index", locals: {
query_params: {
sort_field: "plans.updated_at",
sort_direction: "desc"
}
}
end
end
| 1 | 18,299 | prefer to just add this to the `verify_authorized` exception list above since we're already making exceptions. | DMPRoadmap-roadmap | rb |