Dataset columns (name: dtype, value summary):

cve_id: string (lengths 13-16)
obtain_all_privilege: string (3 distinct values)
obtain_user_privilege: string (2 distinct values)
obtain_other_privilege: string (2 distinct values)
user_interaction_required: string (3 distinct values)
cvss2_vector_string: string (106 distinct values)
cvss2_access_vector: string (4 distinct values)
cvss2_access_complexity: string (4 distinct values)
cvss2_authentication: string (3 distinct values)
cvss2_confidentiality_impact: string (4 distinct values)
cvss2_integrity_impact: string (4 distinct values)
cvss2_availability_impact: string (4 distinct values)
cvss2_base_score: string (50 distinct values)
cvss3_vector_string: string (226 distinct values)
cvss3_attack_vector: string (5 distinct values)
cvss3_attack_complexity: string (3 distinct values)
cvss3_privileges_required: string (4 distinct values)
cvss3_user_interaction: string (3 distinct values)
cvss3_scope: string (3 distinct values)
cvss3_confidentiality_impact: string (4 distinct values)
cvss3_integrity_impact: string (4 distinct values)
cvss3_availability_impact: string (4 distinct values)
cvss3_base_score: string (55 distinct values)
cvss3_base_severity: string (5 distinct values)
exploitability_score: string (22 distinct values)
impact_score: string (15 distinct values)
ac_insuf_info: string (3 distinct values)
reference_json: string (lengths 221-23.3k)
problemtype_json: string (200 distinct values)
severity: string (4 distinct values)
cve_nodes: string (lengths 2-33.1k)
cve_description: string (lengths 64-1.99k)
cve_last_modified_date: string (lengths 17-17)
cve_published_date: string (lengths 17-17)
cwe_name: string (125 distinct values)
cwe_description: string (124 distinct values)
cwe_extended_description: string (95 distinct values)
cwe_url: string (124 distinct values)
cwe_is_category: int64 (range 0-1)
commit_author: string (lengths 0-34)
commit_author_date: string (lengths 25-25)
commit_msg: string (lengths 0-13.3k)
commit_hash: string (lengths 40-40)
commit_is_merge: string (1 distinct value)
repo_name: string (467 distinct values)
repo_description: string (459 distinct values)
repo_date_created: string (467 distinct values)
repo_date_last_push: string (467 distinct values)
repo_homepage: string (294 distinct values)
repo_owner: string (470 distinct values)
repo_stars: string (406 distinct values)
repo_forks: string (352 distinct values)
function_name: string (lengths 3-120)
function_signature: string (lengths 6-640)
function_parameters: string (lengths 2-302)
function: string (lengths 12-114k)
function_token_count: string (lengths 1-5)
function_before_change: string (1 distinct value)
labels: int64 (range 1-1)
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::Eval
tflite::ops::builtin::lstm::full::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = static_cast<TfLiteLSTMParams*>(node->builtin_data); OpData* op_data = static_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(context, node, kCellToInputWeightsTensor); const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(context, node, kCellToForgetWeightsTensor); const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(context, node, kCellToOutputWeightsTensor); const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(context, node, kInputLayerNormCoefficientsTensor); const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(context, node, kForgetLayerNormCoefficientsTensor); const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(context, node, kCellLayerNormCoefficientsTensor); const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(context, node, kOutputLayerNormCoefficientsTensor); const TfLiteTensor* input_gate_bias = GetOptionalInputTensor(context, node, kInputGateBiasTensor); const TfLiteTensor* forget_gate_bias = GetInput(context, node, kForgetGateBiasTensor); const TfLiteTensor* cell_gate_bias = GetInput(context, node, kCellGateBiasTensor); const TfLiteTensor* output_gate_bias = GetInput(context, node, kOutputGateBiasTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, kProjectionBiasTensor); TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (input_to_output_weights->type) { case kTfLiteFloat32: { // Index the scratch buffers pointers to the global scratch buffer. 
TfLiteTensor* scratch_buffer = GetTemporary(context, node, 0); return lstm_eval::EvalFloat( input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, /*aux_input=*/nullptr, /*aux_input_to_input_weights=*/nullptr, /*aux_input_to_forget_weights=*/nullptr, /*aux_input_to_cell_weights=*/nullptr, /*aux_input_to_output_weights=*/nullptr, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, params, /*forward_sequence=*/true, /*time_major=*/true, /*output_offset=*/0, scratch_buffer, output_state, cell_state, output); } case kTfLiteUInt8: case kTfLiteInt8: { const bool is_hybrid = (input->type == kTfLiteFloat32); const bool is_sparse = input_to_output_weights->sparsity != nullptr; if (is_hybrid) { TfLiteTensor* row_sums = GetTemporary(context, node, kRowSums); const int row_sums_size = row_sums->dims->data[0]; if (is_sparse) { TfLiteTensor* input_to_input_weights_ledger = &context->tensors[op_data->ledger_index + kInputToInputWeightsLedgerOffset]; TfLiteTensor* input_to_forget_weights_ledger = &context->tensors[op_data->ledger_index + kInputToForgetWeightsLedgerOffset]; TfLiteTensor* input_to_cell_weights_ledger = &context->tensors[op_data->ledger_index + kInputToCellWeightsLedgerOffset]; TfLiteTensor* input_to_output_weights_ledger = &context->tensors[op_data->ledger_index + kInputToOutputWeightsLedgerOffset]; TfLiteTensor* recurrent_to_input_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToInputWeightsLedgerOffset]; TfLiteTensor* recurrent_to_forget_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToForgetWeightsLedgerOffset]; TfLiteTensor* recurrent_to_cell_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToCellWeightsLedgerOffset]; TfLiteTensor* recurrent_to_output_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToOutputWeightsLedgerOffset]; TfLiteTensor* projection_weights_ledger = &context->tensors[op_data->ledger_index + kProjectionWeightsLedgerOffset]; if (!op_data->ledger_initialized) { copy_ledger(input_to_input_weights == nullptr ? nullptr : input_to_input_weights->sparsity, input_to_input_weights_ledger); copy_ledger(input_to_forget_weights->sparsity, input_to_forget_weights_ledger); copy_ledger(input_to_cell_weights->sparsity, input_to_cell_weights_ledger); copy_ledger(input_to_output_weights->sparsity, input_to_output_weights_ledger); copy_ledger(recurrent_to_input_weights == nullptr ? 
nullptr : recurrent_to_input_weights->sparsity, recurrent_to_input_weights_ledger); copy_ledger(recurrent_to_forget_weights->sparsity, recurrent_to_forget_weights_ledger); copy_ledger(recurrent_to_cell_weights->sparsity, recurrent_to_cell_weights_ledger); copy_ledger(recurrent_to_output_weights->sparsity, recurrent_to_output_weights_ledger); copy_ledger(projection_weights->sparsity, projection_weights_ledger); op_data->ledger_initialized = true; } return lstm_eval::EvalHybrid( input, input_to_input_weights, input_to_input_weights_ledger, input_to_forget_weights, input_to_forget_weights_ledger, input_to_cell_weights, input_to_cell_weights_ledger, input_to_output_weights, input_to_output_weights_ledger, recurrent_to_input_weights, recurrent_to_input_weights_ledger, recurrent_to_forget_weights, recurrent_to_forget_weights_ledger, recurrent_to_cell_weights, recurrent_to_cell_weights_ledger, recurrent_to_output_weights, recurrent_to_output_weights_ledger, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, /*aux_input=*/nullptr, /*aux_input_to_input_weights=*/nullptr, /*aux_input_to_forget_weights=*/nullptr, /*aux_input_to_cell_weights=*/nullptr, /*aux_input_to_output_weights=*/nullptr, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_weights_ledger, projection_bias, params, /*forward_sequence=*/true, /*time_major=*/true, /*output_offset=*/0, GetTemporary(context, node, kScratchBuffer), GetTemporary(context, node, kInputScalingFactors), /*aux_input_sf=*/nullptr, GetTemporary(context, node, kOutputStateScalingFactors), GetTemporary(context, node, kProductScalingFactors), GetTemporary(context, node, kRecoveredCellWeights), GetTemporary(context, node, kInputQuantized), /*aux_input_quantized=*/nullptr, GetTemporary(context, node, kOutputStateQuantized), GetTemporary(context, node, kCellStateQuantized), output_state, cell_state, GetTemporary(context, node, kAccumScratch), output, GetTemporary(context, node, kInputZeroPoints), /*aux_input_zp=*/nullptr, GetTemporary(context, node, kOutputStateZeroPoints), row_sums, row_sums_size, &op_data->compute_row_sums, CpuBackendContext::GetFromContext(context)); } return lstm_eval::EvalHybrid( input, input_to_input_weights, /*input_to_input_weights_ledger*/ nullptr, input_to_forget_weights, /*input_to_forget_weights_ledger*/ nullptr, input_to_cell_weights, /*input_to_cell_weights_ledger*/ nullptr, input_to_output_weights, /*input_to_output_weights_ledger*/ nullptr, recurrent_to_input_weights, /*recurrent_to_input_weights_ledger*/ nullptr, recurrent_to_forget_weights, /*recurrent_to_forget_weights_ledger*/ nullptr, recurrent_to_cell_weights, /*recurrent_to_cell_weights_ledger*/ nullptr, recurrent_to_output_weights, /*recurrent_to_output_weights_ledger*/ nullptr, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, /*aux_input=*/nullptr, /*aux_input_to_input_weights=*/nullptr, /*aux_input_to_forget_weights=*/nullptr, /*aux_input_to_cell_weights=*/nullptr, /*aux_input_to_output_weights=*/nullptr, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, /*projection_weights_ledger*/ nullptr, projection_bias, params, /*forward_sequence=*/true, /*time_major=*/true, /*output_offset=*/0, 
GetTemporary(context, node, kScratchBuffer), GetTemporary(context, node, kInputScalingFactors), /*aux_input_sf=*/nullptr, GetTemporary(context, node, kOutputStateScalingFactors), GetTemporary(context, node, kProductScalingFactors), GetTemporary(context, node, kRecoveredCellWeights), GetTemporary(context, node, kInputQuantized), /*aux_input_quantized=*/nullptr, GetTemporary(context, node, kOutputStateQuantized), GetTemporary(context, node, kCellStateQuantized), output_state, cell_state, GetTemporary(context, node, kAccumScratch), output, GetTemporary(context, node, kInputZeroPoints), /*aux_input_zp=*/nullptr, GetTemporary(context, node, kOutputStateZeroPoints), row_sums, row_sums_size, &op_data->compute_row_sums, CpuBackendContext::GetFromContext(context)); } else { const int num_intermediate_tensors = node->intermediates->size; if (num_intermediate_tensors == 5) { TfLiteTensor* scratch0 = GetTemporary(context, node, 0); TfLiteTensor* scratch1 = GetTemporary(context, node, 1); TfLiteTensor* scratch2 = GetTemporary(context, node, 2); TfLiteTensor* scratch3 = GetTemporary(context, node, 3); TfLiteTensor* scratch4 = GetTemporary(context, node, 4); TfLiteTensor* scratch5 = GetTemporary(context, node, 5); return lstm_eval::EvalInteger8x8_16( input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, params, &op_data->integer_lstm_param, output_state, cell_state, output, scratch0, scratch1, scratch2, scratch3, scratch4, scratch5, CpuBackendContext::GetFromContext(context)); } else { TfLiteTensor* scratch0 = GetTemporary(context, node, 0); TfLiteTensor* scratch1 = GetTemporary(context, node, 1); TfLiteTensor* scratch2 = GetTemporary(context, node, 2); TfLiteTensor* scratch3 = GetTemporary(context, node, 3); TfLiteTensor* scratch4 = GetTemporary(context, node, 4); TfLiteTensor* scratch5 = GetTemporary(context, node, 5); TfLiteTensor* scratch6 = GetTemporary(context, node, 6); TfLiteTensor* scratch7 = GetTemporary(context, node, 7); return lstm_eval::EvalInteger8x8_8( input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, params, output_state, cell_state, output, &op_data->integer_lstm_param, scratch0, scratch1, scratch2, scratch3, scratch4, scratch5, scratch6, scratch7); return kTfLiteOk; } } } default: context->ReportError(context, "Type %d is not currently supported.", input_to_output_weights->type); return kTfLiteError; } return kTfLiteOk; }
1647
True
1
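The record above pairs CVE-2020-15211 with the patch commit whose message describes the mitigation: after the refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` may return `nullptr`, so each returned pointer is checked before use. The C++ sketch below only illustrates that check pattern; it is not the actual upstream diff, `EvalChecked` is an invented name, and the TensorFlow Lite kernel headers that declare these helpers are assumed to be on the include path.

```cpp
// Sketch only: the nullptr-check pattern described in the commit message.
// Assumes TF Lite kernel headers declaring TfLiteContext/TfLiteNode,
// GetInput/GetTemporary, TF_LITE_ENSURE and kTfLiteOk; kInputTensor is the
// kernel's own input-index constant (it appears in the function body above).
TfLiteStatus EvalChecked(TfLiteContext* context, TfLiteNode* node) {
  // GetInput may now return nullptr (e.g. for a bad tensor index), so the
  // pointer is validated before anything below dereferences it.
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);

  // Scratch buffers obtained via GetTemporary get the same treatment.
  TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/0);
  TF_LITE_ENSURE(context, scratch_buffer != nullptr);

  // ... remaining tensors are fetched and checked the same way ...
  return kTfLiteOk;
}
```

Per the commit message, tensors fetched with `GetVariableInput` or `GetOptionalInputTensor` only gain this check where the surrounding code would otherwise dereference the pointer unconditionally; slots that already tolerate `nullptr` are left alone.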
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::Eval
tflite::ops::builtin::lstm::full::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = static_cast<TfLiteLSTMParams*>(node->builtin_data); OpData* op_data = static_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(context, node, kCellToInputWeightsTensor); const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(context, node, kCellToForgetWeightsTensor); const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(context, node, kCellToOutputWeightsTensor); const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(context, node, kInputLayerNormCoefficientsTensor); const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(context, node, kForgetLayerNormCoefficientsTensor); const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(context, node, kCellLayerNormCoefficientsTensor); const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(context, node, kOutputLayerNormCoefficientsTensor); const TfLiteTensor* input_gate_bias = GetOptionalInputTensor(context, node, kInputGateBiasTensor); const TfLiteTensor* forget_gate_bias = GetInput(context, node, kForgetGateBiasTensor); const TfLiteTensor* cell_gate_bias = GetInput(context, node, kCellGateBiasTensor); const TfLiteTensor* output_gate_bias = GetInput(context, node, kOutputGateBiasTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, kProjectionBiasTensor); TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (input_to_output_weights->type) { case kTfLiteFloat32: { // Index the scratch buffers pointers to the global scratch buffer. 
TfLiteTensor* scratch_buffer = GetTemporary(context, node, 0); return lstm_eval::EvalFloat( input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, /*aux_input=*/nullptr, /*aux_input_to_input_weights=*/nullptr, /*aux_input_to_forget_weights=*/nullptr, /*aux_input_to_cell_weights=*/nullptr, /*aux_input_to_output_weights=*/nullptr, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, params, /*forward_sequence=*/true, /*time_major=*/true, /*output_offset=*/0, scratch_buffer, output_state, cell_state, output); } case kTfLiteUInt8: case kTfLiteInt8: { const bool is_hybrid = (input->type == kTfLiteFloat32); const bool is_sparse = input_to_output_weights->sparsity != nullptr; if (is_hybrid) { TfLiteTensor* row_sums = GetTemporary(context, node, kRowSums); const int row_sums_size = row_sums->dims->data[0]; if (is_sparse) { TfLiteTensor* input_to_input_weights_ledger = &context->tensors[op_data->ledger_index + kInputToInputWeightsLedgerOffset]; TfLiteTensor* input_to_forget_weights_ledger = &context->tensors[op_data->ledger_index + kInputToForgetWeightsLedgerOffset]; TfLiteTensor* input_to_cell_weights_ledger = &context->tensors[op_data->ledger_index + kInputToCellWeightsLedgerOffset]; TfLiteTensor* input_to_output_weights_ledger = &context->tensors[op_data->ledger_index + kInputToOutputWeightsLedgerOffset]; TfLiteTensor* recurrent_to_input_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToInputWeightsLedgerOffset]; TfLiteTensor* recurrent_to_forget_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToForgetWeightsLedgerOffset]; TfLiteTensor* recurrent_to_cell_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToCellWeightsLedgerOffset]; TfLiteTensor* recurrent_to_output_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToOutputWeightsLedgerOffset]; TfLiteTensor* projection_weights_ledger = &context->tensors[op_data->ledger_index + kProjectionWeightsLedgerOffset]; if (!op_data->ledger_initialized) { copy_ledger(input_to_input_weights == nullptr ? nullptr : input_to_input_weights->sparsity, input_to_input_weights_ledger); copy_ledger(input_to_forget_weights->sparsity, input_to_forget_weights_ledger); copy_ledger(input_to_cell_weights->sparsity, input_to_cell_weights_ledger); copy_ledger(input_to_output_weights->sparsity, input_to_output_weights_ledger); copy_ledger(recurrent_to_input_weights == nullptr ? 
nullptr : recurrent_to_input_weights->sparsity, recurrent_to_input_weights_ledger); copy_ledger(recurrent_to_forget_weights->sparsity, recurrent_to_forget_weights_ledger); copy_ledger(recurrent_to_cell_weights->sparsity, recurrent_to_cell_weights_ledger); copy_ledger(recurrent_to_output_weights->sparsity, recurrent_to_output_weights_ledger); copy_ledger(projection_weights->sparsity, projection_weights_ledger); op_data->ledger_initialized = true; } return lstm_eval::EvalHybrid( input, input_to_input_weights, input_to_input_weights_ledger, input_to_forget_weights, input_to_forget_weights_ledger, input_to_cell_weights, input_to_cell_weights_ledger, input_to_output_weights, input_to_output_weights_ledger, recurrent_to_input_weights, recurrent_to_input_weights_ledger, recurrent_to_forget_weights, recurrent_to_forget_weights_ledger, recurrent_to_cell_weights, recurrent_to_cell_weights_ledger, recurrent_to_output_weights, recurrent_to_output_weights_ledger, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, /*aux_input=*/nullptr, /*aux_input_to_input_weights=*/nullptr, /*aux_input_to_forget_weights=*/nullptr, /*aux_input_to_cell_weights=*/nullptr, /*aux_input_to_output_weights=*/nullptr, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_weights_ledger, projection_bias, params, /*forward_sequence=*/true, /*time_major=*/true, /*output_offset=*/0, GetTemporary(context, node, kScratchBuffer), GetTemporary(context, node, kInputScalingFactors), /*aux_input_sf=*/nullptr, GetTemporary(context, node, kOutputStateScalingFactors), GetTemporary(context, node, kProductScalingFactors), GetTemporary(context, node, kRecoveredCellWeights), GetTemporary(context, node, kInputQuantized), /*aux_input_quantized=*/nullptr, GetTemporary(context, node, kOutputStateQuantized), GetTemporary(context, node, kCellStateQuantized), output_state, cell_state, GetTemporary(context, node, kAccumScratch), output, GetTemporary(context, node, kInputZeroPoints), /*aux_input_zp=*/nullptr, GetTemporary(context, node, kOutputStateZeroPoints), row_sums, row_sums_size, &op_data->compute_row_sums, CpuBackendContext::GetFromContext(context)); } return lstm_eval::EvalHybrid( input, input_to_input_weights, /*input_to_input_weights_ledger*/ nullptr, input_to_forget_weights, /*input_to_forget_weights_ledger*/ nullptr, input_to_cell_weights, /*input_to_cell_weights_ledger*/ nullptr, input_to_output_weights, /*input_to_output_weights_ledger*/ nullptr, recurrent_to_input_weights, /*recurrent_to_input_weights_ledger*/ nullptr, recurrent_to_forget_weights, /*recurrent_to_forget_weights_ledger*/ nullptr, recurrent_to_cell_weights, /*recurrent_to_cell_weights_ledger*/ nullptr, recurrent_to_output_weights, /*recurrent_to_output_weights_ledger*/ nullptr, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, /*aux_input=*/nullptr, /*aux_input_to_input_weights=*/nullptr, /*aux_input_to_forget_weights=*/nullptr, /*aux_input_to_cell_weights=*/nullptr, /*aux_input_to_output_weights=*/nullptr, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, /*projection_weights_ledger*/ nullptr, projection_bias, params, /*forward_sequence=*/true, /*time_major=*/true, /*output_offset=*/0, 
GetTemporary(context, node, kScratchBuffer), GetTemporary(context, node, kInputScalingFactors), /*aux_input_sf=*/nullptr, GetTemporary(context, node, kOutputStateScalingFactors), GetTemporary(context, node, kProductScalingFactors), GetTemporary(context, node, kRecoveredCellWeights), GetTemporary(context, node, kInputQuantized), /*aux_input_quantized=*/nullptr, GetTemporary(context, node, kOutputStateQuantized), GetTemporary(context, node, kCellStateQuantized), output_state, cell_state, GetTemporary(context, node, kAccumScratch), output, GetTemporary(context, node, kInputZeroPoints), /*aux_input_zp=*/nullptr, GetTemporary(context, node, kOutputStateZeroPoints), row_sums, row_sums_size, &op_data->compute_row_sums, CpuBackendContext::GetFromContext(context)); } else { const int num_intermediate_tensors = node->intermediates->size; if (num_intermediate_tensors == 5) { TfLiteTensor* scratch0 = GetTemporary(context, node, 0); TfLiteTensor* scratch1 = GetTemporary(context, node, 1); TfLiteTensor* scratch2 = GetTemporary(context, node, 2); TfLiteTensor* scratch3 = GetTemporary(context, node, 3); TfLiteTensor* scratch4 = GetTemporary(context, node, 4); TfLiteTensor* scratch5 = GetTemporary(context, node, 5); return lstm_eval::EvalInteger8x8_16( input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, params, &op_data->integer_lstm_param, output_state, cell_state, output, scratch0, scratch1, scratch2, scratch3, scratch4, scratch5, CpuBackendContext::GetFromContext(context)); } else { TfLiteTensor* scratch0 = GetTemporary(context, node, 0); TfLiteTensor* scratch1 = GetTemporary(context, node, 1); TfLiteTensor* scratch2 = GetTemporary(context, node, 2); TfLiteTensor* scratch3 = GetTemporary(context, node, 3); TfLiteTensor* scratch4 = GetTemporary(context, node, 4); TfLiteTensor* scratch5 = GetTemporary(context, node, 5); TfLiteTensor* scratch6 = GetTemporary(context, node, 6); TfLiteTensor* scratch7 = GetTemporary(context, node, 7); return lstm_eval::EvalInteger8x8_8( input, input_to_input_weights, input_to_forget_weights, input_to_cell_weights, input_to_output_weights, recurrent_to_input_weights, recurrent_to_forget_weights, recurrent_to_cell_weights, recurrent_to_output_weights, cell_to_input_weights, cell_to_forget_weights, cell_to_output_weights, input_layer_norm_coefficients, forget_layer_norm_coefficients, cell_layer_norm_coefficients, output_layer_norm_coefficients, input_gate_bias, forget_gate_bias, cell_gate_bias, output_gate_bias, projection_weights, projection_bias, params, output_state, cell_state, output, &op_data->integer_lstm_param, scratch0, scratch1, scratch2, scratch3, scratch4, scratch5, scratch6, scratch7); return kTfLiteOk; } } } default: context->ReportError(context, "Type %d is not currently supported.", input_to_output_weights->type); return kTfLiteError; } return kTfLiteOk; }
1647
True
1
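The CVE description repeated in these records explains the underlying flaw: operator inputs and outputs are stored as indices into a tensor array owned by the subgraph, and the special value `-1` marks an optional tensor, so accepting `-1` for a slot that is not optional turns the double indexing into a read or write just outside the array. The standalone C++ sketch below only models that mechanism; the struct and function names are invented for the example and are not the TensorFlow Lite API.

```cpp
// Illustrative model of the double-indexing scheme from the CVE description.
// All names here are hypothetical; only the indexing pattern mirrors the text.
#include <cstddef>
#include <iostream>
#include <vector>

struct Tensor { float value = 0.0f; };

struct Subgraph {
  std::vector<Tensor> tensors;  // the subgraph owns the tensor storage
};

struct Operator {
  std::vector<int> inputs;      // indices into Subgraph::tensors; -1 == "optional, absent"
};

// Unsafe lookup: trusting idx means idx == -1 on a non-optional slot indexes
// one element before the array, the limited read/write gadget the CVE notes.
Tensor* GetTensorUnchecked(Subgraph& g, int idx) {
  return &g.tensors[idx];  // undefined behavior when idx is -1 or >= size
}

// Hardened lookup: -1 is honored only where the operator really allows an
// optional tensor, and every other index must lie inside [0, size).
Tensor* GetTensorChecked(Subgraph& g, int idx, bool allow_optional) {
  if (allow_optional && idx == -1) return nullptr;  // genuinely optional and absent
  if (idx < 0 || static_cast<std::size_t>(idx) >= g.tensors.size()) return nullptr;
  return &g.tensors[idx];
}

int main() {
  Subgraph graph;
  graph.tensors.resize(3);
  Operator op;
  op.inputs = {0, -1, 2};  // the middle slot claims to be optional

  // A slot that does not accept optional inputs must reject -1 instead of
  // letting it reach the unchecked array access.
  Tensor* t = GetTensorChecked(graph, op.inputs[1], /*allow_optional=*/false);
  std::cout << (t == nullptr ? "rejected -1 index\n" : "accepted\n");
  return 0;
}
```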
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::PopulatePrecomputedZPTimesWeightsWithBias
tflite::ops::builtin::lstm::full::PopulatePrecomputedZPTimesWeightsWithBias( TfLiteContext * context , OpData * op_data , TfLiteNode * node)
['context', 'op_data', 'node']
TfLiteStatus PopulatePrecomputedZPTimesWeightsWithBias(TfLiteContext* context, OpData* op_data, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); const int32_t input_zero_point = -input->params.zero_point; const int32_t output_state_zero_point = -output_state->params.zero_point; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, kProjectionBiasTensor); lstm_eval::IntegerLstmParameter* integer_lstm_params = &op_data->integer_lstm_param; const TfLiteTensor* intermediate = &context->tensors[node->intermediates->data[4]]; const auto* params = static_cast<TfLiteAffineQuantization*>(intermediate->quantization.params); const int32_t hidden_zp = params->zero_point->data[0]; // Get bias and perform zero point calculation. // When there is layer normalization, the gate bias does not apply to matmul // directly: // y = ln(w * x + w * r + w * c) + b. const bool is_layer_norm = op_data->use_layer_norm; // Forget gate. const TfLiteTensor* forget_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, kForgetGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_forget_weights, forget_gate_bias, &(integer_lstm_params->input_to_forget_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_forget_weights, nullptr, &(integer_lstm_params->recurrent_to_forget_effective_bias))); // Modulation gate. const TfLiteTensor* cell_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, kCellGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_cell_weights, cell_gate_bias, &(integer_lstm_params->input_to_cell_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_cell_weights, nullptr, &(integer_lstm_params->recurrent_to_cell_effective_bias))); // Output gate. const TfLiteTensor* output_gate_bias = is_layer_norm ? 
nullptr : GetInput(context, node, kOutputGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_output_weights, output_gate_bias, &(integer_lstm_params->input_to_output_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_output_weights, nullptr, &(integer_lstm_params->recurrent_to_output_effective_bias))); // Input gate. The calculation is only meaningful for non-cifg case. const TfLiteTensor* input_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, kInputGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_input_weights, input_gate_bias, &(integer_lstm_params->input_to_input_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_input_weights, nullptr, &(integer_lstm_params->recurrent_to_input_effective_bias))); // Projection bias. The calculation is only meaningful for with projection. TF_LITE_ENSURE_OK(context, PrecomputeZeroPointTimesWeightWithBias( context, hidden_zp, projection_weights, projection_bias, &(integer_lstm_params->projection_effective_bias))); return kTfLiteOk; }
567
True
1
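Besides the patch commits, the CVE description mentions a possible workaround: a custom `Verifier` run at model-loading time that accepts the `-1` marker only for operators and input slots that genuinely allow optional tensors. The sketch below shows one way such an allow-list check could look; the data structures and function names are invented for the example, so treat it as an assumption about the approach rather than TensorFlow Lite's verifier API.

```cpp
// Hypothetical load-time verifier implementing the allow-list idea from the
// CVE description: -1 is accepted only for (opcode, slot) pairs known to be
// optional; every other index must point inside the subgraph's tensor array.
#include <cstddef>
#include <set>
#include <utility>
#include <vector>

struct OpEntry {
  int opcode = 0;                // which builtin operator this is
  std::vector<int> inputs;       // tensor indices, -1 means "absent"
};

struct SubgraphView {
  std::size_t tensor_count = 0;
  std::vector<OpEntry> ops;
};

// (opcode, input slot) pairs allowed to be optional; a real verifier would
// derive this from the operator definitions rather than hard-code it.
using OptionalSlots = std::set<std::pair<int, int>>;

bool VerifySubgraph(const SubgraphView& sg, const OptionalSlots& optional_slots) {
  for (const OpEntry& op : sg.ops) {
    for (int slot = 0; slot < static_cast<int>(op.inputs.size()); ++slot) {
      const int idx = op.inputs[slot];
      if (idx == -1) {
        // -1 is only legal where the operator declares the slot optional.
        if (optional_slots.count({op.opcode, slot}) == 0) return false;
        continue;
      }
      if (idx < 0 || static_cast<std::size_t>(idx) >= sg.tensor_count) return false;
    }
  }
  return true;
}
```

As the description points out, the same validation would also have to cover output tensor indices, since the `-1` value was accepted for outputs as well, and it calls this allow-list approach error-prone compared with upgrading to the patched releases.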
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::PopulatePrecomputedZPTimesWeightsWithBias
tflite::ops::builtin::lstm::full::PopulatePrecomputedZPTimesWeightsWithBias( TfLiteContext * context , OpData * op_data , TfLiteNode * node)
['context', 'op_data', 'node']
TfLiteStatus PopulatePrecomputedZPTimesWeightsWithBias(TfLiteContext* context, OpData* op_data, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); const int32_t input_zero_point = -input->params.zero_point; const int32_t output_state_zero_point = -output_state->params.zero_point; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, kProjectionBiasTensor); lstm_eval::IntegerLstmParameter* integer_lstm_params = &op_data->integer_lstm_param; const TfLiteTensor* intermediate = &context->tensors[node->intermediates->data[4]]; const auto* params = static_cast<TfLiteAffineQuantization*>(intermediate->quantization.params); const int32_t hidden_zp = params->zero_point->data[0]; // Get bias and perform zero point calculation. // When there is layer normalization, the gate bias does not apply to matmul // directly: // y = ln(w * x + w * r + w * c) + b. const bool is_layer_norm = op_data->use_layer_norm; // Forget gate. const TfLiteTensor* forget_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, kForgetGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_forget_weights, forget_gate_bias, &(integer_lstm_params->input_to_forget_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_forget_weights, nullptr, &(integer_lstm_params->recurrent_to_forget_effective_bias))); // Modulation gate. const TfLiteTensor* cell_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, kCellGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_cell_weights, cell_gate_bias, &(integer_lstm_params->input_to_cell_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_cell_weights, nullptr, &(integer_lstm_params->recurrent_to_cell_effective_bias))); // Output gate. const TfLiteTensor* output_gate_bias = is_layer_norm ? 
nullptr : GetInput(context, node, kOutputGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_output_weights, output_gate_bias, &(integer_lstm_params->input_to_output_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_output_weights, nullptr, &(integer_lstm_params->recurrent_to_output_effective_bias))); // Input gate. The calculation is only meaningful for non-cifg case. const TfLiteTensor* input_gate_bias = is_layer_norm ? nullptr : GetInput(context, node, kInputGateBiasTensor); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, input_zero_point, input_to_input_weights, input_gate_bias, &(integer_lstm_params->input_to_input_effective_bias))); TF_LITE_ENSURE_OK( context, PrecomputeZeroPointTimesWeightWithBias( context, output_state_zero_point, recurrent_to_input_weights, nullptr, &(integer_lstm_params->recurrent_to_input_effective_bias))); // Projection bias. The calculation is only meaningful for with projection. TF_LITE_ENSURE_OK(context, PrecomputeZeroPointTimesWeightWithBias( context, hidden_zp, projection_weights, projection_bias, &(integer_lstm_params->projection_effective_bias))); return kTfLiteOk; }
567
True
1
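The advisory text in the record above suggests, as a stopgap, a custom `Verifier` that only accepts the `-1` "optional tensor" index where an operator genuinely takes an optional input. The following is a minimal, hedged sketch of that allow-list idea; the `Operator` struct and `OptionalInputMap` are hypothetical stand-ins for illustration, not the real TensorFlow Lite flatbuffer schema or verifier API.

```cpp
// Hedged sketch of the allow-list Verifier workaround described in the
// advisory text. Operator and OptionalInputMap are hypothetical stand-ins,
// not the actual TensorFlow Lite schema or verifier types.
#include <cstdint>
#include <set>
#include <utility>
#include <vector>

struct Operator {
  int32_t opcode;                // which builtin op this is
  std::vector<int32_t> inputs;   // tensor indices; -1 marks an optional slot
  std::vector<int32_t> outputs;  // tensor indices; outputs are never optional
};

// (opcode, input position) pairs that are allowed to carry the -1 value.
using OptionalInputMap = std::set<std::pair<int32_t, int32_t>>;

bool VerifyOperator(const Operator& op, int32_t num_tensors,
                    const OptionalInputMap& allowed_optional) {
  for (int32_t i = 0; i < static_cast<int32_t>(op.inputs.size()); ++i) {
    const int32_t idx = op.inputs[i];
    if (idx == -1) {
      // -1 is only legal where this opcode explicitly expects an optional input.
      if (allowed_optional.count({op.opcode, i}) == 0) return false;
    } else if (idx < 0 || idx >= num_tensors) {
      return false;  // out-of-range tensor index
    }
  }
  for (int32_t idx : op.outputs) {
    if (idx < 0 || idx >= num_tensors) return false;  // -1 outputs are rejected
  }
  return true;
}
```

Rejecting a model at load time this way avoids the fixed-offset out-of-bounds accesses in the kernels, but as the advisory itself notes, maintaining such an allow-list per operator is error-prone, so upgrading to a patched release remains the recommended fix.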
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::PopulateQuantizedLstmParams8x8_16
tflite::ops::builtin::lstm::full::PopulateQuantizedLstmParams8x8_16( TfLiteContext * context , TfLiteNode * node , lstm_eval :: IntegerLstmParameter * integer_lstm_param)
['context', 'node', 'integer_lstm_param']
TfLiteStatus PopulateQuantizedLstmParams8x8_16( TfLiteContext* context, TfLiteNode* node, lstm_eval::IntegerLstmParameter* integer_lstm_param) { // Calculate quantized clip for projection and cell. const auto* params = static_cast<TfLiteLSTMParams*>(node->builtin_data); const float cell_clip = params->cell_clip; const float proj_clip = params->proj_clip; const TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); auto* cell_state_params = static_cast<TfLiteAffineQuantization*>(cell_state->quantization.params); auto* proj_params = static_cast<TfLiteAffineQuantization*>( output_tensor->quantization.params); if (cell_clip > 0.0) { integer_lstm_param->quantized_cell_clip = static_cast<int16_t>(std::min( std::max(cell_clip / cell_state_params->scale->data[0], -32768.0f), 32767.0f)); } else { integer_lstm_param->quantized_cell_clip = 0; } if (proj_clip > 0.0) { integer_lstm_param->quantized_proj_clip = static_cast<int8_t>(std::min( std::max(proj_clip / proj_params->scale->data[0], -128.0f), 127.0f)); } else { integer_lstm_param->quantized_proj_clip = 0; } // Calculate effective scales. OpData* op_data = static_cast<OpData*>(node->user_data); const bool use_layer_norm = op_data->use_layer_norm; const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(context, node, kCellToInputWeightsTensor); const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(context, node, kCellToForgetWeightsTensor); const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(context, node, kCellToOutputWeightsTensor); const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(context, node, kInputLayerNormCoefficientsTensor); const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(context, node, kForgetLayerNormCoefficientsTensor); const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(context, node, kCellLayerNormCoefficientsTensor); const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(context, node, kOutputLayerNormCoefficientsTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); // Since we have already checked that weights are all there or none, we can // check the existence of only one to get the condition. 
const bool use_cifg = (input_to_input_weights == nullptr); const bool use_peephole = (cell_to_output_weights != nullptr); const bool use_projection = (projection_weights != nullptr); // Get intermediate scales and zero points. std::vector<float> intermediate_scale; std::vector<int32> intermediate_zp; for (int i = 0; i < 4; ++i) { if (use_layer_norm) { const TfLiteTensor* intermediate = GetIntermediates(context, node, i); auto* params = static_cast<TfLiteAffineQuantization*>( intermediate->quantization.params); intermediate_scale.push_back(params->scale->data[0]); intermediate_zp.push_back(params->zero_point->data[0]); } else { // Q3.12 for activation functions. intermediate_scale.push_back(std::pow(2, -12)); intermediate_zp.push_back(0); } } // In the absense of projection, hidden becomes otuput and this intermediate // is ignored. const TfLiteTensor* hidden = GetIntermediates(context, node, 4); auto* hidden_params = static_cast<TfLiteAffineQuantization*>(hidden->quantization.params); intermediate_scale.push_back(hidden_params->scale->data[0]); intermediate_zp.push_back(hidden_params->zero_point->data[0]); // Scales. const float default_scale = 1.0; float input_scale = default_scale; float input_to_input_weight_scale = default_scale; float recurrent_to_input_weight_scale = default_scale; float cell_to_input_weight_scale = default_scale; float input_to_forget_weight_scale = default_scale; float recurrent_to_forget_weight_scale = default_scale; float cell_to_forget_weight_scale = default_scale; float input_to_cell_weight_scale = default_scale; float recurrent_to_cell_weight_scale = default_scale; float input_to_output_weight_scale = default_scale; float recurrent_to_output_weight_scale = default_scale; float cell_to_output_weight_scale = default_scale; float projection_weight_scale = default_scale; float layer_norm_input_scale = default_scale; float layer_norm_forget_scale = default_scale; float layer_norm_cell_scale = default_scale; float layer_norm_output_scale = default_scale; float output_state_scale = default_scale; int cell_scale = 1; // Effective scales. float effective_input_to_input_scale = default_scale; float effective_recurrent_to_input_scale = default_scale; float effective_cell_to_input_scale = default_scale; float effective_input_to_forget_scale = default_scale; float effective_recurrent_to_forget_scale = default_scale; float effective_cell_to_forget_scale = default_scale; float effective_input_to_cell_scale = default_scale; float effective_recurrent_to_cell_scale = default_scale; float effective_input_to_output_scale = default_scale; float effective_recurrent_to_output_scale = default_scale; float effective_cell_to_output_scale = default_scale; float effective_proj_scale = default_scale; float effective_hidden_scale = default_scale; // Populate scales. 
if (!use_cifg) { input_to_input_weight_scale = input_to_input_weights->params.scale; recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale; } if (use_peephole) { if (!use_cifg) { cell_to_input_weight_scale = cell_to_input_weights->params.scale; } cell_to_forget_weight_scale = cell_to_forget_weights->params.scale; cell_to_output_weight_scale = cell_to_output_weights->params.scale; } if (use_layer_norm) { if (!use_cifg) { layer_norm_input_scale = input_layer_norm_coefficients->params.scale; } layer_norm_forget_scale = forget_layer_norm_coefficients->params.scale; layer_norm_cell_scale = cell_layer_norm_coefficients->params.scale; layer_norm_output_scale = output_layer_norm_coefficients->params.scale; } if (use_projection) { projection_weight_scale = projection_weights->params.scale; } output_state_scale = output_state->params.scale; input_to_forget_weight_scale = input_to_forget_weights->params.scale; input_to_cell_weight_scale = input_to_cell_weights->params.scale; input_to_output_weight_scale = input_to_output_weights->params.scale; recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale; recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale; recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale; // Check cell state (already used above) TF_LITE_ENSURE(context, CheckedLog2(cell_state->params.scale, &cell_scale)); TF_LITE_ENSURE(context, cell_scale <= -9); integer_lstm_param->cell_scale = cell_scale; input_scale = input->params.scale; // Calculate effective scales. if (!use_cifg) { effective_input_to_input_scale = input_to_input_weight_scale * input_scale / intermediate_scale[0]; effective_recurrent_to_input_scale = recurrent_to_input_weight_scale * output_state_scale / intermediate_scale[0]; } effective_input_to_forget_scale = input_to_forget_weight_scale * input_scale / intermediate_scale[1]; effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale * output_state_scale / intermediate_scale[1]; effective_input_to_cell_scale = input_to_cell_weight_scale * input_scale / intermediate_scale[2]; effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale * output_state_scale / intermediate_scale[2]; effective_input_to_output_scale = input_to_output_weight_scale * input_scale / intermediate_scale[3]; effective_recurrent_to_output_scale = recurrent_to_output_weight_scale * output_state_scale / intermediate_scale[3]; effective_hidden_scale = std::pow(2, -15) / intermediate_scale[4] * std::pow(2, -15); effective_proj_scale = projection_weight_scale * intermediate_scale[4] / output_state_scale; if (use_peephole) { if (!use_cifg) { effective_cell_to_input_scale = std::pow(2, cell_scale) * // NOLINT cell_to_input_weight_scale / intermediate_scale[0]; } effective_cell_to_forget_scale = std::pow(2, cell_scale) * // NOLINT cell_to_forget_weight_scale / intermediate_scale[1]; effective_cell_to_output_scale = std::pow(2, cell_scale) * // NOLINT cell_to_output_weight_scale / intermediate_scale[3]; } // Decompose scales. 
QuantizeMultiplier(effective_input_to_input_scale, &integer_lstm_param->effective_input_to_input_scale_a, &integer_lstm_param->effective_input_to_input_scale_b); QuantizeMultiplier(effective_recurrent_to_input_scale, &integer_lstm_param->effective_recurrent_to_input_scale_a, &integer_lstm_param->effective_recurrent_to_input_scale_b); QuantizeMultiplier(effective_cell_to_input_scale, &integer_lstm_param->effective_cell_to_input_scale_a, &integer_lstm_param->effective_cell_to_input_scale_b); QuantizeMultiplier(effective_input_to_forget_scale, &integer_lstm_param->effective_input_to_forget_scale_a, &integer_lstm_param->effective_input_to_forget_scale_b); QuantizeMultiplier( effective_recurrent_to_forget_scale, &integer_lstm_param->effective_recurrent_to_forget_scale_a, &integer_lstm_param->effective_recurrent_to_forget_scale_b); QuantizeMultiplier(effective_cell_to_forget_scale, &integer_lstm_param->effective_cell_to_forget_scale_a, &integer_lstm_param->effective_cell_to_forget_scale_b); QuantizeMultiplier(effective_input_to_cell_scale, &integer_lstm_param->effective_input_to_cell_scale_a, &integer_lstm_param->effective_input_to_cell_scale_b); QuantizeMultiplier(effective_recurrent_to_cell_scale, &integer_lstm_param->effective_recurrent_to_cell_scale_a, &integer_lstm_param->effective_recurrent_to_cell_scale_b); QuantizeMultiplier(effective_input_to_output_scale, &integer_lstm_param->effective_input_to_output_scale_a, &integer_lstm_param->effective_input_to_output_scale_b); QuantizeMultiplier( effective_recurrent_to_output_scale, &integer_lstm_param->effective_recurrent_to_output_scale_a, &integer_lstm_param->effective_recurrent_to_output_scale_b); QuantizeMultiplier(effective_cell_to_output_scale, &integer_lstm_param->effective_cell_to_output_scale_a, &integer_lstm_param->effective_cell_to_output_scale_b); QuantizeMultiplier(effective_proj_scale, &integer_lstm_param->effective_proj_scale_a, &integer_lstm_param->effective_proj_scale_b); QuantizeMultiplier(effective_hidden_scale, &integer_lstm_param->effective_hidden_scale_a, &integer_lstm_param->effective_hidden_scale_b); QuantizeMultiplier(layer_norm_input_scale, &integer_lstm_param->layer_norm_input_scale_a, &integer_lstm_param->layer_norm_input_scale_b); QuantizeMultiplier(layer_norm_forget_scale, &integer_lstm_param->layer_norm_forget_scale_a, &integer_lstm_param->layer_norm_forget_scale_b); QuantizeMultiplier(layer_norm_cell_scale, &integer_lstm_param->layer_norm_cell_scale_a, &integer_lstm_param->layer_norm_cell_scale_b); QuantizeMultiplier(layer_norm_output_scale, &integer_lstm_param->layer_norm_output_scale_a, &integer_lstm_param->layer_norm_output_scale_b); integer_lstm_param->hidden_zp = intermediate_zp[4]; // 10000 is used to make sure the kernel logic does not overflow. if (!use_cifg) { integer_lstm_param->input_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_input_scale)); } integer_lstm_param->forget_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_forget_scale)); integer_lstm_param->cell_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_cell_scale)); integer_lstm_param->output_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_output_scale)); return kTfLiteOk; }
1675
True
1
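The commit message recorded above describes the mitigation actually applied in the kernels: tensor-lookup helpers such as `tflite::GetInput` and `tflite::GetOptionalInputTensor` may now return `nullptr`, and every caller guards the result (visible as the `TF_LITE_ENSURE(context, output_state != nullptr)` lines in the patched functions). Below is a self-contained, hedged sketch of that pattern; `TensorStub`, `GetOptionalTensor`, and `PrepareGate` are made-up names for illustration, not TensorFlow Lite APIs.

```cpp
// Hedged sketch of the "nullptr on -1, caller must check" pattern from the
// commit message. All types and functions here are illustrative stand-ins.
#include <cstdio>
#include <vector>

struct TensorStub {
  float scale = 1.0f;
};

// Returns nullptr when the operator marked this input as optional (-1) or the
// index is otherwise out of range, instead of indexing the array blindly.
const TensorStub* GetOptionalTensor(const std::vector<TensorStub>& tensors,
                                    int index) {
  if (index < 0 || index >= static_cast<int>(tensors.size())) return nullptr;
  return &tensors[index];
}

// Caller-side guard, analogous to TF_LITE_ENSURE(context, t != nullptr) in the
// patched kernels: a missing mandatory tensor fails preparation instead of
// touching out-of-bounds memory.
bool PrepareGate(const std::vector<TensorStub>& tensors, int bias_index) {
  const TensorStub* bias = GetOptionalTensor(tensors, bias_index);
  if (bias == nullptr) {
    std::fprintf(stderr, "required tensor is missing\n");
    return false;
  }
  return bias->scale > 0.0f;
}

int main() {
  std::vector<TensorStub> tensors(3);
  // A crafted model could put -1 where a mandatory index is expected;
  // the guarded path rejects it cleanly.
  std::printf("prepare ok: %d\n", PrepareGate(tensors, -1));
  return 0;
}
```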
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::PopulateQuantizedLstmParams8x8_16
tflite::ops::builtin::lstm::full::PopulateQuantizedLstmParams8x8_16( TfLiteContext * context , TfLiteNode * node , lstm_eval :: IntegerLstmParameter * integer_lstm_param)
['context', 'node', 'integer_lstm_param']
TfLiteStatus PopulateQuantizedLstmParams8x8_16( TfLiteContext* context, TfLiteNode* node, lstm_eval::IntegerLstmParameter* integer_lstm_param) { // Calculate quantized clip for projection and cell. const auto* params = static_cast<TfLiteLSTMParams*>(node->builtin_data); const float cell_clip = params->cell_clip; const float proj_clip = params->proj_clip; const TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); auto* cell_state_params = static_cast<TfLiteAffineQuantization*>(cell_state->quantization.params); auto* proj_params = static_cast<TfLiteAffineQuantization*>( output_tensor->quantization.params); if (cell_clip > 0.0) { integer_lstm_param->quantized_cell_clip = static_cast<int16_t>(std::min( std::max(cell_clip / cell_state_params->scale->data[0], -32768.0f), 32767.0f)); } else { integer_lstm_param->quantized_cell_clip = 0; } if (proj_clip > 0.0) { integer_lstm_param->quantized_proj_clip = static_cast<int8_t>(std::min( std::max(proj_clip / proj_params->scale->data[0], -128.0f), 127.0f)); } else { integer_lstm_param->quantized_proj_clip = 0; } // Calculate effective scales. OpData* op_data = static_cast<OpData*>(node->user_data); const bool use_layer_norm = op_data->use_layer_norm; const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(context, node, kCellToInputWeightsTensor); const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(context, node, kCellToForgetWeightsTensor); const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(context, node, kCellToOutputWeightsTensor); const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(context, node, kInputLayerNormCoefficientsTensor); const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(context, node, kForgetLayerNormCoefficientsTensor); const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(context, node, kCellLayerNormCoefficientsTensor); const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(context, node, kOutputLayerNormCoefficientsTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); // Since we have already checked that weights are all there or none, we can // check the existence of only one to get the condition. 
const bool use_cifg = (input_to_input_weights == nullptr); const bool use_peephole = (cell_to_output_weights != nullptr); const bool use_projection = (projection_weights != nullptr); // Get intermediate scales and zero points. std::vector<float> intermediate_scale; std::vector<int32> intermediate_zp; for (int i = 0; i < 4; ++i) { if (use_layer_norm) { const TfLiteTensor* intermediate = GetIntermediates(context, node, i); auto* params = static_cast<TfLiteAffineQuantization*>( intermediate->quantization.params); intermediate_scale.push_back(params->scale->data[0]); intermediate_zp.push_back(params->zero_point->data[0]); } else { // Q3.12 for activation functions. intermediate_scale.push_back(std::pow(2, -12)); intermediate_zp.push_back(0); } } // In the absense of projection, hidden becomes otuput and this intermediate // is ignored. const TfLiteTensor* hidden = GetIntermediates(context, node, 4); auto* hidden_params = static_cast<TfLiteAffineQuantization*>(hidden->quantization.params); intermediate_scale.push_back(hidden_params->scale->data[0]); intermediate_zp.push_back(hidden_params->zero_point->data[0]); // Scales. const float default_scale = 1.0; float input_scale = default_scale; float input_to_input_weight_scale = default_scale; float recurrent_to_input_weight_scale = default_scale; float cell_to_input_weight_scale = default_scale; float input_to_forget_weight_scale = default_scale; float recurrent_to_forget_weight_scale = default_scale; float cell_to_forget_weight_scale = default_scale; float input_to_cell_weight_scale = default_scale; float recurrent_to_cell_weight_scale = default_scale; float input_to_output_weight_scale = default_scale; float recurrent_to_output_weight_scale = default_scale; float cell_to_output_weight_scale = default_scale; float projection_weight_scale = default_scale; float layer_norm_input_scale = default_scale; float layer_norm_forget_scale = default_scale; float layer_norm_cell_scale = default_scale; float layer_norm_output_scale = default_scale; float output_state_scale = default_scale; int cell_scale = 1; // Effective scales. float effective_input_to_input_scale = default_scale; float effective_recurrent_to_input_scale = default_scale; float effective_cell_to_input_scale = default_scale; float effective_input_to_forget_scale = default_scale; float effective_recurrent_to_forget_scale = default_scale; float effective_cell_to_forget_scale = default_scale; float effective_input_to_cell_scale = default_scale; float effective_recurrent_to_cell_scale = default_scale; float effective_input_to_output_scale = default_scale; float effective_recurrent_to_output_scale = default_scale; float effective_cell_to_output_scale = default_scale; float effective_proj_scale = default_scale; float effective_hidden_scale = default_scale; // Populate scales. 
if (!use_cifg) { input_to_input_weight_scale = input_to_input_weights->params.scale; recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale; } if (use_peephole) { if (!use_cifg) { cell_to_input_weight_scale = cell_to_input_weights->params.scale; } cell_to_forget_weight_scale = cell_to_forget_weights->params.scale; cell_to_output_weight_scale = cell_to_output_weights->params.scale; } if (use_layer_norm) { if (!use_cifg) { layer_norm_input_scale = input_layer_norm_coefficients->params.scale; } layer_norm_forget_scale = forget_layer_norm_coefficients->params.scale; layer_norm_cell_scale = cell_layer_norm_coefficients->params.scale; layer_norm_output_scale = output_layer_norm_coefficients->params.scale; } if (use_projection) { projection_weight_scale = projection_weights->params.scale; } output_state_scale = output_state->params.scale; input_to_forget_weight_scale = input_to_forget_weights->params.scale; input_to_cell_weight_scale = input_to_cell_weights->params.scale; input_to_output_weight_scale = input_to_output_weights->params.scale; recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale; recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale; recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale; // Check cell state (already used above) TF_LITE_ENSURE(context, CheckedLog2(cell_state->params.scale, &cell_scale)); TF_LITE_ENSURE(context, cell_scale <= -9); integer_lstm_param->cell_scale = cell_scale; input_scale = input->params.scale; // Calculate effective scales. if (!use_cifg) { effective_input_to_input_scale = input_to_input_weight_scale * input_scale / intermediate_scale[0]; effective_recurrent_to_input_scale = recurrent_to_input_weight_scale * output_state_scale / intermediate_scale[0]; } effective_input_to_forget_scale = input_to_forget_weight_scale * input_scale / intermediate_scale[1]; effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale * output_state_scale / intermediate_scale[1]; effective_input_to_cell_scale = input_to_cell_weight_scale * input_scale / intermediate_scale[2]; effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale * output_state_scale / intermediate_scale[2]; effective_input_to_output_scale = input_to_output_weight_scale * input_scale / intermediate_scale[3]; effective_recurrent_to_output_scale = recurrent_to_output_weight_scale * output_state_scale / intermediate_scale[3]; effective_hidden_scale = std::pow(2, -15) / intermediate_scale[4] * std::pow(2, -15); effective_proj_scale = projection_weight_scale * intermediate_scale[4] / output_state_scale; if (use_peephole) { if (!use_cifg) { effective_cell_to_input_scale = std::pow(2, cell_scale) * // NOLINT cell_to_input_weight_scale / intermediate_scale[0]; } effective_cell_to_forget_scale = std::pow(2, cell_scale) * // NOLINT cell_to_forget_weight_scale / intermediate_scale[1]; effective_cell_to_output_scale = std::pow(2, cell_scale) * // NOLINT cell_to_output_weight_scale / intermediate_scale[3]; } // Decompose scales. 
QuantizeMultiplier(effective_input_to_input_scale, &integer_lstm_param->effective_input_to_input_scale_a, &integer_lstm_param->effective_input_to_input_scale_b); QuantizeMultiplier(effective_recurrent_to_input_scale, &integer_lstm_param->effective_recurrent_to_input_scale_a, &integer_lstm_param->effective_recurrent_to_input_scale_b); QuantizeMultiplier(effective_cell_to_input_scale, &integer_lstm_param->effective_cell_to_input_scale_a, &integer_lstm_param->effective_cell_to_input_scale_b); QuantizeMultiplier(effective_input_to_forget_scale, &integer_lstm_param->effective_input_to_forget_scale_a, &integer_lstm_param->effective_input_to_forget_scale_b); QuantizeMultiplier( effective_recurrent_to_forget_scale, &integer_lstm_param->effective_recurrent_to_forget_scale_a, &integer_lstm_param->effective_recurrent_to_forget_scale_b); QuantizeMultiplier(effective_cell_to_forget_scale, &integer_lstm_param->effective_cell_to_forget_scale_a, &integer_lstm_param->effective_cell_to_forget_scale_b); QuantizeMultiplier(effective_input_to_cell_scale, &integer_lstm_param->effective_input_to_cell_scale_a, &integer_lstm_param->effective_input_to_cell_scale_b); QuantizeMultiplier(effective_recurrent_to_cell_scale, &integer_lstm_param->effective_recurrent_to_cell_scale_a, &integer_lstm_param->effective_recurrent_to_cell_scale_b); QuantizeMultiplier(effective_input_to_output_scale, &integer_lstm_param->effective_input_to_output_scale_a, &integer_lstm_param->effective_input_to_output_scale_b); QuantizeMultiplier( effective_recurrent_to_output_scale, &integer_lstm_param->effective_recurrent_to_output_scale_a, &integer_lstm_param->effective_recurrent_to_output_scale_b); QuantizeMultiplier(effective_cell_to_output_scale, &integer_lstm_param->effective_cell_to_output_scale_a, &integer_lstm_param->effective_cell_to_output_scale_b); QuantizeMultiplier(effective_proj_scale, &integer_lstm_param->effective_proj_scale_a, &integer_lstm_param->effective_proj_scale_b); QuantizeMultiplier(effective_hidden_scale, &integer_lstm_param->effective_hidden_scale_a, &integer_lstm_param->effective_hidden_scale_b); QuantizeMultiplier(layer_norm_input_scale, &integer_lstm_param->layer_norm_input_scale_a, &integer_lstm_param->layer_norm_input_scale_b); QuantizeMultiplier(layer_norm_forget_scale, &integer_lstm_param->layer_norm_forget_scale_a, &integer_lstm_param->layer_norm_forget_scale_b); QuantizeMultiplier(layer_norm_cell_scale, &integer_lstm_param->layer_norm_cell_scale_a, &integer_lstm_param->layer_norm_cell_scale_b); QuantizeMultiplier(layer_norm_output_scale, &integer_lstm_param->layer_norm_output_scale_a, &integer_lstm_param->layer_norm_output_scale_b); integer_lstm_param->hidden_zp = intermediate_zp[4]; // 10000 is used to make sure the kernel logic does not overflow. if (!use_cifg) { integer_lstm_param->input_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_input_scale)); } integer_lstm_param->forget_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_forget_scale)); integer_lstm_param->cell_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_cell_scale)); integer_lstm_param->output_variance_guard = std::max(1, static_cast<int32_t>(10000 * layer_norm_output_scale)); return kTfLiteOk; }
1675
True
1
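For completeness, the sketch below illustrates why the `-1` index described in the records above yields a read/write gadget at a fixed offset: the subgraph owns a contiguous tensor array, operators hold indices into it, and unchecked pointer arithmetic with `-1` lands exactly one element before the array's start. `Subgraph` and `Tensor` here are hypothetical stand-ins, not the real TensorFlow Lite classes.

```cpp
// Hedged illustration of the fixed-offset out-of-bounds gadget described in
// the advisory text. Subgraph/Tensor are stand-ins, not real TFLite types.
#include <cstdint>
#include <cstdio>
#include <vector>

struct Tensor {
  float* data = nullptr;  // stands in for the tensor's backing buffer
};

struct Subgraph {
  std::vector<Tensor> tensors;  // operators store indices into this array

  // Pre-patch style lookup: no validation, so index == -1 forms a pointer one
  // Tensor before the buffer -- an out-of-bounds access at a fixed offset.
  Tensor* TensorAtUnchecked(int32_t index) { return tensors.data() + index; }

  // Patched-style lookup: every index is validated before use.
  Tensor* TensorAtChecked(int32_t index) {
    if (index < 0 || index >= static_cast<int32_t>(tensors.size())) return nullptr;
    return &tensors[index];
  }
};

int main() {
  Subgraph graph;
  graph.tensors.resize(4);
  // Only the checked accessor is exercised here; using the unchecked one with
  // -1 would be undefined behaviour, which is exactly the hazard described.
  std::printf("checked lookup of -1 -> %p\n",
              static_cast<void*>(graph.TensorAtChecked(-1)));
  return 0;
}
```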
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::PopulateQuantizedLstmParams8x8_8
tflite::ops::builtin::lstm::full::PopulateQuantizedLstmParams8x8_8( TfLiteContext * context , TfLiteNode * node , lstm_eval :: IntegerLstmParameter * integer_lstm_param)
['context', 'node', 'integer_lstm_param']
TfLiteStatus PopulateQuantizedLstmParams8x8_8( TfLiteContext* context, TfLiteNode* node, lstm_eval::IntegerLstmParameter* integer_lstm_param) { // Get all tensors. const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(context, node, kCellToInputWeightsTensor); const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(context, node, kCellToForgetWeightsTensor); const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(context, node, kCellToOutputWeightsTensor); const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(context, node, kInputLayerNormCoefficientsTensor); const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(context, node, kForgetLayerNormCoefficientsTensor); const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(context, node, kCellLayerNormCoefficientsTensor); const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(context, node, kOutputLayerNormCoefficientsTensor); const TfLiteTensor* input_gate_bias = GetOptionalInputTensor(context, node, kInputGateBiasTensor); const TfLiteTensor* forget_gate_bias = GetInput(context, node, kForgetGateBiasTensor); const TfLiteTensor* cell_gate_bias = GetInput(context, node, kCellGateBiasTensor); const TfLiteTensor* output_gate_bias = GetInput(context, node, kOutputGateBiasTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, kProjectionBiasTensor); TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); // Since we have already checked that weights are all there or none, we can // check the existence of only one to get the condition. const bool use_cifg = (input_to_input_weights == nullptr); const bool use_peephole = (cell_to_output_weights != nullptr); const bool is_layer_norm_lstm = (forget_layer_norm_coefficients != nullptr); const bool use_projection = (projection_weights != nullptr); // Weights and states. 
int8_t* input_to_input_weight_ptr = nullptr; int8_t* recurrent_to_input_weight_ptr = nullptr; int8_t* cell_to_input_weight_ptr = nullptr; int8_t* input_to_forget_weight_ptr = nullptr; int8_t* recurrent_to_forget_weight_ptr = nullptr; int8_t* cell_to_forget_weight_ptr = nullptr; int8_t* input_to_cell_weight_ptr = nullptr; int8_t* recurrent_to_cell_weight_ptr = nullptr; int8_t* input_to_output_weight_ptr = nullptr; int8_t* recurrent_to_output_weight_ptr = nullptr; int8_t* cell_to_output_weight_ptr = nullptr; int8_t* projection_weight_ptr = nullptr; int16_t* layer_norm_input_weight_ptr = nullptr; int16_t* layer_norm_forget_weight_ptr = nullptr; int16_t* layer_norm_cell_weight_ptr = nullptr; int16_t* layer_norm_output_weight_ptr = nullptr; int32_t* input_gate_bias_ptr = nullptr; int32_t* forget_gate_bias_ptr = nullptr; int32_t* cell_gate_bias_ptr = nullptr; int32_t* output_gate_bias_ptr = nullptr; int32_t* projection_bias_ptr = nullptr; int16_t* cell_ptr = nullptr; int8_t* output_state_ptr = nullptr; // Scales. const float default_scale = 1.0; float input_scale = default_scale; float input_to_input_weight_scale = default_scale; float recurrent_to_input_weight_scale = default_scale; float cell_to_input_weight_scale = default_scale; float input_to_forget_weight_scale = default_scale; float recurrent_to_forget_weight_scale = default_scale; float cell_to_forget_weight_scale = default_scale; float input_to_cell_weight_scale = default_scale; float recurrent_to_cell_weight_scale = default_scale; float input_to_output_weight_scale = default_scale; float recurrent_to_output_weight_scale = default_scale; float cell_to_output_weight_scale = default_scale; float projection_weight_scale = default_scale; float layer_norm_input_scale = default_scale; float layer_norm_forget_scale = default_scale; float layer_norm_cell_scale = default_scale; float layer_norm_output_scale = default_scale; float output_state_scale = default_scale; // Effective scales. float effective_input_to_input_scale = default_scale; float effective_recurrent_to_input_scale = default_scale; float effective_cell_to_input_scale = default_scale; float effective_input_to_forget_scale = default_scale; float effective_recurrent_to_forget_scale = default_scale; float effective_cell_to_forget_scale = default_scale; float effective_input_to_cell_scale = default_scale; float effective_recurrent_to_cell_scale = default_scale; float effective_input_to_output_scale = default_scale; float effective_recurrent_to_output_scale = default_scale; float effective_cell_to_output_scale = default_scale; float effective_proj_scale = default_scale; // Zero points int input_zp = 0; int output_state_zp = 0; // Populate all the values. 
if (!use_cifg) { input_to_input_weight_ptr = input_to_input_weights->data.int8; recurrent_to_input_weight_ptr = recurrent_to_input_weights->data.int8; input_gate_bias_ptr = input_gate_bias->data.i32; input_to_input_weight_scale = input_to_input_weights->params.scale; recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale; } if (use_peephole) { if (!use_cifg) { cell_to_input_weight_ptr = cell_to_input_weights->data.int8; cell_to_input_weight_scale = cell_to_input_weights->params.scale; } cell_to_forget_weight_ptr = cell_to_forget_weights->data.int8; cell_to_output_weight_ptr = cell_to_output_weights->data.int8; cell_to_forget_weight_scale = cell_to_forget_weights->params.scale; cell_to_output_weight_scale = cell_to_output_weights->params.scale; } if (is_layer_norm_lstm) { if (!use_cifg) { layer_norm_input_weight_ptr = input_layer_norm_coefficients->data.i16; layer_norm_input_scale = input_layer_norm_coefficients->params.scale; } layer_norm_forget_weight_ptr = forget_layer_norm_coefficients->data.i16; layer_norm_forget_scale = forget_layer_norm_coefficients->params.scale; layer_norm_cell_weight_ptr = cell_layer_norm_coefficients->data.i16; layer_norm_cell_scale = cell_layer_norm_coefficients->params.scale; layer_norm_output_weight_ptr = output_layer_norm_coefficients->data.i16; layer_norm_output_scale = output_layer_norm_coefficients->params.scale; } if (use_projection) { projection_weight_ptr = projection_weights->data.int8; projection_weight_scale = projection_weights->params.scale; if (projection_bias) { projection_bias_ptr = projection_bias->data.i32; } } output_state_scale = output_state->params.scale; input_to_forget_weight_ptr = input_to_forget_weights->data.int8; input_to_forget_weight_scale = input_to_forget_weights->params.scale; input_to_cell_weight_ptr = input_to_cell_weights->data.int8; input_to_cell_weight_scale = input_to_cell_weights->params.scale; input_to_output_weight_ptr = input_to_output_weights->data.int8; input_to_output_weight_scale = input_to_output_weights->params.scale; recurrent_to_forget_weight_ptr = recurrent_to_forget_weights->data.int8; recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale; recurrent_to_cell_weight_ptr = recurrent_to_cell_weights->data.int8; recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale; recurrent_to_output_weight_ptr = recurrent_to_output_weights->data.int8; recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale; forget_gate_bias_ptr = forget_gate_bias->data.i32; cell_gate_bias_ptr = cell_gate_bias->data.i32; output_gate_bias_ptr = output_gate_bias->data.i32; output_state_ptr = output_state->data.int8; cell_ptr = cell_state->data.i16; input_scale = input->params.scale; input_zp = input->params.zero_point; output_state_zp = output_state->params.zero_point; std::vector<float> intermediate_scale; for (int i = 0; i < 12; ++i) { TfLiteTensor* intermediate = &context->tensors[node->intermediates->data[i]]; auto* params = reinterpret_cast<TfLiteAffineQuantization*>( intermediate->quantization.params); intermediate_scale.push_back(params->scale->data[0]); integer_lstm_param->intermediate_zp[i] = params->zero_point->data[0]; } // Calculate effective scales. 
if (!use_cifg) { effective_input_to_input_scale = input_to_input_weight_scale * input_scale / intermediate_scale[1]; effective_recurrent_to_input_scale = recurrent_to_input_weight_scale * output_state_scale / intermediate_scale[2]; } effective_input_to_forget_scale = input_to_forget_weight_scale * input_scale / intermediate_scale[4]; effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale * output_state_scale / intermediate_scale[5]; effective_input_to_cell_scale = input_to_cell_weight_scale * input_scale / intermediate_scale[7]; effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale * output_state_scale / intermediate_scale[8]; effective_input_to_output_scale = input_to_output_weight_scale * input_scale / intermediate_scale[10]; effective_recurrent_to_output_scale = recurrent_to_output_weight_scale * output_state_scale / intermediate_scale[11]; effective_proj_scale = projection_weight_scale * std::pow(2, -15) / output_state_scale; if (use_peephole) { if (!use_cifg) { effective_cell_to_input_scale = std::pow(2, -15) * cell_to_input_weight_scale / intermediate_scale[0]; } effective_cell_to_forget_scale = std::pow(2, -15) * cell_to_forget_weight_scale / intermediate_scale[3]; effective_cell_to_output_scale = std::pow(2, -15) * cell_to_output_weight_scale / intermediate_scale[9]; } // Calculate effecgive scales. QuantizeMultiplier(effective_input_to_input_scale, &integer_lstm_param->effective_input_to_input_scale_a, &integer_lstm_param->effective_input_to_input_scale_b); QuantizeMultiplier(effective_recurrent_to_input_scale, &integer_lstm_param->effective_recurrent_to_input_scale_a, &integer_lstm_param->effective_recurrent_to_input_scale_b); QuantizeMultiplier(effective_cell_to_input_scale, &integer_lstm_param->effective_cell_to_input_scale_a, &integer_lstm_param->effective_cell_to_input_scale_b); QuantizeMultiplier(effective_input_to_forget_scale, &integer_lstm_param->effective_input_to_forget_scale_a, &integer_lstm_param->effective_input_to_forget_scale_b); QuantizeMultiplier( effective_recurrent_to_forget_scale, &integer_lstm_param->effective_recurrent_to_forget_scale_a, &integer_lstm_param->effective_recurrent_to_forget_scale_b); QuantizeMultiplier(effective_cell_to_forget_scale, &integer_lstm_param->effective_cell_to_forget_scale_a, &integer_lstm_param->effective_cell_to_forget_scale_b); QuantizeMultiplier(effective_input_to_cell_scale, &integer_lstm_param->effective_input_to_cell_scale_a, &integer_lstm_param->effective_input_to_cell_scale_b); QuantizeMultiplier(effective_recurrent_to_cell_scale, &integer_lstm_param->effective_recurrent_to_cell_scale_a, &integer_lstm_param->effective_recurrent_to_cell_scale_b); QuantizeMultiplier(effective_input_to_output_scale, &integer_lstm_param->effective_input_to_output_scale_a, &integer_lstm_param->effective_input_to_output_scale_b); QuantizeMultiplier( effective_recurrent_to_output_scale, &integer_lstm_param->effective_recurrent_to_output_scale_a, &integer_lstm_param->effective_recurrent_to_output_scale_b); QuantizeMultiplier(effective_cell_to_output_scale, &integer_lstm_param->effective_cell_to_output_scale_a, &integer_lstm_param->effective_cell_to_output_scale_b); QuantizeMultiplier(effective_proj_scale, &integer_lstm_param->effective_proj_scale_a, &integer_lstm_param->effective_proj_scale_b); QuantizeMultiplier(layer_norm_input_scale, &integer_lstm_param->layer_norm_input_scale_a, &integer_lstm_param->layer_norm_input_scale_b); QuantizeMultiplier(layer_norm_forget_scale, &integer_lstm_param->layer_norm_forget_scale_a, 
&integer_lstm_param->layer_norm_forget_scale_b); QuantizeMultiplier(layer_norm_cell_scale, &integer_lstm_param->layer_norm_cell_scale_a, &integer_lstm_param->layer_norm_cell_scale_b); QuantizeMultiplier(layer_norm_output_scale, &integer_lstm_param->layer_norm_output_scale_a, &integer_lstm_param->layer_norm_output_scale_b); { // Intermdiates in flatbuffer holds Wx, Wh and Wx+Wh. // effective Wx, Wh is in effective_input/recurrent_to_<...>_scale // So use intermediate_scale to hold scale from Wx and Wh to Wx+Wh // 0: [1] -> [0] // 1: [2] -> [0] // and use intermdiate_zp as is. const float s_1_0 = intermediate_scale[1] / intermediate_scale[0]; const float s_2_0 = intermediate_scale[2] / intermediate_scale[0]; const float s_4_3 = intermediate_scale[4] / intermediate_scale[3]; const float s_5_3 = intermediate_scale[5] / intermediate_scale[3]; const float s_7_6 = intermediate_scale[7] / intermediate_scale[6]; const float s_8_6 = intermediate_scale[8] / intermediate_scale[6]; const float s_10_9 = intermediate_scale[10] / intermediate_scale[9]; const float s_11_9 = intermediate_scale[11] / intermediate_scale[9]; QuantizeMultiplier(s_1_0, &integer_lstm_param->intermediate_scale_a[0], &integer_lstm_param->intermediate_scale_b[0]); QuantizeMultiplier(s_2_0, &integer_lstm_param->intermediate_scale_a[1], &integer_lstm_param->intermediate_scale_b[1]); QuantizeMultiplier(s_4_3, &integer_lstm_param->intermediate_scale_a[2], &integer_lstm_param->intermediate_scale_b[2]); QuantizeMultiplier(s_5_3, &integer_lstm_param->intermediate_scale_a[3], &integer_lstm_param->intermediate_scale_b[3]); QuantizeMultiplier(s_7_6, &integer_lstm_param->intermediate_scale_a[4], &integer_lstm_param->intermediate_scale_b[4]); QuantizeMultiplier(s_8_6, &integer_lstm_param->intermediate_scale_a[5], &integer_lstm_param->intermediate_scale_b[5]); QuantizeMultiplier(s_10_9, &integer_lstm_param->intermediate_scale_a[6], &integer_lstm_param->intermediate_scale_b[6]); QuantizeMultiplier(s_11_9, &integer_lstm_param->intermediate_scale_a[7], &integer_lstm_param->intermediate_scale_b[7]); } // Calculate quantized clip for projection and cell. const auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data); const float cell_clip = params->cell_clip; const float proj_clip = params->proj_clip; const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); auto* cell_state_params = reinterpret_cast<TfLiteAffineQuantization*>( cell_state->quantization.params); auto* proj_params = reinterpret_cast<TfLiteAffineQuantization*>( output_tensor->quantization.params); TF_LITE_ENSURE_EQ(context, cell_state_params->scale->data[0], 1.0 / 32768); if (cell_clip > 0.0 && cell_clip < 1.0) { integer_lstm_param->quantized_cell_clip = static_cast<int16_t>(std::min( std::max(cell_clip / cell_state_params->scale->data[0], -32768.0f), 32767.0f)); } else { integer_lstm_param->quantized_cell_clip = 0; } if (proj_clip > 0.0) { integer_lstm_param->quantized_proj_clip = static_cast<int8_t>(std::min( std::max(proj_clip / proj_params->scale->data[0], -128.0f), 127.0f)); } else { integer_lstm_param->quantized_proj_clip = 0; } return kTfLiteOk; }
2125
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
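A minimal sketch of the fail-fast pattern this commit message describes, under stated assumptions: `TfLiteTensorStub`, `LookupTensor`, and `ENSURE_NOT_NULL` below are hypothetical stand-ins for the real `TfLiteTensor`, `GetInput`/`GetVariableInput`, and `TF_LITE_ENSURE(context, ptr != nullptr)`, used only to show a nullptr check inserted immediately after each tensor lookup instead of dereferencing the result later.

// Sketch of the defensive pattern the commit describes: treat every tensor
// lookup as fallible and bail out of Prepare/Eval when it returns nullptr.
// TfLiteTensorStub and ENSURE_NOT_NULL are simplified stand-ins (assumptions),
// not the real TF_LITE_ENSURE / GetInput API.
#include <cstdio>

struct TfLiteTensorStub { /* fields omitted */ };

// In the real kernels this role is played by TF_LITE_ENSURE(context, ptr != nullptr).
#define ENSURE_NOT_NULL(ptr)                              \
  do {                                                    \
    if ((ptr) == nullptr) {                               \
      std::fprintf(stderr, "missing tensor: %s\n", #ptr); \
      return 1; /* kTfLiteError in the real code */       \
    }                                                     \
  } while (0)

// Hypothetical lookup that can fail, mirroring GetInput/GetVariableInput
// returning nullptr for a bad (-1) tensor index.
TfLiteTensorStub* LookupTensor(int index) {
  static TfLiteTensorStub tensor;
  return index < 0 ? nullptr : &tensor;
}

int PrepareLikeFunction(int output_state_index, int cell_state_index) {
  TfLiteTensorStub* output_state = LookupTensor(output_state_index);
  ENSURE_NOT_NULL(output_state);  // fail fast instead of dereferencing later
  TfLiteTensorStub* cell_state = LookupTensor(cell_state_index);
  ENSURE_NOT_NULL(cell_state);
  // ... the rest of Prepare would use the now-verified pointers ...
  return 0;  // kTfLiteOk in the real code
}

int main() {
  std::printf("valid indices  -> %d\n", PrepareLikeFunction(0, 1));   // 0
  std::printf("missing tensor -> %d\n", PrepareLikeFunction(-1, 1));  // 1
  return 0;
}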
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::PopulateQuantizedLstmParams8x8_8
tflite::ops::builtin::lstm::full::PopulateQuantizedLstmParams8x8_8( TfLiteContext * context , TfLiteNode * node , lstm_eval :: IntegerLstmParameter * integer_lstm_param)
['context', 'node', 'integer_lstm_param']
TfLiteStatus PopulateQuantizedLstmParams8x8_8( TfLiteContext* context, TfLiteNode* node, lstm_eval::IntegerLstmParameter* integer_lstm_param) { // Get all tensors. const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor(context, node, kRecurrentToInputWeightsTensor); const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); const TfLiteTensor* cell_to_input_weights = GetOptionalInputTensor(context, node, kCellToInputWeightsTensor); const TfLiteTensor* cell_to_forget_weights = GetOptionalInputTensor(context, node, kCellToForgetWeightsTensor); const TfLiteTensor* cell_to_output_weights = GetOptionalInputTensor(context, node, kCellToOutputWeightsTensor); const TfLiteTensor* input_layer_norm_coefficients = GetOptionalInputTensor(context, node, kInputLayerNormCoefficientsTensor); const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor(context, node, kForgetLayerNormCoefficientsTensor); const TfLiteTensor* cell_layer_norm_coefficients = GetOptionalInputTensor(context, node, kCellLayerNormCoefficientsTensor); const TfLiteTensor* output_layer_norm_coefficients = GetOptionalInputTensor(context, node, kOutputLayerNormCoefficientsTensor); const TfLiteTensor* input_gate_bias = GetOptionalInputTensor(context, node, kInputGateBiasTensor); const TfLiteTensor* forget_gate_bias = GetInput(context, node, kForgetGateBiasTensor); const TfLiteTensor* cell_gate_bias = GetInput(context, node, kCellGateBiasTensor); const TfLiteTensor* output_gate_bias = GetInput(context, node, kOutputGateBiasTensor); const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); const TfLiteTensor* projection_bias = GetOptionalInputTensor(context, node, kProjectionBiasTensor); TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); // Since we have already checked that weights are all there or none, we can // check the existence of only one to get the condition. const bool use_cifg = (input_to_input_weights == nullptr); const bool use_peephole = (cell_to_output_weights != nullptr); const bool is_layer_norm_lstm = (forget_layer_norm_coefficients != nullptr); const bool use_projection = (projection_weights != nullptr); // Weights and states. 
int8_t* input_to_input_weight_ptr = nullptr; int8_t* recurrent_to_input_weight_ptr = nullptr; int8_t* cell_to_input_weight_ptr = nullptr; int8_t* input_to_forget_weight_ptr = nullptr; int8_t* recurrent_to_forget_weight_ptr = nullptr; int8_t* cell_to_forget_weight_ptr = nullptr; int8_t* input_to_cell_weight_ptr = nullptr; int8_t* recurrent_to_cell_weight_ptr = nullptr; int8_t* input_to_output_weight_ptr = nullptr; int8_t* recurrent_to_output_weight_ptr = nullptr; int8_t* cell_to_output_weight_ptr = nullptr; int8_t* projection_weight_ptr = nullptr; int16_t* layer_norm_input_weight_ptr = nullptr; int16_t* layer_norm_forget_weight_ptr = nullptr; int16_t* layer_norm_cell_weight_ptr = nullptr; int16_t* layer_norm_output_weight_ptr = nullptr; int32_t* input_gate_bias_ptr = nullptr; int32_t* forget_gate_bias_ptr = nullptr; int32_t* cell_gate_bias_ptr = nullptr; int32_t* output_gate_bias_ptr = nullptr; int32_t* projection_bias_ptr = nullptr; int16_t* cell_ptr = nullptr; int8_t* output_state_ptr = nullptr; // Scales. const float default_scale = 1.0; float input_scale = default_scale; float input_to_input_weight_scale = default_scale; float recurrent_to_input_weight_scale = default_scale; float cell_to_input_weight_scale = default_scale; float input_to_forget_weight_scale = default_scale; float recurrent_to_forget_weight_scale = default_scale; float cell_to_forget_weight_scale = default_scale; float input_to_cell_weight_scale = default_scale; float recurrent_to_cell_weight_scale = default_scale; float input_to_output_weight_scale = default_scale; float recurrent_to_output_weight_scale = default_scale; float cell_to_output_weight_scale = default_scale; float projection_weight_scale = default_scale; float layer_norm_input_scale = default_scale; float layer_norm_forget_scale = default_scale; float layer_norm_cell_scale = default_scale; float layer_norm_output_scale = default_scale; float output_state_scale = default_scale; // Effective scales. float effective_input_to_input_scale = default_scale; float effective_recurrent_to_input_scale = default_scale; float effective_cell_to_input_scale = default_scale; float effective_input_to_forget_scale = default_scale; float effective_recurrent_to_forget_scale = default_scale; float effective_cell_to_forget_scale = default_scale; float effective_input_to_cell_scale = default_scale; float effective_recurrent_to_cell_scale = default_scale; float effective_input_to_output_scale = default_scale; float effective_recurrent_to_output_scale = default_scale; float effective_cell_to_output_scale = default_scale; float effective_proj_scale = default_scale; // Zero points int input_zp = 0; int output_state_zp = 0; // Populate all the values. 
if (!use_cifg) { input_to_input_weight_ptr = input_to_input_weights->data.int8; recurrent_to_input_weight_ptr = recurrent_to_input_weights->data.int8; input_gate_bias_ptr = input_gate_bias->data.i32; input_to_input_weight_scale = input_to_input_weights->params.scale; recurrent_to_input_weight_scale = recurrent_to_input_weights->params.scale; } if (use_peephole) { if (!use_cifg) { cell_to_input_weight_ptr = cell_to_input_weights->data.int8; cell_to_input_weight_scale = cell_to_input_weights->params.scale; } cell_to_forget_weight_ptr = cell_to_forget_weights->data.int8; cell_to_output_weight_ptr = cell_to_output_weights->data.int8; cell_to_forget_weight_scale = cell_to_forget_weights->params.scale; cell_to_output_weight_scale = cell_to_output_weights->params.scale; } if (is_layer_norm_lstm) { if (!use_cifg) { layer_norm_input_weight_ptr = input_layer_norm_coefficients->data.i16; layer_norm_input_scale = input_layer_norm_coefficients->params.scale; } layer_norm_forget_weight_ptr = forget_layer_norm_coefficients->data.i16; layer_norm_forget_scale = forget_layer_norm_coefficients->params.scale; layer_norm_cell_weight_ptr = cell_layer_norm_coefficients->data.i16; layer_norm_cell_scale = cell_layer_norm_coefficients->params.scale; layer_norm_output_weight_ptr = output_layer_norm_coefficients->data.i16; layer_norm_output_scale = output_layer_norm_coefficients->params.scale; } if (use_projection) { projection_weight_ptr = projection_weights->data.int8; projection_weight_scale = projection_weights->params.scale; if (projection_bias) { projection_bias_ptr = projection_bias->data.i32; } } output_state_scale = output_state->params.scale; input_to_forget_weight_ptr = input_to_forget_weights->data.int8; input_to_forget_weight_scale = input_to_forget_weights->params.scale; input_to_cell_weight_ptr = input_to_cell_weights->data.int8; input_to_cell_weight_scale = input_to_cell_weights->params.scale; input_to_output_weight_ptr = input_to_output_weights->data.int8; input_to_output_weight_scale = input_to_output_weights->params.scale; recurrent_to_forget_weight_ptr = recurrent_to_forget_weights->data.int8; recurrent_to_forget_weight_scale = recurrent_to_forget_weights->params.scale; recurrent_to_cell_weight_ptr = recurrent_to_cell_weights->data.int8; recurrent_to_cell_weight_scale = recurrent_to_cell_weights->params.scale; recurrent_to_output_weight_ptr = recurrent_to_output_weights->data.int8; recurrent_to_output_weight_scale = recurrent_to_output_weights->params.scale; forget_gate_bias_ptr = forget_gate_bias->data.i32; cell_gate_bias_ptr = cell_gate_bias->data.i32; output_gate_bias_ptr = output_gate_bias->data.i32; output_state_ptr = output_state->data.int8; cell_ptr = cell_state->data.i16; input_scale = input->params.scale; input_zp = input->params.zero_point; output_state_zp = output_state->params.zero_point; std::vector<float> intermediate_scale; for (int i = 0; i < 12; ++i) { TfLiteTensor* intermediate = &context->tensors[node->intermediates->data[i]]; auto* params = reinterpret_cast<TfLiteAffineQuantization*>( intermediate->quantization.params); intermediate_scale.push_back(params->scale->data[0]); integer_lstm_param->intermediate_zp[i] = params->zero_point->data[0]; } // Calculate effective scales. 
if (!use_cifg) { effective_input_to_input_scale = input_to_input_weight_scale * input_scale / intermediate_scale[1]; effective_recurrent_to_input_scale = recurrent_to_input_weight_scale * output_state_scale / intermediate_scale[2]; } effective_input_to_forget_scale = input_to_forget_weight_scale * input_scale / intermediate_scale[4]; effective_recurrent_to_forget_scale = recurrent_to_forget_weight_scale * output_state_scale / intermediate_scale[5]; effective_input_to_cell_scale = input_to_cell_weight_scale * input_scale / intermediate_scale[7]; effective_recurrent_to_cell_scale = recurrent_to_cell_weight_scale * output_state_scale / intermediate_scale[8]; effective_input_to_output_scale = input_to_output_weight_scale * input_scale / intermediate_scale[10]; effective_recurrent_to_output_scale = recurrent_to_output_weight_scale * output_state_scale / intermediate_scale[11]; effective_proj_scale = projection_weight_scale * std::pow(2, -15) / output_state_scale; if (use_peephole) { if (!use_cifg) { effective_cell_to_input_scale = std::pow(2, -15) * cell_to_input_weight_scale / intermediate_scale[0]; } effective_cell_to_forget_scale = std::pow(2, -15) * cell_to_forget_weight_scale / intermediate_scale[3]; effective_cell_to_output_scale = std::pow(2, -15) * cell_to_output_weight_scale / intermediate_scale[9]; } // Calculate effecgive scales. QuantizeMultiplier(effective_input_to_input_scale, &integer_lstm_param->effective_input_to_input_scale_a, &integer_lstm_param->effective_input_to_input_scale_b); QuantizeMultiplier(effective_recurrent_to_input_scale, &integer_lstm_param->effective_recurrent_to_input_scale_a, &integer_lstm_param->effective_recurrent_to_input_scale_b); QuantizeMultiplier(effective_cell_to_input_scale, &integer_lstm_param->effective_cell_to_input_scale_a, &integer_lstm_param->effective_cell_to_input_scale_b); QuantizeMultiplier(effective_input_to_forget_scale, &integer_lstm_param->effective_input_to_forget_scale_a, &integer_lstm_param->effective_input_to_forget_scale_b); QuantizeMultiplier( effective_recurrent_to_forget_scale, &integer_lstm_param->effective_recurrent_to_forget_scale_a, &integer_lstm_param->effective_recurrent_to_forget_scale_b); QuantizeMultiplier(effective_cell_to_forget_scale, &integer_lstm_param->effective_cell_to_forget_scale_a, &integer_lstm_param->effective_cell_to_forget_scale_b); QuantizeMultiplier(effective_input_to_cell_scale, &integer_lstm_param->effective_input_to_cell_scale_a, &integer_lstm_param->effective_input_to_cell_scale_b); QuantizeMultiplier(effective_recurrent_to_cell_scale, &integer_lstm_param->effective_recurrent_to_cell_scale_a, &integer_lstm_param->effective_recurrent_to_cell_scale_b); QuantizeMultiplier(effective_input_to_output_scale, &integer_lstm_param->effective_input_to_output_scale_a, &integer_lstm_param->effective_input_to_output_scale_b); QuantizeMultiplier( effective_recurrent_to_output_scale, &integer_lstm_param->effective_recurrent_to_output_scale_a, &integer_lstm_param->effective_recurrent_to_output_scale_b); QuantizeMultiplier(effective_cell_to_output_scale, &integer_lstm_param->effective_cell_to_output_scale_a, &integer_lstm_param->effective_cell_to_output_scale_b); QuantizeMultiplier(effective_proj_scale, &integer_lstm_param->effective_proj_scale_a, &integer_lstm_param->effective_proj_scale_b); QuantizeMultiplier(layer_norm_input_scale, &integer_lstm_param->layer_norm_input_scale_a, &integer_lstm_param->layer_norm_input_scale_b); QuantizeMultiplier(layer_norm_forget_scale, &integer_lstm_param->layer_norm_forget_scale_a, 
&integer_lstm_param->layer_norm_forget_scale_b); QuantizeMultiplier(layer_norm_cell_scale, &integer_lstm_param->layer_norm_cell_scale_a, &integer_lstm_param->layer_norm_cell_scale_b); QuantizeMultiplier(layer_norm_output_scale, &integer_lstm_param->layer_norm_output_scale_a, &integer_lstm_param->layer_norm_output_scale_b); { // Intermdiates in flatbuffer holds Wx, Wh and Wx+Wh. // effective Wx, Wh is in effective_input/recurrent_to_<...>_scale // So use intermediate_scale to hold scale from Wx and Wh to Wx+Wh // 0: [1] -> [0] // 1: [2] -> [0] // and use intermdiate_zp as is. const float s_1_0 = intermediate_scale[1] / intermediate_scale[0]; const float s_2_0 = intermediate_scale[2] / intermediate_scale[0]; const float s_4_3 = intermediate_scale[4] / intermediate_scale[3]; const float s_5_3 = intermediate_scale[5] / intermediate_scale[3]; const float s_7_6 = intermediate_scale[7] / intermediate_scale[6]; const float s_8_6 = intermediate_scale[8] / intermediate_scale[6]; const float s_10_9 = intermediate_scale[10] / intermediate_scale[9]; const float s_11_9 = intermediate_scale[11] / intermediate_scale[9]; QuantizeMultiplier(s_1_0, &integer_lstm_param->intermediate_scale_a[0], &integer_lstm_param->intermediate_scale_b[0]); QuantizeMultiplier(s_2_0, &integer_lstm_param->intermediate_scale_a[1], &integer_lstm_param->intermediate_scale_b[1]); QuantizeMultiplier(s_4_3, &integer_lstm_param->intermediate_scale_a[2], &integer_lstm_param->intermediate_scale_b[2]); QuantizeMultiplier(s_5_3, &integer_lstm_param->intermediate_scale_a[3], &integer_lstm_param->intermediate_scale_b[3]); QuantizeMultiplier(s_7_6, &integer_lstm_param->intermediate_scale_a[4], &integer_lstm_param->intermediate_scale_b[4]); QuantizeMultiplier(s_8_6, &integer_lstm_param->intermediate_scale_a[5], &integer_lstm_param->intermediate_scale_b[5]); QuantizeMultiplier(s_10_9, &integer_lstm_param->intermediate_scale_a[6], &integer_lstm_param->intermediate_scale_b[6]); QuantizeMultiplier(s_11_9, &integer_lstm_param->intermediate_scale_a[7], &integer_lstm_param->intermediate_scale_b[7]); } // Calculate quantized clip for projection and cell. const auto* params = reinterpret_cast<TfLiteLSTMParams*>(node->builtin_data); const float cell_clip = params->cell_clip; const float proj_clip = params->proj_clip; const TfLiteTensor* output_tensor = GetOutput(context, node, kOutputTensor); auto* cell_state_params = reinterpret_cast<TfLiteAffineQuantization*>( cell_state->quantization.params); auto* proj_params = reinterpret_cast<TfLiteAffineQuantization*>( output_tensor->quantization.params); TF_LITE_ENSURE_EQ(context, cell_state_params->scale->data[0], 1.0 / 32768); if (cell_clip > 0.0 && cell_clip < 1.0) { integer_lstm_param->quantized_cell_clip = static_cast<int16_t>(std::min( std::max(cell_clip / cell_state_params->scale->data[0], -32768.0f), 32767.0f)); } else { integer_lstm_param->quantized_cell_clip = 0; } if (proj_clip > 0.0) { integer_lstm_param->quantized_proj_clip = static_cast<int8_t>(std::min( std::max(proj_clip / proj_params->scale->data[0], -128.0f), 127.0f)); } else { integer_lstm_param->quantized_proj_clip = 0; } return kTfLiteOk; }
2125
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
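As a rough, self-contained illustration of the out-of-bounds read mechanism described above (plain arrays, not real TensorFlow Lite data structures), the sketch below shows how an unchecked `-1` index becomes a read at a fixed offset before the buffer, and how a simple bounds check rejects it; all names here are hypothetical.

// Minimal illustration (assumption: plain arrays, not real TFLite data
// structures) of how an unchecked -1 index becomes a read at a fixed
// negative offset from the start of the tensor array.
#include <cstddef>
#include <cstdio>
#include <optional>
#include <vector>

// Unchecked variant: idx == -1 silently reads tensors.data()[-1], i.e.
// whatever sits just before the vector's storage. Undefined behavior, so it
// is defined here only for contrast and never called below.
float UncheckedLookup(const std::vector<float>& tensors, int idx) {
  return tensors.data()[idx];
}

// Checked variant: reject anything outside [0, size), including the -1
// "optional tensor" sentinel, instead of indexing with it.
std::optional<float> CheckedLookup(const std::vector<float>& tensors, int idx) {
  if (idx < 0 || static_cast<size_t>(idx) >= tensors.size()) {
    return std::nullopt;
  }
  return tensors[idx];
}

int main() {
  std::vector<float> tensors = {1.0f, 2.0f, 3.0f};
  if (auto v = CheckedLookup(tensors, -1)) {
    std::printf("value: %f\n", *v);
  } else {
    std::printf("index -1 rejected\n");
  }
  return 0;
}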
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::Prepare
tflite::ops::builtin::lstm::full::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { OpData* op_data = static_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); // Logic for determining regular lstm and layer norm lstm: // input_size, forget_gate_layer_norm_tensor (20) null? is_layer_norm? // 20, N/A, No. // 24, null, No. // 24, not null, Yes. // 20-inputs lstm are deprecated and is only kept here for backward // compatibility. if (node->inputs->size == 24) { const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor( context, node, kForgetLayerNormCoefficientsTensor); if (forget_layer_norm_coefficients == nullptr) { op_data->use_layer_norm = false; } else { op_data->use_layer_norm = true; } } else if (node->inputs->size == 20) { // This is deprecated and is only kept here for backward compatibility. op_data->use_layer_norm = false; } else { context->ReportError( context, "The LSTM Full kernel expects 20 or 24 inputs. Got %d inputs", node->inputs->size); return kTfLiteError; } const bool use_layer_norm = op_data->use_layer_norm; // Inferring batch size, number of outputs and number of cells from the // input tensors. const TfLiteTensor* input = GetInput(context, node, kInputTensor); const bool is_integer = input->type == kTfLiteInt8; TF_LITE_ENSURE(context, input->dims->size > 1); const int n_batch = input->dims->data[0]; const int n_input = input->dims->data[1]; const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const int n_cell = input_to_output_weights->dims->data[0]; TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0], n_cell); const int n_output = recurrent_to_output_weights->dims->data[1]; // Check that input tensor dimensions matches with each other. TF_LITE_ENSURE_OK( context, CheckInputTensorDimensions(context, node, n_input, n_output, n_cell, use_layer_norm, is_integer)); // Get the pointer to output, output_state and cell_state tensors. TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); // Check the shape of input state tensors. // These tensor may be 1D or 2D. It's fine as long as the total size is // correct. TF_LITE_ENSURE_EQ(context, NumElements(output_state), n_batch * n_output); TF_LITE_ENSURE_EQ(context, NumElements(cell_state), n_batch * n_cell); // Resize the output tensors. TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); output_size->data[0] = n_batch; output_size->data[1] = n_output; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // The weights are of consistent type, so it suffices to check one. const bool is_hybrid_op = IsHybridOp(input, input_to_output_weights); const bool is_sparse_op = (input_to_output_weights->sparsity != nullptr); // The type of Integer LSTM. 
const int num_intermediate_tensors = node->intermediates->size; if (is_integer) { TF_LITE_ENSURE(context, num_intermediate_tensors == 5 || num_intermediate_tensors == 12); } // We use number of intermediate tensors to distinguish the 8 bit matmul // output and the 16 bit matmul output version. const bool is_8x8_16 = num_intermediate_tensors == 5; TfLiteIntArrayFree(node->temporaries); if (is_hybrid_op) { if (is_sparse_op) { node->temporaries = TfLiteIntArrayCreate(kNumHybridTemporaryTensors + kLedgersToAdd); } else { node->temporaries = TfLiteIntArrayCreate(kNumHybridTemporaryTensors); } } else if (is_integer) { if (is_8x8_16) { node->temporaries = TfLiteIntArrayCreate(6); } else { node->temporaries = TfLiteIntArrayCreate(8); } } else { node->temporaries = TfLiteIntArrayCreate(1); } // Create a scratch buffer tensor for float case and hybrid case. // TODO(b/152066492): Create a is_float boolean and reorganize the temporary // buffer allocation logic. if (!is_integer) { node->temporaries->data[kScratchBuffer] = op_data->scratch_tensor_index + kScratchBuffer; TfLiteTensor* scratch_buffer = GetTemporary(context, node, kScratchBuffer); scratch_buffer->type = input->type; scratch_buffer->allocation_type = kTfLiteArenaRw; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const bool use_cifg = (input_to_input_weights == nullptr); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2); scratch_buffer_size->data[0] = n_batch; if (use_cifg) { // Reserving space for Cell, Forget, Output gates scratch_buffer_size->data[1] = n_cell * 3; } else { // Reserving space for Input, Cell, Forget, Output gates scratch_buffer_size->data[1] = n_cell * 4; } TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } if (is_hybrid_op) { if (!is_sparse_op) { op_data->compute_row_sums = true; } // Allocate temporary tensors to store quantized values of input, // output_state and cell_state tensors. 
node->temporaries->data[kInputQuantized] = op_data->scratch_tensor_index + kInputQuantized; TfLiteTensor* input_quantized = GetTemporary(context, node, kInputQuantized); input_quantized->type = input_to_output_weights->type; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[kOutputStateQuantized] = op_data->scratch_tensor_index + kOutputStateQuantized; TfLiteTensor* output_state_quantized = GetTemporary(context, node, kOutputStateQuantized); output_state_quantized->type = input_to_output_weights->type; output_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(output_state_quantized->dims, output_state->dims)) { TfLiteIntArray* output_state_quantized_size = TfLiteIntArrayCopy(output_state->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_quantized, output_state_quantized_size)); } node->temporaries->data[kCellStateQuantized] = op_data->scratch_tensor_index + kCellStateQuantized; TfLiteTensor* cell_state_quantized = GetTemporary(context, node, kCellStateQuantized); cell_state_quantized->type = input_to_output_weights->type; cell_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(cell_state_quantized->dims, cell_state->dims)) { TfLiteIntArray* cell_state_quantized_size = TfLiteIntArrayCopy(cell_state->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, cell_state_quantized, cell_state_quantized_size)); } // Allocate temporary tensors to store scaling factors and product scaling // factors. The latter is a convenience storage which allows to quantize // a vector once (which produces the scaling factors) and multiply it with // different matrices (which requires multiplying the scaling factors with // the scaling factor of the matrix). 
node->temporaries->data[kInputScalingFactors] = op_data->scratch_tensor_index + kInputScalingFactors; TfLiteTensor* input_sf = GetTemporary(context, node, kInputScalingFactors); input_sf->type = kTfLiteFloat32; input_sf->allocation_type = kTfLiteArenaRw; int scaling_dims[1] = {n_batch}; if (!TfLiteIntArrayEqualsArray(input_sf->dims, 1, scaling_dims)) { TfLiteIntArray* input_sf_size = TfLiteIntArrayCreate(1); input_sf_size->data[0] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, input_sf, input_sf_size)); } node->temporaries->data[kOutputStateScalingFactors] = op_data->scratch_tensor_index + kOutputStateScalingFactors; TfLiteTensor* output_state_sf = GetTemporary(context, node, kOutputStateScalingFactors); output_state_sf->type = kTfLiteFloat32; output_state_sf->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(output_state_sf->dims, 1, scaling_dims)) { TfLiteIntArray* output_state_sf_size = TfLiteIntArrayCreate(1); output_state_sf_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_sf, output_state_sf_size)); } node->temporaries->data[kProductScalingFactors] = op_data->scratch_tensor_index + kProductScalingFactors; TfLiteTensor* prod_scaling_factors = GetTemporary(context, node, kProductScalingFactors); prod_scaling_factors->type = kTfLiteFloat32; prod_scaling_factors->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(prod_scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* prod_scaling_factors_size = TfLiteIntArrayCreate(1); prod_scaling_factors_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, prod_scaling_factors, prod_scaling_factors_size)); } // Allocate a temporary tensor to store the recovered cell weights. Since // this is used for diagonal matrices, only need to store n_cell values. 
node->temporaries->data[kRecoveredCellWeights] = op_data->scratch_tensor_index + kRecoveredCellWeights; TfLiteTensor* recovered_cell_weights = GetTemporary(context, node, kRecoveredCellWeights); recovered_cell_weights->type = kTfLiteFloat32; recovered_cell_weights->allocation_type = kTfLiteArenaRw; int recovered_cell_dims[1] = {n_cell}; if (!TfLiteIntArrayEqualsArray(recovered_cell_weights->dims, 1, recovered_cell_dims)) { TfLiteIntArray* recovered_cell_weights_size = TfLiteIntArrayCreate(1); recovered_cell_weights_size->data[0] = n_cell; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, recovered_cell_weights, recovered_cell_weights_size)); } // Allocate a temporary tensor to store accumulate values for matrix // multiplication before multiplication by scaling factor node->temporaries->data[kAccumScratch] = op_data->scratch_tensor_index + kAccumScratch; TfLiteTensor* accum_scratch = GetTemporary(context, node, kAccumScratch); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {n_cell, n_batch}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2); accum_size->data[0] = n_cell; accum_size->data[1] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, accum_scratch, accum_size)); } node->temporaries->data[kInputZeroPoints] = op_data->scratch_tensor_index + kInputZeroPoints; TfLiteTensor* input_zp = GetTemporary(context, node, kInputZeroPoints); input_zp->type = kTfLiteFloat32; input_zp->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_zp->dims, 1, scaling_dims)) { TfLiteIntArray* input_zp_size = TfLiteIntArrayCreate(1); input_zp_size->data[0] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, input_zp, input_zp_size)); } node->temporaries->data[kOutputStateZeroPoints] = op_data->scratch_tensor_index + kOutputStateZeroPoints; TfLiteTensor* output_state_zp = GetTemporary(context, node, kOutputStateZeroPoints); output_state_zp->type = kTfLiteFloat32; output_state_zp->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(output_state_zp->dims, 1, scaling_dims)) { TfLiteIntArray* output_state_zp_size = TfLiteIntArrayCreate(1); output_state_zp_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_zp, output_state_zp_size)); } node->temporaries->data[kRowSums] = op_data->scratch_tensor_index + kRowSums; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const bool use_cifg = (input_to_input_weights == nullptr); int row_sums_rows = use_cifg ? 
6 : 8; const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); if (projection_weights != nullptr) { row_sums_rows += ceil(static_cast<float>(n_output) / n_cell); } TfLiteTensor* row_sums = GetTemporary(context, node, kRowSums); row_sums->type = kTfLiteInt32; row_sums->allocation_type = kTfLiteArenaRwPersistent; const int row_sums_dims[2] = {row_sums_rows, n_cell}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2); row_sums_size->data[0] = row_sums_dims[0]; row_sums_size->data[1] = row_sums_dims[1]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } if (is_sparse_op) { op_data->ledger_initialized = false; int offset = kNumHybridTemporaryTensors; { node->temporaries->data[offset + kInputToInputWeightsLedgerOffset] = op_data->ledger_index + kInputToInputWeightsLedgerOffset; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); TfLiteTensor* input_to_input_weights_ledger = &context->tensors[op_data->ledger_index + kInputToInputWeightsLedgerOffset]; auto status = make_ledger(input_to_input_weights == nullptr ? nullptr : input_to_input_weights->sparsity, context, input_to_input_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kInputToForgetWeightsLedgerOffset] = op_data->ledger_index + kInputToForgetWeightsLedgerOffset; const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); TfLiteTensor* input_to_forget_weights_ledger = &context->tensors[op_data->ledger_index + kInputToForgetWeightsLedgerOffset]; auto status = make_ledger(input_to_forget_weights->sparsity, context, input_to_forget_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kInputToCellWeightsLedgerOffset] = op_data->ledger_index + kInputToCellWeightsLedgerOffset; const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); TfLiteTensor* input_to_cell_weights_ledger = &context->tensors[op_data->ledger_index + kInputToCellWeightsLedgerOffset]; auto status = make_ledger(input_to_cell_weights->sparsity, context, input_to_cell_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kInputToOutputWeightsLedgerOffset] = op_data->ledger_index + kInputToOutputWeightsLedgerOffset; const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); TfLiteTensor* input_to_output_weights_ledger = &context->tensors[op_data->ledger_index + kInputToOutputWeightsLedgerOffset]; auto status = make_ledger(input_to_output_weights->sparsity, context, input_to_output_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kRecurrentToInputWeightsLedgerOffset] = op_data->ledger_index + kRecurrentToInputWeightsLedgerOffset; const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor( context, node, kRecurrentToInputWeightsTensor); TfLiteTensor* recurrent_to_input_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToInputWeightsLedgerOffset]; auto status = make_ledger(recurrent_to_input_weights == nullptr ? 
nullptr : recurrent_to_input_weights->sparsity, context, recurrent_to_input_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries ->data[offset + kRecurrentToForgetWeightsLedgerOffset] = op_data->ledger_index + kRecurrentToForgetWeightsLedgerOffset; const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); TfLiteTensor* recurrent_to_forget_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToForgetWeightsLedgerOffset]; auto status = make_ledger(recurrent_to_forget_weights->sparsity, context, recurrent_to_forget_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kRecurrentToCellWeightsLedgerOffset] = op_data->ledger_index + kRecurrentToCellWeightsLedgerOffset; const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); TfLiteTensor* recurrent_to_cell_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToCellWeightsLedgerOffset]; auto status = make_ledger(recurrent_to_cell_weights->sparsity, context, recurrent_to_cell_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries ->data[offset + kRecurrentToOutputWeightsLedgerOffset] = op_data->ledger_index + kRecurrentToOutputWeightsLedgerOffset; const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); TfLiteTensor* recurrent_to_output_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToOutputWeightsLedgerOffset]; auto status = make_ledger(recurrent_to_output_weights->sparsity, context, recurrent_to_output_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kProjectionWeightsLedgerOffset] = op_data->ledger_index + kProjectionWeightsLedgerOffset; const TfLiteTensor* projection_weights = GetInput(context, node, kProjectionWeightsTensor); TfLiteTensor* projection_weights_ledger = &context->tensors[op_data->ledger_index + kProjectionWeightsLedgerOffset]; auto status = make_ledger(projection_weights->sparsity, context, projection_weights_ledger); if (status != kTfLiteOk) return status; } } } if (is_integer) { if (is_8x8_16) { // Integer LSTM prepare function for 8x8->16. // This code path needs 5 intermediate tensors per Op. // Populate quantization parameters. PopulateQuantizedLstmParams8x8_16(context, node, &op_data->integer_lstm_param); // Allocate scratch buffer. Need 6 16bit buffer with size n_batch * n_cell // and 1 8bit buffer with size n_batch * n_cell. We also need 1 32 bit // buffer with size n_batch * n_cell. // // Handle cifg case as well, which might save one buffer. for (int scratch_index = 0; scratch_index < 6; ++scratch_index) { node->temporaries->data[scratch_index] = op_data->scratch_tensor_index + scratch_index; TfLiteTensor* scratch_tensor = GetTemporary(context, node, scratch_index); scratch_tensor->type = kTfLiteInt16; if (scratch_index == 4) { scratch_tensor->type = kTfLiteInt8; } else if (scratch_index == 5) { scratch_tensor->type = kTfLiteInt32; } scratch_tensor->allocation_type = kTfLiteArenaRw; const int scratch_dimension[2] = {n_batch, n_cell}; if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2, scratch_dimension)) { TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2); scratch_buffer_size->data[0] = n_batch; scratch_buffer_size->data[1] = n_cell; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, scratch_buffer_size)); } } // Populate precomputed zp * weight. 
TF_LITE_ENSURE_OK(context, PopulatePrecomputedZPTimesWeightsWithBias( context, op_data, node)); } else { // Integer LSTM prepare function for 8x8->8. // This code path needs 12 intermediate tensors per Op. PopulateQuantizedLstmParams8x8_8(context, node, &op_data->integer_lstm_param); // Allocate scratch buffer. Need 6 16bit buffer with size n_batch * n_cell // and 2 8bit buffer with size n_batch * n_cell. // // Handle cifg case as well, which might save one buffer. for (int scratch_index = 0; scratch_index < 8; ++scratch_index) { node->temporaries->data[scratch_index] = op_data->scratch_tensor_index + scratch_index; TfLiteTensor* scratch_tensor = GetTemporary(context, node, scratch_index); if (scratch_index == 0 || scratch_index == 1) { scratch_tensor->type = kTfLiteInt8; } else { scratch_tensor->type = kTfLiteInt16; } scratch_tensor->allocation_type = kTfLiteArenaRw; const int scratch_dimension[2] = {n_batch, n_cell}; if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2, scratch_dimension)) { TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2); scratch_buffer_size->data[0] = n_batch; scratch_buffer_size->data[1] = n_cell; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, scratch_buffer_size)); } } } } return kTfLiteOk; }
2920
True
1
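The sparse-ledger setup reproduced in the function field above dereferences the tensors returned by GetInput (for example input_to_forget_weights->sparsity) without confirming the lookup succeeded, while the optional tensors from GetOptionalInputTensor are already handled with an inline nullptr test. Below is a minimal sketch of the distinction drawn by the fix commit recorded in the following rows; it is written as if it sat inside the LSTM kernel file, the helper name is illustrative, and everything else (GetInput, GetOptionalInputTensor, TF_LITE_ENSURE, the kInputTo*WeightsTensor constants, TfLiteSparsity) is machinery the kernel above already uses.

static TfLiteStatus CheckLstmWeightLookups(TfLiteContext* context,
                                           TfLiteNode* node) {
  // Required tensor: a failed lookup becomes a clean kernel error instead of
  // a null-pointer dereference further down.
  const TfLiteTensor* forget_weights =
      GetInput(context, node, kInputToForgetWeightsTensor);
  TF_LITE_ENSURE(context, forget_weights != nullptr);

  // Optional tensor: nullptr is a legal outcome (CIFG models omit it), so it
  // is handled inline rather than treated as an error.
  const TfLiteTensor* input_weights =
      GetOptionalInputTensor(context, node, kInputToInputWeightsTensor);
  const TfLiteSparsity* input_sparsity =
      input_weights == nullptr ? nullptr : input_weights->sparsity;
  (void)input_sparsity;
  return kTfLiteOk;
}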
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::lstm::full::Prepare
tflite::ops::builtin::lstm::full::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { OpData* op_data = static_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); // Logic for determining regular lstm and layer norm lstm: // input_size, forget_gate_layer_norm_tensor (20) null? is_layer_norm? // 20, N/A, No. // 24, null, No. // 24, not null, Yes. // 20-inputs lstm are deprecated and is only kept here for backward // compatibility. if (node->inputs->size == 24) { const TfLiteTensor* forget_layer_norm_coefficients = GetOptionalInputTensor( context, node, kForgetLayerNormCoefficientsTensor); if (forget_layer_norm_coefficients == nullptr) { op_data->use_layer_norm = false; } else { op_data->use_layer_norm = true; } } else if (node->inputs->size == 20) { // This is deprecated and is only kept here for backward compatibility. op_data->use_layer_norm = false; } else { context->ReportError( context, "The LSTM Full kernel expects 20 or 24 inputs. Got %d inputs", node->inputs->size); return kTfLiteError; } const bool use_layer_norm = op_data->use_layer_norm; // Inferring batch size, number of outputs and number of cells from the // input tensors. const TfLiteTensor* input = GetInput(context, node, kInputTensor); const bool is_integer = input->type == kTfLiteInt8; TF_LITE_ENSURE(context, input->dims->size > 1); const int n_batch = input->dims->data[0]; const int n_input = input->dims->data[1]; const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); const int n_cell = input_to_output_weights->dims->data[0]; TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, input_to_output_weights->dims->data[1], n_input); const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->size, 2); TF_LITE_ENSURE_EQ(context, recurrent_to_output_weights->dims->data[0], n_cell); const int n_output = recurrent_to_output_weights->dims->data[1]; // Check that input tensor dimensions matches with each other. TF_LITE_ENSURE_OK( context, CheckInputTensorDimensions(context, node, n_input, n_output, n_cell, use_layer_norm, is_integer)); // Get the pointer to output, output_state and cell_state tensors. TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TfLiteTensor* output_state = GetVariableInput(context, node, kOutputStateTensor); TF_LITE_ENSURE(context, output_state != nullptr); TfLiteTensor* cell_state = GetVariableInput(context, node, kCellStateTensor); TF_LITE_ENSURE(context, cell_state != nullptr); // Check the shape of input state tensors. // These tensor may be 1D or 2D. It's fine as long as the total size is // correct. TF_LITE_ENSURE_EQ(context, NumElements(output_state), n_batch * n_output); TF_LITE_ENSURE_EQ(context, NumElements(cell_state), n_batch * n_cell); // Resize the output tensors. TfLiteIntArray* output_size = TfLiteIntArrayCreate(2); output_size->data[0] = n_batch; output_size->data[1] = n_output; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_size)); // The weights are of consistent type, so it suffices to check one. const bool is_hybrid_op = IsHybridOp(input, input_to_output_weights); const bool is_sparse_op = (input_to_output_weights->sparsity != nullptr); // The type of Integer LSTM. 
const int num_intermediate_tensors = node->intermediates->size; if (is_integer) { TF_LITE_ENSURE(context, num_intermediate_tensors == 5 || num_intermediate_tensors == 12); } // We use number of intermediate tensors to distinguish the 8 bit matmul // output and the 16 bit matmul output version. const bool is_8x8_16 = num_intermediate_tensors == 5; TfLiteIntArrayFree(node->temporaries); if (is_hybrid_op) { if (is_sparse_op) { node->temporaries = TfLiteIntArrayCreate(kNumHybridTemporaryTensors + kLedgersToAdd); } else { node->temporaries = TfLiteIntArrayCreate(kNumHybridTemporaryTensors); } } else if (is_integer) { if (is_8x8_16) { node->temporaries = TfLiteIntArrayCreate(6); } else { node->temporaries = TfLiteIntArrayCreate(8); } } else { node->temporaries = TfLiteIntArrayCreate(1); } // Create a scratch buffer tensor for float case and hybrid case. // TODO(b/152066492): Create a is_float boolean and reorganize the temporary // buffer allocation logic. if (!is_integer) { node->temporaries->data[kScratchBuffer] = op_data->scratch_tensor_index + kScratchBuffer; TfLiteTensor* scratch_buffer = GetTemporary(context, node, kScratchBuffer); scratch_buffer->type = input->type; scratch_buffer->allocation_type = kTfLiteArenaRw; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const bool use_cifg = (input_to_input_weights == nullptr); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2); scratch_buffer_size->data[0] = n_batch; if (use_cifg) { // Reserving space for Cell, Forget, Output gates scratch_buffer_size->data[1] = n_cell * 3; } else { // Reserving space for Input, Cell, Forget, Output gates scratch_buffer_size->data[1] = n_cell * 4; } TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } if (is_hybrid_op) { if (!is_sparse_op) { op_data->compute_row_sums = true; } // Allocate temporary tensors to store quantized values of input, // output_state and cell_state tensors. 
node->temporaries->data[kInputQuantized] = op_data->scratch_tensor_index + kInputQuantized; TfLiteTensor* input_quantized = GetTemporary(context, node, kInputQuantized); input_quantized->type = input_to_output_weights->type; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[kOutputStateQuantized] = op_data->scratch_tensor_index + kOutputStateQuantized; TfLiteTensor* output_state_quantized = GetTemporary(context, node, kOutputStateQuantized); output_state_quantized->type = input_to_output_weights->type; output_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(output_state_quantized->dims, output_state->dims)) { TfLiteIntArray* output_state_quantized_size = TfLiteIntArrayCopy(output_state->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_quantized, output_state_quantized_size)); } node->temporaries->data[kCellStateQuantized] = op_data->scratch_tensor_index + kCellStateQuantized; TfLiteTensor* cell_state_quantized = GetTemporary(context, node, kCellStateQuantized); cell_state_quantized->type = input_to_output_weights->type; cell_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(cell_state_quantized->dims, cell_state->dims)) { TfLiteIntArray* cell_state_quantized_size = TfLiteIntArrayCopy(cell_state->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, cell_state_quantized, cell_state_quantized_size)); } // Allocate temporary tensors to store scaling factors and product scaling // factors. The latter is a convenience storage which allows to quantize // a vector once (which produces the scaling factors) and multiply it with // different matrices (which requires multiplying the scaling factors with // the scaling factor of the matrix). 
node->temporaries->data[kInputScalingFactors] = op_data->scratch_tensor_index + kInputScalingFactors; TfLiteTensor* input_sf = GetTemporary(context, node, kInputScalingFactors); input_sf->type = kTfLiteFloat32; input_sf->allocation_type = kTfLiteArenaRw; int scaling_dims[1] = {n_batch}; if (!TfLiteIntArrayEqualsArray(input_sf->dims, 1, scaling_dims)) { TfLiteIntArray* input_sf_size = TfLiteIntArrayCreate(1); input_sf_size->data[0] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, input_sf, input_sf_size)); } node->temporaries->data[kOutputStateScalingFactors] = op_data->scratch_tensor_index + kOutputStateScalingFactors; TfLiteTensor* output_state_sf = GetTemporary(context, node, kOutputStateScalingFactors); output_state_sf->type = kTfLiteFloat32; output_state_sf->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(output_state_sf->dims, 1, scaling_dims)) { TfLiteIntArray* output_state_sf_size = TfLiteIntArrayCreate(1); output_state_sf_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_sf, output_state_sf_size)); } node->temporaries->data[kProductScalingFactors] = op_data->scratch_tensor_index + kProductScalingFactors; TfLiteTensor* prod_scaling_factors = GetTemporary(context, node, kProductScalingFactors); prod_scaling_factors->type = kTfLiteFloat32; prod_scaling_factors->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(prod_scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* prod_scaling_factors_size = TfLiteIntArrayCreate(1); prod_scaling_factors_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, prod_scaling_factors, prod_scaling_factors_size)); } // Allocate a temporary tensor to store the recovered cell weights. Since // this is used for diagonal matrices, only need to store n_cell values. 
node->temporaries->data[kRecoveredCellWeights] = op_data->scratch_tensor_index + kRecoveredCellWeights; TfLiteTensor* recovered_cell_weights = GetTemporary(context, node, kRecoveredCellWeights); recovered_cell_weights->type = kTfLiteFloat32; recovered_cell_weights->allocation_type = kTfLiteArenaRw; int recovered_cell_dims[1] = {n_cell}; if (!TfLiteIntArrayEqualsArray(recovered_cell_weights->dims, 1, recovered_cell_dims)) { TfLiteIntArray* recovered_cell_weights_size = TfLiteIntArrayCreate(1); recovered_cell_weights_size->data[0] = n_cell; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, recovered_cell_weights, recovered_cell_weights_size)); } // Allocate a temporary tensor to store accumulate values for matrix // multiplication before multiplication by scaling factor node->temporaries->data[kAccumScratch] = op_data->scratch_tensor_index + kAccumScratch; TfLiteTensor* accum_scratch = GetTemporary(context, node, kAccumScratch); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {n_cell, n_batch}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2); accum_size->data[0] = n_cell; accum_size->data[1] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, accum_scratch, accum_size)); } node->temporaries->data[kInputZeroPoints] = op_data->scratch_tensor_index + kInputZeroPoints; TfLiteTensor* input_zp = GetTemporary(context, node, kInputZeroPoints); input_zp->type = kTfLiteFloat32; input_zp->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_zp->dims, 1, scaling_dims)) { TfLiteIntArray* input_zp_size = TfLiteIntArrayCreate(1); input_zp_size->data[0] = n_batch; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, input_zp, input_zp_size)); } node->temporaries->data[kOutputStateZeroPoints] = op_data->scratch_tensor_index + kOutputStateZeroPoints; TfLiteTensor* output_state_zp = GetTemporary(context, node, kOutputStateZeroPoints); output_state_zp->type = kTfLiteFloat32; output_state_zp->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(output_state_zp->dims, 1, scaling_dims)) { TfLiteIntArray* output_state_zp_size = TfLiteIntArrayCreate(1); output_state_zp_size->data[0] = n_batch; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output_state_zp, output_state_zp_size)); } node->temporaries->data[kRowSums] = op_data->scratch_tensor_index + kRowSums; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); const bool use_cifg = (input_to_input_weights == nullptr); int row_sums_rows = use_cifg ? 
6 : 8; const TfLiteTensor* projection_weights = GetOptionalInputTensor(context, node, kProjectionWeightsTensor); if (projection_weights != nullptr) { row_sums_rows += ceil(static_cast<float>(n_output) / n_cell); } TfLiteTensor* row_sums = GetTemporary(context, node, kRowSums); row_sums->type = kTfLiteInt32; row_sums->allocation_type = kTfLiteArenaRwPersistent; const int row_sums_dims[2] = {row_sums_rows, n_cell}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 2, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(2); row_sums_size->data[0] = row_sums_dims[0]; row_sums_size->data[1] = row_sums_dims[1]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } if (is_sparse_op) { op_data->ledger_initialized = false; int offset = kNumHybridTemporaryTensors; { node->temporaries->data[offset + kInputToInputWeightsLedgerOffset] = op_data->ledger_index + kInputToInputWeightsLedgerOffset; const TfLiteTensor* input_to_input_weights = GetOptionalInputTensor(context, node, kInputToInputWeightsTensor); TfLiteTensor* input_to_input_weights_ledger = &context->tensors[op_data->ledger_index + kInputToInputWeightsLedgerOffset]; auto status = make_ledger(input_to_input_weights == nullptr ? nullptr : input_to_input_weights->sparsity, context, input_to_input_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kInputToForgetWeightsLedgerOffset] = op_data->ledger_index + kInputToForgetWeightsLedgerOffset; const TfLiteTensor* input_to_forget_weights = GetInput(context, node, kInputToForgetWeightsTensor); TfLiteTensor* input_to_forget_weights_ledger = &context->tensors[op_data->ledger_index + kInputToForgetWeightsLedgerOffset]; auto status = make_ledger(input_to_forget_weights->sparsity, context, input_to_forget_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kInputToCellWeightsLedgerOffset] = op_data->ledger_index + kInputToCellWeightsLedgerOffset; const TfLiteTensor* input_to_cell_weights = GetInput(context, node, kInputToCellWeightsTensor); TfLiteTensor* input_to_cell_weights_ledger = &context->tensors[op_data->ledger_index + kInputToCellWeightsLedgerOffset]; auto status = make_ledger(input_to_cell_weights->sparsity, context, input_to_cell_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kInputToOutputWeightsLedgerOffset] = op_data->ledger_index + kInputToOutputWeightsLedgerOffset; const TfLiteTensor* input_to_output_weights = GetInput(context, node, kInputToOutputWeightsTensor); TfLiteTensor* input_to_output_weights_ledger = &context->tensors[op_data->ledger_index + kInputToOutputWeightsLedgerOffset]; auto status = make_ledger(input_to_output_weights->sparsity, context, input_to_output_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kRecurrentToInputWeightsLedgerOffset] = op_data->ledger_index + kRecurrentToInputWeightsLedgerOffset; const TfLiteTensor* recurrent_to_input_weights = GetOptionalInputTensor( context, node, kRecurrentToInputWeightsTensor); TfLiteTensor* recurrent_to_input_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToInputWeightsLedgerOffset]; auto status = make_ledger(recurrent_to_input_weights == nullptr ? 
nullptr : recurrent_to_input_weights->sparsity, context, recurrent_to_input_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries ->data[offset + kRecurrentToForgetWeightsLedgerOffset] = op_data->ledger_index + kRecurrentToForgetWeightsLedgerOffset; const TfLiteTensor* recurrent_to_forget_weights = GetInput(context, node, kRecurrentToForgetWeightsTensor); TfLiteTensor* recurrent_to_forget_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToForgetWeightsLedgerOffset]; auto status = make_ledger(recurrent_to_forget_weights->sparsity, context, recurrent_to_forget_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kRecurrentToCellWeightsLedgerOffset] = op_data->ledger_index + kRecurrentToCellWeightsLedgerOffset; const TfLiteTensor* recurrent_to_cell_weights = GetInput(context, node, kRecurrentToCellWeightsTensor); TfLiteTensor* recurrent_to_cell_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToCellWeightsLedgerOffset]; auto status = make_ledger(recurrent_to_cell_weights->sparsity, context, recurrent_to_cell_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries ->data[offset + kRecurrentToOutputWeightsLedgerOffset] = op_data->ledger_index + kRecurrentToOutputWeightsLedgerOffset; const TfLiteTensor* recurrent_to_output_weights = GetInput(context, node, kRecurrentToOutputWeightsTensor); TfLiteTensor* recurrent_to_output_weights_ledger = &context->tensors[op_data->ledger_index + kRecurrentToOutputWeightsLedgerOffset]; auto status = make_ledger(recurrent_to_output_weights->sparsity, context, recurrent_to_output_weights_ledger); if (status != kTfLiteOk) return status; } { node->temporaries->data[offset + kProjectionWeightsLedgerOffset] = op_data->ledger_index + kProjectionWeightsLedgerOffset; const TfLiteTensor* projection_weights = GetInput(context, node, kProjectionWeightsTensor); TfLiteTensor* projection_weights_ledger = &context->tensors[op_data->ledger_index + kProjectionWeightsLedgerOffset]; auto status = make_ledger(projection_weights->sparsity, context, projection_weights_ledger); if (status != kTfLiteOk) return status; } } } if (is_integer) { if (is_8x8_16) { // Integer LSTM prepare function for 8x8->16. // This code path needs 5 intermediate tensors per Op. // Populate quantization parameters. PopulateQuantizedLstmParams8x8_16(context, node, &op_data->integer_lstm_param); // Allocate scratch buffer. Need 6 16bit buffer with size n_batch * n_cell // and 1 8bit buffer with size n_batch * n_cell. We also need 1 32 bit // buffer with size n_batch * n_cell. // // Handle cifg case as well, which might save one buffer. for (int scratch_index = 0; scratch_index < 6; ++scratch_index) { node->temporaries->data[scratch_index] = op_data->scratch_tensor_index + scratch_index; TfLiteTensor* scratch_tensor = GetTemporary(context, node, scratch_index); scratch_tensor->type = kTfLiteInt16; if (scratch_index == 4) { scratch_tensor->type = kTfLiteInt8; } else if (scratch_index == 5) { scratch_tensor->type = kTfLiteInt32; } scratch_tensor->allocation_type = kTfLiteArenaRw; const int scratch_dimension[2] = {n_batch, n_cell}; if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2, scratch_dimension)) { TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2); scratch_buffer_size->data[0] = n_batch; scratch_buffer_size->data[1] = n_cell; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, scratch_buffer_size)); } } // Populate precomputed zp * weight. 
TF_LITE_ENSURE_OK(context, PopulatePrecomputedZPTimesWeightsWithBias( context, op_data, node)); } else { // Integer LSTM prepare function for 8x8->8. // This code path needs 12 intermediate tensors per Op. PopulateQuantizedLstmParams8x8_8(context, node, &op_data->integer_lstm_param); // Allocate scratch buffer. Need 6 16bit buffer with size n_batch * n_cell // and 2 8bit buffer with size n_batch * n_cell. // // Handle cifg case as well, which might save one buffer. for (int scratch_index = 0; scratch_index < 8; ++scratch_index) { node->temporaries->data[scratch_index] = op_data->scratch_tensor_index + scratch_index; TfLiteTensor* scratch_tensor = GetTemporary(context, node, scratch_index); if (scratch_index == 0 || scratch_index == 1) { scratch_tensor->type = kTfLiteInt8; } else { scratch_tensor->type = kTfLiteInt16; } scratch_tensor->allocation_type = kTfLiteArenaRw; const int scratch_dimension[2] = {n_batch, n_cell}; if (!TfLiteIntArrayEqualsArray(scratch_tensor->dims, 2, scratch_dimension)) { TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(2); scratch_buffer_size->data[0] = n_batch; scratch_buffer_size->data[1] = n_cell; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, scratch_buffer_size)); } } } } return kTfLiteOk; }
2920
True
1
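Several of the temporaries set up in the Prepare body above (scratch buffers, quantized copies, scaling factors, row sums) are obtained through GetTemporary and written to immediately. The commit message recorded in this row notes that GetTemporary can now return nullptr, so each of those sites needs a guard. The following is a compact, hedged sketch of one such site rewritten with the guard; the helper name and its parameters are illustrative, while GetTemporary, TF_LITE_ENSURE, the TfLiteIntArray helpers and kTfLiteArenaRw are the ones the kernel already relies on.

static TfLiteStatus ResizeBatchByCellTemporary(TfLiteContext* context,
                                               TfLiteNode* node,
                                               int temporary_index,
                                               TfLiteType type, int n_batch,
                                               int n_cell) {
  TfLiteTensor* t = GetTemporary(context, node, temporary_index);
  // The inserted nullptr check: bail out cleanly on a malformed model instead
  // of writing through a null pointer.
  TF_LITE_ENSURE(context, t != nullptr);
  t->type = type;
  t->allocation_type = kTfLiteArenaRw;
  const int dims[2] = {n_batch, n_cell};
  if (!TfLiteIntArrayEqualsArray(t->dims, 2, dims)) {
    TfLiteIntArray* size = TfLiteIntArrayCreate(2);
    size->data[0] = n_batch;
    size->data[1] = n_cell;
    TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, t, size));
  }
  return kTfLiteOk;
}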
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::matrix_diag::Eval
tflite::ops::builtin::matrix_diag::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* input = GetInput(context, node, kInputTensor); FillDiagHelper(input, output); return kTfLiteOk; }
49
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::matrix_diag::Eval
tflite::ops::builtin::matrix_diag::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* input = GetInput(context, node, kInputTensor); FillDiagHelper(input, output); return kTfLiteOk; }
49
True
1
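The matrix_diag Eval recorded above uses the GetOutput and GetInput results directly, which is exactly the pattern the commit message in this row says now needs a nullptr check. A minimal sketch of the guarded version follows; it assumes the file-local kInputTensor, kOutputTensor and FillDiagHelper from the kernel above, and the actual patched file may express the checks differently.

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);
  // Only reached when both lookups succeeded.
  FillDiagHelper(input, output);
  return kTfLiteOk;
}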
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::matrix_diag::Prepare
tflite::ops::builtin::matrix_diag::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteIntArray* input_dims = input->dims; int input_dims_size = input_dims->size; TF_LITE_ENSURE(context, input_dims_size >= 1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Resize the output tensor. TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1); for (int i = 0; i < input_dims_size; i++) { output_shape->data[i] = input_dims->data[i]; } // Last dimension in the output is the same as the last dimension in the // input. output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1]; output->type = input->type; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, output, output_shape)); return kTfLiteOk; }
171
True
1
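The advisory text in these rows suggests, as a stop-gap, a custom Verifier that rejects the `-1` "optional tensor" marker wherever an operator does not actually take optional inputs. A hedged sketch of that allow-list style check over a parsed flatbuffer model is shown below; the function name and the idea of keying the allow-list by opcode index are illustrative assumptions, while the subgraphs/operators/inputs/outputs accessors come from the generated TensorFlow Lite schema.

#include <set>
#include "tensorflow/lite/schema/schema_generated.h"

// Returns true when every use of the -1 marker looks legitimate:
// never on outputs, and on inputs only for operators in the allow-list.
bool OptionalIndexUseLooksSafe(const tflite::Model* model,
                               const std::set<int>& opcodes_allowing_optional) {
  if (model == nullptr || model->subgraphs() == nullptr) return false;
  for (const tflite::SubGraph* subgraph : *model->subgraphs()) {
    if (subgraph->operators() == nullptr) continue;
    for (const tflite::Operator* op : *subgraph->operators()) {
      const bool allows_optional =
          opcodes_allowing_optional.count(op->opcode_index()) != 0;
      if (op->outputs() != nullptr) {
        for (int idx : *op->outputs()) {
          if (idx == -1) return false;  // outputs must never be "optional"
        }
      }
      if (op->inputs() != nullptr) {
        for (int idx : *op->inputs()) {
          if (idx == -1 && !allows_optional) return false;
        }
      }
    }
  }
  return true;
}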
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::matrix_diag::Prepare
tflite::ops::builtin::matrix_diag::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteIntArray* input_dims = input->dims;
  int input_dims_size = input_dims->size;
  TF_LITE_ENSURE(context, input_dims_size >= 1);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // Resize the output tensor.
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1);
  for (int i = 0; i < input_dims_size; i++) {
    output_shape->data[i] = input_dims->data[i];
  }
  // Last dimension in the output is the same as the last dimension in the
  // input.
  output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1];

  output->type = input->type;
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_shape));
  return kTfLiteOk;
}
171
True
1
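The record above stores the pre-patch matrix_diag::Prepare. As a reading aid only, the following is a minimal sketch of the hardening described in the commit message: a TF_LITE_ENSURE nullptr check after every GetInput/GetOutput call, so that once those helpers return nullptr for a `-1` "optional tensor" index, Prepare fails cleanly instead of touching memory at a bogus offset. It assumes the usual TFLite kernel headers and the op-local constants (kInputTensor, kOutputTensor) and is not the verbatim diff of commit 1970c2158.

// Sketch only, not the verbatim patch from commit 1970c2158.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);   // added: reject a missing/"optional" input
  TfLiteIntArray* input_dims = input->dims;
  int input_dims_size = input_dims->size;
  TF_LITE_ENSURE(context, input_dims_size >= 1);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);  // added: reject a missing output

  // Output shape: the input shape with the last dimension repeated once more.
  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size + 1);
  for (int i = 0; i < input_dims_size; i++) {
    output_shape->data[i] = input_dims->data[i];
  }
  output_shape->data[input_dims_size] = input_dims->data[input_dims_size - 1];

  output->type = input->type;
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_shape));
  return kTfLiteOk;
}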
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::matrix_set_diag::Eval
tflite::ops::builtin::matrix_set_diag::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* diag = GetInput(context, node, kDiagonalTensor);
  FillDiagHelper(input, diag, output);
  return kTfLiteOk;
}
65
True
1
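The same hardening applied to the matrix_set_diag::Eval record above, again as a hedged sketch rather than the verbatim patch (same header assumptions; kDiagonalTensor is the op-local constant from the original kernel): each tensor is checked before FillDiagHelper dereferences it.

// Sketch only, not the verbatim patch from commit 1970c2158.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);  // added check
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);   // added check
  const TfLiteTensor* diag = GetInput(context, node, kDiagonalTensor);
  TF_LITE_ENSURE(context, diag != nullptr);    // added check
  FillDiagHelper(input, diag, output);
  return kTfLiteOk;
}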
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::matrix_set_diag::Eval
tflite::ops::builtin::matrix_set_diag::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* diag = GetInput(context, node, kDiagonalTensor);
  FillDiagHelper(input, diag, output);
  return kTfLiteOk;
}
65
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::matrix_set_diag::Prepare
tflite::ops::builtin::matrix_set_diag::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteIntArray* input_dims = input->dims;
  int input_dims_size = input_dims->size;
  TF_LITE_ENSURE(context, input_dims_size >= 2);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size);
  for (int i = 0; i < input_dims_size; i++) {
    output_shape->data[i] = input_dims->data[i];
  }

  // Resize the output tensor to the same size as the input tensor.
  output->type = input->type;
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_shape));
  return kTfLiteOk;
}
153
True
1
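A sketch of the same nullptr checks inserted into the matrix_set_diag::Prepare shown in the record above (assumptions as in the earlier sketches; not the verbatim commit diff):

// Sketch only, not the verbatim patch from commit 1970c2158.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);   // added check
  TfLiteIntArray* input_dims = input->dims;
  int input_dims_size = input_dims->size;
  TF_LITE_ENSURE(context, input_dims_size >= 2);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);  // added check

  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size);
  for (int i = 0; i < input_dims_size; i++) {
    output_shape->data[i] = input_dims->data[i];
  }

  // Resize the output tensor to the same size as the input tensor.
  output->type = input->type;
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_shape));
  return kTfLiteOk;
}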
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::matrix_set_diag::Prepare
tflite::ops::builtin::matrix_set_diag::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteIntArray* input_dims = input->dims;
  int input_dims_size = input_dims->size;
  TF_LITE_ENSURE(context, input_dims_size >= 2);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TfLiteIntArray* output_shape = TfLiteIntArrayCreate(input_dims_size);
  for (int i = 0; i < input_dims_size; i++) {
    output_shape->data[i] = input_dims->data[i];
  }

  // Resize the output tensor to the same size as the input tensor.
  output->type = input->type;
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_shape));
  return kTfLiteOk;
}
153
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::mfcc::Eval
tflite::ops::custom::mfcc::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);

  const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
  const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  const int32 sample_rate = *GetTensorData<int>(input_rate);

  const int spectrogram_channels = input_wav->dims->data[2];
  const int spectrogram_samples = input_wav->dims->data[1];
  const int audio_channels = input_wav->dims->data[0];

  internal::Mfcc mfcc;
  mfcc.set_upper_frequency_limit(params->upper_frequency_limit);
  mfcc.set_lower_frequency_limit(params->lower_frequency_limit);
  mfcc.set_filterbank_channel_count(params->filterbank_channel_count);
  mfcc.set_dct_coefficient_count(params->dct_coefficient_count);
  mfcc.Initialize(spectrogram_channels, sample_rate);

  const float* spectrogram_flat = GetTensorData<float>(input_wav);
  float* output_flat = GetTensorData<float>(output);

  for (int audio_channel = 0; audio_channel < audio_channels; ++audio_channel) {
    for (int spectrogram_sample = 0; spectrogram_sample < spectrogram_samples;
         ++spectrogram_sample) {
      const float* sample_data =
          spectrogram_flat +
          (audio_channel * spectrogram_samples * spectrogram_channels) +
          (spectrogram_sample * spectrogram_channels);
      std::vector<double> mfcc_input(sample_data,
                                     sample_data + spectrogram_channels);
      std::vector<double> mfcc_output;
      mfcc.Compute(mfcc_input, &mfcc_output);
      TF_LITE_ENSURE_EQ(context, params->dct_coefficient_count,
                        mfcc_output.size());
      float* output_data =
          output_flat +
          (audio_channel * spectrogram_samples * params->dct_coefficient_count) +
          (spectrogram_sample * params->dct_coefficient_count);
      for (int i = 0; i < params->dct_coefficient_count; ++i) {
        output_data[i] = mfcc_output[i];
      }
    }
  }
  return kTfLiteOk;
}
351
True
1
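For the longer mfcc::Eval kernel in the record above, only the tensor-lookup prologue changes under this scheme. A sketch of that prologue follows, with the unchanged MFCC computation elided behind a comment (assumptions as before; not the verbatim patch):

// Sketch only, not the verbatim patch from commit 1970c2158.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  // params is consumed by the elided MFCC computation below.
  auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);

  const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
  TF_LITE_ENSURE(context, input_wav != nullptr);   // added check
  const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
  TF_LITE_ENSURE(context, input_rate != nullptr);  // added check
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);      // added check

  // ... remainder of Eval unchanged: read the sample rate and spectrogram
  // dimensions, run internal::Mfcc over each channel/sample, and write the
  // DCT coefficients into `output`.
  return kTfLiteOk;
}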
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::mfcc::Eval
tflite::ops::custom::mfcc::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);

  const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
  const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  const int32 sample_rate = *GetTensorData<int>(input_rate);

  const int spectrogram_channels = input_wav->dims->data[2];
  const int spectrogram_samples = input_wav->dims->data[1];
  const int audio_channels = input_wav->dims->data[0];

  internal::Mfcc mfcc;
  mfcc.set_upper_frequency_limit(params->upper_frequency_limit);
  mfcc.set_lower_frequency_limit(params->lower_frequency_limit);
  mfcc.set_filterbank_channel_count(params->filterbank_channel_count);
  mfcc.set_dct_coefficient_count(params->dct_coefficient_count);
  mfcc.Initialize(spectrogram_channels, sample_rate);

  const float* spectrogram_flat = GetTensorData<float>(input_wav);
  float* output_flat = GetTensorData<float>(output);

  for (int audio_channel = 0; audio_channel < audio_channels; ++audio_channel) {
    for (int spectrogram_sample = 0; spectrogram_sample < spectrogram_samples;
         ++spectrogram_sample) {
      const float* sample_data =
          spectrogram_flat +
          (audio_channel * spectrogram_samples * spectrogram_channels) +
          (spectrogram_sample * spectrogram_channels);
      std::vector<double> mfcc_input(sample_data,
                                     sample_data + spectrogram_channels);
      std::vector<double> mfcc_output;
      mfcc.Compute(mfcc_input, &mfcc_output);
      TF_LITE_ENSURE_EQ(context, params->dct_coefficient_count,
                        mfcc_output.size());
      float* output_data =
          output_flat +
          (audio_channel * spectrogram_samples * params->dct_coefficient_count) +
          (spectrogram_sample * params->dct_coefficient_count);
      for (int i = 0; i < params->dct_coefficient_count; ++i) {
        output_data[i] = mfcc_output[i];
      }
    }
  }
  return kTfLiteOk;
}
351
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::mfcc::Prepare
tflite::ops::custom::mfcc::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
  const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  TF_LITE_ENSURE_EQ(context, NumDimensions(input_wav), 3);
  TF_LITE_ENSURE_EQ(context, NumElements(input_rate), 1);

  TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
  TF_LITE_ENSURE_TYPES_EQ(context, input_wav->type, output->type);
  TF_LITE_ENSURE_TYPES_EQ(context, input_rate->type, kTfLiteInt32);

  TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
  output_size->data[0] = input_wav->dims->data[0];
  output_size->data[1] = input_wav->dims->data[1];
  output_size->data[2] = params->dct_coefficient_count;

  return context->ResizeTensor(context, output, output_size);
}
215
True
1
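A note on the workaround mentioned in the description above: the advisory suggests a custom `Verifier` over the flatbuffer as a stop-gap before upgrading. The following is a minimal sketch of such a pre-load check, written for this write-up rather than taken from the project; `OpAllowsOptionalInputs` and its allow-list are hypothetical, and the accessors assume the generated `schema_generated.h` flatbuffer header.

// Hedged sketch of the `Verifier`-style workaround from the advisory: before
// handing a flatbuffer model to the interpreter, reject any operator that
// uses the special -1 ("optional tensor") index in a slot that is not known
// to be optional. Names and the allow-list are illustrative, not upstream API.
#include <cstdint>
#include <set>

#include "tensorflow/lite/schema/schema_generated.h"  // assumed include path

// Hypothetical allow-list of builtins whose inputs may legitimately be -1.
// Keeping such a list complete is exactly why the advisory calls this
// approach error-prone and recommends upgrading instead.
static bool OpAllowsOptionalInputs(tflite::BuiltinOperator op) {
  static const std::set<tflite::BuiltinOperator> kAllowList = {
      tflite::BuiltinOperator_FULLY_CONNECTED,  // optional bias input
      tflite::BuiltinOperator_UNIDIRECTIONAL_SEQUENCE_LSTM,
  };
  return kAllowList.count(op) != 0;
}

// Returns true only if -1 indices appear where optional inputs are expected;
// a -1 in any output slot is always rejected.
bool VerifyOptionalTensorIndices(const tflite::Model& model) {
  if (model.subgraphs() == nullptr || model.operator_codes() == nullptr) {
    return false;
  }
  for (const tflite::SubGraph* subgraph : *model.subgraphs()) {
    if (subgraph->operators() == nullptr) continue;
    for (const tflite::Operator* op : *subgraph->operators()) {
      if (op->inputs() == nullptr || op->outputs() == nullptr) continue;
      const auto* opcode = model.operator_codes()->Get(op->opcode_index());
      const tflite::BuiltinOperator builtin = opcode->builtin_code();
      for (const int32_t index : *op->outputs()) {
        if (index < 0) return false;
      }
      for (const int32_t index : *op->inputs()) {
        if (index < 0 && !OpAllowsOptionalInputs(builtin)) return false;
      }
    }
  }
  return true;
}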
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::mfcc::Prepare
tflite::ops::custom::mfcc::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav); const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input_wav), 3); TF_LITE_ENSURE_EQ(context, NumElements(input_rate), 1); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input_wav->type, output->type); TF_LITE_ENSURE_TYPES_EQ(context, input_rate->type, kTfLiteInt32); TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input_wav->dims->data[0]; output_size->data[1] = input_wav->dims->data[1]; output_size->data[2] = params->dct_coefficient_count; return context->ResizeTensor(context, output, output_size); }
215
True
1
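The commit message in this record describes inserting `nullptr` checks after every tensor accessor. Applied to the `mfcc::Prepare` kernel listed above, the hardened version would look roughly like the sketch below; the `TF_LITE_ENSURE(context, ... != nullptr)` lines are the additions, and this is a reconstruction from the commit description rather than a verbatim copy of the upstream diff.

// Sketch of mfcc::Prepare with the nullptr checks the commit message
// describes; otherwise identical to the vulnerable listing in this record.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteMfccParams*>(node->user_data);

  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input_wav = GetInput(context, node, kInputTensorWav);
  TF_LITE_ENSURE(context, input_wav != nullptr);    // added check
  const TfLiteTensor* input_rate = GetInput(context, node, kInputTensorRate);
  TF_LITE_ENSURE(context, input_rate != nullptr);   // added check
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);       // added check

  TF_LITE_ENSURE_EQ(context, NumDimensions(input_wav), 3);
  TF_LITE_ENSURE_EQ(context, NumElements(input_rate), 1);

  TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32);
  TF_LITE_ENSURE_TYPES_EQ(context, input_wav->type, output->type);
  TF_LITE_ENSURE_TYPES_EQ(context, input_rate->type, kTfLiteInt32);

  TfLiteIntArray* output_size = TfLiteIntArrayCreate(3);
  output_size->data[0] = input_wav->dims->data[0];
  output_size->data[1] = input_wav->dims->data[1];
  output_size->data[2] = params->dct_coefficient_count;

  return context->ResizeTensor(context, output, output_size);
}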
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::mirror_pad::Eval
tflite::ops::builtin::mirror_pad::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { ruy::profiler::ScopeLabel label("MirrorPad"); const TfLiteTensor* input_tensor = GetInput(context, node, 0); const TfLiteTensor* padding_matrix = GetInput(context, node, 1); auto* params = reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data); if (params == nullptr) { return kTfLiteError; } const int input_dims = NumDimensions(input_tensor); TfLiteTensor* output_tensor = GetOutput(context, node, 0); if (IsDynamicTensor(output_tensor)) { auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix); if (output_size == nullptr) { return kTfLiteError; } TF_LITE_ENSURE_STATUS( context->ResizeTensor(context, output_tensor, output_size.release())); } std::vector<int> output_dims_num_elements(input_dims, 1); std::vector<int> input_dims_num_elements(input_dims, 1); for (int i = input_dims - 2; i >= 0; i--) { output_dims_num_elements[i] = output_dims_num_elements[i + 1] * output_tensor->dims->data[i + 1]; input_dims_num_elements[i] = input_dims_num_elements[i + 1] * input_tensor->dims->data[i + 1]; } const int offset = params->mode != TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect ? 0 : 1; CpuBackendContext* cpu_backend_context = CpuBackendContext::GetFromContext(context); const int thread_count = cpu_backend_context->max_num_threads(); TfLiteStatus status = kTfLiteOk; const int output_size = NumElements(output_tensor); #define TF_LITE_MIRROR_PAD(type) \ EvalData<type> eval_data; \ eval_data.input_data = GetTensorData<type>(input_tensor); \ eval_data.input_dims = input_tensor->dims; \ eval_data.input_dims = input_tensor->dims; \ eval_data.output_dims_num_elements = &output_dims_num_elements; \ eval_data.input_dims_num_elements = &input_dims_num_elements; \ eval_data.num_dims = input_dims; \ eval_data.offset = offset; \ eval_data.output_data = GetTensorData<type>(output_tensor); \ eval_data.padding_matrix = padding_matrix; \ std::vector<MirrorPadWorkerTask<type>> tasks; \ tasks.reserve(thread_count); \ int start = 0; \ for (int i = 0; i < thread_count; ++i) { \ int end = start + (output_size - start) / (thread_count - i); \ tasks.emplace_back(MirrorPadWorkerTask<type>(&eval_data, start, end)); \ start = end; \ } \ cpu_backend_threadpool::Execute(tasks.size(), tasks.data(), \ cpu_backend_context); switch (output_tensor->type) { case kTfLiteFloat32: { TF_LITE_MIRROR_PAD(float); break; } case kTfLiteInt32: { TF_LITE_MIRROR_PAD(int32_t); break; } case kTfLiteUInt8: { TF_LITE_MIRROR_PAD(uint8_t); break; } case kTfLiteInt8: { TF_LITE_MIRROR_PAD(int8_t); break; } case kTfLiteInt64: { TF_LITE_MIRROR_PAD(int64_t); break; } default: status = kTfLiteError; break; } #undef TF_LITE_MIRROR_PAD return status; }
366
True
1
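For context on why the kernels above must now tolerate a missing tensor: after the fix, the special `-1` index no longer resolves to a slot before the start of the subgraph's tensor array but to no tensor at all. The accessor below is an illustrative stand-in written for this write-up, not the library's actual implementation; it only shows the behaviour the commit message refers to when it says `tflite::GetInput` 'will return `nullptr` in some cases'.

// Illustrative sketch only: a bounds-checked input accessor that returns
// nullptr for the -1 "optional tensor" sentinel instead of indexing outside
// the subgraph's tensor array.
const TfLiteTensor* GetInputSketch(const TfLiteContext* context,
                                   const TfLiteNode* node, int index) {
  const int tensor_index = node->inputs->data[index];
  if (tensor_index < 0 ||
      static_cast<size_t>(tensor_index) >= context->tensors_size) {
    return nullptr;  // covers the -1 sentinel and any stray index
  }
  return &context->tensors[tensor_index];
}

// Kernels then guard every use, e.g. at the top of mirror_pad::Eval:
//   const TfLiteTensor* input_tensor = GetInput(context, node, 0);
//   TF_LITE_ENSURE(context, input_tensor != nullptr);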
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::mirror_pad::Eval
tflite::ops::builtin::mirror_pad::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { ruy::profiler::ScopeLabel label("MirrorPad"); const TfLiteTensor* input_tensor = GetInput(context, node, 0); const TfLiteTensor* padding_matrix = GetInput(context, node, 1); auto* params = reinterpret_cast<TfLiteMirrorPaddingParams*>(node->builtin_data); if (params == nullptr) { return kTfLiteError; } const int input_dims = NumDimensions(input_tensor); TfLiteTensor* output_tensor = GetOutput(context, node, 0); if (IsDynamicTensor(output_tensor)) { auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix); if (output_size == nullptr) { return kTfLiteError; } TF_LITE_ENSURE_STATUS( context->ResizeTensor(context, output_tensor, output_size.release())); } std::vector<int> output_dims_num_elements(input_dims, 1); std::vector<int> input_dims_num_elements(input_dims, 1); for (int i = input_dims - 2; i >= 0; i--) { output_dims_num_elements[i] = output_dims_num_elements[i + 1] * output_tensor->dims->data[i + 1]; input_dims_num_elements[i] = input_dims_num_elements[i + 1] * input_tensor->dims->data[i + 1]; } const int offset = params->mode != TfLiteMirrorPaddingMode::kTfLiteMirrorPaddingReflect ? 0 : 1; CpuBackendContext* cpu_backend_context = CpuBackendContext::GetFromContext(context); const int thread_count = cpu_backend_context->max_num_threads(); TfLiteStatus status = kTfLiteOk; const int output_size = NumElements(output_tensor); #define TF_LITE_MIRROR_PAD(type) \ EvalData<type> eval_data; \ eval_data.input_data = GetTensorData<type>(input_tensor); \ eval_data.input_dims = input_tensor->dims; \ eval_data.input_dims = input_tensor->dims; \ eval_data.output_dims_num_elements = &output_dims_num_elements; \ eval_data.input_dims_num_elements = &input_dims_num_elements; \ eval_data.num_dims = input_dims; \ eval_data.offset = offset; \ eval_data.output_data = GetTensorData<type>(output_tensor); \ eval_data.padding_matrix = padding_matrix; \ std::vector<MirrorPadWorkerTask<type>> tasks; \ tasks.reserve(thread_count); \ int start = 0; \ for (int i = 0; i < thread_count; ++i) { \ int end = start + (output_size - start) / (thread_count - i); \ tasks.emplace_back(MirrorPadWorkerTask<type>(&eval_data, start, end)); \ start = end; \ } \ cpu_backend_threadpool::Execute(tasks.size(), tasks.data(), \ cpu_backend_context); switch (output_tensor->type) { case kTfLiteFloat32: { TF_LITE_MIRROR_PAD(float); break; } case kTfLiteInt32: { TF_LITE_MIRROR_PAD(int32_t); break; } case kTfLiteUInt8: { TF_LITE_MIRROR_PAD(uint8_t); break; } case kTfLiteInt8: { TF_LITE_MIRROR_PAD(int8_t); break; } case kTfLiteInt64: { TF_LITE_MIRROR_PAD(int64_t); break; } default: status = kTfLiteError; break; } #undef TF_LITE_MIRROR_PAD return status; }
366
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::mirror_pad::Prepare
tflite::ops::builtin::mirror_pad::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input_tensor = GetInput(context, node, 0); const TfLiteTensor* padding_matrix = GetInput(context, node, 1); TfLiteTensor* output_tensor = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2); TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0), NumDimensions(input_tensor)); if (!IsConstantTensor(padding_matrix)) { SetTensorToDynamic(output_tensor); return kTfLiteOk; } // We have constant padding, so we can infer output size. auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix); if (output_size == nullptr) { return kTfLiteError; } return context->ResizeTensor(context, output_tensor, output_size.release()); }
137
True
1
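For the `mirror_pad::Prepare` kernel in this record, the hardening described in the commit message amounts to three added `nullptr` checks before the tensors are dereferenced. A sketch, reconstructed from the commit description rather than copied from the upstream patch:

// Sketch of mirror_pad::Prepare with the added nullptr checks; the rest of
// the logic matches the vulnerable listing in this record.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input_tensor = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input_tensor != nullptr);     // added check
  const TfLiteTensor* padding_matrix = GetInput(context, node, 1);
  TF_LITE_ENSURE(context, padding_matrix != nullptr);   // added check
  TfLiteTensor* output_tensor = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output_tensor != nullptr);    // added check

  TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2);
  TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0),
                    NumDimensions(input_tensor));

  if (!IsConstantTensor(padding_matrix)) {
    SetTensorToDynamic(output_tensor);
    return kTfLiteOk;
  }
  // Constant padding, so the output size can be inferred up front.
  auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix);
  if (output_size == nullptr) {
    return kTfLiteError;
  }
  return context->ResizeTensor(context, output_tensor, output_size.release());
}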
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::mirror_pad::Prepare
tflite::ops::builtin::mirror_pad::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input_tensor = GetInput(context, node, 0); const TfLiteTensor* padding_matrix = GetInput(context, node, 1); TfLiteTensor* output_tensor = GetOutput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(padding_matrix), 2); TF_LITE_ENSURE_EQ(context, SizeOfDimension(padding_matrix, 0), NumDimensions(input_tensor)); if (!IsConstantTensor(padding_matrix)) { SetTensorToDynamic(output_tensor); return kTfLiteOk; } // We have constant padding, so we can infer output size. auto output_size = GetPaddedOutputShape(input_tensor, padding_matrix); if (output_size == nullptr) { return kTfLiteError; } return context->ResizeTensor(context, output_tensor, output_size.release()); }
137
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::mul::Eval
tflite::ops::builtin::mul::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { EvalMul<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { TF_LITE_ENSURE_OK( context, EvalQuantized<kernel_type>(context, node, params, data, input1, input2, output)); } else { context->ReportError(context, "Mul only supports FLOAT32, INT32 and quantized UINT8," " INT8 and INT16 now, got %d.", output->type); return kTfLiteError; } return kTfLiteOk; }
190
True
1
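The `mul::Eval` kernel in this record follows the same pattern: the three tensors obtained from `GetInput`/`GetOutput` are validated before `output->type` is read. A sketch of the hardened version, again reconstructed from the commit message rather than copied from the upstream diff:

// Sketch of mul::Eval with the added nullptr checks; dispatch logic is
// unchanged from the vulnerable listing in this record.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data);
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
  TF_LITE_ENSURE(context, input1 != nullptr);  // added check
  const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
  TF_LITE_ENSURE(context, input2 != nullptr);  // added check
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);  // added check

  if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) {
    EvalMul<kernel_type>(context, node, params, data, input1, input2, output);
  } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
             output->type == kTfLiteInt16) {
    TF_LITE_ENSURE_OK(
        context, EvalQuantized<kernel_type>(context, node, params, data,
                                            input1, input2, output));
  } else {
    context->ReportError(context,
                         "Mul only supports FLOAT32, INT32 and quantized UINT8,"
                         " INT8 and INT16 now, got %d.",
                         output->type);
    return kTfLiteError;
  }

  return kTfLiteOk;
}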
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::mul::Eval
tflite::ops::builtin::mul::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { EvalMul<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { TF_LITE_ENSURE_OK( context, EvalQuantized<kernel_type>(context, node, params, data, input1, input2, output)); } else { context->ReportError(context, "Mul only supports FLOAT32, INT32 and quantized UINT8," " INT8 and INT16 now, got %d.", output->type); return kTfLiteError; } return kTfLiteOk; }
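A hedged sketch of how the three tensor lookups at the top of mul::Eval above might be guarded, following the commit message's pattern (illustrative, not necessarily the verbatim released fix):

// Guard each lookup before the type dispatch that follows in the original body.
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
TF_LITE_ENSURE(context, input1 != nullptr);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TF_LITE_ENSURE(context, input2 != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
// The switch on output->type then proceeds as in the original function.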
190
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::mul::Prepare
tflite::ops::builtin::mul::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); double real_multiplier = input1->params.scale * input2->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, output_size); }
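A hedged sketch of the equivalent guards for mul::Prepare above; the broadcast and quantization logic that follows is unchanged (illustrative, not the verbatim patch):

// Guard the tensor lookups before they are used in type and shape checks.
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
TF_LITE_ENSURE(context, input1 != nullptr);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TF_LITE_ENSURE(context, input2 != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
// TF_LITE_ENSURE_TYPES_EQ and the broadcast/quantization logic proceed as before.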
267
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::mul::Prepare
tflite::ops::builtin::mul::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteMulParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); double real_multiplier = input1->params.scale * input2->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, output_size); }
267
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::neg::Eval
tflite::ops::builtin::neg::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (input->type) { case kTfLiteInt64: reference_ops::Negate( GetTensorShape(input), GetTensorData<int64_t>(input), GetTensorShape(output), GetTensorData<int64_t>(output)); break; case kTfLiteInt32: reference_ops::Negate( GetTensorShape(input), GetTensorData<int32_t>(input), GetTensorShape(output), GetTensorData<int32_t>(output)); break; case kTfLiteFloat32: reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); break; default: context->ReportError( context, "Neg only currently supports int64, int32, and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; }
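A hedged sketch of guarded lookups for neg::Eval above, again following the commit message's pattern (illustrative only):

// Validate both tensors before the type switch dereferences them.
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
// The switch on input->type then dispatches to reference_ops::Negate as before.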
176
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::neg::Eval
tflite::ops::builtin::neg::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (input->type) { case kTfLiteInt64: reference_ops::Negate( GetTensorShape(input), GetTensorData<int64_t>(input), GetTensorShape(output), GetTensorData<int64_t>(output)); break; case kTfLiteInt32: reference_ops::Negate( GetTensorShape(input), GetTensorData<int32_t>(input), GetTensorShape(output), GetTensorData<int32_t>(output)); break; case kTfLiteFloat32: reference_ops::Negate(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); break; default: context->ReportError( context, "Neg only currently supports int64, int32, and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; }
176
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::neg::Prepare
tflite::ops::builtin::neg::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
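A hedged sketch of the corresponding guards for neg::Prepare above (illustrative, not the verbatim patch):

// Validate both tensors before output->type and input->dims are touched.
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
// output->type assignment and ResizeTensor then proceed as in the original.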
88
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::neg::Prepare
tflite::ops::builtin::neg::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
88
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::numeric_verify::Eval
tflite::ops::custom::numeric_verify::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* op_data = reinterpret_cast<OpData*>(node->user_data); OpContext op_context(context, node); if (IsConstantTensor(op_context.input) && op_data->float_input_initialized) { return kTfLiteOk; } // Dequantize the input TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0); auto status = builtin::dequantize::DequantizeImpl<kernel_type>( context, node, op_context.input, dequantized); if (status != kTfLiteOk) { return status; } if (IsConstantTensor(op_context.input)) { op_data->float_input_initialized = true; } // If the tolerance is very small, we only display the stats of the diff. if (op_data->tolerance < 0.1) { std::vector<double> diffs, temp; diffs.reserve(NumElements(dequantized)); temp.reserve(NumElements(dequantized)); for (int i = 0; i < NumElements(op_context.ref); ++i) { float dequant = GetTensorData<float>(dequantized)[i]; float reference = GetTensorData<float>(op_context.ref)[i]; diffs.push_back(dequant - reference); } double mean = std::accumulate(diffs.begin(), diffs.end(), 0.0) / diffs.size(); double max_diff = 0.0; std::transform(diffs.begin(), diffs.end(), temp.begin(), [mean, &max_diff](double x) { max_diff = std::max(max_diff, std::abs(x)); return x - mean; }); double sq_sum = std::inner_product(temp.begin(), temp.end(), temp.begin(), 0.0); double std = std::sqrt(sq_sum / diffs.size()); TF_LITE_KERNEL_LOG( context, "std: %f, mean: %f, max_diff: %f (scale: %f, zero_point: %d).\n", std, mean, max_diff, op_context.input->params.scale, op_context.input->params.zero_point); return kTfLiteOk; } // Verify the dequantized output. auto max_diff = op_data->tolerance * op_context.input->params.scale; for (int i = 0; i < NumElements(op_context.ref); ++i) { int32_t value = GetQuantizedValue(op_context, i); float dequant = GetTensorData<float>(dequantized)[i]; float reference = GetTensorData<float>(op_context.ref)[i]; float diff = std::abs(reference - dequant); if (diff > max_diff) { TF_LITE_KERNEL_LOG( context, "Mismatch: %f is quantized to %d with (%f, %d). " "abs(%f - %f) = %f > %f (tolerance) range percentage %f.\n", reference, value, op_context.input->params.scale, op_context.input->params.zero_point, reference, dequant, diff, max_diff, op_data->tolerance); return kTfLiteError; } } return kTfLiteOk; }
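numeric_verify::Eval above obtains its scratch tensor via GetTemporary, which the commit message notes can also return `nullptr`; a hedged sketch of the corresponding guard (illustrative only):

// Confirm the temporary tensor exists before running the dequantize step.
TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0);
TF_LITE_ENSURE(context, dequantized != nullptr);
// DequantizeImpl and the tolerance comparison then proceed as in the original.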
533
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
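For the out-of-bounds write case above, a correspondingly small sketch (again hypothetical, not TensorFlow code) shows a guarded write that refuses indices outside the buffer instead of producing the limited "write gadget" described in this CVE:

#include <cstdio>
#include <vector>

// Hypothetical guarded write illustrating the CWE-787 case above: the CVE's
// write gadget comes from letting a -1 index land just before a heap-allocated
// array; the range check below rejects it instead.
bool WriteAt(std::vector<float>& buffer, int index, float value) {
  if (index < 0 || static_cast<size_t>(index) >= buffer.size()) {
    return false;  // out-of-bounds write refused
  }
  buffer[static_cast<size_t>(index)] = value;
  return true;
}

int main() {
  std::vector<float> tensor_data(4, 0.0f);
  if (!WriteAt(tensor_data, -1, 1.0f)) {
    std::printf("rejected out-of-bounds write at index -1\n");
  }
  return 0;
}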
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::numeric_verify::Eval
tflite::ops::custom::numeric_verify::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* op_data = reinterpret_cast<OpData*>(node->user_data); OpContext op_context(context, node); if (IsConstantTensor(op_context.input) && op_data->float_input_initialized) { return kTfLiteOk; } // Dequantize the input TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0); auto status = builtin::dequantize::DequantizeImpl<kernel_type>( context, node, op_context.input, dequantized); if (status != kTfLiteOk) { return status; } if (IsConstantTensor(op_context.input)) { op_data->float_input_initialized = true; } // If the tolerance is very small, we only display the stats of the diff. if (op_data->tolerance < 0.1) { std::vector<double> diffs, temp; diffs.reserve(NumElements(dequantized)); temp.reserve(NumElements(dequantized)); for (int i = 0; i < NumElements(op_context.ref); ++i) { float dequant = GetTensorData<float>(dequantized)[i]; float reference = GetTensorData<float>(op_context.ref)[i]; diffs.push_back(dequant - reference); } double mean = std::accumulate(diffs.begin(), diffs.end(), 0.0) / diffs.size(); double max_diff = 0.0; std::transform(diffs.begin(), diffs.end(), temp.begin(), [mean, &max_diff](double x) { max_diff = std::max(max_diff, std::abs(x)); return x - mean; }); double sq_sum = std::inner_product(temp.begin(), temp.end(), temp.begin(), 0.0); double std = std::sqrt(sq_sum / diffs.size()); TF_LITE_KERNEL_LOG( context, "std: %f, mean: %f, max_diff: %f (scale: %f, zero_point: %d).\n", std, mean, max_diff, op_context.input->params.scale, op_context.input->params.zero_point); return kTfLiteOk; } // Verify the dequantized output. auto max_diff = op_data->tolerance * op_context.input->params.scale; for (int i = 0; i < NumElements(op_context.ref); ++i) { int32_t value = GetQuantizedValue(op_context, i); float dequant = GetTensorData<float>(dequantized)[i]; float reference = GetTensorData<float>(op_context.ref)[i]; float diff = std::abs(reference - dequant); if (diff > max_diff) { TF_LITE_KERNEL_LOG( context, "Mismatch: %f is quantized to %d with (%f, %d). " "abs(%f - %f) = %f > %f (tolerance) range percentage %f.\n", reference, value, op_context.input->params.scale, op_context.input->params.zero_point, reference, dequant, diff, max_diff, op_data->tolerance); return kTfLiteError; } } return kTfLiteOk; }
533
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::numeric_verify::Prepare
tflite::ops::custom::numeric_verify::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); OpContext op_context(context, node); TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 || op_context.input->type == kTfLiteInt8 || op_context.input->type == kTfLiteInt16 || op_context.input->type == kTfLiteFloat16); TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32); op_data->max_diff = op_data->tolerance * op_context.input->params.scale; switch (op_context.input->type) { case kTfLiteUInt8: case kTfLiteInt8: op_data->max_diff *= (1 << 8); break; case kTfLiteInt16: op_data->max_diff *= (1 << 16); break; default: break; } // Allocate tensor to store the dequantized inputs. if (op_data->cache_tensor_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &op_data->cache_tensor_id)); } TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(1); node->temporaries->data[0] = op_data->cache_tensor_id; TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0); dequantized->type = op_context.ref->type; dequantized->allocation_type = kTfLiteDynamic; TF_LITE_ENSURE_OK(context, context->ResizeTensor( context, dequantized, TfLiteIntArrayCopy(op_context.input->dims))); return kTfLiteOk; }
289
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::numeric_verify::Prepare
tflite::ops::custom::numeric_verify::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); OpContext op_context(context, node); TF_LITE_ENSURE(context, op_context.input->type == kTfLiteUInt8 || op_context.input->type == kTfLiteInt8 || op_context.input->type == kTfLiteInt16 || op_context.input->type == kTfLiteFloat16); TF_LITE_ENSURE(context, op_context.ref->type == kTfLiteFloat32); op_data->max_diff = op_data->tolerance * op_context.input->params.scale; switch (op_context.input->type) { case kTfLiteUInt8: case kTfLiteInt8: op_data->max_diff *= (1 << 8); break; case kTfLiteInt16: op_data->max_diff *= (1 << 16); break; default: break; } // Allocate tensor to store the dequantized inputs. if (op_data->cache_tensor_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &op_data->cache_tensor_id)); } TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(1); node->temporaries->data[0] = op_data->cache_tensor_id; TfLiteTensor* dequantized = GetTemporary(context, node, /*index=*/0); dequantized->type = op_context.ref->type; dequantized->allocation_type = kTfLiteDynamic; TF_LITE_ENSURE_OK(context, context->ResizeTensor( context, dequantized, TfLiteIntArrayCopy(op_context.input->dims))); return kTfLiteOk; }
289
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pack::Eval
tflite::ops::builtin::pack::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLitePackParams* data = reinterpret_cast<TfLitePackParams*>(node->builtin_data); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteFloat32: { return PackImpl<float>(context, node, output, data->values_count, data->axis); } case kTfLiteUInt8: { return PackImpl<uint8_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt8: { return PackImpl<int8_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt16: { return PackImpl<int16_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt32: { return PackImpl<int32_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt64: { return PackImpl<int64_t>(context, node, output, data->values_count, data->axis); } default: { context->ReportError(context, "Type '%s' is not supported by pack.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } return kTfLiteOk; }
231
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pack::Eval
tflite::ops::builtin::pack::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLitePackParams* data = reinterpret_cast<TfLitePackParams*>(node->builtin_data); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteFloat32: { return PackImpl<float>(context, node, output, data->values_count, data->axis); } case kTfLiteUInt8: { return PackImpl<uint8_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt8: { return PackImpl<int8_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt16: { return PackImpl<int16_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt32: { return PackImpl<int32_t>(context, node, output, data->values_count, data->axis); } case kTfLiteInt64: { return PackImpl<int64_t>(context, node, output, data->values_count, data->axis); } default: { context->ReportError(context, "Type '%s' is not supported by pack.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } return kTfLiteOk; }
231
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pack::Prepare
tflite::ops::builtin::pack::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TfLitePackParams* data = reinterpret_cast<TfLitePackParams*>(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input0 = GetInput(context, node, 0); const int dimension_size = NumDimensions(input0) + 1; if (data->axis < 0) { data->axis += dimension_size; } TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis); TF_LITE_ENSURE(context, data->axis >= 0); if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32 && input0->type != kTfLiteUInt8 && input0->type != kTfLiteInt8 && input0->type != kTfLiteInt16 && input0->type != kTfLiteInt64) { context->ReportError(context, "Type '%s' is not supported by pack.", TfLiteTypeGetName(input0->type)); return kTfLiteError; } // Make sure all inputs have the same shape and type. for (int i = 1; i < data->values_count; ++i) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE(context, HaveSameShapes(input0, input)); TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type); } // Resize output. rank R will become rank R + 1 const TfLiteIntArray* input_shape = input0->dims; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dimension_size); int i = 0; for (int index = 0; index < dimension_size; ++index) { if (index == data->axis) { output_shape->data[index] = data->values_count; } else { output_shape->data[index] = input_shape->data[i++]; } } TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input0->type); // Guarantee input/output quantization params match as we do not support // packing quantized tensors. for (int i = 0; i < data->values_count; i++) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE_EQ(context, input->params.zero_point, output->params.zero_point); TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale); } return context->ResizeTensor(context, output, output_shape); }
417
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pack::Prepare
tflite::ops::builtin::pack::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TfLitePackParams* data = reinterpret_cast<TfLitePackParams*>(node->builtin_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), data->values_count); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input0 = GetInput(context, node, 0); const int dimension_size = NumDimensions(input0) + 1; if (data->axis < 0) { data->axis += dimension_size; } TF_LITE_ENSURE(context, NumDimensions(input0) >= data->axis); TF_LITE_ENSURE(context, data->axis >= 0); if (input0->type != kTfLiteInt32 && input0->type != kTfLiteFloat32 && input0->type != kTfLiteUInt8 && input0->type != kTfLiteInt8 && input0->type != kTfLiteInt16 && input0->type != kTfLiteInt64) { context->ReportError(context, "Type '%s' is not supported by pack.", TfLiteTypeGetName(input0->type)); return kTfLiteError; } // Make sure all inputs have the same shape and type. for (int i = 1; i < data->values_count; ++i) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE(context, HaveSameShapes(input0, input)); TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type); } // Resize output. rank R will become rank R + 1 const TfLiteIntArray* input_shape = input0->dims; TfLiteIntArray* output_shape = TfLiteIntArrayCreate(dimension_size); int i = 0; for (int index = 0; index < dimension_size; ++index) { if (index == data->axis) { output_shape->data[index] = data->values_count; } else { output_shape->data[index] = input_shape->data[i++]; } } TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input0->type); // Guarantee input/output quantization params match as we do not support // packing quantized tensors. for (int i = 0; i < data->values_count; i++) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE_EQ(context, input->params.zero_point, output->params.zero_point); TF_LITE_ENSURE_EQ(context, input->params.scale, output->params.scale); } return context->ResizeTensor(context, output, output_shape); }
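For reference, a hedged sketch of how the guard pattern from the commit applies to this `Prepare` routine for the PACK op; only the tensor lookups are shown, the surrounding shape and type logic stays as above. This illustrates the pattern rather than reproducing the exact upstream diff.

```cpp
// Every tensor fetched for the PACK op is validated before use, so a
// malicious `-1` index in the flatbuffer can no longer yield an
// out-of-bounds pointer that the shape checks below would dereference.
const TfLiteTensor* input0 = GetInput(context, node, 0);
TF_LITE_ENSURE(context, input0 != nullptr);

for (int i = 1; i < data->values_count; ++i) {
  const TfLiteTensor* input = GetInput(context, node, i);
  TF_LITE_ENSURE(context, input != nullptr);
  TF_LITE_ENSURE(context, HaveSameShapes(input0, input));
  TF_LITE_ENSURE_TYPES_EQ(context, input0->type, input->type);
}

TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
```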
417
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pooling::AverageEval
tflite::ops::builtin::pooling::AverageEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: AverageEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
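A hedged fragment of the same guard applied to this `AverageEval` entry point; because both the float and the quantized helpers receive the raw pointers, the checks must precede the type dispatch. The switch over `input->type` that follows is unchanged.

```cpp
// Validate both tensor handles before the type dispatch touches them.
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, 0);
TF_LITE_ENSURE(context, input != nullptr);
```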
191
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pooling::AverageEval
tflite::ops::builtin::pooling::AverageEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus AverageEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: AverageEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: AverageEvalQuantizedUint8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: AverageEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: AverageEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
191
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pooling::GenericPrepare
tflite::ops::builtin::pooling::GenericPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); int batches = input->dims->data[0]; int height = input->dims->data[1]; int width = input->dims->data[2]; int channels_out = input->dims->data[3]; // Matching GetWindowedOutputSize in TensorFlow. auto padding = params->padding; int out_width, out_height; data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, 1, 1, height, width, params->filter_height, params->filter_width, padding, &out_height, &out_width); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (pool_type == kAverage || pool_type == kMax) { TFLITE_DCHECK_LE(std::abs(input->params.scale - output->params.scale), 1.0e-6); TFLITE_DCHECK_EQ(input->params.zero_point, output->params.zero_point); } if (pool_type == kL2) { // We currently don't have a quantized implementation of L2Pool TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); } } TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = batches; output_size->data[1] = out_height; output_size->data[2] = out_width; output_size->data[3] = channels_out; return context->ResizeTensor(context, output, output_size); }
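For this `GenericPrepare`, the guards would sit immediately after the two lookups, before `NumDimensions(input)`, `input->dims` and `output->type` are dereferenced. A hedged fragment illustrating the placement, not the exact upstream diff:

```cpp
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, output != nullptr);  // guards the later ResizeTensor call
const TfLiteTensor* input = GetInput(context, node, 0);
TF_LITE_ENSURE(context, input != nullptr);   // guards NumDimensions/dims accesses
```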
362
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pooling::GenericPrepare
tflite::ops::builtin::pooling::GenericPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus GenericPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); int batches = input->dims->data[0]; int height = input->dims->data[1]; int width = input->dims->data[2]; int channels_out = input->dims->data[3]; // Matching GetWindowedOutputSize in TensorFlow. auto padding = params->padding; int out_width, out_height; data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, 1, 1, height, width, params->filter_height, params->filter_width, padding, &out_height, &out_width); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (pool_type == kAverage || pool_type == kMax) { TFLITE_DCHECK_LE(std::abs(input->params.scale - output->params.scale), 1.0e-6); TFLITE_DCHECK_EQ(input->params.zero_point, output->params.zero_point); } if (pool_type == kL2) { // We currently don't have a quantized implementation of L2Pool TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); } } TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = batches; output_size->data[1] = out_height; output_size->data[2] = out_width; output_size->data[3] = channels_out; return context->ResizeTensor(context, output, output_size); }
362
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pooling::L2Eval
tflite::ops::builtin::pooling::L2Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: L2EvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: // We don't have a quantized implementation, so just fall through to the // 'default' case. default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } return kTfLiteOk; }
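The same two-line guard applies to `L2Eval`; note that even the unsupported-type error path reads `input->type`, so the check must come before the switch. Fragment shown for completeness, as an illustration rather than the exact upstream diff:

```cpp
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, 0);
TF_LITE_ENSURE(context, input != nullptr);  // ReportError below reads input->type
```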
124
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pooling::L2Eval
tflite::ops::builtin::pooling::L2Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus L2Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: L2EvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: // We don't have a quantized implementation, so just fall through to the // 'default' case. default: context->ReportError(context, "Type %d not currently supported.", input->type); return kTfLiteError; } return kTfLiteOk; }
124
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pooling::MaxEval
tflite::ops::builtin::pooling::MaxEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: MaxEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
191
True
1
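The record above captures the pre-patch `tflite::ops::builtin::pooling::MaxEval`, which dereferences the results of `GetInput`/`GetOutput` without any check. As a minimal, self-contained sketch (hypothetical `Tensor` and getter names, not TensorFlow Lite's real API), the snippet below illustrates the mechanism the advisory describes: a `-1` "optional tensor" index applied directly to the subgraph's tensor array addresses memory outside it, whereas a checked getter turns the same index into a `nullptr` the caller can test.

```cpp
#include <cstdio>
#include <vector>

// Simplified stand-ins for a subgraph's tensor table and an index-based
// getter; the names are illustrative only, not TensorFlow Lite's real API.
struct Tensor {
  float* data;
  int size;
};

// Pre-patch flavour: the index is trusted, so index == -1 (the flatbuffer
// "optional tensor" marker) would address memory just before the array.
Tensor* GetTensorUnchecked(std::vector<Tensor>& tensors, int index) {
  return &tensors[index];  // out-of-bounds for index == -1 (never called here)
}

// Post-patch flavour: out-of-range indices (including -1) map to nullptr,
// so the caller must check before dereferencing.
Tensor* GetTensorChecked(std::vector<Tensor>& tensors, int index) {
  if (index < 0 || index >= static_cast<int>(tensors.size())) return nullptr;
  return &tensors[index];
}

int main() {
  std::vector<Tensor> tensors(3, Tensor{nullptr, 0});
  if (GetTensorChecked(tensors, -1) == nullptr) {
    std::printf("optional/out-of-range tensor index rejected\n");
  }
  return 0;
}
```

Either way of closing the gap removes the fixed-offset out-of-bounds read/write gadget the advisory mentions, at the cost of an explicit failure path in every kernel.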
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pooling::MaxEval
tflite::ops::builtin::pooling::MaxEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus MaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLitePoolParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: MaxEvalFloat<kernel_type>(context, node, params, data, input, output); break; case kTfLiteUInt8: MaxEvalQuantizedUInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt8: MaxEvalQuantizedInt8<kernel_type>(context, node, params, data, input, output); break; case kTfLiteInt16: MaxEvalQuantizedInt16<kernel_type>(context, node, params, data, input, output); break; default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
191
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pow::Eval
tflite::ops::builtin::pow::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteInt32: { // TensorFlow does not support negative for int32. TF_LITE_ENSURE_OK(context, CheckValue(context, input2)); PowImpl<int32_t>(input1, input2, output, data->requires_broadcast); break; } case kTfLiteFloat32: { PowImpl<float>(input1, input2, output, data->requires_broadcast); break; } default: { context->ReportError(context, "Unsupported data type: %d", output->type); return kTfLiteError; } } return kTfLiteOk; }
157
True
1
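The advisory's suggested workaround is a custom `Verifier` at model-loading time so that only operators which genuinely accept optional inputs may use the `-1` marker. Below is a minimal sketch of that allow-list check over plain data structures; all struct and function names are hypothetical, and a real verifier would walk the flatbuffer model's subgraphs, operators and tensor table instead.

```cpp
#include <cstdio>
#include <set>
#include <vector>

// Hypothetical stand-ins for one subgraph's operator list; a real verifier
// would read these from the flatbuffer model instead.
struct OpEntry {
  int opcode;               // which builtin operator this is
  std::vector<int> inputs;  // tensor indices; -1 means "optional / absent"
};

// Allow-list check in the spirit of the advisory's workaround: -1 is only
// accepted for operators known to take optional inputs, and every other
// index must fall inside the subgraph's tensor table.
bool IndicesAreValid(const std::vector<OpEntry>& ops, int num_tensors,
                     const std::set<int>& ops_with_optional_inputs) {
  for (const OpEntry& op : ops) {
    for (int idx : op.inputs) {
      if (idx == -1) {
        if (ops_with_optional_inputs.count(op.opcode) == 0) return false;
      } else if (idx < 0 || idx >= num_tensors) {
        return false;
      }
    }
  }
  return true;
}

int main() {
  std::vector<OpEntry> ops = {{/*opcode=*/7, {0, -1}}, {/*opcode=*/3, {1, 2}}};
  bool ok = IndicesAreValid(ops, /*num_tensors=*/3, /*optional ops=*/{7});
  std::printf("%s\n", ok ? "model indices accepted" : "model rejected");
  return 0;
}
```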
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pow::Eval
tflite::ops::builtin::pow::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); switch (output->type) { case kTfLiteInt32: { // TensorFlow does not support negative for int32. TF_LITE_ENSURE_OK(context, CheckValue(context, input2)); PowImpl<int32_t>(input1, input2, output, data->requires_broadcast); break; } case kTfLiteFloat32: { PowImpl<float>(input1, input2, output, data->requires_broadcast); break; } default: { context->ReportError(context, "Unsupported data type: %d", output->type); return kTfLiteError; } } return kTfLiteOk; }
157
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pow::Prepare
tflite::ops::builtin::pow::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const TfLiteType type = input1->type; if (type != kTfLiteInt32 && type != kTfLiteFloat32) { TF_LITE_KERNEL_LOG(context, "Unsupported data type %s.", TfLiteTypeGetName(type)); return kTfLiteError; } output->type = type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
213
True
1
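The patch commit quoted in these records inserts `nullptr` checks after every tensor lookup so a kernel fails with an error status instead of dereferencing a missing tensor. Below is a self-contained sketch of that "check or bail out" pattern using a hypothetical `ENSURE` macro loosely modelled on `TF_LITE_ENSURE`; it is not the verbatim patch.

```cpp
#include <cstdio>

// A "check or bail out" sketch of the pattern the patch commit describes.
// ENSURE is a hypothetical macro loosely modelled on TF_LITE_ENSURE: if the
// fetched tensor is nullptr, return an error status instead of dereferencing.
enum Status { kOk, kError };

#define ENSURE(cond)            \
  do {                          \
    if (!(cond)) return kError; \
  } while (0)

struct Tensor {
  int type;
};

Tensor* GetInput(int index);  // defined below; -1 (optional marker) -> nullptr

Status Prepare(int input_index) {
  Tensor* input = GetInput(input_index);
  ENSURE(input != nullptr);  // the inserted nullptr check
  return input->type == 0 ? kOk : kError;
}

Tensor* GetInput(int index) {
  static Tensor t{0};
  return index >= 0 ? &t : nullptr;
}

int main() {
  std::printf("index  0 -> %s\n", Prepare(0) == kOk ? "ok" : "error");
  std::printf("index -1 -> %s\n", Prepare(-1) == kOk ? "ok" : "error");
  return 0;
}
```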
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::pow::Prepare
tflite::ops::builtin::pow::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); const TfLiteType type = input1->type; if (type != kTfLiteInt32 && type != kTfLiteFloat32) { TF_LITE_KERNEL_LOG(context, "Unsupported data type %s.", TfLiteTypeGetName(type)); return kTfLiteError; } output->type = type; data->requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (data->requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
213
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::quantize::Eval
tflite::ops::builtin::quantize::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = static_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const RuntimeShape input_shape = GetTensorShape(input); const RuntimeShape output_shape = GetTensorShape(output); switch (input->type) { case kTfLiteFloat32: { // Float to int8, uint8, int16. tflite::QuantizationParams op_params; op_params.zero_point = output->params.zero_point; op_params.scale = output->params.scale; const float* input_data = GetTensorData<float>(input); switch (output->type) { case kTfLiteInt8: AffineQuantize<kernel_type>(op_params, input_shape, input_data, output_shape, GetTensorData<int8_t>(output)); return kTfLiteOk; case kTfLiteUInt8: AffineQuantize<kernel_type>(op_params, input_shape, input_data, output_shape, GetTensorData<uint8_t>(output)); return kTfLiteOk; case kTfLiteInt16: AffineQuantize<kernel_type>(op_params, input_shape, input_data, output_shape, GetTensorData<int16_t>(output)); return kTfLiteOk; default: ReportError(context, input->type, output->type); return kTfLiteError; } } case kTfLiteInt16: { // int16 to int8 or int16. switch (output->type) { case kTfLiteInt8: Requantize<kernel_type>(GetTensorData<int16_t>(input), MatchingFlatSize(input_shape, output_shape), data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<int8_t>(output)); return kTfLiteOk; case kTfLiteInt16: Requantize<kernel_type>(GetTensorData<int16_t>(input), MatchingFlatSize(input_shape, output_shape), data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<int16_t>(output)); return kTfLiteOk; default: ReportError(context, input->type, output->type); return kTfLiteError; } } case kTfLiteInt8: { // int8 to int8, uint8. const int32_t size = MatchingFlatSize(input_shape, output_shape); const int8_t* input_data = GetTensorData<int8_t>(input); switch (output->type) { case kTfLiteInt8: Requantize<kernel_type>(input_data, size, data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<int8_t>(output)); return kTfLiteOk; case kTfLiteUInt8: Requantize<kernel_type>(input_data, size, data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<uint8_t>(output)); return kTfLiteOk; default: ReportError(context, input->type, output->type); return kTfLiteError; } } case kTfLiteUInt8: { // uint8 to int8, uint8. const int32_t size = MatchingFlatSize(input_shape, output_shape); const uint8_t* input_data = GetTensorData<uint8_t>(input); switch (output->type) { case kTfLiteInt8: Requantize<kernel_type>(input_data, size, data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<int8_t>(output)); return kTfLiteOk; case kTfLiteUInt8: Requantize<kernel_type>(input_data, size, data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<uint8_t>(output)); return kTfLiteOk; default: ReportError(context, input->type, output->type); return kTfLiteError; } } default: ReportError(context, input->type, output->type); return kTfLiteError; } }
678
True
1
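The commit message also distinguishes `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor`, whose `nullptr` result can be a legitimate "tensor absent" signal rather than an error. The toy example below (illustrative types only, not TensorFlow Lite's API) shows that distinction: an optional input that is `nullptr` simply selects a default path instead of failing.

```cpp
#include <cstdio>

// Illustrative stand-ins only, not TensorFlow Lite's API: for a genuinely
// optional input, a nullptr tensor means "absent" and is handled, not fatal.
struct Tensor {
  float value;
};

float EvalWithOptionalBias(const Tensor& input, const Tensor* optional_bias) {
  // nullptr is acceptable here: it simply means no bias tensor was supplied.
  float bias = (optional_bias != nullptr) ? optional_bias->value : 0.0f;
  return input.value + bias;
}

int main() {
  Tensor x{2.0f};
  Tensor b{0.5f};
  std::printf("with bias:    %.2f\n", EvalWithOptionalBias(x, &b));
  std::printf("without bias: %.2f\n", EvalWithOptionalBias(x, nullptr));
  return 0;
}
```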
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
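A minimal, generic C++ illustration of the out-of-bounds write pattern described above (not TensorFlow code): an unvalidated index turns an ordinary store into a write outside the buffer.

#include <cstddef>

// Illustration only: the index is trusted, so index == -1 or index >= len
// writes before or past the allocation, corrupting adjacent memory.
void StoreSample(float* buffer, std::size_t len, long index, float value) {
  // Missing bounds check, e.g.: if (index < 0 || static_cast<std::size_t>(index) >= len) return;
  buffer[index] = value;
}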
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
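A minimal sketch of the pattern this commit message describes, assuming the usual TensorFlow Lite kernel helpers (`GetInput`, `GetOutput`, `TF_LITE_ENSURE` from kernel_util.h); it is illustrative, not the verbatim diff.

TfLiteStatus PrepareWithNullChecks(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);   // newly inserted check
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);  // newly inserted check
  // ... the kernel's existing shape/type validation continues here ...
  return kTfLiteOk;
}

Per the message, Eval implementations receive the same guards before any GetTensorShape/GetTensorData call on the returned pointers.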
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::quantize::Eval
tflite::ops::builtin::quantize::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpData* data = static_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const RuntimeShape input_shape = GetTensorShape(input); const RuntimeShape output_shape = GetTensorShape(output); switch (input->type) { case kTfLiteFloat32: { // Float to int8, uint8, int16. tflite::QuantizationParams op_params; op_params.zero_point = output->params.zero_point; op_params.scale = output->params.scale; const float* input_data = GetTensorData<float>(input); switch (output->type) { case kTfLiteInt8: AffineQuantize<kernel_type>(op_params, input_shape, input_data, output_shape, GetTensorData<int8_t>(output)); return kTfLiteOk; case kTfLiteUInt8: AffineQuantize<kernel_type>(op_params, input_shape, input_data, output_shape, GetTensorData<uint8_t>(output)); return kTfLiteOk; case kTfLiteInt16: AffineQuantize<kernel_type>(op_params, input_shape, input_data, output_shape, GetTensorData<int16_t>(output)); return kTfLiteOk; default: ReportError(context, input->type, output->type); return kTfLiteError; } } case kTfLiteInt16: { // int16 to int8 or int16. switch (output->type) { case kTfLiteInt8: Requantize<kernel_type>(GetTensorData<int16_t>(input), MatchingFlatSize(input_shape, output_shape), data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<int8_t>(output)); return kTfLiteOk; case kTfLiteInt16: Requantize<kernel_type>(GetTensorData<int16_t>(input), MatchingFlatSize(input_shape, output_shape), data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<int16_t>(output)); return kTfLiteOk; default: ReportError(context, input->type, output->type); return kTfLiteError; } } case kTfLiteInt8: { // int8 to int8, uint8. const int32_t size = MatchingFlatSize(input_shape, output_shape); const int8_t* input_data = GetTensorData<int8_t>(input); switch (output->type) { case kTfLiteInt8: Requantize<kernel_type>(input_data, size, data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<int8_t>(output)); return kTfLiteOk; case kTfLiteUInt8: Requantize<kernel_type>(input_data, size, data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<uint8_t>(output)); return kTfLiteOk; default: ReportError(context, input->type, output->type); return kTfLiteError; } } case kTfLiteUInt8: { // uint8 to int8, uint8. const int32_t size = MatchingFlatSize(input_shape, output_shape); const uint8_t* input_data = GetTensorData<uint8_t>(input); switch (output->type) { case kTfLiteInt8: Requantize<kernel_type>(input_data, size, data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<int8_t>(output)); return kTfLiteOk; case kTfLiteUInt8: Requantize<kernel_type>(input_data, size, data->output_multiplier, data->output_shift, input->params.zero_point, output->params.zero_point, GetTensorData<uint8_t>(output)); return kTfLiteOk; default: ReportError(context, input->type, output->type); return kTfLiteError; } } default: ReportError(context, input->type, output->type); return kTfLiteError; } }
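For context, applying that same pattern to the tensor lookups at the top of this Eval would look roughly like the fragment below (a sketch under the assumptions above, not the exact released patch):

  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);
  // Only after these checks is it safe to call GetTensorShape()/GetTensorData().
  const RuntimeShape input_shape = GetTensorShape(input);
  const RuntimeShape output_shape = GetTensorShape(output);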
678
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
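A minimal, generic C++ illustration of the out-of-bounds read pattern described above (not TensorFlow code): the loop trusts a caller-supplied count instead of the buffer's real length.

#include <cstddef>

// Illustration only: when count > len, the loop reads past the allocation,
// leaking adjacent memory or crashing.
int SumPrefix(const int* buffer, std::size_t len, std::size_t count) {
  int sum = 0;
  // Missing clamp, e.g.: if (count > len) count = len;
  for (std::size_t i = 0; i < count; ++i) sum += buffer[i];
  return sum;
}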
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::quantize::Prepare
tflite::ops::builtin::quantize::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = static_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); // TODO(b/128934713): Add support for fixed-point per-channel quantization. // Currently this only support affine per-layer quantization. TF_LITE_ENSURE_EQ(context, output->quantization.type, kTfLiteAffineQuantization); const auto* affine_quantization = static_cast<TfLiteAffineQuantization*>(output->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); if (input->type == kTfLiteFloat32) { // Quantize use case. TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16); } else { // Requantize use case. if (input->type == kTfLiteInt16) { TF_LITE_ENSURE( context, output->type == kTfLiteInt8 || output->type == kTfLiteInt16); } else { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8); TF_LITE_ENSURE( context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8); } const double effective_output_scale = static_cast<double>(input->params.scale) / static_cast<double>(output->params.scale); QuantizeMultiplier(effective_output_scale, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
298
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::quantize::Prepare
tflite::ops::builtin::quantize::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = static_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); // TODO(b/128934713): Add support for fixed-point per-channel quantization. // Currently this only support affine per-layer quantization. TF_LITE_ENSURE_EQ(context, output->quantization.type, kTfLiteAffineQuantization); const auto* affine_quantization = static_cast<TfLiteAffineQuantization*>(output->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE(context, affine_quantization->scale->size == 1); if (input->type == kTfLiteFloat32) { // Quantize use case. TF_LITE_ENSURE(context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16); } else { // Requantize use case. if (input->type == kTfLiteInt16) { TF_LITE_ENSURE( context, output->type == kTfLiteInt8 || output->type == kTfLiteInt16); } else { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8); TF_LITE_ENSURE( context, output->type == kTfLiteUInt8 || output->type == kTfLiteInt8); } const double effective_output_scale = static_cast<double>(input->params.scale) / static_cast<double>(output->params.scale); QuantizeMultiplier(effective_output_scale, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
298
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::range::Eval
tflite::ops::builtin::range::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* start = GetInput(context, node, kStartTensor); const TfLiteTensor* limit = GetInput(context, node, kLimitTensor); const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, start, limit, delta, output)); } switch (output->type) { case kTfLiteInt32: { EvalImpl<int32_t>(start, delta, output); break; } case kTfLiteFloat32: { EvalImpl<float>(start, delta, output); break; } default: { context->ReportError(context, "Unsupported data type: %d", output->type); return kTfLiteError; } } return kTfLiteOk; }
163
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::range::Eval
tflite::ops::builtin::range::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* start = GetInput(context, node, kStartTensor); const TfLiteTensor* limit = GetInput(context, node, kLimitTensor); const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutput(context, start, limit, delta, output)); } switch (output->type) { case kTfLiteInt32: { EvalImpl<int32_t>(start, delta, output); break; } case kTfLiteFloat32: { EvalImpl<float>(start, delta, output); break; } default: { context->ReportError(context, "Unsupported data type: %d", output->type); return kTfLiteError; } } return kTfLiteOk; }
163
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::range::Prepare
tflite::ops::builtin::range::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* start = GetInput(context, node, kStartTensor); const TfLiteTensor* limit = GetInput(context, node, kLimitTensor); const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor); // Make sure all the inputs are scalars. TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0); TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0); TF_LITE_ENSURE_EQ(context, NumDimensions(delta), 0); // Currently only supports int32 and float. // TODO(b/117912892): Support quantization as well. const auto dtype = start->type; if (dtype != kTfLiteFloat32 && dtype != kTfLiteInt32) { context->ReportError(context, "Unknown index output data type: %s", TfLiteTypeGetName(dtype)); return kTfLiteError; } TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype); TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = dtype; if (IsConstantTensor(start) && IsConstantTensor(limit) && IsConstantTensor(delta)) { return ResizeOutput(context, start, limit, delta, output); } SetTensorToDynamic(output); return kTfLiteOk; }
233
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::range::Prepare
tflite::ops::builtin::range::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 3); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* start = GetInput(context, node, kStartTensor); const TfLiteTensor* limit = GetInput(context, node, kLimitTensor); const TfLiteTensor* delta = GetInput(context, node, kDeltaTensor); // Make sure all the inputs are scalars. TF_LITE_ENSURE_EQ(context, NumDimensions(start), 0); TF_LITE_ENSURE_EQ(context, NumDimensions(limit), 0); TF_LITE_ENSURE_EQ(context, NumDimensions(delta), 0); // Currently only supports int32 and float. // TODO(b/117912892): Support quantization as well. const auto dtype = start->type; if (dtype != kTfLiteFloat32 && dtype != kTfLiteInt32) { context->ReportError(context, "Unknown index output data type: %s", TfLiteTypeGetName(dtype)); return kTfLiteError; } TF_LITE_ENSURE_TYPES_EQ(context, limit->type, dtype); TF_LITE_ENSURE_TYPES_EQ(context, delta->type, dtype); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = dtype; if (IsConstantTensor(start) && IsConstantTensor(limit) && IsConstantTensor(delta)) { return ResizeOutput(context, start, limit, delta, output); } SetTensorToDynamic(output); return kTfLiteOk; }
233
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::rank::Prepare
tflite::ops::builtin::rank::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = kTfLiteInt32; // By design, the input shape is always known at the time of Prepare, even // if the preceding op that generates |input| is dynamic. Thus, we can // always compute the rank immediately, without waiting for Eval. SetTensorToPersistentRo(output); // Rank produces a 0-D int32 Tensor representing the rank of input. TfLiteIntArray* output_size = TfLiteIntArrayCreate(0); TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size)); TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0); // Immediately propagate the known rank to the output tensor. This allows // downstream ops that rely on the value to use it during prepare. if (output->type == kTfLiteInt32) { int32_t* output_data = GetTensorData<int32_t>(output); *output_data = NumDimensions(input); } else { return kTfLiteError; } return kTfLiteOk; }
148
True
1
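Note on the record above: the commit message describes hardening every tensor lookup with a `nullptr` check, since `tflite::GetInput`/`tflite::GetOutput` can return `nullptr` once the refactoring lands (and a crafted model can abuse the `-1` optional-tensor index). The sketch below shows what that pattern looks like when applied to the `rank::Prepare` kernel in the `function` field of this record. It is an illustrative reconstruction, not the verbatim upstream patch in commit 1970c2158b, and it assumes the usual TensorFlow Lite kernel utilities (`TF_LITE_ENSURE`, `GetInput`, `GetOutput`) from tensorflow/lite/kernels/kernel_util.h.

  // Illustrative sketch only: nullptr checks inserted after each tensor lookup.
  TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
    TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
    TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
    const TfLiteTensor* input = GetInput(context, node, kInputTensor);
    TF_LITE_ENSURE(context, input != nullptr);   // fail Prepare instead of dereferencing a bogus tensor
    TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
    TF_LITE_ENSURE(context, output != nullptr);  // same guard for the output tensor
    output->type = kTfLiteInt32;
    // ... remainder identical to the function field above ...
    return kTfLiteOk;
  }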
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::rank::Prepare
tflite::ops::builtin::rank::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = kTfLiteInt32; // By design, the input shape is always known at the time of Prepare, even // if the preceding op that generates |input| is dynamic. Thus, we can // always compute the rank immediately, without waiting for Eval. SetTensorToPersistentRo(output); // Rank produces a 0-D int32 Tensor representing the rank of input. TfLiteIntArray* output_size = TfLiteIntArrayCreate(0); TF_LITE_ENSURE_STATUS(context->ResizeTensor(context, output, output_size)); TF_LITE_ENSURE_EQ(context, NumDimensions(output), 0); // Immediately propagate the known rank to the output tensor. This allows // downstream ops that rely on the value to use it during prepare. if (output->type == kTfLiteInt32) { int32_t* output_data = GetTensorData<int32_t>(output); *output_data = NumDimensions(input); } else { return kTfLiteError; } return kTfLiteOk; }
148
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::read_variable::Eval
tflite::ops::custom::read_variable::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); int resource_id = input_resource_id_tensor->data.i32[0]; auto& resources = subgraph->resources(); auto* variable = resource::GetResourceVariable(&resources, resource_id); TF_LITE_ENSURE(context, variable != nullptr); TfLiteTensor* variable_tensor = variable->GetTensor(); TfLiteTensor* output = GetOutput(context, node, kOutputValue); TF_LITE_ENSURE_TYPES_EQ(context, variable_tensor->type, output->type); TF_LITE_ENSURE_OK( context, context->ResizeTensor( context, output, TfLiteIntArrayCopy(variable_tensor->dims))); memcpy(output->data.raw, variable_tensor->data.raw, output->bytes); return kTfLiteOk; }
165
True
1
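Note on the record above: the `read_variable::Eval` kernel in the `function` field dereferences the results of `GetInput` and `GetOutput` without checking them, and the `memcpy` into `output->data.raw` is the kind of write gadget the advisory describes. Below is a hedged sketch of the hardening the commit message describes, again an illustration rather than the literal upstream diff, assuming the standard TensorFlow Lite kernel utilities are in scope.

  // Illustrative sketch only: nullptr checks added before the resource id read and the memcpy.
  TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
    Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
    const TfLiteTensor* input_resource_id_tensor =
        GetInput(context, node, kInputVariableId);
    TF_LITE_ENSURE(context, input_resource_id_tensor != nullptr);  // added check
    int resource_id = input_resource_id_tensor->data.i32[0];
    auto& resources = subgraph->resources();
    auto* variable = resource::GetResourceVariable(&resources, resource_id);
    TF_LITE_ENSURE(context, variable != nullptr);
    TfLiteTensor* variable_tensor = variable->GetTensor();
    TfLiteTensor* output = GetOutput(context, node, kOutputValue);
    TF_LITE_ENSURE(context, output != nullptr);  // added check before resize and memcpy
    TF_LITE_ENSURE_TYPES_EQ(context, variable_tensor->type, output->type);
    TF_LITE_ENSURE_OK(
        context, context->ResizeTensor(context, output,
                                       TfLiteIntArrayCopy(variable_tensor->dims)));
    memcpy(output->data.raw, variable_tensor->data.raw, output->bytes);
    return kTfLiteOk;
  }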
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::read_variable::Eval
tflite::ops::custom::read_variable::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); int resource_id = input_resource_id_tensor->data.i32[0]; auto& resources = subgraph->resources(); auto* variable = resource::GetResourceVariable(&resources, resource_id); TF_LITE_ENSURE(context, variable != nullptr); TfLiteTensor* variable_tensor = variable->GetTensor(); TfLiteTensor* output = GetOutput(context, node, kOutputValue); TF_LITE_ENSURE_TYPES_EQ(context, variable_tensor->type, output->type); TF_LITE_ENSURE_OK( context, context->ResizeTensor( context, output, TfLiteIntArrayCopy(variable_tensor->dims))); memcpy(output->data.raw, variable_tensor->data.raw, output->bytes); return kTfLiteOk; }
165
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::read_variable::Prepare
tflite::ops::custom::read_variable::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, node->inputs->size, 1); TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); TfLiteTensor* output = GetOutput(context, node, kOutputValue); SetTensorToDynamic(output); return kTfLiteOk; }
96
True
1
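Note on the record above: the `read_variable::Prepare` kernel in the `function` field also uses the `GetInput`/`GetOutput` results without a check. A minimal sketch of the guarded version, following the same pattern as the commit message describes (an assumption-labeled illustration, not the exact upstream change):

  // Illustrative sketch only: nullptr checks inserted after each tensor lookup.
  TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
    TF_LITE_ENSURE_EQ(context, node->inputs->size, 1);
    TF_LITE_ENSURE_EQ(context, node->outputs->size, 1);
    const TfLiteTensor* input_resource_id_tensor =
        GetInput(context, node, kInputVariableId);
    TF_LITE_ENSURE(context, input_resource_id_tensor != nullptr);  // added check
    TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
    TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1);
    TfLiteTensor* output = GetOutput(context, node, kOutputValue);
    TF_LITE_ENSURE(context, output != nullptr);  // added check
    SetTensorToDynamic(output);
    return kTfLiteOk;
  }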
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::read_variable::Prepare
tflite::ops::custom::read_variable::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, node->inputs->size, 1); TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); TfLiteTensor* output = GetOutput(context, node, kOutputValue); SetTensorToDynamic(output); return kTfLiteOk; }
96
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::EvalLogic
tflite::ops::builtin::reduce::EvalLogic( TfLiteContext * context , TfLiteNode * node , OpContext * op_context , T init_value , T reducer(const T current,const T in))
['context', 'node', 'op_context', 'init_value', 'reducer']
TfLiteStatus EvalLogic(TfLiteContext* context, TfLiteNode* node, OpContext* op_context, T init_value, T reducer(const T current, const T in)) { int64_t num_axis = NumElements(op_context->axis); TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); // Resize the output tensor if the output tensor is dynamic. if (IsDynamicTensor(op_context->output)) { TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); } if (op_context->input->type == kTfLiteUInt8 || op_context->input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, op_context->input->params.scale, op_context->output->params.scale); TF_LITE_ENSURE_EQ(context, op_context->input->params.zero_point, op_context->output->params.zero_point); } TF_LITE_ENSURE( context, reference_ops::ReduceGeneric<T>( GetTensorData<T>(op_context->input), op_context->input->dims->data, op_context->input->dims->size, GetTensorData<T>(op_context->output), op_context->output->dims->data, op_context->output->dims->size, GetTensorData<int>(op_context->axis), num_axis, op_context->params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), init_value, reducer)); return kTfLiteOk; }
272
True
1
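Note on the record above: in `reduce::EvalLogic`, the tensors obtained via `GetTemporary` (`temp_index` and `resolved_axis`) are passed straight into `reference_ops::ReduceGeneric`, which reads and writes through them. The commit message calls out `tflite::GetTemporary` as one of the accessors that may return `nullptr`. A hedged sketch of the guarded lookups follows; the `template <typename T>` line and the truncation comment are reconstructions for illustration, not the verbatim upstream code.

  // Illustrative sketch only: nullptr checks after the GetTemporary lookups.
  template <typename T>
  TfLiteStatus EvalLogic(TfLiteContext* context, TfLiteNode* node,
                         OpContext* op_context, T init_value,
                         T reducer(const T current, const T in)) {
    int64_t num_axis = NumElements(op_context->axis);
    TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
    TF_LITE_ENSURE(context, temp_index != nullptr);     // added check
    TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
    TF_LITE_ENSURE(context, resolved_axis != nullptr);  // added check
    // ... remainder identical to the function field above ...
    return kTfLiteOk;
  }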
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
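In the context of this CVE, the out-of-bounds write arises from indexing a heap-allocated tensor array with the untrusted `-1` value. The toy program below is not TensorFlow code, only an assumed minimal reproduction of the pattern: a negative index turns an ordinary field assignment into a write at a fixed offset before the allocation.

```cpp
#include <cstdlib>

struct Tensor {
  void* data;
  int bytes;
};

int main() {
  // Heap-allocated array of tensor descriptors, as a runtime might keep them.
  Tensor* tensors = static_cast<Tensor*>(std::malloc(8 * sizeof(Tensor)));
  int index = -1;  // Imagine this came from untrusted model data.

  // Undefined behaviour: writes sizeof(Tensor) bytes *before* the allocation,
  // i.e. an out-of-bounds write at a fixed offset from the buffer start.
  tensors[index].bytes = 0x41414141;

  std::free(tensors);
  return 0;
}
```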
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
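The guard described in the commit message above typically looks like the fragment below inside a kernel's `Prepare`/`Eval` function: obtain the tensor, then fail fast if the helper returned `nullptr`. This is a sketch of the pattern only; the exact checks and their placement in the patched kernels may differ.

```cpp
// Sketch of the pattern: validate each helper result before dereferencing it.
TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0);
TF_LITE_ENSURE(context, temp_index != nullptr);
TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
TF_LITE_ENSURE(context, resolved_axis != nullptr);
// Only now is it safe to pass temp_index / resolved_axis on to the reference ops.
```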
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::EvalLogic
tflite::ops::builtin::reduce::EvalLogic( TfLiteContext * context , TfLiteNode * node , OpContext * op_context , T init_value , T reducer(const T current,const T in))
['context', 'node', 'op_context', 'init_value', 'reducer']
TfLiteStatus EvalLogic(TfLiteContext* context, TfLiteNode* node, OpContext* op_context, T init_value, T reducer(const T current, const T in)) { int64_t num_axis = NumElements(op_context->axis); TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); // Resize the output tensor if the output tensor is dynamic. if (IsDynamicTensor(op_context->output)) { TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, op_context)); } if (op_context->input->type == kTfLiteUInt8 || op_context->input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, op_context->input->params.scale, op_context->output->params.scale); TF_LITE_ENSURE_EQ(context, op_context->input->params.zero_point, op_context->output->params.zero_point); } TF_LITE_ENSURE( context, reference_ops::ReduceGeneric<T>( GetTensorData<T>(op_context->input), op_context->input->dims->data, op_context->input->dims->size, GetTensorData<T>(op_context->output), op_context->output->dims->data, op_context->output->dims->size, GetTensorData<int>(op_context->axis), num_axis, op_context->params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), init_value, reducer)); return kTfLiteOk; }
272
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
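The CVSS v3.1 figures recorded above (base score 4.8, exploitability 2.2, impact 2.5) can be reproduced from the vector CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N using the standard v3.1 base-score equations. The snippet below is only a quick sanity check using the specification's metric weights and a simplified one-decimal round-up.

```cpp
#include <algorithm>
#include <cmath>
#include <cstdio>

// Simplified CVSS v3.1 "Roundup": smallest one-decimal value >= the input.
static double Roundup(double x) { return std::ceil(x * 10.0) / 10.0; }

int main() {
  // Weights for AV:N / AC:H / PR:N / UI:N and C:L / I:L / A:N per the v3.1 spec.
  const double av = 0.85, ac = 0.44, pr = 0.85, ui = 0.85;
  const double c = 0.22, i = 0.22, a = 0.0;

  const double exploitability = 8.22 * av * ac * pr * ui;      // ~2.22
  const double iss = 1.0 - (1.0 - c) * (1.0 - i) * (1.0 - a);  // 0.3916
  const double impact = 6.42 * iss;                            // ~2.51 (scope unchanged)
  const double base = impact <= 0.0
                          ? 0.0
                          : Roundup(std::min(impact + exploitability, 10.0));

  std::printf("exploitability=%.1f impact=%.1f base=%.1f\n",
              exploitability, impact, base);  // prints 2.2 2.5 4.8
  return 0;
}
```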
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
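The read variant of the same pattern: with an attacker-chosen `-1` index, an ordinary array read returns whatever bytes happen to precede the heap allocation. Again a toy, assumed example rather than TensorFlow code:

```cpp
#include <cstdio>
#include <cstdlib>

int main() {
  int* lengths = static_cast<int*>(std::malloc(4 * sizeof(int)));
  int index = -1;  // Attacker-influenced index from an untrusted file.

  // Undefined behaviour: reads whatever sits just before the allocation and
  // may leak heap metadata or the contents of a neighbouring object.
  std::printf("%d\n", lengths[index]);

  std::free(lengths);
  return 0;
}
```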
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::EvalMean
tflite::ops::builtin::reduce::EvalMean( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); OpData* data = reinterpret_cast<OpData*>(node->user_data); int num_axis = static_cast<int>(NumElements(op_context.axis)); TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); // Resize the output tensor if the output tensor is dynamic. if (IsDynamicTensor(op_context.output)) { TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); TF_LITE_ENSURE_OK(context, ResizeTempSum(context, &op_context, temp_sum)); } if (kernel_type == kGenericOptimized) { // Use optimized ops if available. switch (op_context.input->type) { case kTfLiteInt8: { tflite::MeanParams op_params; op_params.axis_count = num_axis; ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params); const TfLiteTensor* input = op_context.input; if (op_context.params->keep_dims && NumDimensions(input) == 4 && op_params.axis_count == 2 && ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || (op_params.axis[0] == 2 && op_params.axis[1] == 1))) { optimized_integer_ops::Mean( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), input->params.zero_point, input->params.scale, GetTensorShape(op_context.output), GetTensorData<int8_t>(op_context.output), op_context.output->params.zero_point, op_context.output->params.scale, CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } } break; case kTfLiteUInt8: { tflite::MeanParams op_params; op_params.axis_count = num_axis; ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params); const TfLiteTensor* input = op_context.input; if (op_context.params->keep_dims && NumDimensions(input) == 4 && op_params.axis_count == 2 && ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || (op_params.axis[0] == 2 && op_params.axis[1] == 1))) { optimized_ops::Mean(op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), input->params.zero_point, input->params.scale, GetTensorShape(op_context.output), GetTensorData<uint8_t>(op_context.output), op_context.output->params.zero_point, op_context.output->params.scale, CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } } break; default: break; } } // From here, it uses the reference implementations. // TODO(b/139102329): Clean up the function signatures to merge the variations // and handle the specialized cases in the combined reference implementations // per each op. switch (op_context.input->type) { case kTfLiteFloat32: { tflite::MeanParams op_params; op_params.axis_count = num_axis; ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params); const TfLiteTensor* input = op_context.input; // TODO(b/139102329): Handle the below special case in the combined // reference method. // Defer to specialized implementation for 4D Mean across axes 1 & 2. 
if (op_context.params->keep_dims && NumDimensions(input) == 4 && op_params.axis_count == 2 && ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || (op_params.axis[0] == 2 && op_params.axis[1] == 1))) { reference_ops::Mean(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(op_context.output), GetTensorData<float>(op_context.output)); } else { TF_LITE_ENSURE( context, optimized_ops::MeanGeneral( GetTensorData<float>(op_context.input), op_context.input->dims->data, op_context.input->dims->size, GetTensorData<float>(op_context.output), op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<float>(temp_sum))); } } break; case kTfLiteInt32: TF_LITE_ENSURE( context, reference_ops::Mean( GetTensorData<int>(op_context.input), op_context.input->dims->data, op_context.input->dims->size, GetTensorData<int>(op_context.output), op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<int64_t>(temp_sum))); break; case kTfLiteInt64: TF_LITE_ENSURE( context, reference_ops::Mean( GetTensorData<int64_t>(op_context.input), op_context.input->dims->data, op_context.input->dims->size, GetTensorData<int64_t>(op_context.output), op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<int64_t>(temp_sum))); break; case kTfLiteInt8: { TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<int8_t>( context, op_context, num_axis, data, temp_index, resolved_axis, temp_sum)); } break; case kTfLiteInt16: { TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<int16_t>( context, op_context, num_axis, data, temp_index, resolved_axis, temp_sum)); } break; case kTfLiteUInt8: { TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<uint8_t>( context, op_context, num_axis, data, temp_index, resolved_axis, temp_sum)); } break; default: return kTfLiteError; } return kTfLiteOk; }
1122
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::EvalMean
tflite::ops::builtin::reduce::EvalMean( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus EvalMean(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); OpData* data = reinterpret_cast<OpData*>(node->user_data); int num_axis = static_cast<int>(NumElements(op_context.axis)); TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); // Resize the output tensor if the output tensor is dynamic. if (IsDynamicTensor(op_context.output)) { TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); TF_LITE_ENSURE_OK(context, ResizeTempSum(context, &op_context, temp_sum)); } if (kernel_type == kGenericOptimized) { // Use optimized ops if available. switch (op_context.input->type) { case kTfLiteInt8: { tflite::MeanParams op_params; op_params.axis_count = num_axis; ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params); const TfLiteTensor* input = op_context.input; if (op_context.params->keep_dims && NumDimensions(input) == 4 && op_params.axis_count == 2 && ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || (op_params.axis[0] == 2 && op_params.axis[1] == 1))) { optimized_integer_ops::Mean( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), input->params.zero_point, input->params.scale, GetTensorShape(op_context.output), GetTensorData<int8_t>(op_context.output), op_context.output->params.zero_point, op_context.output->params.scale, CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } } break; case kTfLiteUInt8: { tflite::MeanParams op_params; op_params.axis_count = num_axis; ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params); const TfLiteTensor* input = op_context.input; if (op_context.params->keep_dims && NumDimensions(input) == 4 && op_params.axis_count == 2 && ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || (op_params.axis[0] == 2 && op_params.axis[1] == 1))) { optimized_ops::Mean(op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), input->params.zero_point, input->params.scale, GetTensorShape(op_context.output), GetTensorData<uint8_t>(op_context.output), op_context.output->params.zero_point, op_context.output->params.scale, CpuBackendContext::GetFromContext(context)); return kTfLiteOk; } } break; default: break; } } // From here, it uses the reference implementations. // TODO(b/139102329): Clean up the function signatures to merge the variations // and handle the specialized cases in the combined reference implementations // per each op. switch (op_context.input->type) { case kTfLiteFloat32: { tflite::MeanParams op_params; op_params.axis_count = num_axis; ResolveAxis(GetTensorData<int>(op_context.axis), num_axis, &op_params); const TfLiteTensor* input = op_context.input; // TODO(b/139102329): Handle the below special case in the combined // reference method. // Defer to specialized implementation for 4D Mean across axes 1 & 2. 
if (op_context.params->keep_dims && NumDimensions(input) == 4 && op_params.axis_count == 2 && ((op_params.axis[0] == 1 && op_params.axis[1] == 2) || (op_params.axis[0] == 2 && op_params.axis[1] == 1))) { reference_ops::Mean(op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(op_context.output), GetTensorData<float>(op_context.output)); } else { TF_LITE_ENSURE( context, optimized_ops::MeanGeneral( GetTensorData<float>(op_context.input), op_context.input->dims->data, op_context.input->dims->size, GetTensorData<float>(op_context.output), op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<float>(temp_sum))); } } break; case kTfLiteInt32: TF_LITE_ENSURE( context, reference_ops::Mean( GetTensorData<int>(op_context.input), op_context.input->dims->data, op_context.input->dims->size, GetTensorData<int>(op_context.output), op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<int64_t>(temp_sum))); break; case kTfLiteInt64: TF_LITE_ENSURE( context, reference_ops::Mean( GetTensorData<int64_t>(op_context.input), op_context.input->dims->data, op_context.input->dims->size, GetTensorData<int64_t>(op_context.output), op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<int64_t>(temp_sum))); break; case kTfLiteInt8: { TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<int8_t>( context, op_context, num_axis, data, temp_index, resolved_axis, temp_sum)); } break; case kTfLiteInt16: { TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<int16_t>( context, op_context, num_axis, data, temp_index, resolved_axis, temp_sum)); } break; case kTfLiteUInt8: { TF_LITE_ENSURE_OK(context, EvalMeanReferenceOps<uint8_t>( context, op_context, num_axis, data, temp_index, resolved_axis, temp_sum)); } break; default: return kTfLiteError; } return kTfLiteOk; }
1122
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::EvalSum
tflite::ops::builtin::reduce::EvalSum( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); ruy::profiler::ScopeLabel label("Sum"); const auto& input = op_context.input; const auto& output = op_context.output; const bool same_scale = (input->params.scale == output->params.scale && input->params.zero_point == output->params.zero_point); const bool eight_bit_quantized = input->type == kTfLiteUInt8 || input->type == kTfLiteInt8; const bool need_rescale = (eight_bit_quantized && !same_scale); if (need_rescale) { // Rescaling 8bit reduce sum. int num_axis = static_cast<int>(NumElements(op_context.axis)); TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); // Resize the output tensor if the output tensor is dynamic. if (IsDynamicTensor(op_context.output)) { TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); TF_LITE_ENSURE_OK(context, ResizeTempSum(context, &op_context, temp_sum)); } if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE( context, reference_ops::QuantizedMeanOrSum<>( GetTensorData<uint8_t>(op_context.input), op_context.input->params.zero_point, op_context.input->params.scale, op_context.input->dims->data, op_context.input->dims->size, GetTensorData<uint8_t>(op_context.output), op_context.output->params.zero_point, op_context.output->params.scale, op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<int32>(temp_sum), /*compute_sum=*/true)); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE( context, reference_ops::QuantizedMeanOrSum<>( GetTensorData<int8_t>(op_context.input), op_context.input->params.zero_point, op_context.input->params.scale, op_context.input->dims->data, op_context.input->dims->size, GetTensorData<int8_t>(op_context.output), op_context.output->params.zero_point, op_context.output->params.scale, op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<int32>(temp_sum), /*compute_sum=*/true)); } } else { return EvalGeneric<kReference, kSum>(context, node); } return kTfLiteOk; }
539
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::EvalSum
tflite::ops::builtin::reduce::EvalSum( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus EvalSum(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); ruy::profiler::ScopeLabel label("Sum"); const auto& input = op_context.input; const auto& output = op_context.output; const bool same_scale = (input->params.scale == output->params.scale && input->params.zero_point == output->params.zero_point); const bool eight_bit_quantized = input->type == kTfLiteUInt8 || input->type == kTfLiteInt8; const bool need_rescale = (eight_bit_quantized && !same_scale); if (need_rescale) { // Rescaling 8bit reduce sum. int num_axis = static_cast<int>(NumElements(op_context.axis)); TfLiteTensor* temp_index = GetTemporary(context, node, /*index=*/0); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); // Resize the output tensor if the output tensor is dynamic. if (IsDynamicTensor(op_context.output)) { TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); TF_LITE_ENSURE_OK(context, ResizeTempSum(context, &op_context, temp_sum)); } if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE( context, reference_ops::QuantizedMeanOrSum<>( GetTensorData<uint8_t>(op_context.input), op_context.input->params.zero_point, op_context.input->params.scale, op_context.input->dims->data, op_context.input->dims->size, GetTensorData<uint8_t>(op_context.output), op_context.output->params.zero_point, op_context.output->params.scale, op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<int32>(temp_sum), /*compute_sum=*/true)); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE( context, reference_ops::QuantizedMeanOrSum<>( GetTensorData<int8_t>(op_context.input), op_context.input->params.zero_point, op_context.input->params.scale, op_context.input->dims->data, op_context.input->dims->size, GetTensorData<int8_t>(op_context.output), op_context.output->params.zero_point, op_context.output->params.scale, op_context.output->dims->data, op_context.output->dims->size, GetTensorData<int>(op_context.axis), num_axis, op_context.params->keep_dims, GetTensorData<int>(temp_index), GetTensorData<int>(resolved_axis), GetTensorData<int32>(temp_sum), /*compute_sum=*/true)); } } else { return EvalGeneric<kReference, kSum>(context, node); } return kTfLiteOk; }
539
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::InitializeTemporaries
tflite::ops::builtin::reduce::InitializeTemporaries( TfLiteContext * context , TfLiteNode * node , OpContext * op_context)
['context', 'node', 'op_context']
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node, OpContext* op_context) { // Creates a temp index to iterate through input data. OpData* op_data = reinterpret_cast<OpData*>(node->user_data); TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(3); node->temporaries->data[0] = op_data->scratch_tensor_index; TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0); scratch_tensor->type = kTfLiteInt32; scratch_tensor->allocation_type = kTfLiteArenaRw; TfLiteIntArray* index_size = TfLiteIntArrayCreate(1); index_size->data[0] = NumDimensions(op_context->input); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size)); // Creates a temp tensor to store resolved axis given input data. node->temporaries->data[1] = op_data->scratch_tensor_index + 1; TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); resolved_axis->type = kTfLiteInt32; // Creates a temp tensor to store temp sums when calculating mean. node->temporaries->data[2] = op_data->scratch_tensor_index + 2; TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); switch (op_context->input->type) { case kTfLiteFloat32: temp_sum->type = kTfLiteFloat32; break; case kTfLiteInt32: temp_sum->type = kTfLiteInt64; break; case kTfLiteInt64: temp_sum->type = kTfLiteInt64; break; case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt16: temp_sum->type = kTfLiteInt32; break; case kTfLiteBool: temp_sum->type = kTfLiteBool; break; default: return kTfLiteError; } return kTfLiteOk; }
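`InitializeTemporaries` above dereferences all three `GetTemporary` results (`scratch_tensor`, `resolved_axis`, `temp_sum`) without checking them. Applying the guard pattern from the commit message would, roughly, look like the fragment below for the first temporary; this is a sketch only, and the actual patched code may structure it differently.

```cpp
// Rough sketch only; mirrors the nullptr-check pattern described in the commit.
TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0);
TF_LITE_ENSURE(context, scratch_tensor != nullptr);
scratch_tensor->type = kTfLiteInt32;
scratch_tensor->allocation_type = kTfLiteArenaRw;
```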
265
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::InitializeTemporaries
tflite::ops::builtin::reduce::InitializeTemporaries( TfLiteContext * context , TfLiteNode * node , OpContext * op_context)
['context', 'node', 'op_context']
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node, OpContext* op_context) { // Creates a temp index to iterate through input data. OpData* op_data = reinterpret_cast<OpData*>(node->user_data); TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(3); node->temporaries->data[0] = op_data->scratch_tensor_index; TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0); scratch_tensor->type = kTfLiteInt32; scratch_tensor->allocation_type = kTfLiteArenaRw; TfLiteIntArray* index_size = TfLiteIntArrayCreate(1); index_size->data[0] = NumDimensions(op_context->input); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_tensor, index_size)); // Creates a temp tensor to store resolved axis given input data. node->temporaries->data[1] = op_data->scratch_tensor_index + 1; TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); resolved_axis->type = kTfLiteInt32; // Creates a temp tensor to store temp sums when calculating mean. node->temporaries->data[2] = op_data->scratch_tensor_index + 2; TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); switch (op_context->input->type) { case kTfLiteFloat32: temp_sum->type = kTfLiteFloat32; break; case kTfLiteInt32: temp_sum->type = kTfLiteInt64; break; case kTfLiteInt64: temp_sum->type = kTfLiteInt64; break; case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt16: temp_sum->type = kTfLiteInt32; break; case kTfLiteBool: temp_sum->type = kTfLiteBool; break; default: return kTfLiteError; } return kTfLiteOk; }
265
True
1
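The two records above capture tflite::ops::builtin::reduce::InitializeTemporaries as it stood before the fix: every GetTemporary result is dereferenced without a check, which is exactly what the commit message ("insert nullptr checks when obtaining tensors") addresses. The following is a minimal sketch of how such guards could be added to this function. It assumes the surrounding context of reduce.cc (OpData, OpContext, the kernel_util.h helpers) and uses the stock TF_LITE_ENSURE macro from tensorflow/lite/c/common.h; it is illustrative only and not necessarily the exact code landed in commit 1970c2158b1ffa416d159d03c3370b9a462aee35.

// Sketch only: reduce.cc context (OpData, OpContext) and the usual includes
// ("tensorflow/lite/c/common.h", "tensorflow/lite/kernels/kernel_util.h") are
// assumed. TF_LITE_ENSURE reports the failure through `context` and returns
// kTfLiteError from this function when the condition is false.
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node,
                                   OpContext* op_context) {
  // Creates a temp index to iterate through input data.
  OpData* op_data = reinterpret_cast<OpData*>(node->user_data);
  TfLiteIntArrayFree(node->temporaries);
  node->temporaries = TfLiteIntArrayCreate(3);
  node->temporaries->data[0] = op_data->scratch_tensor_index;
  TfLiteTensor* scratch_tensor = GetTemporary(context, node, /*index=*/0);
  TF_LITE_ENSURE(context, scratch_tensor != nullptr);  // added guard (sketch)
  scratch_tensor->type = kTfLiteInt32;
  scratch_tensor->allocation_type = kTfLiteArenaRw;
  TfLiteIntArray* index_size = TfLiteIntArrayCreate(1);
  index_size->data[0] = NumDimensions(op_context->input);
  TF_LITE_ENSURE_OK(
      context, context->ResizeTensor(context, scratch_tensor, index_size));
  // Creates a temp tensor to store resolved axis given input data.
  node->temporaries->data[1] = op_data->scratch_tensor_index + 1;
  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
  TF_LITE_ENSURE(context, resolved_axis != nullptr);  // added guard (sketch)
  resolved_axis->type = kTfLiteInt32;
  // Creates a temp tensor to store temp sums when calculating mean.
  node->temporaries->data[2] = op_data->scratch_tensor_index + 2;
  TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
  TF_LITE_ENSURE(context, temp_sum != nullptr);  // added guard (sketch)
  switch (op_context->input->type) {
    case kTfLiteFloat32:
      temp_sum->type = kTfLiteFloat32;
      break;
    case kTfLiteInt32:
      temp_sum->type = kTfLiteInt64;
      break;
    case kTfLiteInt64:
      temp_sum->type = kTfLiteInt64;
      break;
    case kTfLiteUInt8:
    case kTfLiteInt8:
    case kTfLiteInt16:
      temp_sum->type = kTfLiteInt32;
      break;
    case kTfLiteBool:
      temp_sum->type = kTfLiteBool;
      break;
    default:
      return kTfLiteError;
  }
  return kTfLiteOk;
}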
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::PrepareAny
tflite::ops::builtin::reduce::PrepareAny( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PrepareAny(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); const TfLiteTensor* input = GetInput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteBool); return PrepareSimple(context, node); }
57
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::PrepareAny
tflite::ops::builtin::reduce::PrepareAny( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PrepareAny(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); const TfLiteTensor* input = GetInput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteBool); return PrepareSimple(context, node); }
57
True
1
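Both PrepareAny records above show the unchecked GetInput call. A hedged sketch of the corresponding guard, under the same assumptions as the InitializeTemporaries sketch earlier (reduce.cc context, TF_LITE_ENSURE from tensorflow/lite/c/common.h), could look like this:

TfLiteStatus PrepareAny(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);  // added guard (sketch)
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteBool);
  return PrepareSimple(context, node);
}

With such a guard, a malformed model (for example one that abuses the -1 optional-tensor index where no optional input is expected) would surface as a clean kTfLiteError from Prepare rather than a dereference of an out-of-bounds tensor slot, assuming the tensor-lookup helpers return nullptr in that situation as the commit message describes.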
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::PrepareMeanOrSum
tflite::ops::builtin::reduce::PrepareMeanOrSum( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // reduce_mean requires a buffer to store intermediate sum result. OpContext op_context(context, node); if (op_context.input->type == kTfLiteInt8 || op_context.input->type == kTfLiteUInt8 || op_context.input->type == kTfLiteInt16) { const double real_multiplier = static_cast<double>(op_context.input->params.scale) / static_cast<double>(op_context.output->params.scale); int exponent; QuantizeMultiplier(real_multiplier, &data->multiplier, &exponent); data->shift = exponent; } TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); if (!IsConstantTensor(op_context.axis)) { SetTensorToDynamic(temp_sum); return kTfLiteOk; } temp_sum->allocation_type = kTfLiteArenaRw; return ResizeTempSum(context, &op_context, temp_sum); }
179
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::PrepareMeanOrSum
tflite::ops::builtin::reduce::PrepareMeanOrSum( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_OK(context, PrepareSimple(context, node)); OpData* data = reinterpret_cast<OpData*>(node->user_data); // reduce_mean requires a buffer to store intermediate sum result. OpContext op_context(context, node); if (op_context.input->type == kTfLiteInt8 || op_context.input->type == kTfLiteUInt8 || op_context.input->type == kTfLiteInt16) { const double real_multiplier = static_cast<double>(op_context.input->params.scale) / static_cast<double>(op_context.output->params.scale); int exponent; QuantizeMultiplier(real_multiplier, &data->multiplier, &exponent); data->shift = exponent; } TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2); if (!IsConstantTensor(op_context.axis)) { SetTensorToDynamic(temp_sum); return kTfLiteOk; } temp_sum->allocation_type = kTfLiteArenaRw; return ResizeTempSum(context, &op_context, temp_sum); }
179
True
1
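For PrepareMeanOrSum, the pointer at risk is the temporary sum tensor fetched with GetTemporary. A sketch of the guarded version, again assuming the reduce.cc context (OpData, OpContext, QuantizeMultiplier, ResizeTempSum) and not claiming to be the exact patched code:

TfLiteStatus PrepareMeanOrSum(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_OK(context, PrepareSimple(context, node));
  OpData* data = reinterpret_cast<OpData*>(node->user_data);

  // reduce_mean requires a buffer to store the intermediate sum result.
  OpContext op_context(context, node);
  if (op_context.input->type == kTfLiteInt8 ||
      op_context.input->type == kTfLiteUInt8 ||
      op_context.input->type == kTfLiteInt16) {
    const double real_multiplier =
        static_cast<double>(op_context.input->params.scale) /
        static_cast<double>(op_context.output->params.scale);
    int exponent;
    QuantizeMultiplier(real_multiplier, &data->multiplier, &exponent);
    data->shift = exponent;
  }
  TfLiteTensor* temp_sum = GetTemporary(context, node, /*index=*/2);
  TF_LITE_ENSURE(context, temp_sum != nullptr);  // added guard (sketch)
  if (!IsConstantTensor(op_context.axis)) {
    SetTensorToDynamic(temp_sum);
    return kTfLiteOk;
  }
  temp_sum->allocation_type = kTfLiteArenaRw;
  return ResizeTempSum(context, &op_context, temp_sum);
}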
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::PrepareSimple
tflite::ops::builtin::reduce::PrepareSimple( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_TYPES_EQ(context, op_context.axis->type, kTfLiteInt32); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); // Leaves work to Eval if axis is not constant; else resizes output. if (!IsConstantTensor(op_context.axis)) { SetTensorToDynamic(op_context.output); SetTensorToDynamic(resolved_axis); return kTfLiteOk; } resolved_axis->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); return kTfLiteOk; }
149
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reduce::PrepareSimple
tflite::ops::builtin::reduce::PrepareSimple( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_TYPES_EQ(context, op_context.axis->type, kTfLiteInt32); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1); // Leaves work to Eval if axis is not constant; else resizes output. if (!IsConstantTensor(op_context.axis)) { SetTensorToDynamic(op_context.output); SetTensorToDynamic(resolved_axis); return kTfLiteOk; } resolved_axis->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, ResizeTempAxis(context, &op_context, resolved_axis)); TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context)); return kTfLiteOk; }
149
True
1
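PrepareSimple dereferences the resolved-axis temporary both in the dynamic-shape branch and in the resize path, so a single guard right after the lookup covers both. The sketch below follows the same pattern and assumptions as the previous ones (reduce.cc helpers ResizeTempAxis and ResizeOutputTensor, TF_LITE_ENSURE for the check); the authoritative version is in the referenced patch commits.

TfLiteStatus PrepareSimple(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  OpContext op_context(context, node);
  TF_LITE_ENSURE_TYPES_EQ(context, op_context.axis->type, kTfLiteInt32);
  TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context));

  TfLiteTensor* resolved_axis = GetTemporary(context, node, /*index=*/1);
  TF_LITE_ENSURE(context, resolved_axis != nullptr);  // added guard (sketch)
  // Leaves work to Eval if axis is not constant; else resizes output.
  if (!IsConstantTensor(op_context.axis)) {
    SetTensorToDynamic(op_context.output);
    SetTensorToDynamic(resolved_axis);
    return kTfLiteOk;
  }
  resolved_axis->allocation_type = kTfLiteArenaRw;
  TF_LITE_ENSURE_OK(context,
                    ResizeTempAxis(context, &op_context, resolved_axis));
  TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, &op_context));
  return kTfLiteOk;
}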
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reshape::Eval
tflite::ops::builtin::reshape::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // There are two ways in which the 'output' can be made dynamic: it could be
  // a string tensor, or its shape cannot be calculated during Prepare(). In
  // either case, we now have all the information to calculate its shape.
  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
  }

  // Note that string tensors are always "dynamic" in the sense that their size
  // is not known until we have all the content. This applies even when their
  // shape is known ahead of time. As a result, a string tensor is never given
  // any memory by ResizeOutput(), and we need to do it manually here. Since
  // reshape doesn't change the data, the output tensor needs exactly as many
  // bytes as the input tensor.
  if (output->type == kTfLiteString) {
    auto bytes_required = input->bytes;
    TfLiteTensorRealloc(bytes_required, output);
    output->bytes = bytes_required;
  }

  memcpy(output->data.raw, input->data.raw, input->bytes);
  return kTfLiteOk;
}
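The Eval() above uses the tensors returned by GetInput()/GetOutput() without checking them, which is what lets a `-1` (optional) tensor index flow into the final memcpy. Below is a minimal sketch of the nullptr-check pattern described in the commit message above, applied to this kernel; it assumes the same file context (includes and helpers of the reshape kernel), relies on the refactored getters returning nullptr for missing tensors, and is illustrative rather than the exact upstream patch (which may use *Safe getter helpers instead).

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  // Reject a missing input tensor instead of reading out of bounds.
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  // Reject a missing output tensor instead of writing out of bounds.
  TF_LITE_ENSURE(context, output != nullptr);

  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
  }

  if (output->type == kTfLiteString) {
    auto bytes_required = input->bytes;
    TfLiteTensorRealloc(bytes_required, output);
    output->bytes = bytes_required;
  }

  memcpy(output->data.raw, input->data.raw, input->bytes);
  return kTfLiteOk;
}

The same two-line check applies to the Prepare() and ResizeOutput() functions in the records below, which also dereference GetInput()/GetOutput() results directly.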
112
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reshape::Eval
tflite::ops::builtin::reshape::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // There are two ways in which the 'output' can be made dynamic: it could be
  // a string tensor, or its shape cannot be calculated during Prepare(). In
  // either case, we now have all the information to calculate its shape.
  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
  }

  // Note that string tensors are always "dynamic" in the sense that their size
  // is not known until we have all the content. This applies even when their
  // shape is known ahead of time. As a result, a string tensor is never given
  // any memory by ResizeOutput(), and we need to do it manually here. Since
  // reshape doesn't change the data, the output tensor needs exactly as many
  // bytes as the input tensor.
  if (output->type == kTfLiteString) {
    auto bytes_required = input->bytes;
    TfLiteTensorRealloc(bytes_required, output);
    output->bytes = bytes_required;
  }

  memcpy(output->data.raw, input->data.raw, input->bytes);
  return kTfLiteOk;
}
112
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reshape::Prepare
tflite::ops::builtin::reshape::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // Always postpone sizing string tensors, even if we could in principle
  // calculate their shapes now. String tensors don't benefit from having their
  // shapes precalculated because the actual memory can only be allocated after
  // we know all the content.
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  if (output->type != kTfLiteString) {
    if (NumInputs(node) == 1 ||
        IsConstantTensor(GetInput(context, node, kShapeTensor))) {
      TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
    } else {
      SetTensorToDynamic(output);
    }
  }
  return kTfLiteOk;
}
112
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reshape::Prepare
tflite::ops::builtin::reshape::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE(context, NumInputs(node) == 1 || NumInputs(node) == 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // Always postpone sizing string tensors, even if we could in principle
  // calculate their shapes now. String tensors don't benefit from having their
  // shapes precalculated because the actual memory can only be allocated after
  // we know all the content.
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  if (output->type != kTfLiteString) {
    if (NumInputs(node) == 1 ||
        IsConstantTensor(GetInput(context, node, kShapeTensor))) {
      TF_LITE_ENSURE_OK(context, ResizeOutput(context, node));
    } else {
      SetTensorToDynamic(output);
    }
  }
  return kTfLiteOk;
}
112
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reshape::ResizeOutput
tflite::ops::builtin::reshape::ResizeOutput( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
  TfLiteIntArray* output_shape = GetOutputShape(context, node);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
      scoped_output_shape(output_shape, TfLiteIntArrayFree);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // Tensorflow's Reshape allows one of the shape components to have the
  // special -1 value, meaning it will be calculated automatically based on the
  // input. Here we calculate what that dimension should be so that the number
  // of output elements is the same as the number of input elements.
  int num_input_elements = NumElements(input);

  int num_output_elements = 1;
  int stretch_dim = -1;
  for (int i = 0; i < output_shape->size; ++i) {
    int value = output_shape->data[i];
    if (value == -1) {
      TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
      stretch_dim = i;
    } else {
      num_output_elements *= value;
    }
  }
  if (stretch_dim != -1) {
    output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
    num_output_elements *= output_shape->data[stretch_dim];
  }

  TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
  return context->ResizeTensor(context, output, scoped_output_shape.release());
}
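To make the stretch-dimension arithmetic in ResizeOutput() above concrete, the short standalone program below (plain C++, no TFLite dependencies) reshapes a 6-element input with a requested shape of {3, -1}: the wildcard dimension is computed as 6 / 3 = 2 and the final element-count check passes.

#include <cassert>
#include <vector>

int main() {
  const int num_input_elements = 6;         // e.g. a 2x3 input tensor
  std::vector<int> output_shape = {3, -1};  // requested shape with one wildcard

  int num_output_elements = 1;
  int stretch_dim = -1;
  for (int i = 0; i < static_cast<int>(output_shape.size()); ++i) {
    if (output_shape[i] == -1) {
      assert(stretch_dim == -1);  // at most one -1 is permitted
      stretch_dim = i;
    } else {
      num_output_elements *= output_shape[i];
    }
  }
  if (stretch_dim != -1) {
    // 6 input elements / 3 known output elements = 2, so the shape becomes {3, 2}.
    output_shape[stretch_dim] = num_input_elements / num_output_elements;
    num_output_elements *= output_shape[stretch_dim];
  }
  assert(num_input_elements == num_output_elements);  // reshape accepted
  return 0;
}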
205
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reshape::ResizeOutput
tflite::ops::builtin::reshape::ResizeOutput( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus ResizeOutput(TfLiteContext* context, TfLiteNode* node) {
  TfLiteIntArray* output_shape = GetOutputShape(context, node);
  std::unique_ptr<TfLiteIntArray, void (*)(TfLiteIntArray*)>
      scoped_output_shape(output_shape, TfLiteIntArrayFree);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);

  // Tensorflow's Reshape allows one of the shape components to have the
  // special -1 value, meaning it will be calculated automatically based on the
  // input. Here we calculate what that dimension should be so that the number
  // of output elements is the same as the number of input elements.
  int num_input_elements = NumElements(input);

  int num_output_elements = 1;
  int stretch_dim = -1;
  for (int i = 0; i < output_shape->size; ++i) {
    int value = output_shape->data[i];
    if (value == -1) {
      TF_LITE_ENSURE_EQ(context, stretch_dim, -1);
      stretch_dim = i;
    } else {
      num_output_elements *= value;
    }
  }
  if (stretch_dim != -1) {
    output_shape->data[stretch_dim] = num_input_elements / num_output_elements;
    num_output_elements *= output_shape->data[stretch_dim];
  }

  TF_LITE_ENSURE_EQ(context, num_input_elements, num_output_elements);
  return context->ResizeTensor(context, output, scoped_output_shape.release());
}
205
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reshape::ShapeIsVector
tflite::ops::builtin::reshape::ShapeIsVector( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
  return (shape->dims->size == 1 && shape->type == kTfLiteInt32);
}
43
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reshape::ShapeIsVector
tflite::ops::builtin::reshape::ShapeIsVector( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* shape = GetInput(context, node, kShapeTensor); return (shape->dims->size == 1 && shape->type == kTfLiteInt32); }
43
True
1
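Note on the record above: the pre-patch `reshape::ShapeIsVector` dereferences the result of `GetInput` without checking it, so the `-1` optional-tensor index described in the advisory reaches `shape->dims` through an out-of-bounds pointer. A minimal sketch of the null-safe variant implied by the commit message follows; the tensor index constant and the exact form of the check are assumptions for illustration, not the literal patched source.

```cpp
// Sketch only: null-safe variant of reshape::ShapeIsVector.
// Assumes the TFLite kernel headers that define TfLiteContext, TfLiteNode,
// TfLiteTensor and GetInput; kShapeTensor's value is assumed here.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace tflite {
namespace ops {
namespace builtin {
namespace reshape {

constexpr int kShapeTensor = 1;  // assumed index of the shape input

inline bool ShapeIsVector(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* shape = GetInput(context, node, kShapeTensor);
  // Treat a missing (nullptr) shape tensor as "not a vector" instead of
  // dereferencing it, closing the gadget described in the advisory.
  return (shape != nullptr && shape->dims->size == 1 &&
          shape->type == kTfLiteInt32);
}

}  // namespace reshape
}  // namespace builtin
}  // namespace ops
}  // namespace tflite
```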
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::resize_bilinear::Eval
tflite::ops::builtin::resize_bilinear::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, input, size, output)); } if (output->type == kTfLiteFloat32) { #define TF_LITE_RESIZE_BILINEAR(type, datatype) \ tflite::ResizeBilinearParams op_params; \ op_params.align_corners = params->align_corners; \ op_params.half_pixel_centers = params->half_pixel_centers; \ type::ResizeBilinear(op_params, GetTensorShape(input), \ GetTensorData<datatype>(input), GetTensorShape(size), \ GetTensorData<int32>(size), GetTensorShape(output), \ GetTensorData<datatype>(output)) if (kernel_type == kReference) { TF_LITE_RESIZE_BILINEAR(reference_ops, float); } if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) { TF_LITE_RESIZE_BILINEAR(optimized_ops, float); } } else if (output->type == kTfLiteUInt8) { if (kernel_type == kReference) { TF_LITE_RESIZE_BILINEAR(reference_ops, uint8_t); } if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) { TF_LITE_RESIZE_BILINEAR(optimized_ops, uint8_t); } } else if (output->type == kTfLiteInt8) { TF_LITE_RESIZE_BILINEAR(reference_ops, int8_t); #undef TF_LITE_RESIZE_BILINEAR } else { context->ReportError(context, "Output type is %d, requires float.", output->type); return kTfLiteError; } return kTfLiteOk; }
222
True
1
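Note on the record above: the commit message describes inserting `nullptr` checks after each tensor lookup. Applied to the `resize_bilinear::Eval` prologue recorded here, the guarded version might look like the sketch below, which reuses the `TF_LITE_ENSURE` family of macros already used by this kernel; only the prologue is shown and the dispatch to the reference/optimized `ResizeBilinear` implementations is unchanged.

```cpp
// Sketch only: guarded tensor lookups at the top of resize_bilinear::Eval.
// Intended to sit in the original kernel file; the rest of Eval is unchanged.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);   // rejects the -1 optional index
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);
  const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
  TF_LITE_ENSURE(context, size != nullptr);
  if (IsDynamicTensor(output)) {
    TF_LITE_ENSURE_OK(context,
                      ResizeOutputTensor(context, input, size, output));
  }
  // ... type dispatch to ResizeBilinear as in the recorded function body ...
  return kTfLiteOk;
}
```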
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::resize_bilinear::Eval
tflite::ops::builtin::resize_bilinear::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, input, size, output)); } if (output->type == kTfLiteFloat32) { #define TF_LITE_RESIZE_BILINEAR(type, datatype) \ tflite::ResizeBilinearParams op_params; \ op_params.align_corners = params->align_corners; \ op_params.half_pixel_centers = params->half_pixel_centers; \ type::ResizeBilinear(op_params, GetTensorShape(input), \ GetTensorData<datatype>(input), GetTensorShape(size), \ GetTensorData<int32>(size), GetTensorShape(output), \ GetTensorData<datatype>(output)) if (kernel_type == kReference) { TF_LITE_RESIZE_BILINEAR(reference_ops, float); } if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) { TF_LITE_RESIZE_BILINEAR(optimized_ops, float); } } else if (output->type == kTfLiteUInt8) { if (kernel_type == kReference) { TF_LITE_RESIZE_BILINEAR(reference_ops, uint8_t); } if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) { TF_LITE_RESIZE_BILINEAR(optimized_ops, uint8_t); } } else if (output->type == kTfLiteInt8) { TF_LITE_RESIZE_BILINEAR(reference_ops, int8_t); #undef TF_LITE_RESIZE_BILINEAR } else { context->ReportError(context, "Output type is %d, requires float.", output->type); return kTfLiteError; } return kTfLiteOk; }
222
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::resize_bilinear::Prepare
tflite::ops::builtin::resize_bilinear::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): Our current implementations rely on the inputs being 4D. TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); // ResizeBilinear creates a float tensor even when the input is made of // integers. output->type = input->type; if (!IsConstantTensor(size)) { SetTensorToDynamic(output); return kTfLiteOk; } // Ensure params are valid. auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data); if (params->half_pixel_centers && params->align_corners) { context->ReportError( context, "If half_pixel_centers is True, align_corners must be False."); return kTfLiteError; } return ResizeOutputTensor(context, input, size, output); }
189
True
1
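Note on the record above: the advisory's suggested workaround is a custom `Verifier` attached at model-loading time. A rough sketch of a deliberately conservative verifier is below. It assumes the flatbuffer-generated accessors (`tflite::GetModel`, `subgraphs()`, `operators()`, `inputs()`, `outputs()`) and the `TfLiteVerifier` extension point on `FlatBufferModel`; rather than maintaining the per-operator allow-list the advisory calls error-prone, it simply rejects any model that uses the `-1` optional-tensor marker at all, which will also reject some legitimate models.

```cpp
// Sketch only: an extra verifier that rejects any -1 tensor index.
// Assumes tflite::TfLiteVerifier and the generated schema accessors; the
// usual flatbuffer verification is still performed by FlatBufferModel itself.
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"

class NoOptionalTensorVerifier : public tflite::TfLiteVerifier {
 public:
  bool Verify(const char* data, int length,
              tflite::ErrorReporter* reporter) override {
    (void)length;  // structural flatbuffer checks are handled elsewhere
    const tflite::Model* model = tflite::GetModel(data);
    if (model == nullptr || model->subgraphs() == nullptr) return false;
    for (const tflite::SubGraph* subgraph : *model->subgraphs()) {
      if (subgraph->operators() == nullptr) continue;
      for (const tflite::Operator* op : *subgraph->operators()) {
        if (HasNegativeIndex(op->inputs()) ||
            HasNegativeIndex(op->outputs())) {
          if (reporter) reporter->Report("Model uses optional (-1) tensor index.");
          return false;
        }
      }
    }
    return true;
  }

 private:
  static bool HasNegativeIndex(const flatbuffers::Vector<int32_t>* indices) {
    if (indices == nullptr) return false;
    for (int32_t idx : *indices) {
      if (idx < 0) return true;  // -1 marks an "optional" tensor
    }
    return false;
  }
};
```

Under the same assumptions, such a verifier would be supplied to the model builder via something like `tflite::FlatBufferModel::VerifyAndBuildFromBuffer(buffer, size, &verifier)`; the patched releases remain the recommended fix.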
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::resize_bilinear::Prepare
tflite::ops::builtin::resize_bilinear::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): Our current implementations rely on the inputs being 4D. TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4); TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1); TF_LITE_ENSURE_EQ(context, size->type, kTfLiteInt32); // ResizeBilinear creates a float tensor even when the input is made of // integers. output->type = input->type; if (!IsConstantTensor(size)) { SetTensorToDynamic(output); return kTfLiteOk; } // Ensure params are valid. auto* params = reinterpret_cast<TfLiteResizeBilinearParams*>(node->builtin_data); if (params->half_pixel_centers && params->align_corners) { context->ReportError( context, "If half_pixel_centers is True, align_corners must be False."); return kTfLiteError; } return ResizeOutputTensor(context, input, size, output); }
189
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::resize_nearest_neighbor::Eval
tflite::ops::builtin::resize_nearest_neighbor::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteResizeNearestNeighborParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, input, size, output)); } tflite::ResizeNearestNeighborParams op_params; op_params.align_corners = params->align_corners; op_params.half_pixel_centers = params->half_pixel_centers; if (output->type == kTfLiteFloat32) { reference_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<int32>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<int32>(output)); } else if (output->type == kTfLiteUInt8) { if (kernel_type == kReference) { reference_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<uint8_t>(output)); } if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) { optimized_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<uint8_t>(output)); } } else if (output->type == kTfLiteInt8) { reference_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<int8_t>(output)); } else if (output->type == kTfLiteInt16) { reference_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { TF_LITE_KERNEL_LOG( context, "Output type is %s, requires float, uint8, int8 or int16.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
430
True
1
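Note on the record above: the double-indexing behaviour the advisory describes is easier to see in a stripped-down form. The sketch below is a simplified model of the lookup performed by helpers such as `GetInput` (it is not the TFLite source): the operator stores positions into the subgraph-owned tensor array, and the `-1` optional-tensor marker indexes one element before that array, which is the fixed-offset out-of-bounds read/write gadget described.

```cpp
// Simplified illustration of the double-indexing lookup, not TFLite source.
#include <vector>

struct Tensor {
  float* data;
  int size;
};

// `tensors` stands in for the subgraph-owned tensor array; `op_inputs` for an
// operator's list of tensor indices as stored in the flatbuffer.
const Tensor* LookupInput(const std::vector<Tensor>& tensors,
                          const std::vector<int>& op_inputs, int i) {
  int tensor_index = op_inputs[i];  // first index: slot in the operator's list
  // Pre-patch behaviour: the special value -1 ("optional tensor absent") is
  // not filtered out, so the second index lands just before the array.
  return &tensors[tensor_index];
}

// Post-patch behaviour, conceptually: out-of-range indices yield nullptr, and
// the kernels gain the nullptr checks recorded in the commit message above.
const Tensor* LookupInputChecked(const std::vector<Tensor>& tensors,
                                 const std::vector<int>& op_inputs, int i) {
  int tensor_index = op_inputs[i];
  if (tensor_index < 0 || tensor_index >= static_cast<int>(tensors.size())) {
    return nullptr;
  }
  return &tensors[tensor_index];
}
```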
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::resize_nearest_neighbor::Eval
tflite::ops::builtin::resize_nearest_neighbor::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteResizeNearestNeighborParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const TfLiteTensor* size = GetInput(context, node, kSizeTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_OK(context, ResizeOutputTensor(context, input, size, output)); } tflite::ResizeNearestNeighborParams op_params; op_params.align_corners = params->align_corners; op_params.half_pixel_centers = params->half_pixel_centers; if (output->type == kTfLiteFloat32) { reference_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<int32>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<int32>(output)); } else if (output->type == kTfLiteUInt8) { if (kernel_type == kReference) { reference_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<uint8_t>(output)); } if (kernel_type == kGenericOptimized || kernel_type == kNeonOptimized) { optimized_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<uint8_t>(output)); } } else if (output->type == kTfLiteInt8) { reference_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<int8_t>(output)); } else if (output->type == kTfLiteInt16) { reference_ops::ResizeNearestNeighbor( op_params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(size), GetTensorData<int32>(size), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { TF_LITE_KERNEL_LOG( context, "Output type is %s, requires float, uint8, int8 or int16.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
430
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
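The commit message above describes the mitigation pattern without showing it; below is a minimal sketch of what such a check looks like inside a kernel's Prepare. It assumes the standard TFLite kernel helpers (GetInput, GetOutput, TF_LITE_ENSURE), assumed header locations, and placeholder tensor indices; it is not the exact patch applied in commit 1970c215.

// Minimal sketch of the nullptr-check pattern (assumed headers and indices, not the exact patch).
#include "tensorflow/lite/c/common.h"             // TfLiteContext, TfLiteNode, TfLiteStatus (assumed location)
#include "tensorflow/lite/kernels/kernel_util.h"  // GetInput, GetOutput, TF_LITE_ENSURE (assumed location)

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, /*index=*/0);
  TF_LITE_ENSURE(context, input != nullptr);   // fail gracefully instead of dereferencing
  TfLiteTensor* output = GetOutput(context, node, /*index=*/0);
  TF_LITE_ENSURE(context, output != nullptr);
  output->type = input->type;                  // safe to dereference past the checks
  return kTfLiteOk;
}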
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::resize_nearest_neighbor::Prepare
tflite::ops::builtin::resize_nearest_neighbor::Prepare(TfLiteContext* context, TfLiteNode* node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  // TODO(ahentz): Our current implementations rely on the input being 4D,
  // and the size being 1D tensor with exactly 2 elements.
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
  TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
  TF_LITE_ENSURE_TYPES_EQ(context, size->type, kTfLiteInt32);
  TF_LITE_ENSURE_EQ(context, size->dims->data[0], 2);
  output->type = input->type;

  if (!IsConstantTensor(size)) {
    SetTensorToDynamic(output);
    return kTfLiteOk;
  }
  return ResizeOutputTensor(context, input, size, output);
}
166
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::resize_nearest_neighbor::Prepare
tflite::ops::builtin::resize_nearest_neighbor::Prepare(TfLiteContext* context, TfLiteNode* node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* size = GetInput(context, node, kSizeTensor);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  // TODO(ahentz): Our current implementations rely on the input being 4D,
  // and the size being 1D tensor with exactly 2 elements.
  TF_LITE_ENSURE_EQ(context, NumDimensions(input), 4);
  TF_LITE_ENSURE_EQ(context, NumDimensions(size), 1);
  TF_LITE_ENSURE_TYPES_EQ(context, size->type, kTfLiteInt32);
  TF_LITE_ENSURE_EQ(context, size->dims->data[0], 2);
  output->type = input->type;

  if (!IsConstantTensor(size)) {
    SetTensorToDynamic(output);
    return kTfLiteOk;
  }
  return ResizeOutputTensor(context, input, size, output);
}
166
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reverse::Eval
tflite::ops::builtin::reverse::Eval(TfLiteContext* context, TfLiteNode* node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* axis_tensor = GetInput(context, node, kAxisTensor);
  int axis = GetTensorData<int32_t>(axis_tensor)[0];
  const int rank = NumDimensions(input);
  if (axis < 0) {
    axis += rank;
  }
  TF_LITE_ENSURE(context, axis >= 0 && axis < rank);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  switch (output->type) {
    case kTfLiteFloat32: {
      reference_ops::Reverse<float>(
          axis, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      break;
    }
    case kTfLiteUInt8: {
      reference_ops::Reverse<uint8_t>(
          axis, GetTensorShape(input), GetTensorData<uint8_t>(input),
          GetTensorShape(output), GetTensorData<uint8_t>(output));
      break;
    }
    case kTfLiteInt16: {
      reference_ops::Reverse<int16_t>(
          axis, GetTensorShape(input), GetTensorData<int16_t>(input),
          GetTensorShape(output), GetTensorData<int16_t>(output));
      break;
    }
    case kTfLiteInt32: {
      reference_ops::Reverse<int32_t>(
          axis, GetTensorShape(input), GetTensorData<int32_t>(input),
          GetTensorShape(output), GetTensorData<int32_t>(output));
      break;
    }
    case kTfLiteInt64: {
      reference_ops::Reverse<int64_t>(
          axis, GetTensorShape(input), GetTensorData<int64_t>(input),
          GetTensorShape(output), GetTensorData<int64_t>(output));
      break;
    }
    case kTfLiteBool: {
      reference_ops::Reverse<bool>(
          axis, GetTensorShape(input), GetTensorData<bool>(input),
          GetTensorShape(output), GetTensorData<bool>(output));
      break;
    }
    default: {
      context->ReportError(context, "Type '%s' is not supported by reverse.",
                           TfLiteTypeGetName(output->type));
      return kTfLiteError;
    }
  }

  return kTfLiteOk;
}
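In this Eval, the tensor returned for the axis input is dereferenced immediately via GetTensorData, with no intervening validation, which is what makes the -1 "optional" index particularly dangerous here. A hedged sketch of how the commit-message pattern would apply to the opening lines of this kernel (not the exact upstream diff) looks like this:

// Sketch only: opening of Eval with the nullptr checks from the commit message applied.
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* axis_tensor = GetInput(context, node, kAxisTensor);
TF_LITE_ENSURE(context, axis_tensor != nullptr);     // checked before GetTensorData
int axis = GetTensorData<int32_t>(axis_tensor)[0];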
393
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::reverse::Eval
tflite::ops::builtin::reverse::Eval(TfLiteContext* context, TfLiteNode* node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  const TfLiteTensor* axis_tensor = GetInput(context, node, kAxisTensor);
  int axis = GetTensorData<int32_t>(axis_tensor)[0];
  const int rank = NumDimensions(input);
  if (axis < 0) {
    axis += rank;
  }
  TF_LITE_ENSURE(context, axis >= 0 && axis < rank);

  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  switch (output->type) {
    case kTfLiteFloat32: {
      reference_ops::Reverse<float>(
          axis, GetTensorShape(input), GetTensorData<float>(input),
          GetTensorShape(output), GetTensorData<float>(output));
      break;
    }
    case kTfLiteUInt8: {
      reference_ops::Reverse<uint8_t>(
          axis, GetTensorShape(input), GetTensorData<uint8_t>(input),
          GetTensorShape(output), GetTensorData<uint8_t>(output));
      break;
    }
    case kTfLiteInt16: {
      reference_ops::Reverse<int16_t>(
          axis, GetTensorShape(input), GetTensorData<int16_t>(input),
          GetTensorShape(output), GetTensorData<int16_t>(output));
      break;
    }
    case kTfLiteInt32: {
      reference_ops::Reverse<int32_t>(
          axis, GetTensorShape(input), GetTensorData<int32_t>(input),
          GetTensorShape(output), GetTensorData<int32_t>(output));
      break;
    }
    case kTfLiteInt64: {
      reference_ops::Reverse<int64_t>(
          axis, GetTensorShape(input), GetTensorData<int64_t>(input),
          GetTensorShape(output), GetTensorData<int64_t>(output));
      break;
    }
    case kTfLiteBool: {
      reference_ops::Reverse<bool>(
          axis, GetTensorShape(input), GetTensorData<bool>(input),
          GetTensorShape(output), GetTensorData<bool>(output));
      break;
    }
    default: {
      context->ReportError(context, "Type '%s' is not supported by reverse.",
                           TfLiteTypeGetName(output->type));
      return kTfLiteError;
    }
  }

  return kTfLiteOk;
}
393
True
1