Dataset schema (column name, column type, and string-length range or number of distinct values):

| Column | Type | Length range / distinct values |
|---|---|---|
| cve_id | stringlengths | 13–16 |
| obtain_all_privilege | stringclasses | 3 values |
| obtain_user_privilege | stringclasses | 2 values |
| obtain_other_privilege | stringclasses | 2 values |
| user_interaction_required | stringclasses | 3 values |
| cvss2_vector_string | stringclasses | 106 values |
| cvss2_access_vector | stringclasses | 4 values |
| cvss2_access_complexity | stringclasses | 4 values |
| cvss2_authentication | stringclasses | 3 values |
| cvss2_confidentiality_impact | stringclasses | 4 values |
| cvss2_integrity_impact | stringclasses | 4 values |
| cvss2_availability_impact | stringclasses | 4 values |
| cvss2_base_score | stringclasses | 50 values |
| cvss3_vector_string | stringclasses | 226 values |
| cvss3_attack_vector | stringclasses | 5 values |
| cvss3_attack_complexity | stringclasses | 3 values |
| cvss3_privileges_required | stringclasses | 4 values |
| cvss3_user_interaction | stringclasses | 3 values |
| cvss3_scope | stringclasses | 3 values |
| cvss3_confidentiality_impact | stringclasses | 4 values |
| cvss3_integrity_impact | stringclasses | 4 values |
| cvss3_availability_impact | stringclasses | 4 values |
| cvss3_base_score | stringclasses | 55 values |
| cvss3_base_severity | stringclasses | 5 values |
| exploitability_score | stringclasses | 22 values |
| impact_score | stringclasses | 15 values |
| ac_insuf_info | stringclasses | 3 values |
| reference_json | stringlengths | 221–23.3k |
| problemtype_json | stringclasses | 200 values |
| severity | stringclasses | 4 values |
| cve_nodes | stringlengths | 2–33.1k |
| cve_description | stringlengths | 64–1.99k |
| cve_last_modified_date | stringlengths | 17–17 |
| cve_published_date | stringlengths | 17–17 |
| cwe_name | stringclasses | 125 values |
| cwe_description | stringclasses | 124 values |
| cwe_extended_description | stringclasses | 95 values |
| cwe_url | stringclasses | 124 values |
| cwe_is_category | int64 | 0–1 |
| commit_author | stringlengths | 0–34 |
| commit_author_date | stringlengths | 25–25 |
| commit_msg | stringlengths | 0–13.3k |
| commit_hash | stringlengths | 40–40 |
| commit_is_merge | stringclasses | 1 value |
| repo_name | stringclasses | 467 values |
| repo_description | stringclasses | 459 values |
| repo_date_created | stringclasses | 467 values |
| repo_date_last_push | stringclasses | 467 values |
| repo_homepage | stringclasses | 294 values |
| repo_owner | stringclasses | 470 values |
| repo_stars | stringclasses | 406 values |
| repo_forks | stringclasses | 352 values |
| function_name | stringlengths | 3–120 |
| function_signature | stringlengths | 6–640 |
| function_parameters | stringlengths | 2–302 |
| function | stringlengths | 12–114k |
| function_token_count | stringlengths | 1–5 |
| function_before_change | stringclasses | 1 value |
| labels | int64 | 1–1 |
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::LogSoftmaxEval
tflite::ops::builtin::activations::LogSoftmaxEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
379
True
1
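The commit message in the record above describes the fix pattern: after the ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, and related accessors may return `nullptr`, so every usage gains an explicit check. Below is a minimal sketch of that pattern applied to the prologue of a kernel like the `LogSoftmaxEval` shown above; the function name `LogSoftmaxEvalChecked` is hypothetical and the exact lines added by commit 1970c215 may be placed differently.

```cpp
// Sketch of the nullptr-check pattern the commit message describes.
// Assumes the TFLite kernel headers (GetInput/GetOutput, TF_LITE_ENSURE,
// LogSoftmaxOpData), as in the surrounding dataset rows.
TfLiteStatus LogSoftmaxEvalChecked(TfLiteContext* context, TfLiteNode* node) {
  const LogSoftmaxOpData* data =
      reinterpret_cast<LogSoftmaxOpData*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);   // bail out instead of dereferencing
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);  // a null tensor now yields kTfLiteError
  // ... dispatch on input->type exactly as in the original function above ...
  return kTfLiteOk;
}
```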
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::LogSoftmaxEval
tflite::ops::builtin::activations::LogSoftmaxEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus LogSoftmaxEval(TfLiteContext* context, TfLiteNode* node) { const LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { SoftmaxParams op_params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } case kTfLiteUInt8: { SoftmaxParams op_params = data->params; if (kernel_type == kGenericOptimized) { optimized_ops::LogSoftmax( op_params, input->params.scale, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::LogSoftmax( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } case kTfLiteInt8: { const auto input_shape = GetTensorShape(input); const auto output_shape = GetTensorShape(output); const int trailing_dim = input_shape.DimensionsCount() - 1; const int outer_size = MatchingFlatSizeSkipDim(input_shape, trailing_dim, output_shape); const int depth = MatchingDim(input_shape, trailing_dim, output_shape, trailing_dim); reference_integer_ops::LogSoftmax( data->input_multiplier, data->input_left_shift, data->reverse_scaling_divisor, data->reverse_scaling_right_shift, data->diff_min, outer_size, depth, GetTensorData<int8_t>(input), GetTensorData<int8_t>(output)); return kTfLiteOk; } default: TF_LITE_KERNEL_LOG( context, "Only float32, uint8 and int8 are supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
379
True
1
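The CVE description repeated in these records explains the root cause: operator inputs and outputs are stored as indices into the subgraph-owned tensor array, and the flatbuffer may legally contain `-1` (the optional-tensor marker) even for operators that never expect an optional tensor. The following simplified sketch shows why that produces the fixed-offset out-of-bounds access the advisory describes; it is an illustration of the double-indexing scheme, not the exact TFLite accessor implementation, and `GetInputSketch` / `kOptionalTensor` are names introduced here.

```cpp
// Simplified illustration of the double-indexing scheme from the CVE text.
// kOptionalTensor stands in for TFLite's optional-tensor marker (-1).
constexpr int kOptionalTensor = -1;

const TfLiteTensor* GetInputSketch(const TfLiteContext* context,
                                   const TfLiteNode* node, int index) {
  // First index: position within this node's input list.
  const int tensor_index = node->inputs->data[index];
  // Second index: position within the subgraph-owned tensor array.
  // If the model stored -1 here and the kernel does not treat the input as
  // optional, this evaluates to &context->tensors[-1]: one TfLiteTensor
  // *before* the heap-allocated array, i.e. a read/write gadget at a fixed
  // negative offset, which is exactly the limited OOB the advisory describes.
  return &context->tensors[tensor_index];
}
```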
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::LogSoftmaxPrepare
tflite::ops::builtin::activations::LogSoftmaxPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256); static const double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
318
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::LogSoftmaxPrepare
tflite::ops::builtin::activations::LogSoftmaxPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus LogSoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { LogSoftmaxOpData* data = reinterpret_cast<LogSoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.scale, 16.0 / 256); static const double kBeta = 1.0; if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 255); data->params.table = data->f_table; optimized_ops::PopulateSoftmaxLookupTable(&data->params, input->params.scale, kBeta); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 127); static const int kScaledDiffIntegerBits = 5; tflite::PreprocessLogSoftmaxScalingExp( kBeta, input->params.scale, kScaledDiffIntegerBits, &data->input_multiplier, &data->input_left_shift, &data->reverse_scaling_divisor, &data->reverse_scaling_right_shift); data->reverse_scaling_right_shift *= -1; data->diff_min = -1.0 * tflite::CalculateInputRadius(kScaledDiffIntegerBits, data->input_left_shift); } } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
318
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::PreluEval
tflite::ops::builtin::activations::PreluEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* alpha = GetInput(context, node, 1); TfLiteTensor* output = GetOutput(context, node, 0); const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kGenericOptimized) { tflite::ArithmeticParams op_params; bool need_broadcast = optimized_ops::ProcessBroadcastShapes( GetTensorShape(input), GetTensorShape(alpha), &op_params); if (need_broadcast) { optimized_ops::BroadcastPReluDispatch( op_params, GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { const int flat_size = MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha), GetTensorShape(output)); optimized_ops::PReluElementWise( flat_size, op_params, GetTensorData<float>(alpha), GetTensorData<float>(input), GetTensorData<float>(output)); } } else { if (data->requires_broadcast) { reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } else { reference_ops::BinaryFunction<float, float, float>( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(alpha), GetTensorData<float>(alpha), GetTensorShape(output), GetTensorData<float>(output), ApplyPrelu<float>); } } return kTfLiteOk; } break; case kTfLiteUInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(alpha), GetTensorData<uint8_t>(alpha), GetTensorShape(output), GetTensorData<uint8_t>(output)); } return kTfLiteOk; } break; case kTfLiteInt8: { PreluParams op_params; op_params.input_offset = -input->params.zero_point; op_params.alpha_offset = -alpha->params.zero_point; op_params.output_offset = output->params.zero_point; op_params.output_multiplier_1 = data->output_multiplier_1; op_params.output_shift_1 = data->output_shift_1; op_params.output_multiplier_2 = data->output_multiplier_2; op_params.output_shift_2 = data->output_shift_2; if (data->requires_broadcast) { reference_ops::BroadcastPrelu4DSlow( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { reference_ops::Prelu( op_params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(alpha), GetTensorData<int8_t>(alpha), GetTensorShape(output), GetTensorData<int8_t>(output)); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 and uint8 and int8 are supported currently, got %d.", TfLiteTypeGetName(input->type)); 
return kTfLiteError; } }
741
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::PreluEval
tflite::ops::builtin::activations::PreluEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
template <KernelType kernel_type>
TfLiteStatus PreluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  const TfLiteTensor* alpha = GetInput(context, node, 1);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      if (kernel_type == kGenericOptimized) {
        tflite::ArithmeticParams op_params;
        bool need_broadcast = optimized_ops::ProcessBroadcastShapes(
            GetTensorShape(input), GetTensorShape(alpha), &op_params);
        if (need_broadcast) {
          optimized_ops::BroadcastPReluDispatch(
              op_params, GetTensorShape(input), GetTensorData<float>(input),
              GetTensorShape(alpha), GetTensorData<float>(alpha),
              GetTensorShape(output), GetTensorData<float>(output),
              ApplyPrelu<float>);
        } else {
          const int flat_size =
              MatchingElementsSize(GetTensorShape(input), GetTensorShape(alpha),
                                   GetTensorShape(output));
          optimized_ops::PReluElementWise(
              flat_size, op_params, GetTensorData<float>(alpha),
              GetTensorData<float>(input), GetTensorData<float>(output));
        }
      } else {
        if (data->requires_broadcast) {
          reference_ops::BroadcastBinaryFunction4DSlow<float, float, float>(
              GetTensorShape(input), GetTensorData<float>(input),
              GetTensorShape(alpha), GetTensorData<float>(alpha),
              GetTensorShape(output), GetTensorData<float>(output),
              ApplyPrelu<float>);
        } else {
          reference_ops::BinaryFunction<float, float, float>(
              GetTensorShape(input), GetTensorData<float>(input),
              GetTensorShape(alpha), GetTensorData<float>(alpha),
              GetTensorShape(output), GetTensorData<float>(output),
              ApplyPrelu<float>);
        }
      }
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      PreluParams op_params;
      op_params.input_offset = -input->params.zero_point;
      op_params.alpha_offset = -alpha->params.zero_point;
      op_params.output_offset = output->params.zero_point;
      op_params.output_multiplier_1 = data->output_multiplier_1;
      op_params.output_shift_1 = data->output_shift_1;
      op_params.output_multiplier_2 = data->output_multiplier_2;
      op_params.output_shift_2 = data->output_shift_2;
      if (data->requires_broadcast) {
        reference_ops::BroadcastPrelu4DSlow(
            op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(alpha), GetTensorData<uint8_t>(alpha),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      } else {
        reference_ops::Prelu(
            op_params, GetTensorShape(input), GetTensorData<uint8_t>(input),
            GetTensorShape(alpha), GetTensorData<uint8_t>(alpha),
            GetTensorShape(output), GetTensorData<uint8_t>(output));
      }
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      PreluParams op_params;
      op_params.input_offset = -input->params.zero_point;
      op_params.alpha_offset = -alpha->params.zero_point;
      op_params.output_offset = output->params.zero_point;
      op_params.output_multiplier_1 = data->output_multiplier_1;
      op_params.output_shift_1 = data->output_shift_1;
      op_params.output_multiplier_2 = data->output_multiplier_2;
      op_params.output_shift_2 = data->output_shift_2;
      if (data->requires_broadcast) {
        reference_ops::BroadcastPrelu4DSlow(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      } else {
        reference_ops::Prelu(
            op_params, GetTensorShape(input), GetTensorData<int8_t>(input),
            GetTensorShape(alpha), GetTensorData<int8_t>(alpha),
            GetTensorShape(output), GetTensorData<int8_t>(output));
      }
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32 and uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
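Note on the float path above: it dispatches an element-wise functor named ApplyPrelu. As a minimal sketch of what such a functor computes, assuming the standard PReLU definition rather than quoting the repository's implementation:

// Sketch only: element-wise PReLU, prelu(x) = x for x >= 0, otherwise x * alpha.
template <typename T>
T ApplyPrelu(T input, T alpha) {
  return input >= 0 ? input : input * alpha;
}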
741
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
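As a minimal, generic illustration of this weakness class (not code from TensorFlow), an index that is allowed to carry the sentinel value -1 turns an ordinary array lookup into a read just before the start of the heap allocation:

// Sketch: idx comes from untrusted model data; -1 is tolerated as a "not
// present" marker elsewhere, so the missing bounds check lets the read land
// one element before the buffer.
int ReadElement(const int* data, int count, int idx) {
  // Missing: if (idx < 0 || idx >= count) { /* report error */ }
  return data[idx];  // idx == -1 reads out of bounds
}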
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
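A minimal sketch of the pattern this commit message describes, applied to lookups like the ones in the kernels in this record (the exact upstream diff may differ): guard each accessor result with TF_LITE_ENSURE before any field or data pointer is touched.

// Sketch only: fail the node with kTfLiteError instead of dereferencing a
// null tensor returned for a malformed (e.g. -1) tensor index.
const TfLiteTensor* input = GetInput(context, node, 0);
TF_LITE_ENSURE(context, input != nullptr);
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, output != nullptr);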
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::PreluPrepare
tflite::ops::builtin::activations::PreluPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const TfLiteTensor* alpha = GetInput(context, node, 1);
  PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type);
  output->type = input->type;
  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    // prelu(x) = x if x >= 0 else x * alpha.
    // So if we translate that for quantized computation:
    //
    //   input_float = (input_q - input_zp) * input_scale
    //   output_float = (output_q - output_zp) * output_scale
    //   alpha_float = (alpha_q - alpha_zp) * alpha_scale
    //
    // When input_q - input_zp >= 0:
    //   output_q = (input_q - input_zp) * input_scale / output_scale + output_zp
    // else:
    //   output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale
    //              * alpha_scale / output_scale + output_zp
    //
    // So for input_q - input_zp >= 0:
    //   output real multiplier 1 is input_scale / output_scale;
    // for input_q - input_zp < 0:
    //   output real multiplier 2 is input_scale * alpha_scale / output_scale.
    double real_multiplier_1 = input->params.scale / output->params.scale;
    double real_multiplier_2 =
        input->params.scale * alpha->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1,
                       &data->output_shift_1);
    QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2,
                       &data->output_shift_2);
  }
  data->requires_broadcast = !HaveSameShapes(input, alpha);
  // PRelu (parametric ReLU) shares the same alpha value on the "shared axis".
  // This means it's always required to "broadcast" alpha values in PRelu.
  TfLiteIntArray* output_size = nullptr;
  TF_LITE_ENSURE_OK(
      context, CalculateShapeForBroadcast(context, input, alpha, &output_size));
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_size));
  // After broadcasting, the output shape should always be the same as the
  // input shape.
  TF_LITE_ENSURE(context, HaveSameShapes(input, output));
  return kTfLiteOk;
}
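A small worked example of the two requantization multipliers derived in the comment block above, using illustrative scales that are not taken from any real model:

// Illustrative only: input_scale = 0.02, alpha_scale = 0.5, output_scale = 0.04.
// real_multiplier_1 = input_scale / output_scale               = 0.5
// real_multiplier_2 = input_scale * alpha_scale / output_scale = 0.25
// QuantizeMultiplier stores each value as a Q31 significand plus a shift,
// e.g. 0.5 -> (multiplier = 1 << 30, shift = 0), 0.25 -> (1 << 30, shift = -1).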
267
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
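As a minimal, generic illustration of this weakness class (again, not TensorFlow code), the same unchecked -1 index on a store corrupts whatever sits immediately before the buffer:

// Sketch: without the bounds check, idx == -1 overwrites memory just before
// the heap allocation (a limited but attacker-influenced write).
void WriteElement(float* data, int count, int idx, float value) {
  // Missing: if (idx < 0 || idx >= count) { /* report error */ }
  data[idx] = value;
}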
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::PreluPrepare
tflite::ops::builtin::activations::PreluPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus PreluPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const TfLiteTensor* alpha = GetInput(context, node, 1);
  PreluOpData* data = reinterpret_cast<PreluOpData*>(node->user_data);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, alpha->type);
  output->type = input->type;
  if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 ||
      output->type == kTfLiteInt16) {
    // prelu(x) = x if x >= 0 else x * alpha.
    // So if we translate that for quantized computation:
    //
    //   input_float = (input_q - input_zp) * input_scale
    //   output_float = (output_q - output_zp) * output_scale
    //   alpha_float = (alpha_q - alpha_zp) * alpha_scale
    //
    // When input_q - input_zp >= 0:
    //   output_q = (input_q - input_zp) * input_scale / output_scale + output_zp
    // else:
    //   output_q = (input_q - input_zp) * (alpha_q - alpha_zp) * input_scale
    //              * alpha_scale / output_scale + output_zp
    //
    // So for input_q - input_zp >= 0:
    //   output real multiplier 1 is input_scale / output_scale;
    // for input_q - input_zp < 0:
    //   output real multiplier 2 is input_scale * alpha_scale / output_scale.
    double real_multiplier_1 = input->params.scale / output->params.scale;
    double real_multiplier_2 =
        input->params.scale * alpha->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier_1, &data->output_multiplier_1,
                       &data->output_shift_1);
    QuantizeMultiplier(real_multiplier_2, &data->output_multiplier_2,
                       &data->output_shift_2);
  }
  data->requires_broadcast = !HaveSameShapes(input, alpha);
  // PRelu (parametric ReLU) shares the same alpha value on the "shared axis".
  // This means it's always required to "broadcast" alpha values in PRelu.
  TfLiteIntArray* output_size = nullptr;
  TF_LITE_ENSURE_OK(
      context, CalculateShapeForBroadcast(context, input, alpha, &output_size));
  TF_LITE_ENSURE_OK(context,
                    context->ResizeTensor(context, output, output_size));
  // After broadcasting, the output shape should always be the same as the
  // input shape.
  TF_LITE_ENSURE(context, HaveSameShapes(input, output));
  return kTfLiteOk;
}
267
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::Relu1Eval
tflite::ops::builtin::activations::Relu1Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input),
                           GetTensorShape(output),
                           GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(-1, 1, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int8 supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
182
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::Relu1Eval
tflite::ops::builtin::activations::Relu1Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Relu1Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu1(GetTensorShape(input), GetTensorData<float>(input),
                           GetTensorShape(output),
                           GetTensorData<float>(output));
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(-1.0f, 1.0f, input, output, data);
      return kTfLiteOk;
    } break;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(-1, 1, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(context,
                         "Only float32, uint8, int8 supported "
                         "currently, got %s.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
182
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::Relu6Eval
tflite::ops::builtin::activations::Relu6Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      size_t elements = input->bytes / sizeof(float);
      const float* in = GetTensorData<float>(input);
      const float* in_end = in + elements;
      float* out = GetTensorData<float>(output);
      for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f);
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8:
      QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
230
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::Relu6Eval
tflite::ops::builtin::activations::Relu6Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Relu6Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TfLiteTensor* output = GetOutput(context, node, 0);
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      size_t elements = input->bytes / sizeof(float);
      const float* in = GetTensorData<float>(input);
      const float* in_end = in + elements;
      float* out = GetTensorData<float>(output);
      for (; in < in_end; in++, out++) *out = std::min(std::max(0.f, *in), 6.f);
      return kTfLiteOk;
    } break;
    case kTfLiteUInt8:
      QuantizedReluX<uint8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, 6.0f, input, output, data);
      return kTfLiteOk;
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context,
          "Only float32, uint8 and int8 are supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}
230
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::ReluEval
tflite::ops::builtin::activations::ReluEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
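The commit message in this record describes the fix pattern: insert `nullptr` checks after each `GetInput`/`GetOutput` call. A minimal sketch of `ReluEval` with such checks follows; it illustrates the pattern using the existing `TF_LITE_ENSURE(context, condition)` macro and is not claimed to be byte-for-byte identical to the patched file.

// Sketch: ReluEval guarded against null tensors returned by the helpers.
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);   // bail out instead of dereferencing null
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);
  const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  switch (input->type) {
    case kTfLiteFloat32: {
      optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input),
                          GetTensorShape(output), GetTensorData<float>(output));
    } break;
    case kTfLiteUInt8: {
      QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(),
                              input, output, data);
    } break;
    case kTfLiteInt8: {
      QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(),
                             input, output, data);
    } break;
    default:
      TF_LITE_KERNEL_LOG(
          context, "Only float32 & int8/uint8 is supported currently, got %s.",
          TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
  return kTfLiteOk;
}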
191
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::ReluEval
tflite::ops::builtin::activations::ReluEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus ReluEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); const ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); switch (input->type) { case kTfLiteFloat32: { optimized_ops::Relu(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; // TODO(renjieliu): We may revisit the quantization calculation logic, // the unbounded upper limit is actually hard to quantize. case kTfLiteUInt8: { QuantizedReluX<uint8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; case kTfLiteInt8: { QuantizedReluX<int8_t>(0.0f, std::numeric_limits<float>::infinity(), input, output, data); } break; default: TF_LITE_KERNEL_LOG( context, "Only float32 & int8/uint8 is supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
191
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::ReluPrepare
tflite::ops::builtin::activations::ReluPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
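As with `ReluEval`, a sketch of `ReluPrepare` with the `nullptr` checks the commit message describes, using `TF_LITE_ENSURE` as the assumed bail-out mechanism; illustrative only, not a verbatim excerpt of the patch.

// Sketch: ReluPrepare guarded against null input/output tensors.
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) {
  ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) {
    double real_multiplier = input->params.scale / output->params.scale;
    QuantizeMultiplier(real_multiplier, &data->output_multiplier,
                       &data->output_shift);
  }
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}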
154
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::ReluPrepare
tflite::ops::builtin::activations::ReluPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus ReluPrepare(TfLiteContext* context, TfLiteNode* node) { ReluOpData* data = reinterpret_cast<ReluOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (input->type == kTfLiteInt8 || input->type == kTfLiteUInt8) { double real_multiplier = input->params.scale / output->params.scale; QuantizeMultiplier(real_multiplier, &data->output_multiplier, &data->output_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
154
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::SigmoidEval
tflite::ops::builtin::activations::SigmoidEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
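`SigmoidEval` is long, so only the guarded prologue is sketched here; the per-type switch (float32 / int16 / uint8 / int8 paths) would continue exactly as in the original function above. The sketch again assumes `TF_LITE_ENSURE` as the early-return mechanism and is illustrative rather than a verbatim excerpt of the patch.

// Sketch: guarded prologue of SigmoidEval; the body is unchanged.
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);
  // ... switch (input->type) { ... } continues as in the original,
  //     using `data`, `input` and `output` which are now known non-null ...
  return kTfLiteOk;
}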
465
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::SigmoidEval
tflite::ops::builtin::activations::SigmoidEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus SigmoidEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Logistic( GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } break; } case kTfLiteInt16: { LogisticParams params; if (kernel_type == kReference || (data->input_multiplier > 0)) { const int size = MatchingFlatSize(GetTensorShape(input), GetTensorShape(output)); reference_integer_ops::Logistic(data->input_multiplier, size, GetTensorData<int16_t>(input), GetTensorData<int16_t>(output)); } else { optimized_ops::Logistic( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } break; } case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { LogisticParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Logistic16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } break; } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
465
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::SigmoidPrepare
tflite::ops::builtin::activations::SigmoidPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. / 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
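A corresponding prologue sketch for `SigmoidPrepare`; the fixed-point, lookup-table and Int16 setup below the checks would remain as in the original function above. Illustrative only, assuming `TF_LITE_ENSURE` for the early return.

// Sketch: guarded prologue of SigmoidPrepare; the remaining setup is unchanged.
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) {
  OpData* data = reinterpret_cast<OpData*>(node->user_data);
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);
  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  // ... kFixedPointOptimized / kGenericOptimized / kTfLiteInt16 handling
  //     continues as in the original, populating `data` ...
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}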
643
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::SigmoidPrepare
tflite::ops::builtin::activations::SigmoidPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<uint8_t>::min()); } if (input->type == kTfLiteInt8) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, std::numeric_limits<int8_t>::min()); } TF_LITE_ENSURE(context, output->params.scale == 1. / 256); static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<uint8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt8) { TF_LITE_ENSURE(context, output->params.scale == 1. / 256); PopulateLookupTable<int8_t>(data, input, output, [](float value) { return 1.0f / (1.0f + std::exp(-value)); }); } else if (input->type == kTfLiteInt16) { TF_LITE_ENSURE(context, output->params.scale == 1. / 32768); TF_LITE_ENSURE(context, output->params.zero_point == 0); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // See comments in TanhPrepare about requiring zero_point==0 // and a power-of-two ("POT") scale. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
643
True
1
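The commit message recorded above describes the fix as inserting `nullptr` checks wherever `tflite::GetInput` and `tflite::GetOutput` are used. A minimal sketch of that guarded-accessor pattern, applied to a Prepare-style kernel in the spirit of the SigmoidPrepare function in this record; the kernel body is illustrative, not the literal patched TensorFlow source:

```cpp
// Illustrative only: the nullptr-check pattern described in the commit
// message, not the exact patched TensorFlow Lite code.
TfLiteStatus SigmoidPrepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 1);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  // GetInput/GetOutput can return nullptr (for example when a malicious
  // model uses the -1 "optional" tensor index), so the result is checked
  // before any dereference.
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);

  TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type);
  return context->ResizeTensor(context, output,
                               TfLiteIntArrayCopy(input->dims));
}
```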
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::SoftmaxEval
tflite::ops::builtin::activations::SoftmaxEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
282
True
1
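The CVE description in this record attributes the limited read/write gadgets to the double indexing scheme: an operator stores a tensor index, and the runtime uses it to index the subgraph-owned tensor array. A simplified, hedged sketch of why a -1 index reaches memory outside the array when it is blindly treated as "optional" for every operator; this is not the actual accessor in tensorflow/lite/kernels/kernel_util.h, which handles more cases:

```cpp
// Simplified illustration of the double-indexing pattern described in the
// advisory; not the real TensorFlow Lite implementation.
const TfLiteTensor* GetInputUnchecked(const TfLiteContext* context,
                                      const TfLiteNode* node, int index) {
  // First index: position within this operator's input list.
  const int tensor_index = node->inputs->data[index];
  // kTfLiteOptionalTensor (-1) marks an optional input, but nothing here
  // verifies that this operator actually permits an optional tensor at
  // `index`, so -1 flows straight into the second lookup.
  // Second index: position within the subgraph-owned tensor array.
  return &context->tensors[tensor_index];  // tensor_index == -1 addresses
                                           // one element before the array.
}
```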
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::SoftmaxEval
tflite::ops::builtin::activations::SoftmaxEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus SoftmaxEval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { return SoftmaxFloat(context, input, output, params); } case kTfLiteUInt8: { switch (output->type) { case kTfLiteUInt8: return SoftmaxQuantized<uint8_t, uint8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<uint8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only uint8_t and int16_t outputs are supported " "with uint8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt8: { switch (output->type) { case kTfLiteInt8: return SoftmaxQuantized<int8_t, int8_t>(context, input, output, data); case kTfLiteInt16: return SoftmaxQuantized<int8_t, int16_t>(context, input, output, data); default: TF_LITE_KERNEL_LOG(context, "Only int8_t and int16_t outputs are supported " "with int8_t inputs currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } case kTfLiteInt16: { return SoftmaxQuantized<int16_t, int16_t>(context, input, output, data); } default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8_t, Int8_t, Int16_t are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
282
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::SoftmaxPrepare
tflite::ops::builtin::activations::SoftmaxPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // exp LUT only used on nagative values // we consider exp(-10.0) is insignificant to accumulation gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] // correspond to [-10.0, 0.0] QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
493
True
1
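The advisory text repeated in these records also names a workaround: an allow-list check at model-load time that rejects any model using the -1 "optional" index where it is not legitimately allowed. A rough sketch of such a check over the flatbuffer schema accessors is below; the AllowsOptionalInputAt helper is hypothetical and would have to encode per-operator knowledge, which is exactly why the advisory calls this approach error-prone and recommends upgrading instead:

```cpp
// Sketch of the allow-list style verification suggested in the advisory.
// AllowsOptionalInputAt() is a hypothetical, user-supplied policy table.
#include "tensorflow/lite/schema/schema_generated.h"

bool AllowsOptionalInputAt(uint32_t opcode_index, int input_position);

bool VerifyNoIllegalOptionalTensors(const tflite::Model* model) {
  if (model == nullptr || model->subgraphs() == nullptr) return false;
  for (const tflite::SubGraph* subgraph : *model->subgraphs()) {
    if (subgraph->operators() == nullptr) continue;
    for (const tflite::Operator* op : *subgraph->operators()) {
      if (op->inputs() != nullptr) {
        for (int i = 0; i < static_cast<int>(op->inputs()->size()); ++i) {
          if (op->inputs()->Get(i) == -1 &&
              !AllowsOptionalInputAt(op->opcode_index(), i)) {
            return false;  // -1 used where no optional input is expected.
          }
        }
      }
      if (op->outputs() != nullptr) {
        for (int i = 0; i < static_cast<int>(op->outputs()->size()); ++i) {
          if (op->outputs()->Get(i) == -1) {
            return false;  // Output tensors are never optional.
          }
        }
      }
    }
  }
  return true;
}
```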
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::SoftmaxPrepare
tflite::ops::builtin::activations::SoftmaxPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus SoftmaxPrepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteSoftmaxParams*>(node->builtin_data); SoftmaxOpData* data = reinterpret_cast<SoftmaxOpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); if (output->type == kTfLiteInt16) { TF_LITE_ENSURE(context, input->type == kTfLiteInt8 || input->type == kTfLiteUInt8 || input->type == kTfLiteInt16); } else { TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { switch (output->type) { case kTfLiteUInt8: case kTfLiteInt8: #ifdef TFLITE_SOFTMAX_USE_UINT16_LUT // Only apply when both input & output are uint8/int8 & build with clang // on aarch64. // TODO(b/143709993): Port to ARMv7 and other platforms. data->params.uint8_table1 = data->uint8_table1; data->params.uint8_table2 = data->uint8_table2; optimized_ops::PopulateSoftmaxUInt8LookupTable( &data->params, input->params.scale, params->beta); break; #endif case kTfLiteInt16: default: data->params.table = data->table; optimized_ops::PopulateSoftmaxLookupTable( &data->params, input->params.scale, params->beta); } data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; } if (input->type == kTfLiteInt16) { TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); data->params.exp_lut = data->exp_lut; // exp LUT only used on nagative values // we consider exp(-10.0) is insignificant to accumulation gen_lut([](double value) { return std::exp(value); }, -10.0, 0.0, data->params.exp_lut, data->kInt16LUTArraySize); data->params.one_over_one_plus_x_lut = data->one_over_one_plus_x_lut; gen_lut([](double value) { return 1.0 / (1.0 + value); }, 0.0, 1.0, data->params.one_over_one_plus_x_lut, data->kInt16LUTArraySize); data->params.zero_point = output->params.zero_point; data->params.scale = output->params.scale; double input_scale_beta_rescale = input->params.scale * params->beta / (10.0 / 65535.0); // scale the input_diff such that [-65535, 0] // correspond to [-10.0, 0.0] QuantizeMultiplier(input_scale_beta_rescale, &data->params.input_multiplier, &data->params.input_left_shift); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
493
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::TanhEval
tflite::ops::builtin::activations::TanhEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
477
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::TanhEval
tflite::ops::builtin::activations::TanhEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus TanhEval(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); switch (input->type) { case kTfLiteFloat32: { if (kernel_type == kReference) { reference_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } else { optimized_ops::Tanh(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); } return kTfLiteOk; } break; case kTfLiteInt16: { TanhParams params; params.input_left_shift = data->input_left_shift; if (kernel_type == kReference || (data->input_multiplier > 0)) { reference_integer_ops::Tanh( data->input_multiplier, data->input_left_shift, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } else { optimized_ops::Tanh( params, GetTensorShape(input), GetTensorData<int16_t>(input), GetTensorShape(output), GetTensorData<int16_t>(output)); } return kTfLiteOk; } break; case kTfLiteUInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<uint8_t>(input), GetTensorShape(output), GetTensorData<uint8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; case kTfLiteInt8: { if (kernel_type == kFixedPointOptimized) { TanhParams params; params.input_zero_point = input->params.zero_point; params.input_range_radius = data->input_range_radius; params.input_multiplier = data->input_multiplier; params.input_left_shift = data->input_left_shift; optimized_ops::Tanh16bitPrecision( params, GetTensorShape(input), GetTensorData<int8_t>(input), GetTensorShape(output), GetTensorData<int8_t>(output)); } else { EvalUsingLookupTable(data, input, output); } return kTfLiteOk; } break; default: TF_LITE_KERNEL_LOG(context, "Only float32, uint8, int16 and int8 are supported " "currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
477
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::TanhPrepare
tflite::ops::builtin::activations::TanhPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { PopulateLookupTable<uint8_t>( data, input, output, [](float value) { return std::tanh(value); }); } else if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return std::tanh(value); }); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // These operators are implemented in fixed-point arithmetic, // which intrinsically wants symmetric ranges (zero_point==0) // and power-of-two scales (power-of-two is abbreviated below as POT). // While more general support would be possible by means of rescaling, // that would add some overhead and some loss of accuracy and wouldn't // be used at the moment as current quantized LSTM applications are // happy with symmetric, power-of-two-scales quantization. So we just // implement that narrow case only for now. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0 || data->input_left_shift == 1); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
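A short reading of the int16 branch in the TanhPrepare snippet above (derived from the code, not stated in the advisory): with kInputIntegerBits = 3, data->input_left_shift = (15 - 3) + log2(input scale) = 12 + log2(scale), and the power-of-two fast path requires this to be 0 or 1, i.e. an input scale of exactly 2^-12 or 2^-11. Any other scale falls through to the general rescaling, where the multiplier scale * 4096 = scale * 2^12 re-expresses the input in the (-2^3, 2^3) range. The output scale must round to exactly 2^-15 (output_scale_log2_rounded == -15), matching an int16 output with 15 fractional bits.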
475
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::activations::TanhPrepare
tflite::ops::builtin::activations::TanhPrepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus TanhPrepare(TfLiteContext* context, TfLiteNode* node) { OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, 0); TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); if (kernel_type == kFixedPointOptimized) { if (input->type == kTfLiteUInt8 || input->type == kTfLiteInt8) { static constexpr int kInputIntegerBits = 4; const double input_real_multiplier = input->params.scale * static_cast<double>(1 << (15 - kInputIntegerBits)); const double q = std::frexp(input_real_multiplier, &data->input_left_shift); auto q_fixed = static_cast<int32_t>(TfLiteRound(q * (1ll << 15))); data->input_multiplier = static_cast<int16_t>(q_fixed); int16_t input_range_radius = CalculateInputRadius(kInputIntegerBits, data->input_left_shift, 15); data->input_range_radius = input_range_radius; } } if (kernel_type == kGenericOptimized || kernel_type == kReference) { if (input->type == kTfLiteUInt8) { PopulateLookupTable<uint8_t>( data, input, output, [](float value) { return std::tanh(value); }); } else if (input->type == kTfLiteInt8) { PopulateLookupTable<int8_t>(data, input, output, [](float value) { return std::tanh(value); }); } } if (input->type == kTfLiteInt16) { static constexpr int kInputIntegerBits = 3; static constexpr int kOutputFractionalBits = 15; // These operators are implemented in fixed-point arithmetic, // which intrinsically wants symmetric ranges (zero_point==0) // and power-of-two scales (power-of-two is abbreviated below as POT). // While more general support would be possible by means of rescaling, // that would add some overhead and some loss of accuracy and wouldn't // be used at the moment as current quantized LSTM applications are // happy with symmetric, power-of-two-scales quantization. So we just // implement that narrow case only for now. TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); int input_scale_log2_rounded; bool param_scale_pot = CheckedLog2(input->params.scale, &input_scale_log2_rounded); data->input_left_shift = (15 - kInputIntegerBits) + input_scale_log2_rounded; param_scale_pot &= (data->input_left_shift == 0 || data->input_left_shift == 1); if (!param_scale_pot) { // In case of general scale parameter, we need to do a rescaling. // Magic constant 4096: // We need to scale down to (-2^3, 2^3) / 3 is kInputIntegerBits/ interval // from 16-bit (-2^15, 2^15), // so we need to multiply by // 2^(15 - kInputIntegerBits) = 2^12 = 4096. data->input_multiplier = static_cast<int32_t>(input->params.scale * 4096); } int output_scale_log2_rounded; TF_LITE_ENSURE( context, CheckedLog2(output->params.scale, &output_scale_log2_rounded)); TF_LITE_ENSURE_EQ(context, output_scale_log2_rounded, -kOutputFractionalBits); } return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
475
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::add::Eval
tflite::ops::builtin::add::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { EvalAdd<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { TF_LITE_ENSURE_OK(context, EvalAddQuantized<kernel_type>(context, node, params, data, input1, input2, output)); } else { TF_LITE_UNSUPPORTED_TYPE(context, output->type, "Add"); } return kTfLiteOk; }
184
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::add::Eval
tflite::ops::builtin::add::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32 || output->type == kTfLiteInt32) { EvalAdd<kernel_type>(context, node, params, data, input1, input2, output); } else if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || output->type == kTfLiteInt16) { TF_LITE_ENSURE_OK(context, EvalAddQuantized<kernel_type>(context, node, params, data, input1, input2, output)); } else { TF_LITE_UNSUPPORTED_TYPE(context, output->type, "Add"); } return kTfLiteOk; }
184
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::add::Prepare
tflite::ops::builtin::add::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = input2->type; const bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } // 8bit -> 8bit general quantized path, with general rescalings // as well as, int16 -> int16 with general rescalings bool pot_scale_int16 = true; bool input1_scale_is_pot = false; bool input2_scale_is_pot = false; bool output_scale_is_pot = false; int input1_scale_log2_rounded{0}; int input2_scale_log2_rounded{0}; int output_scale_log2_rounded{0}; if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 && output->type == kTfLiteInt16) { // In case of 16-bit, there are two implementation: // the scale parameter is a general number // the scale parameter is POT and // zero_point is zero for inputs/output. pot_scale_int16 = (input1->params.zero_point == 0) && (input2->params.zero_point == 0) && (output->params.zero_point == 0); input1_scale_is_pot = CheckedLog2(input1->params.scale, &input1_scale_log2_rounded); input2_scale_is_pot = CheckedLog2(input2->params.scale, &input2_scale_log2_rounded); output_scale_is_pot = CheckedLog2(output->params.scale, &output_scale_log2_rounded); pot_scale_int16 &= input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot; } data->pot_scale_int16 = pot_scale_int16; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || !pot_scale_int16) { // 8bit -> 8bit general quantized path, with general rescalings // as well as, 16bit -> 16bit with general rescalings data->input1_offset = -input1->params.zero_point; data->input2_offset = -input2->params.zero_point; data->output_offset = output->params.zero_point; // The shift is set to 15 for 16-bit and 20 in case of 8-bit, accordingly. // In case of 16-bit we have 65535 << 15 which is less than 1 << 31, // therefore the addition will still fit in a 32 bit accumulator. data->left_shift = !pot_scale_int16 ? 15 : 20; const double twice_max_input_scale = 2 * std::max(input1->params.scale, input2->params.scale); const double real_input1_multiplier = input1->params.scale / twice_max_input_scale; const double real_input2_multiplier = input2->params.scale / twice_max_input_scale; const double real_output_multiplier = twice_max_input_scale / ((1 << data->left_shift) * output->params.scale); QuantizeMultiplierSmallerThanOneExp( real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); QuantizeMultiplierSmallerThanOneExp( real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); QuantizeMultiplierSmallerThanOneExp( real_output_multiplier, &data->output_multiplier, &data->output_shift); TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); } else if (output->type == kTfLiteInt16) { // 16bit -> 16bit special quantized path, supporting only a rather // narrow case of quantization parameters: zero_points must all be 0 // ("symmetric quantization") and scales must be power-of-two (which // we abbreviate as "POT" below). The intended use case for this path // is in LSTM cells, where, due to the constraints of implementing // some of the math in these LSTM cells in fixed-point arithmetic, // we need to have such symmetric, power-of-two quantization // (Fixed-point formats are inherently symmetric, power-of-two). TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); TF_LITE_ENSURE(context, input1_scale_is_pot); TF_LITE_ENSURE(context, input2_scale_is_pot); TF_LITE_ENSURE(context, output_scale_is_pot); data->input1_shift = input1_scale_log2_rounded - output_scale_log2_rounded; data->input2_shift = input2_scale_log2_rounded - output_scale_log2_rounded; // Shifting of one input is supported. The graph quantization should ensure // that the other input matches the output. TF_LITE_ENSURE(context, data->input1_shift == 0 || data->input2_shift == 0); TF_LITE_ENSURE(context, data->input1_shift <= 0); TF_LITE_ENSURE(context, data->input2_shift <= 0); TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); } return context->ResizeTensor(context, output, output_size); }
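A quick check of the accumulator comment in the Prepare snippet above (arithmetic only, derived from the code): on the general 16-bit path left_shift is 15, so the scaled operand the comment refers to is 65535 << 15 = 2,147,450,880, which is just below 1 << 31 = 2,147,483,648 and therefore still fits a signed 32-bit accumulator; on the 8-bit path left_shift is 20 and 255 << 20 = 267,386,880, leaving even more headroom.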
692
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::add::Prepare
tflite::ops::builtin::add::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAddParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = input2->type; const bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } // 8bit -> 8bit general quantized path, with general rescalings // as well as, int16 -> int16 with general rescalings bool pot_scale_int16 = true; bool input1_scale_is_pot = false; bool input2_scale_is_pot = false; bool output_scale_is_pot = false; int input1_scale_log2_rounded{0}; int input2_scale_log2_rounded{0}; int output_scale_log2_rounded{0}; if (input1->type == kTfLiteInt16 && input2->type == kTfLiteInt16 && output->type == kTfLiteInt16) { // In case of 16-bit, there are two implementation: // the scale parameter is a general number // the scale parameter is POT and // zero_point is zero for inputs/output. pot_scale_int16 = (input1->params.zero_point == 0) && (input2->params.zero_point == 0) && (output->params.zero_point == 0); input1_scale_is_pot = CheckedLog2(input1->params.scale, &input1_scale_log2_rounded); input2_scale_is_pot = CheckedLog2(input2->params.scale, &input2_scale_log2_rounded); output_scale_is_pot = CheckedLog2(output->params.scale, &output_scale_log2_rounded); pot_scale_int16 &= input1_scale_is_pot && input2_scale_is_pot && output_scale_is_pot; } data->pot_scale_int16 = pot_scale_int16; if (output->type == kTfLiteUInt8 || output->type == kTfLiteInt8 || !pot_scale_int16) { // 8bit -> 8bit general quantized path, with general rescalings // as well as, 16bit -> 16bit with general rescalings data->input1_offset = -input1->params.zero_point; data->input2_offset = -input2->params.zero_point; data->output_offset = output->params.zero_point; // The shift is set to 15 for 16-bit and 20 in case of 8-bit, accordingly. // In case of 16-bit we have 65535 << 15 which is less than 1 << 31, // therefore the addition will still fit in a 32 bit accumulator. data->left_shift = !pot_scale_int16 ? 15 : 20; const double twice_max_input_scale = 2 * std::max(input1->params.scale, input2->params.scale); const double real_input1_multiplier = input1->params.scale / twice_max_input_scale; const double real_input2_multiplier = input2->params.scale / twice_max_input_scale; const double real_output_multiplier = twice_max_input_scale / ((1 << data->left_shift) * output->params.scale); QuantizeMultiplierSmallerThanOneExp( real_input1_multiplier, &data->input1_multiplier, &data->input1_shift); QuantizeMultiplierSmallerThanOneExp( real_input2_multiplier, &data->input2_multiplier, &data->input2_shift); QuantizeMultiplierSmallerThanOneExp( real_output_multiplier, &data->output_multiplier, &data->output_shift); TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); } else if (output->type == kTfLiteInt16) { // 16bit -> 16bit special quantized path, supporting only a rather // narrow case of quantization parameters: zero_points must all be 0 // ("symmetric quantization") and scales must be power-of-two (which // we abbreviate as "POT" below). The intended use case for this path // is in LSTM cells, where, due to the constraints of implementing // some of the math in these LSTM cells in fixed-point arithmetic, // we need to have such symmetric, power-of-two quantization // (Fixed-point formats are inherently symmetric, power-of-two). TF_LITE_ENSURE_EQ(context, input1->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, input2->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); TF_LITE_ENSURE(context, input1_scale_is_pot); TF_LITE_ENSURE(context, input2_scale_is_pot); TF_LITE_ENSURE(context, output_scale_is_pot); data->input1_shift = input1_scale_log2_rounded - output_scale_log2_rounded; data->input2_shift = input2_scale_log2_rounded - output_scale_log2_rounded; // Shifting of one input is supported. The graph quantization should ensure // that the other input matches the output. TF_LITE_ENSURE(context, data->input1_shift == 0 || data->input2_shift == 0); TF_LITE_ENSURE(context, data->input1_shift <= 0); TF_LITE_ENSURE(context, data->input2_shift <= 0); TF_LITE_ENSURE_STATUS(CalculateActivationRangeQuantized( context, params->activation, output, &data->output_activation_min, &data->output_activation_max)); } return context->ResizeTensor(context, output, output_size); }
692
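Editor's note: the commit message in this record (1970c2158b1ffa416d159d03c3370b9a462aee35) describes inserting `nullptr` checks wherever tensors are obtained. A minimal sketch of what that pattern could look like at the top of the `add::Prepare` kernel above follows; it is a fragment meant to be read against that function, reusing the `GetInput`/`GetOutput`/`TF_LITE_ENSURE` helpers already used there, and it illustrates the described pattern rather than reproducing the exact patched code.

// Sketch of the nullptr-check pattern for add::Prepare (fragment; assumes the
// same context, node, tensor-index constants and helpers as the function above).
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
TF_LITE_ENSURE(context, input1 != nullptr);
const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2);
TF_LITE_ENSURE(context, input2 != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);

Failing through `TF_LITE_ENSURE` keeps the error on the TFLite status path instead of dereferencing a null tensor pointer when a `-1` index slips past model validation.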
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::add_n::Eval
tflite::ops::builtin::add_n::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { EvalAddN<float>(context, node); } else if (output->type == kTfLiteInt32) { EvalAddN<int32_t>(context, node); } else { context->ReportError(context, "AddN only supports FLOAT32|INT32 now, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
92
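Editor's note: for the `add_n::Eval` kernel above, the same commit-described pattern amounts to guarding the single output fetch before its type is read; the two-line fragment below is a sketch under the same assumptions as the `Prepare` sketch earlier, and the same idea applies to the other Eval kernels in this record set (for example `arg_min_max::Eval` further down).

const TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);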
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::add_n::Eval
tflite::ops::builtin::add_n::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (output->type == kTfLiteFloat32) { EvalAddN<float>(context, node); } else if (output->type == kTfLiteInt32) { EvalAddN<int32_t>(context, node); } else { context->ReportError(context, "AddN only supports FLOAT32|INT32 now, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } return kTfLiteOk; }
92
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::add_n::Prepare
tflite::ops::builtin::add_n::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { int num_inputs = NumInputs(node); TF_LITE_ENSURE(context, num_inputs >= 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input1->type; // Check that all input tensors have the same shape and type. for (int i = kInputTensor1 + 1; i < num_inputs; ++i) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE(context, HaveSameShapes(input1, input)); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type); } // Use the first input node's dimension to be the dimension of the output // node. TfLiteIntArray* input1_dims = input1->dims; TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims); return context->ResizeTensor(context, output, output_dims); }
162
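Editor's note: for `add_n::Prepare` shown above, the commit-described pattern would also need a check inside the loop over the remaining inputs, since every iteration fetches another tensor. The fragment below is a sketch under the same assumptions as the earlier `Prepare` sketch (same context, node, helpers and `num_inputs` from the function above), not the literal patched code.

// Sketch only: nullptr checks for add_n::Prepare, including the per-iteration
// input check (fragment, to be read against the function above).
const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1);
TF_LITE_ENSURE(context, input1 != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);
output->type = input1->type;
for (int i = kInputTensor1 + 1; i < num_inputs; ++i) {
  const TfLiteTensor* input = GetInput(context, node, i);
  TF_LITE_ENSURE(context, input != nullptr);
  TF_LITE_ENSURE(context, HaveSameShapes(input1, input));
  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type);
}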
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::add_n::Prepare
tflite::ops::builtin::add_n::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { int num_inputs = NumInputs(node); TF_LITE_ENSURE(context, num_inputs >= 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input1->type; // Check that all input tensors have the same shape and type. for (int i = kInputTensor1 + 1; i < num_inputs; ++i) { const TfLiteTensor* input = GetInput(context, node, i); TF_LITE_ENSURE(context, HaveSameShapes(input1, input)); TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input->type); } // Use the first input node's dimension to be the dimension of the output // node. TfLiteIntArray* input1_dims = input1->dims; TfLiteIntArray* output_dims = TfLiteIntArrayCopy(input1_dims); return context->ResizeTensor(context, output, output_dims); }
162
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::arg_min_max::Eval
tflite::ops::builtin::arg_min_max::Eval( TfLiteContext * context , TfLiteNode * node , bool is_arg_max)
['context', 'node', 'is_arg_max']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* axis = GetInput(context, node, kAxis); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } #define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \ optimized_ops::ArgMinMax( \ GetTensorShape(input), GetTensorData<data_type>(input), \ GetTensorData<axis_type>(axis), GetTensorShape(output), \ GetTensorData<output_type>(output), \ GetComparefunction<data_type>(is_arg_max)) if (axis->type == kTfLiteInt32) { switch (output->type) { case kTfLiteInt32: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; case kTfLiteInt64: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; default: context->ReportError( context, "Only int32 and int64 are supported currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { switch (output->type) { case kTfLiteInt32: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int64_t, int32_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int32_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int32_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int32_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; case kTfLiteInt64: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int64_t, int64_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int64_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int64_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int64_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; default: context->ReportError( context, "Only int32 and int64 are supported currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } #undef TF_LITE_ARG_MIN_MAX return kTfLiteOk; }
525
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::arg_min_max::Eval
tflite::ops::builtin::arg_min_max::Eval( TfLiteContext * context , TfLiteNode * node , bool is_arg_max)
['context', 'node', 'is_arg_max']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node, bool is_arg_max) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* axis = GetInput(context, node, kAxis); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (IsDynamicTensor(output)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } #define TF_LITE_ARG_MIN_MAX(data_type, axis_type, output_type) \ optimized_ops::ArgMinMax( \ GetTensorShape(input), GetTensorData<data_type>(input), \ GetTensorData<axis_type>(axis), GetTensorShape(output), \ GetTensorData<output_type>(output), \ GetComparefunction<data_type>(is_arg_max)) if (axis->type == kTfLiteInt32) { switch (output->type) { case kTfLiteInt32: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int32_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int32_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int32_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int32_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; case kTfLiteInt64: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int32_t, int64_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int32_t, int64_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int32_t, int64_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int32_t, int64_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; default: context->ReportError( context, "Only int32 and int64 are supported currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } else { switch (output->type) { case kTfLiteInt32: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int64_t, int32_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int32_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int32_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int32_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; case kTfLiteInt64: { switch (input->type) { case kTfLiteFloat32: TF_LITE_ARG_MIN_MAX(float, int64_t, int64_t); break; case kTfLiteUInt8: TF_LITE_ARG_MIN_MAX(uint8_t, int64_t, int64_t); break; case kTfLiteInt8: TF_LITE_ARG_MIN_MAX(int8_t, int64_t, int64_t); break; case kTfLiteInt32: TF_LITE_ARG_MIN_MAX(int32_t, int64_t, int64_t); break; default: context->ReportError(context, "Only float32, uint8, int8 and int32 are " "supported currently, got %s.", TfLiteTypeGetName(input->type)); return kTfLiteError; } } break; default: context->ReportError( context, "Only int32 and int64 are supported currently, got %s.", TfLiteTypeGetName(output->type)); return kTfLiteError; } } #undef TF_LITE_ARG_MIN_MAX return kTfLiteOk; }
525
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::arg_min_max::Prepare
tflite::ops::builtin::arg_min_max::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* axis = GetInput(context, node, kAxis); // Make sure the axis is only 1 dimension. TF_LITE_ENSURE_EQ(context, NumElements(axis), 1); // Make sure the axis is only either int32 or int64. TF_LITE_ENSURE(context, axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data); switch (params->output_type) { case kTfLiteInt32: output->type = kTfLiteInt32; break; case kTfLiteInt64: output->type = kTfLiteInt64; break; default: context->ReportError(context, "Unknown index output data type: %d", params->output_type); return kTfLiteError; } // Check conditions for different types. switch (input->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt32: break; default: context->ReportError( context, "Unknown input type: %d, only float32 and int types are supported", input->type); return kTfLiteError; } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (IsConstantTensor(axis)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } else { SetTensorToDynamic(output); } return kTfLiteOk; }
255
True
1
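The commit message captured in this record says the mitigation is to insert nullptr checks on every use of GetInput and GetOutput. The sketch below shows what that looks like for the arg_min_max Prepare function recorded above, assuming the stock TF_LITE_ENSURE macro; the actual patch in commit 1970c2158b1f may differ in detail, so treat this as an approximation rather than the upstream diff.

// Sketch: arg_min_max Prepare with the guards the commit message describes.
// Approximation only; the remaining type/shape checks are unchanged from the
// function recorded above and are elided here.
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1);

  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);    // added: reject invalid index
  const TfLiteTensor* axis = GetInput(context, node, kAxis);
  TF_LITE_ENSURE(context, axis != nullptr);     // added
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);   // added

  // ... axis/type validation and output resizing continue as before.
  return kTfLiteOk;
}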
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::arg_min_max::Prepare
tflite::ops::builtin::arg_min_max::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* axis = GetInput(context, node, kAxis); // Make sure the axis is only 1 dimension. TF_LITE_ENSURE_EQ(context, NumElements(axis), 1); // Make sure the axis is only either int32 or int64. TF_LITE_ENSURE(context, axis->type == kTfLiteInt32 || axis->type == kTfLiteInt64); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); auto* params = reinterpret_cast<TfLiteArgMaxParams*>(node->builtin_data); switch (params->output_type) { case kTfLiteInt32: output->type = kTfLiteInt32; break; case kTfLiteInt64: output->type = kTfLiteInt64; break; default: context->ReportError(context, "Unknown index output data type: %d", params->output_type); return kTfLiteError; } // Check conditions for different types. switch (input->type) { case kTfLiteFloat32: case kTfLiteUInt8: case kTfLiteInt8: case kTfLiteInt32: break; default: context->ReportError( context, "Unknown input type: %d, only float32 and int types are supported", input->type); return kTfLiteError; } TF_LITE_ENSURE(context, NumDimensions(input) >= 1); if (IsConstantTensor(axis)) { TF_LITE_ENSURE_STATUS(ResizeOutput(context, input, axis, output)); } else { SetTensorToDynamic(output); } return kTfLiteOk; }
255
True
1
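The CVE description in these records suggests, as a stop-gap, attaching a custom Verifier to model loading so that -1 indices are only accepted where an operator genuinely treats a tensor as optional. Below is a deliberately stricter sketch that rejects any negative index outright; it assumes the tflite::TfLiteVerifier hook and the generated flatbuffer accessors (tflite::GetModel, subgraphs(), operators(), inputs()/outputs()), and it omits the per-operator allow-list a production check would need.

// Hedged sketch of the "custom Verifier" workaround from the advisory text.
// Assumes the TfLiteVerifier extension point and the generated schema
// accessors; include paths and exact types may differ by TF Lite release.
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"

class NoOptionalTensorVerifier : public tflite::TfLiteVerifier {
 public:
  bool Verify(const char* data, int length,
              tflite::ErrorReporter* reporter) override {
    const tflite::Model* model = tflite::GetModel(data);
    if (model == nullptr || model->subgraphs() == nullptr) return false;
    for (const tflite::SubGraph* graph : *model->subgraphs()) {
      if (graph->operators() == nullptr) continue;
      for (const tflite::Operator* op : *graph->operators()) {
        for (auto indices : {op->inputs(), op->outputs()}) {
          if (indices == nullptr) continue;
          for (int32_t idx : *indices) {
            if (idx < 0) {  // -1 marks "optional"; reject it unconditionally
              reporter->Report("Rejecting model with negative tensor index");
              return false;
            }
          }
        }
      }
    }
    return true;
  }
};

Such a verifier would typically be handed to model construction (for example via FlatBufferModel's verify-and-build entry point). Because it rejects every negative index it also rejects legitimate models that rely on genuinely optional tensors, which is why the advisory calls the finer-grained allow-list approach error-prone and recommends upgrading to a patched release instead.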
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::assign_variable::Eval
tflite::ops::custom::assign_variable::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); const TfLiteTensor* input_value_tensor = GetInput(context, node, kInputValue); int resource_id = input_resource_id_tensor->data.i32[0]; auto& resources = subgraph->resources(); resource::CreateResourceVariableIfNotAvailable(&resources, resource_id); auto* variable = resource::GetResourceVariable(&resources, resource_id); TF_LITE_ENSURE(context, variable != nullptr); variable->AssignFrom(input_value_tensor); return kTfLiteOk; }
120
True
1
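For the assign_variable Eval recorded above, the two GetInput results are dereferenced almost immediately (data.i32[0] on the resource id, then AssignFrom on the value tensor), so the guard from the commit message goes directly after each accessor. A sketch under the same caveat that it approximates rather than reproduces the upstream patch:

// Sketch: assign_variable Eval with nullptr guards added after each accessor.
// Approximation of the patched kernel, not a verbatim copy.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_);
  const TfLiteTensor* input_resource_id_tensor =
      GetInput(context, node, kInputVariableId);
  TF_LITE_ENSURE(context, input_resource_id_tensor != nullptr);  // added
  const TfLiteTensor* input_value_tensor =
      GetInput(context, node, kInputValue);
  TF_LITE_ENSURE(context, input_value_tensor != nullptr);        // added

  // Safe to dereference only after the guards above.
  int resource_id = input_resource_id_tensor->data.i32[0];
  auto& resources = subgraph->resources();
  resource::CreateResourceVariableIfNotAvailable(&resources, resource_id);
  auto* variable = resource::GetResourceVariable(&resources, resource_id);
  TF_LITE_ENSURE(context, variable != nullptr);
  variable->AssignFrom(input_value_tensor);
  return kTfLiteOk;
}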
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::assign_variable::Eval
tflite::ops::custom::assign_variable::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { Subgraph* subgraph = reinterpret_cast<Subgraph*>(context->impl_); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); const TfLiteTensor* input_value_tensor = GetInput(context, node, kInputValue); int resource_id = input_resource_id_tensor->data.i32[0]; auto& resources = subgraph->resources(); resource::CreateResourceVariableIfNotAvailable(&resources, resource_id); auto* variable = resource::GetResourceVariable(&resources, resource_id); TF_LITE_ENSURE(context, variable != nullptr); variable->AssignFrom(input_value_tensor); return kTfLiteOk; }
120
True
1
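The guards sketched so far rely on TF_LITE_ENSURE turning a failed check into an error return instead of a crash. The macro below is a rough, version-agnostic approximation of what the TF Lite ensure-style macros do (report through the context, then return kTfLiteError from the enclosing kernel function); the exact upstream definition varies between releases, so treat the shape shown here as an assumption, not a quote.

// Approximate shape of an ensure-style kernel macro; real TF Lite releases
// differ in how they log (context->ReportError vs. newer logging macros).
#define ENSURE_OR_RETURN(context, condition)                                 \
  do {                                                                       \
    if (!(condition)) {                                                      \
      (context)->ReportError((context), "%s was not true.", #condition);     \
      return kTfLiteError;                                                   \
    }                                                                        \
  } while (false)

Because the macro returns from the enclosing function, a malformed model makes Prepare or Invoke fail with kTfLiteError rather than reading or writing out of bounds.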
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::assign_variable::Prepare
tflite::ops::custom::assign_variable::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't // fully support 0-output ops yet. Currently it works if we manually crfat // a TFLite graph that contains variable ops. Note: // * The TFLite Converter need to be changed to be able to produce an op // with 0 output. // * The delegation code need to be changed to handle 0 output ops. However // everything still works fine when variable ops aren't used. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); return kTfLiteOk; }
76
True
1
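An alternative to repeating the nullptr guard after every accessor is a status-returning lookup. Later TF Lite versions expose helpers along the lines of GetInputSafe/GetOutputSafe for this; rather than assuming their exact signatures, the sketch below writes a local equivalent and applies it to the assign_variable Prepare recorded above. The helper and its name are illustrative assumptions, and it presumes the refactored GetInput that returns nullptr for invalid indices, as the commit message describes.

// Local, illustrative status-returning accessor plus its use in Prepare;
// not a claim about the exact helpers any particular TF Lite release ships.
TfLiteStatus GetInputOrError(TfLiteContext* context, TfLiteNode* node,
                             int index, const TfLiteTensor** tensor) {
  const TfLiteTensor* t = GetInput(context, node, index);
  if (t == nullptr) return kTfLiteError;  // covers the -1 "optional" index
  *tensor = t;
  return kTfLiteOk;
}

TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0);
  const TfLiteTensor* input_resource_id_tensor = nullptr;
  TF_LITE_ENSURE_STATUS(GetInputOrError(context, node, kInputVariableId,
                                        &input_resource_id_tensor));
  TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32);
  TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1);
  return kTfLiteOk;
}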
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::assign_variable::Prepare
tflite::ops::custom::assign_variable::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); // TODO(b/137042749): TFLite infrastructure (converter, delegate) doesn't // fully support 0-output ops yet. Currently it works if we manually crfat // a TFLite graph that contains variable ops. Note: // * The TFLite Converter need to be changed to be able to produce an op // with 0 output. // * The delegation code need to be changed to handle 0 output ops. However // everything still works fine when variable ops aren't used. TF_LITE_ENSURE_EQ(context, NumOutputs(node), 0); const TfLiteTensor* input_resource_id_tensor = GetInput(context, node, kInputVariableId); TF_LITE_ENSURE_EQ(context, input_resource_id_tensor->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, NumElements(input_resource_id_tensor), 1); return kTfLiteOk; }
76
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::audio_spectrogram::Eval
tflite::ops::custom::audio_spectrogram::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const float* input_data = GetTensorData<float>(input); const int64_t sample_count = input->dims->data[0]; const int64_t channel_count = input->dims->data[1]; const int64_t output_width = params->spectrogram->output_frequency_channels(); float* output_flat = GetTensorData<float>(output); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { float* output_slice = output_flat + (channel * params->output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_data[i * channel_count + channel]; } std::vector<std::vector<float>> spectrogram_output; TF_LITE_ENSURE(context, params->spectrogram->ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output)); TF_LITE_ENSURE_EQ(context, spectrogram_output.size(), params->output_height); TF_LITE_ENSURE(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width)); for (int row_index = 0; row_index < params->output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (params->magnitude_squared) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } return kTfLiteOk; }
406
True
1
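The audio_spectrogram Eval recorded above dereferences input->dims and hands both tensors to GetTensorData before any validation, so the guards belong at the very top of the function. A short sketch of just its entry, with the rest of the body unchanged; again an approximation, not the upstream diff.

// Sketch: top of audio_spectrogram Eval with the nullptr guards added.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);   // added guard
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);  // added guard
  // ... Initialize(), GetTensorData<float>() and the spectrogram loops follow
  // exactly as in the function recorded above.
  return kTfLiteOk;
}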
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::audio_spectrogram::Eval
tflite::ops::custom::audio_spectrogram::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const float* input_data = GetTensorData<float>(input); const int64_t sample_count = input->dims->data[0]; const int64_t channel_count = input->dims->data[1]; const int64_t output_width = params->spectrogram->output_frequency_channels(); float* output_flat = GetTensorData<float>(output); std::vector<float> input_for_channel(sample_count); for (int64_t channel = 0; channel < channel_count; ++channel) { float* output_slice = output_flat + (channel * params->output_height * output_width); for (int i = 0; i < sample_count; ++i) { input_for_channel[i] = input_data[i * channel_count + channel]; } std::vector<std::vector<float>> spectrogram_output; TF_LITE_ENSURE(context, params->spectrogram->ComputeSquaredMagnitudeSpectrogram( input_for_channel, &spectrogram_output)); TF_LITE_ENSURE_EQ(context, spectrogram_output.size(), params->output_height); TF_LITE_ENSURE(context, spectrogram_output.empty() || (spectrogram_output[0].size() == output_width)); for (int row_index = 0; row_index < params->output_height; ++row_index) { const std::vector<float>& spectrogram_row = spectrogram_output[row_index]; TF_LITE_ENSURE_EQ(context, spectrogram_row.size(), output_width); float* output_row = output_slice + (row_index * output_width); if (params->magnitude_squared) { for (int i = 0; i < output_width; ++i) { output_row[i] = spectrogram_row[i]; } } else { for (int i = 0; i < output_width; ++i) { output_row[i] = sqrtf(spectrogram_row[i]); } } } } return kTfLiteOk; }
406
True
1
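The commit referenced in this record (1970c2158, "[tflite]: Insert `nullptr` checks when obtaining tensors") describes guarding every tflite::GetInput/GetOutput lookup, while the audio_spectrogram::Eval body stored above still dereferences those pointers directly. A minimal sketch of the guarded pattern follows; it reuses TF_LITE_ENSURE and the kInputTensor/kOutputTensor constants from the function above and is illustrative only, not the exact patched diff.

// Sketch of the guarded-lookup pattern described in commit 1970c2158;
// illustrative only, not the exact patched code.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  auto* params =
      reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data);
  // GetInput/GetOutput may return nullptr after the refactoring, so fail the
  // op cleanly instead of dereferencing a null tensor pointer.
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);
  // ... the remainder of Eval proceeds as in the stored function body ...
  return kTfLiteOk;
}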
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::audio_spectrogram::Prepare
tflite::ops::custom::audio_spectrogram::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); }
253
True
1
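The CVE description repeated in these records explains that the flatbuffer's `-1` "optional tensor" sentinel, when accepted by an operator that does not expect optional inputs, addresses memory just before the subgraph's tensor array. The following self-contained illustration (not TFLite code; Tensor, Subgraph, and the helper names are hypothetical) shows why that produces a fixed-offset out-of-bounds access and what a bounds-checked lookup looks like.

// Self-contained illustration of the double-indexing issue described in the
// advisory text. Tensor/Subgraph and the helpers are hypothetical names.
#include <vector>

struct Tensor { float* data; };
struct Subgraph { std::vector<Tensor> tensors; };

float* TensorData(Subgraph& graph, int tensor_index) {
  // With tensor_index == -1 (the "optional tensor" sentinel) this addresses
  // one element before the array: an out-of-bounds access at a fixed offset,
  // i.e. the limited read/write gadget the advisory describes.
  return graph.tensors.data()[tensor_index].data;  // no check on the sentinel
}

float* CheckedTensorData(Subgraph& graph, int tensor_index) {
  // Guarded variant: treat the -1 sentinel (and any out-of-range index) as
  // "no tensor" instead of indexing outside the array.
  if (tensor_index < 0 ||
      tensor_index >= static_cast<int>(graph.tensors.size())) {
    return nullptr;
  }
  return graph.tensors.data()[tensor_index].data;
}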
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::custom::audio_spectrogram::Prepare
tflite::ops::custom::audio_spectrogram::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteAudioSpectrogramParams*>(node->user_data); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumDimensions(input), 2); TF_LITE_ENSURE_TYPES_EQ(context, output->type, kTfLiteFloat32); TF_LITE_ENSURE_TYPES_EQ(context, input->type, output->type); TF_LITE_ENSURE(context, params->spectrogram->Initialize(params->window_size, params->stride)); const int64_t sample_count = input->dims->data[0]; const int64_t length_minus_window = (sample_count - params->window_size); if (length_minus_window < 0) { params->output_height = 0; } else { params->output_height = 1 + (length_minus_window / params->stride); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(3); output_size->data[0] = input->dims->data[1]; output_size->data[1] = params->output_height; output_size->data[2] = params->spectrogram->output_frequency_channels(); return context->ResizeTensor(context, output, output_size); }
253
True
1
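The workaround mentioned at the end of the CVE description is a custom `Verifier` run at model-loading time that only accepts the `-1` sentinel where an operator genuinely takes optional inputs. A rough sketch of such a check over the flatbuffer schema follows; it assumes the generated schema accessors (tflite::GetModel, subgraphs(), operators(), inputs(), outputs()), and OperatorAllowsOptionalInput is a hypothetical stub standing in for the per-operator allow-list a real verifier would need.

// Rough sketch of the allow-list check the advisory suggests as a workaround.
// Assumes the generated flatbuffer accessors from the TFLite schema header;
// OperatorAllowsOptionalInput is a hypothetical stub for illustration only.
#include "tensorflow/lite/schema/schema_generated.h"

bool OperatorAllowsOptionalInput(const tflite::Operator* /*op*/,
                                 int /*input_position*/) {
  // A real verifier would consult a per-builtin/custom-op allow-list here;
  // this conservative stub rejects the -1 sentinel everywhere.
  return false;
}

bool ModelUsesOptionalTensorsSafely(const void* buffer) {
  const tflite::Model* model = tflite::GetModel(buffer);
  if (model == nullptr || model->subgraphs() == nullptr) return false;
  for (const tflite::SubGraph* subgraph : *model->subgraphs()) {
    if (subgraph->operators() == nullptr) continue;
    for (const tflite::Operator* op : *subgraph->operators()) {
      if (op->inputs() != nullptr) {
        for (int i = 0; i < static_cast<int>(op->inputs()->size()); ++i) {
          // -1 is only legitimate where the operator really has an optional
          // input at this position.
          if (op->inputs()->Get(i) < 0 && !OperatorAllowsOptionalInput(op, i)) {
            return false;
          }
        }
      }
      if (op->outputs() != nullptr) {
        // Output tensors should never use the optional-input sentinel.
        for (const int tensor_index : *op->outputs()) {
          if (tensor_index < 0) return false;
        }
      }
    }
  }
  return true;
}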
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::batch_matmul::Eval
tflite::ops::builtin::batch_matmul::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* lhs = GetInput(context, node, kInputLHSTensor); const TfLiteTensor* rhs = GetInput(context, node, kInputRHSTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); RuntimeShape orig_lhs_shape = GetTensorShape(lhs); RuntimeShape orig_rhs_shape = GetTensorShape(rhs); bool adj_y = op_context.params->adj_y; bool adj_x = op_context.params->adj_x; const TfLiteTensor* rhs_tensor = adj_y ? rhs : GetTempRhs(context, node, rhs); const TfLiteTensor* lhs_tensor = adj_x ? GetTempLhs(context, node, lhs) : lhs; if (!adj_y) { // TODO(b/154760341) Constant tensors should already be transposed, but // we transpose once if necessary for now. if (!(IsConstantTensor(rhs) && op_data->rhs_transposed)) { TransposeRowsColumns(context, rhs, GetTemporary(context, node, 1)); op_data->rhs_transposed = true; } } if (adj_x) { TransposeRowsColumns(context, lhs, GetTemporary(context, node, 0)); } RuntimeShape rhs_shape = adj_y ? orig_rhs_shape : SwapRowColumnDims(orig_rhs_shape); RuntimeShape lhs_shape = adj_x ? orig_lhs_shape : SwapRowColumnDims(orig_lhs_shape); switch (rhs->type) { case kTfLiteFloat32: // Note we pass RHS args first, LHS args second. See note above. if (kernel_type == kGenericOptimized) { optimized_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape, GetTensorData<float>(lhs_tensor), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); } else { reference_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape, GetTensorData<float>(lhs_tensor), GetTensorShape(output), GetTensorData<float>(output)); } break; case kTfLiteInt8: EvalQuantized<kernel_type>(context, node, op_data, lhs_shape, lhs_tensor, rhs_shape, rhs_tensor, output); break; default: TF_LITE_KERNEL_LOG(context, "Currently BatchMatMul doesn't support type: %s", TfLiteTypeGetName(lhs->type)); return kTfLiteError; } return kTfLiteOk; }
393
True
1
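The same commit message also covers tflite::GetTemporary, and the batch_matmul::Eval body stored above passes GetTempRhs/GetTempLhs and GetTemporary results straight into TransposeRowsColumns and the matmul kernels. A short excerpt-style sketch of the corresponding guards follows; it reuses the names from the function above and is illustrative only, not the exact patched diff.

// Excerpt-style sketch (inside batch_matmul::Eval): nullptr guards for the
// tensor lookups used above. Illustrative only, not the patched diff.
const TfLiteTensor* lhs = GetInput(context, node, kInputLHSTensor);
TF_LITE_ENSURE(context, lhs != nullptr);
const TfLiteTensor* rhs = GetInput(context, node, kInputRHSTensor);
TF_LITE_ENSURE(context, rhs != nullptr);
TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
TF_LITE_ENSURE(context, output != nullptr);

// Temporaries allocated during Prepare can also come back as nullptr.
const TfLiteTensor* rhs_tensor = adj_y ? rhs : GetTempRhs(context, node, rhs);
const TfLiteTensor* lhs_tensor = adj_x ? GetTempLhs(context, node, lhs) : lhs;
TF_LITE_ENSURE(context, rhs_tensor != nullptr);
TF_LITE_ENSURE(context, lhs_tensor != nullptr);

if (!adj_y && !(IsConstantTensor(rhs) && op_data->rhs_transposed)) {
  TfLiteTensor* transposed_rhs = GetTemporary(context, node, 1);
  TF_LITE_ENSURE(context, transposed_rhs != nullptr);
  TransposeRowsColumns(context, rhs, transposed_rhs);
  op_data->rhs_transposed = true;
}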
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::batch_matmul::Eval
tflite::ops::builtin::batch_matmul::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* lhs = GetInput(context, node, kInputLHSTensor); const TfLiteTensor* rhs = GetInput(context, node, kInputRHSTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); RuntimeShape orig_lhs_shape = GetTensorShape(lhs); RuntimeShape orig_rhs_shape = GetTensorShape(rhs); bool adj_y = op_context.params->adj_y; bool adj_x = op_context.params->adj_x; const TfLiteTensor* rhs_tensor = adj_y ? rhs : GetTempRhs(context, node, rhs); const TfLiteTensor* lhs_tensor = adj_x ? GetTempLhs(context, node, lhs) : lhs; if (!adj_y) { // TODO(b/154760341) Constant tensors should already be transposed, but // we transpose once if necessary for now. if (!(IsConstantTensor(rhs) && op_data->rhs_transposed)) { TransposeRowsColumns(context, rhs, GetTemporary(context, node, 1)); op_data->rhs_transposed = true; } } if (adj_x) { TransposeRowsColumns(context, lhs, GetTemporary(context, node, 0)); } RuntimeShape rhs_shape = adj_y ? orig_rhs_shape : SwapRowColumnDims(orig_rhs_shape); RuntimeShape lhs_shape = adj_x ? orig_lhs_shape : SwapRowColumnDims(orig_lhs_shape); switch (rhs->type) { case kTfLiteFloat32: // Note we pass RHS args first, LHS args second. See note above. if (kernel_type == kGenericOptimized) { optimized_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape, GetTensorData<float>(lhs_tensor), GetTensorShape(output), GetTensorData<float>(output), CpuBackendContext::GetFromContext(context)); } else { reference_ops::BatchMatMul(rhs_shape, GetTensorData<float>(rhs_tensor), lhs_shape, GetTensorData<float>(lhs_tensor), GetTensorShape(output), GetTensorData<float>(output)); } break; case kTfLiteInt8: EvalQuantized<kernel_type>(context, node, op_data, lhs_shape, lhs_tensor, rhs_shape, rhs_tensor, output); break; default: TF_LITE_KERNEL_LOG(context, "Currently BatchMatMul doesn't support type: %s", TfLiteTypeGetName(lhs->type)); return kTfLiteError; } return kTfLiteOk; }
393
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::batch_matmul::EvalQuantized
tflite::ops::builtin::batch_matmul::EvalQuantized( TfLiteContext * context , TfLiteNode * node , OpData * data , const RuntimeShape & lhs_shape , const TfLiteTensor * lhs , const RuntimeShape & rhs_shape , const TfLiteTensor * rhs , TfLiteTensor * output)
['context', 'node', 'data', 'lhs_shape', 'lhs', 'rhs_shape', 'rhs', 'output']
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, OpData* data, const RuntimeShape& lhs_shape, const TfLiteTensor* lhs, const RuntimeShape& rhs_shape, const TfLiteTensor* rhs, TfLiteTensor* output) { if (lhs->type == kTfLiteFloat32) { TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2); TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3); TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4); TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5); TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6); return EvalHybrid<kernel_type>( context, node, data, lhs_shape, lhs, rhs_shape, rhs, input_quantized, scaling_factors, accum_scratch, row_sums, input_offsets, output); } else if (lhs->type == kTfLiteInt8) { return EvalInt8<kernel_type>(context, data, lhs_shape, lhs, rhs_shape, rhs, GetTensorShape(output), output); } else { TF_LITE_KERNEL_LOG( context, "Currently only hybrid and int8 quantization is supported.\n"); return kTfLiteError; } return kTfLiteOk; }
201
True
1
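The commit message also singles out tflite::GetVariableInput and tflite::GetOptionalInputTensor, where a nullptr result can legitimately mean "input not supplied", so a check is only added when the pointer would otherwise be dereferenced unconditionally. A small sketch of the two cases follows; kBiasTensor is a hypothetical input index used purely for illustration.

// Sketch: required vs. genuinely optional inputs, per the commit message.
// kBiasTensor is a hypothetical input index used only for illustration.
const TfLiteTensor* input = GetInput(context, node, kInputTensor);
TF_LITE_ENSURE(context, input != nullptr);  // required: nullptr is an error

const TfLiteTensor* bias = GetOptionalInputTensor(context, node, kBiasTensor);
if (bias != nullptr) {
  // Optional: only touch the tensor when the model actually supplies it;
  // a missing optional input is not an error.
}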
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::batch_matmul::EvalQuantized
tflite::ops::builtin::batch_matmul::EvalQuantized( TfLiteContext * context , TfLiteNode * node , OpData * data , const RuntimeShape & lhs_shape , const TfLiteTensor * lhs , const RuntimeShape & rhs_shape , const TfLiteTensor * rhs , TfLiteTensor * output)
['context', 'node', 'data', 'lhs_shape', 'lhs', 'rhs_shape', 'rhs', 'output']
TfLiteStatus EvalQuantized(TfLiteContext* context, TfLiteNode* node, OpData* data, const RuntimeShape& lhs_shape, const TfLiteTensor* lhs, const RuntimeShape& rhs_shape, const TfLiteTensor* rhs, TfLiteTensor* output) { if (lhs->type == kTfLiteFloat32) { TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2); TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3); TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4); TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5); TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6); return EvalHybrid<kernel_type>( context, node, data, lhs_shape, lhs, rhs_shape, rhs, input_quantized, scaling_factors, accum_scratch, row_sums, input_offsets, output); } else if (lhs->type == kTfLiteInt8) { return EvalInt8<kernel_type>(context, data, lhs_shape, lhs, rhs_shape, rhs, GetTensorShape(output), output); } else { TF_LITE_KERNEL_LOG( context, "Currently only hybrid and int8 quantization is supported.\n"); return kTfLiteError; } return kTfLiteOk; }
201
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
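The sentinel remark in the CWE-125 description above can be illustrated with a generic, self-contained C++ snippet (assuming a POSIX `strnlen`); it is unrelated to the TensorFlow code quoted elsewhere in this record.

#include <cstddef>
#include <cstdio>
#include <cstring>

int main() {
  // Deliberately not NUL-terminated: a strlen-style scan would read past the end.
  char buf[8] = {'n', 'o', ' ', 's', 'e', 'n', 't', '!'};
  // strnlen bounds the scan to sizeof(buf), so the missing sentinel cannot
  // trigger an out-of-bounds read.
  std::size_t bounded = strnlen(buf, sizeof(buf));
  std::printf("bounded length: %zu\n", bounded);
  return 0;
}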
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
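A minimal sketch of the pattern this commit message describes, assuming only the TfLiteContext, TfLiteNode and TfLiteTensor types, the GetTemporary helper, and the TF_LITE_ENSURE / TF_LITE_ENSURE_OK macros already visible in the functions quoted in this record; the GetCheckedTemporary wrapper is a name invented here, not an API introduced by the patch.

// Obtain the temporary, then fail the node's preparation/evaluation with
// kTfLiteError instead of dereferencing a null tensor.
TfLiteStatus GetCheckedTemporary(TfLiteContext* context, TfLiteNode* node,
                                 int index, TfLiteTensor** tensor) {
  *tensor = GetTemporary(context, node, index);
  TF_LITE_ENSURE(context, *tensor != nullptr);
  return kTfLiteOk;
}

// Usage at a call site such as the ones in InitializeTemporaries:
//   TfLiteTensor* scratch_buffer = nullptr;
//   TF_LITE_ENSURE_OK(context,
//                     GetCheckedTemporary(context, node, /*index=*/0, &scratch_buffer));
//   scratch_buffer->type = op_context->lhs->type;  // safe: checked above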
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::batch_matmul::InitializeTemporaries
tflite::ops::builtin::batch_matmul::InitializeTemporaries( TfLiteContext * context , TfLiteNode * node , OpContext * op_context)
['context', 'node', 'op_context']
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node, OpContext* op_context) { // Create temporary tensors to hold transposed LHS/RHS. OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* lhs = op_context->lhs; const TfLiteTensor* rhs = op_context->rhs; TfLiteIntArrayFree(node->temporaries); // For "hybrid" quantization, we impose the constraint that the LHS // is float (typically an activation from a prior layer) and the RHS // is quantized int8. bool is_hybrid = (op_context->lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8); if (is_hybrid) { node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid); } else { node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints); } const int lhs_rank = NumDimensions(lhs); const int rhs_rank = NumDimensions(rhs); const int batch_size = op_context->params->adj_x ? lhs->dims->data[lhs_rank - 2] : lhs->dims->data[lhs_rank - 1]; const int num_units = op_context->params->adj_x ? lhs->dims->data[lhs_rank - 1] : lhs->dims->data[lhs_rank - 2]; // Temp tensor for Transposed LHS; { node->temporaries->data[0] = op_data->scratch_tensor_index; TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/0); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(lhs_rank); for (int i = 0; i < lhs_rank - 2; ++i) { scratch_buffer_size->data[i] = lhs->dims->data[i]; } // Swap last two dimensions. scratch_buffer_size->data[lhs_rank - 2] = lhs->dims->data[lhs_rank - 1]; scratch_buffer_size->data[lhs_rank - 1] = lhs->dims->data[lhs_rank - 2]; scratch_buffer->type = op_context->lhs->type; scratch_buffer->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } // We need a temp buffer for the RHS if we need to transpose the RHS. We // transpose by default, so that the two inputs (LHS and RHS) are in a proper // layout for our fast matrix multiplication routines. If the transpose flag // is set by the caller, the data is already in the desired layout. { node->temporaries->data[1] = op_data->scratch_tensor_index + 1; TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/1); const TfLiteTensor* rhs = op_context->rhs; int rhs_rank = NumDimensions(rhs); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(rhs_rank); for (int i = 0; i < rhs_rank - 2; ++i) { scratch_buffer_size->data[i] = rhs->dims->data[i]; } // Swap last two dimensions. scratch_buffer_size->data[rhs_rank - 2] = rhs->dims->data[rhs_rank - 1]; scratch_buffer_size->data[rhs_rank - 1] = rhs->dims->data[rhs_rank - 2]; if (IsConstantTensor(op_context->rhs)) { scratch_buffer->allocation_type = kTfLiteArenaRwPersistent; } else { scratch_buffer->allocation_type = kTfLiteArenaRw; } scratch_buffer->type = op_context->rhs->type; scratch_buffer->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } // If we have to perform on-the-fly quantization (with quantized weights and // float inputs) first we need to quantize the inputs. Allocate temporary // buffer to store the intermediate quantized values, the batch scaling // factors, the accumulator buffer (optimized version), the input offsets, // and the sums of the rows for each weights matrix. // RHS = weights, LHS = inputs if (is_hybrid) { // Calculate the total number of LHS batches. 
int num_batches = 1; for (int i = 0; i < lhs_rank - 2; ++i) { num_batches *= lhs->dims->data[i]; } int num_weights_matrices = 1; for (int i = 0; i < rhs_rank - 2; ++i) { num_weights_matrices *= rhs->dims->data[i]; } op_data->compute_row_sums = true; node->temporaries->data[2] = op_data->scratch_tensor_index + 2; TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2); input_quantized->type = op_context->rhs->type; input_quantized->allocation_type = kTfLiteArenaRw; TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(op_context->lhs->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); node->temporaries->data[3] = op_data->scratch_tensor_index + 3; TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; // Total size of scaling factors is batch size * number of total batches int scaling_dims[1] = {num_batches * batch_size}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[4] = op_data->scratch_tensor_index + 4; TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {num_units, batch_size}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2); accum_size->data[0] = num_units; accum_size->data[1] = batch_size; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, accum_scratch, accum_size)); } node->temporaries->data[5] = op_data->scratch_tensor_index + 5; TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5); input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) { TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); input_offsets_size->data[0] = num_batches * batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, input_offsets_size)); } node->temporaries->data[6] = op_data->scratch_tensor_index + 6; TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6); row_sums->type = kTfLiteInt32; row_sums->allocation_type = kTfLiteArenaRwPersistent; int row_sums_dims[1] = {num_weights_matrices * num_units}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1); row_sums_size->data[0] = row_sums_dims[0]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } } return kTfLiteOk; }
1085
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
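As a generic counterpart to the CWE-787 description above, the self-contained sketch below guards a write with an explicit bounds check; it is illustrative only and uses no TensorFlow types.

#include <cstddef>
#include <cstdint>
#include <vector>

// Refuse any index outside the buffer instead of corrupting adjacent heap memory.
bool CheckedWrite(std::vector<int32_t>& buf, std::ptrdiff_t index, int32_t value) {
  if (index < 0 || static_cast<std::size_t>(index) >= buf.size()) {
    return false;  // out-of-bounds write rejected
  }
  buf[static_cast<std::size_t>(index)] = value;
  return true;
}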
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::batch_matmul::InitializeTemporaries
tflite::ops::builtin::batch_matmul::InitializeTemporaries( TfLiteContext * context , TfLiteNode * node , OpContext * op_context)
['context', 'node', 'op_context']
TfLiteStatus InitializeTemporaries(TfLiteContext* context, TfLiteNode* node, OpContext* op_context) { // Create temporary tensors to hold transposed LHS/RHS. OpData* op_data = reinterpret_cast<OpData*>(node->user_data); const TfLiteTensor* lhs = op_context->lhs; const TfLiteTensor* rhs = op_context->rhs; TfLiteIntArrayFree(node->temporaries); // For "hybrid" quantization, we impose the constraint that the LHS // is float (typically an activation from a prior layer) and the RHS // is quantized int8. bool is_hybrid = (op_context->lhs->type == kTfLiteFloat32 && rhs->type == kTfLiteInt8); if (is_hybrid) { node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints + kNumTempTensorsForHybrid); } else { node->temporaries = TfLiteIntArrayCreate(kNumTempTensorsForAdjoints); } const int lhs_rank = NumDimensions(lhs); const int rhs_rank = NumDimensions(rhs); const int batch_size = op_context->params->adj_x ? lhs->dims->data[lhs_rank - 2] : lhs->dims->data[lhs_rank - 1]; const int num_units = op_context->params->adj_x ? lhs->dims->data[lhs_rank - 1] : lhs->dims->data[lhs_rank - 2]; // Temp tensor for Transposed LHS; { node->temporaries->data[0] = op_data->scratch_tensor_index; TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/0); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(lhs_rank); for (int i = 0; i < lhs_rank - 2; ++i) { scratch_buffer_size->data[i] = lhs->dims->data[i]; } // Swap last two dimensions. scratch_buffer_size->data[lhs_rank - 2] = lhs->dims->data[lhs_rank - 1]; scratch_buffer_size->data[lhs_rank - 1] = lhs->dims->data[lhs_rank - 2]; scratch_buffer->type = op_context->lhs->type; scratch_buffer->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } // We need a temp buffer for the RHS if we need to transpose the RHS. We // transpose by default, so that the two inputs (LHS and RHS) are in a proper // layout for our fast matrix multiplication routines. If the transpose flag // is set by the caller, the data is already in the desired layout. { node->temporaries->data[1] = op_data->scratch_tensor_index + 1; TfLiteTensor* scratch_buffer = GetTemporary(context, node, /*index=*/1); const TfLiteTensor* rhs = op_context->rhs; int rhs_rank = NumDimensions(rhs); TfLiteIntArray* scratch_buffer_size = TfLiteIntArrayCreate(rhs_rank); for (int i = 0; i < rhs_rank - 2; ++i) { scratch_buffer_size->data[i] = rhs->dims->data[i]; } // Swap last two dimensions. scratch_buffer_size->data[rhs_rank - 2] = rhs->dims->data[rhs_rank - 1]; scratch_buffer_size->data[rhs_rank - 1] = rhs->dims->data[rhs_rank - 2]; if (IsConstantTensor(op_context->rhs)) { scratch_buffer->allocation_type = kTfLiteArenaRwPersistent; } else { scratch_buffer->allocation_type = kTfLiteArenaRw; } scratch_buffer->type = op_context->rhs->type; scratch_buffer->allocation_type = kTfLiteArenaRw; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scratch_buffer, scratch_buffer_size)); } // If we have to perform on-the-fly quantization (with quantized weights and // float inputs) first we need to quantize the inputs. Allocate temporary // buffer to store the intermediate quantized values, the batch scaling // factors, the accumulator buffer (optimized version), the input offsets, // and the sums of the rows for each weights matrix. // RHS = weights, LHS = inputs if (is_hybrid) { // Calculate the total number of LHS batches. 
int num_batches = 1; for (int i = 0; i < lhs_rank - 2; ++i) { num_batches *= lhs->dims->data[i]; } int num_weights_matrices = 1; for (int i = 0; i < rhs_rank - 2; ++i) { num_weights_matrices *= rhs->dims->data[i]; } op_data->compute_row_sums = true; node->temporaries->data[2] = op_data->scratch_tensor_index + 2; TfLiteTensor* input_quantized = GetTemporary(context, node, /*index=*/2); input_quantized->type = op_context->rhs->type; input_quantized->allocation_type = kTfLiteArenaRw; TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(op_context->lhs->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); node->temporaries->data[3] = op_data->scratch_tensor_index + 3; TfLiteTensor* scaling_factors = GetTemporary(context, node, /*index=*/3); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; // Total size of scaling factors is batch size * number of total batches int scaling_dims[1] = {num_batches * batch_size}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[4] = op_data->scratch_tensor_index + 4; TfLiteTensor* accum_scratch = GetTemporary(context, node, /*index=*/4); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {num_units, batch_size}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_size = TfLiteIntArrayCreate(2); accum_size->data[0] = num_units; accum_size->data[1] = batch_size; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, accum_scratch, accum_size)); } node->temporaries->data[5] = op_data->scratch_tensor_index + 5; TfLiteTensor* input_offsets = GetTemporary(context, node, /*index=*/5); input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, scaling_dims)) { TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); input_offsets_size->data[0] = num_batches * batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, input_offsets_size)); } node->temporaries->data[6] = op_data->scratch_tensor_index + 6; TfLiteTensor* row_sums = GetTemporary(context, node, /*index=*/6); row_sums->type = kTfLiteInt32; row_sums->allocation_type = kTfLiteArenaRwPersistent; int row_sums_dims[1] = {num_weights_matrices * num_units}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1); row_sums_size->data[0] = row_sums_dims[0]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } } return kTfLiteOk; }
1085
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::batch_matmul::Prepare
tflite::ops::builtin::batch_matmul::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); bool adj_x = op_context.params->adj_x; bool adj_y = op_context.params->adj_y; const TfLiteTensor* lhs_data = GetInput(context, node, kInputLHSTensor); const TfLiteTensor* rhs_data = GetInput(context, node, kInputRHSTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Note that quantized inference requires that all tensors have their // parameters set. This is usually done during quantized training. if (lhs_data->type == kTfLiteInt8) { double real_multiplier = 0.0; TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( context, lhs_data, rhs_data, output, &real_multiplier)); int exponent; QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent); op_data->output_shift = exponent; // BatchMatMul has no fused activation functions. Therefore, set // output activation min and max to min and max of int8_t type, // respecitvely. op_data->output_activation_min = std::numeric_limits<int8_t>::min(); op_data->output_activation_max = std::numeric_limits<int8_t>::max(); } TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 || lhs_data->type == kTfLiteInt8); TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 || rhs_data->type == kTfLiteInt8); // Support dimensions between 2 and 4, inclusive. TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 4); TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 4); const int lhs_rank = NumDimensions(lhs_data); const int rhs_rank = NumDimensions(rhs_data); const int output_rank = std::max(lhs_rank, rhs_rank); const RuntimeShape extended_lhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data)); const RuntimeShape extended_rhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data)); // Ensure any batch dimensions obey broacasting rules. for (int i = 0; i < output_rank - 2; ++i) { const int lhs_dim = extended_lhs_shape.Dims(i); const int rhs_dim = extended_rhs_shape.Dims(i); if (lhs_dim != rhs_dim) { if (lhs_dim != 1) { TF_LITE_ENSURE_EQ(context, rhs_dim, 1); } } } // Ensure other dimensions work for matrix multiplication. int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2) : extended_lhs_shape.Dims(output_rank - 1); int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1) : extended_rhs_shape.Dims(output_rank - 2); TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs); TfLiteStatus status = ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x, adj_y, output_rank, output); return status; }
507
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::batch_matmul::Prepare
tflite::ops::builtin::batch_matmul::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); OpContext op_context(context, node); TF_LITE_ENSURE_OK(context, InitializeTemporaries(context, node, &op_context)); OpData* op_data = reinterpret_cast<OpData*>(node->user_data); bool adj_x = op_context.params->adj_x; bool adj_y = op_context.params->adj_y; const TfLiteTensor* lhs_data = GetInput(context, node, kInputLHSTensor); const TfLiteTensor* rhs_data = GetInput(context, node, kInputRHSTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Note that quantized inference requires that all tensors have their // parameters set. This is usually done during quantized training. if (lhs_data->type == kTfLiteInt8) { double real_multiplier = 0.0; TF_LITE_ENSURE_STATUS(GetQuantizedConvolutionMultipler( context, lhs_data, rhs_data, output, &real_multiplier)); int exponent; QuantizeMultiplier(real_multiplier, &op_data->output_multiplier, &exponent); op_data->output_shift = exponent; // BatchMatMul has no fused activation functions. Therefore, set // output activation min and max to min and max of int8_t type, // respecitvely. op_data->output_activation_min = std::numeric_limits<int8_t>::min(); op_data->output_activation_max = std::numeric_limits<int8_t>::max(); } TF_LITE_ENSURE(context, lhs_data->type == kTfLiteFloat32 || lhs_data->type == kTfLiteInt8); TF_LITE_ENSURE(context, rhs_data->type == kTfLiteFloat32 || rhs_data->type == kTfLiteInt8); // Support dimensions between 2 and 4, inclusive. TF_LITE_ENSURE(context, NumDimensions(lhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(lhs_data) <= 4); TF_LITE_ENSURE(context, NumDimensions(rhs_data) >= 2); TF_LITE_ENSURE(context, NumDimensions(rhs_data) <= 4); const int lhs_rank = NumDimensions(lhs_data); const int rhs_rank = NumDimensions(rhs_data); const int output_rank = std::max(lhs_rank, rhs_rank); const RuntimeShape extended_lhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(lhs_data)); const RuntimeShape extended_rhs_shape = RuntimeShape::ExtendedShape(output_rank, GetTensorShape(rhs_data)); // Ensure any batch dimensions obey broacasting rules. for (int i = 0; i < output_rank - 2; ++i) { const int lhs_dim = extended_lhs_shape.Dims(i); const int rhs_dim = extended_rhs_shape.Dims(i); if (lhs_dim != rhs_dim) { if (lhs_dim != 1) { TF_LITE_ENSURE_EQ(context, rhs_dim, 1); } } } // Ensure other dimensions work for matrix multiplication. int accum_dim_lhs = adj_x ? extended_lhs_shape.Dims(output_rank - 2) : extended_lhs_shape.Dims(output_rank - 1); int accum_dim_rhs = adj_y ? extended_rhs_shape.Dims(output_rank - 1) : extended_rhs_shape.Dims(output_rank - 2); TF_LITE_ENSURE_EQ(context, accum_dim_lhs, accum_dim_rhs); TfLiteStatus status = ResizeOutputTensor(context, extended_lhs_shape, extended_rhs_shape, adj_x, adj_y, output_rank, output); return status; }
507
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::bidirectional_sequence_rnn::Eval
tflite::ops::builtin::bidirectional_sequence_rnn::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>( node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* fw_input_weights = GetInput(context, node, kFwWeightsTensor); const TfLiteTensor* fw_recurrent_weights = GetInput(context, node, kFwRecurrentWeightsTensor); const TfLiteTensor* fw_bias = GetInput(context, node, kFwBiasTensor); const TfLiteTensor* bw_input_weights = GetInput(context, node, kBwWeightsTensor); const TfLiteTensor* bw_recurrent_weights = GetInput(context, node, kBwRecurrentWeightsTensor); const TfLiteTensor* bw_bias = GetInput(context, node, kBwBiasTensor); // Get auxiliary inputs. const TfLiteTensor* aux_input = GetOptionalInputTensor(context, node, kAuxInputTensor); const TfLiteTensor* fw_aux_input_weights = GetOptionalInputTensor(context, node, kFwAuxWeightsTensor); const TfLiteTensor* bw_aux_input_weights = GetOptionalInputTensor(context, node, kBwAuxWeightsTensor); TfLiteTensor* fw_hidden_state = GetVariableInput(context, node, kFwHiddenStateTensor); TF_LITE_ENSURE(context, fw_hidden_state != nullptr); TfLiteTensor* bw_hidden_state = GetVariableInput(context, node, kBwHiddenStateTensor); TF_LITE_ENSURE(context, bw_hidden_state != nullptr); TfLiteTensor* fw_output = GetOutput(context, node, kFwOutputTensor); TfLiteTensor* bw_output = params->merge_outputs ? nullptr : GetOutput(context, node, kBwOutputTensor); const bool has_previous_bw_output = (aux_input != nullptr); const bool use_aux_input = (fw_aux_input_weights != nullptr); // We want to cover the following cases: // // If not stacking (not connected after other bidi lstms): // both fw & bw will just use `input`; aux_input will be null. // // If stacking with cross_links, TensorFlow equivalent // (tf.contrib.rnn.stack_bidirectional_rnn): // both fw & bw will use `input`, but aux_input will be none null. // Note, this time, whether connected after other bidi lstms both works. // // If stacking without cross_links, but connected after other bidi lstms, // TensorFlow equivalent (tf.nn.static_bidirectional_rnn): // fw will use `input`, bw will use aux_input, and the `real aux_input` // will be null. const bool non_stacking_mode = !use_aux_input && has_previous_bw_output; const TfLiteTensor* bw_input = non_stacking_mode ? aux_input : input; const TfLiteTensor* real_aux_input = non_stacking_mode ? nullptr : aux_input; switch (fw_input_weights->type) { case kTfLiteFloat32: return EvalFloat(input, bw_input, fw_input_weights, fw_recurrent_weights, fw_bias, bw_input_weights, bw_recurrent_weights, bw_bias, real_aux_input, fw_aux_input_weights, bw_aux_input_weights, params, fw_hidden_state, fw_output, bw_hidden_state, bw_output); case kTfLiteUInt8: case kTfLiteInt8: { TfLiteTensor* input_quantized = GetTemporary(context, node, kInputQuantized); TfLiteTensor* fw_hidden_state_quantized = GetTemporary(context, node, kFwHiddenStateQuantized); TfLiteTensor* bw_hidden_state_quantized = GetTemporary(context, node, kBwHiddenStateQuantized); TfLiteTensor* scaling_factors = GetTemporary(context, node, kScalingFactors); TfLiteTensor* zero_points = GetTemporary(context, node, kZeroPoints); TfLiteTensor* accum_scratch = GetTemporary(context, node, kAccumScratch); TfLiteTensor* fw_row_sums = GetTemporary(context, node, kFwRowSums); TfLiteTensor* bw_row_sums = GetTemporary(context, node, kBwRowSums); TfLiteTensor* aux_input_quantized = use_aux_input ? 
GetTemporary(context, node, kAuxInputQuantized) : nullptr; auto* op_data = reinterpret_cast<OpData*>(node->user_data); return EvalHybrid( input, bw_input, fw_input_weights, fw_recurrent_weights, fw_bias, bw_input_weights, bw_recurrent_weights, bw_bias, real_aux_input, fw_aux_input_weights, bw_aux_input_weights, params, scaling_factors, input_quantized, aux_input_quantized, fw_hidden_state_quantized, fw_hidden_state, fw_output, bw_hidden_state_quantized, bw_hidden_state, bw_output, zero_points, accum_scratch, fw_row_sums, bw_row_sums, &op_data->fw_compute_row_sums, &op_data->bw_compute_row_sums); } default: context->ReportError(context, "Type not currently supported."); return kTfLiteError; } return kTfLiteOk; }
567
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
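As a generic illustration of the weakness described above (plain C++, not TensorFlow code), the snippet below shows how a single unvalidated index, such as the `-1` marker discussed in this CVE, turns a store into a write outside the intended buffer, and how a bounds check prevents it. The function names are illustrative.

```cpp
// Deliberately minimal illustration of CWE-787 (not TensorFlow code).
#include <cstdio>

// Buggy: no bounds check, so index == -1 or index >= buffer_len writes
// outside the intended buffer.
void StoreAt(int* buffer, int buffer_len, int index, int value) {
  (void)buffer_len;
  buffer[index] = value;
}

// Corrected: validate the index before the store.
bool StoreAtChecked(int* buffer, int buffer_len, int index, int value) {
  if (index < 0 || index >= buffer_len) return false;
  buffer[index] = value;
  return true;
}

int main() {
  int data[4] = {0, 0, 0, 0};
  // StoreAt(data, 4, -1, 42);  // would write before `data`: undefined behavior
  const bool ok = StoreAtChecked(data, 4, -1, 42);  // rejected, returns false
  std::printf("store accepted: %d, data[0]=%d\n", ok ? 1 : 0, data[0]);
  return 0;
}
```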
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
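A minimal sketch of the pattern this commit message describes, assuming the standard TfLite kernel helpers (`GetInput`, `GetOutput`, `TF_LITE_ENSURE`) and include paths; the tensor index constants are illustrative and this is not the exact patched code. Failing via `TF_LITE_ENSURE` returns an error through the context instead of dereferencing a null tensor pointer.

```cpp
// Sketch of the null-check pattern described in the commit message above.
// Assumed include paths and illustrative index constants.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace {

constexpr int kMyInputTensor = 0;
constexpr int kMyOutputTensor = 0;

TfLiteStatus EvalSketch(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = tflite::GetInput(context, node, kMyInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);  // fail instead of dereferencing

  TfLiteTensor* output = tflite::GetOutput(context, node, kMyOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);

  // ... the kernel body can now safely use `input` and `output` ...
  return kTfLiteOk;
}

}  // namespace
```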
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::bidirectional_sequence_rnn::Eval
tflite::ops::builtin::bidirectional_sequence_rnn::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>( node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* fw_input_weights = GetInput(context, node, kFwWeightsTensor); const TfLiteTensor* fw_recurrent_weights = GetInput(context, node, kFwRecurrentWeightsTensor); const TfLiteTensor* fw_bias = GetInput(context, node, kFwBiasTensor); const TfLiteTensor* bw_input_weights = GetInput(context, node, kBwWeightsTensor); const TfLiteTensor* bw_recurrent_weights = GetInput(context, node, kBwRecurrentWeightsTensor); const TfLiteTensor* bw_bias = GetInput(context, node, kBwBiasTensor); // Get auxiliary inputs. const TfLiteTensor* aux_input = GetOptionalInputTensor(context, node, kAuxInputTensor); const TfLiteTensor* fw_aux_input_weights = GetOptionalInputTensor(context, node, kFwAuxWeightsTensor); const TfLiteTensor* bw_aux_input_weights = GetOptionalInputTensor(context, node, kBwAuxWeightsTensor); TfLiteTensor* fw_hidden_state = GetVariableInput(context, node, kFwHiddenStateTensor); TF_LITE_ENSURE(context, fw_hidden_state != nullptr); TfLiteTensor* bw_hidden_state = GetVariableInput(context, node, kBwHiddenStateTensor); TF_LITE_ENSURE(context, bw_hidden_state != nullptr); TfLiteTensor* fw_output = GetOutput(context, node, kFwOutputTensor); TfLiteTensor* bw_output = params->merge_outputs ? nullptr : GetOutput(context, node, kBwOutputTensor); const bool has_previous_bw_output = (aux_input != nullptr); const bool use_aux_input = (fw_aux_input_weights != nullptr); // We want to cover the following cases: // // If not stacking (not connected after other bidi lstms): // both fw & bw will just use `input`; aux_input will be null. // // If stacking with cross_links, TensorFlow equivalent // (tf.contrib.rnn.stack_bidirectional_rnn): // both fw & bw will use `input`, but aux_input will be none null. // Note, this time, whether connected after other bidi lstms both works. // // If stacking without cross_links, but connected after other bidi lstms, // TensorFlow equivalent (tf.nn.static_bidirectional_rnn): // fw will use `input`, bw will use aux_input, and the `real aux_input` // will be null. const bool non_stacking_mode = !use_aux_input && has_previous_bw_output; const TfLiteTensor* bw_input = non_stacking_mode ? aux_input : input; const TfLiteTensor* real_aux_input = non_stacking_mode ? nullptr : aux_input; switch (fw_input_weights->type) { case kTfLiteFloat32: return EvalFloat(input, bw_input, fw_input_weights, fw_recurrent_weights, fw_bias, bw_input_weights, bw_recurrent_weights, bw_bias, real_aux_input, fw_aux_input_weights, bw_aux_input_weights, params, fw_hidden_state, fw_output, bw_hidden_state, bw_output); case kTfLiteUInt8: case kTfLiteInt8: { TfLiteTensor* input_quantized = GetTemporary(context, node, kInputQuantized); TfLiteTensor* fw_hidden_state_quantized = GetTemporary(context, node, kFwHiddenStateQuantized); TfLiteTensor* bw_hidden_state_quantized = GetTemporary(context, node, kBwHiddenStateQuantized); TfLiteTensor* scaling_factors = GetTemporary(context, node, kScalingFactors); TfLiteTensor* zero_points = GetTemporary(context, node, kZeroPoints); TfLiteTensor* accum_scratch = GetTemporary(context, node, kAccumScratch); TfLiteTensor* fw_row_sums = GetTemporary(context, node, kFwRowSums); TfLiteTensor* bw_row_sums = GetTemporary(context, node, kBwRowSums); TfLiteTensor* aux_input_quantized = use_aux_input ? 
GetTemporary(context, node, kAuxInputQuantized) : nullptr; auto* op_data = reinterpret_cast<OpData*>(node->user_data); return EvalHybrid( input, bw_input, fw_input_weights, fw_recurrent_weights, fw_bias, bw_input_weights, bw_recurrent_weights, bw_bias, real_aux_input, fw_aux_input_weights, bw_aux_input_weights, params, scaling_factors, input_quantized, aux_input_quantized, fw_hidden_state_quantized, fw_hidden_state, fw_output, bw_hidden_state_quantized, bw_hidden_state, bw_output, zero_points, accum_scratch, fw_row_sums, bw_row_sums, &op_data->fw_compute_row_sums, &op_data->bw_compute_row_sums); } default: context->ReportError(context, "Type not currently supported."); return kTfLiteError; } return kTfLiteOk; }
567
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
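To tie the weakness above back to this CVE's double-indexing scheme, here is a small, self-contained illustration (plain C++, not TensorFlow code) of how subscripting a tensor table with the unchecked `-1` marker becomes an out-of-bounds read, and how a guarded lookup avoids it. `FakeTensor` and the lookup helpers are illustrative, not TFLite types.

```cpp
// Illustration only: FakeTensor and the lookup helpers are not TFLite types.
#include <vector>

struct FakeTensor {
  const void* data = nullptr;
  int bytes = 0;
};

// Buggy: trusts the index taken from the model file. index == -1 (the
// "optional tensor" marker) indexes outside the vector's storage, which is
// undefined behavior and an out-of-bounds read (CWE-125).
const FakeTensor* LookupUnchecked(const std::vector<FakeTensor>& tensors,
                                  int index) {
  return &tensors[index];
}

// Guarded: treat -1 (and any other out-of-range index) as "no tensor".
const FakeTensor* LookupChecked(const std::vector<FakeTensor>& tensors,
                                int index) {
  if (index < 0 || index >= static_cast<int>(tensors.size())) return nullptr;
  return &tensors[index];
}
```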
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::bidirectional_sequence_rnn::Prepare
tflite::ops::builtin::bidirectional_sequence_rnn::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>( node->builtin_data); // Check we have all the inputs and outputs we need. TF_LITE_ENSURE_EQ(context, node->inputs->size, 12); TF_LITE_ENSURE_EQ(context, node->outputs->size, params->merge_outputs ? 1 : 2); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* fw_input_weights = GetInput(context, node, kFwWeightsTensor); const TfLiteTensor* fw_recurrent_weights = GetInput(context, node, kFwRecurrentWeightsTensor); const TfLiteTensor* fw_bias = GetInput(context, node, kFwBiasTensor); const TfLiteTensor* fw_hidden_state = GetInput(context, node, kFwHiddenStateTensor); const TfLiteTensor* bw_input_weights = GetInput(context, node, kBwWeightsTensor); const TfLiteTensor* bw_recurrent_weights = GetInput(context, node, kBwRecurrentWeightsTensor); const TfLiteTensor* bw_bias = GetInput(context, node, kBwBiasTensor); const TfLiteTensor* bw_hidden_state = GetInput(context, node, kBwHiddenStateTensor); const TfLiteTensor* aux_input = GetOptionalInputTensor(context, node, kAuxInputTensor); const TfLiteTensor* fw_aux_input_weights = GetOptionalInputTensor(context, node, kFwAuxWeightsTensor); const TfLiteTensor* bw_aux_input_weights = GetOptionalInputTensor(context, node, kBwAuxWeightsTensor); const bool aux_inputs_weights_or_none = ((fw_aux_input_weights != nullptr) && (bw_aux_input_weights != nullptr)) || ((fw_aux_input_weights == nullptr) && (bw_aux_input_weights == nullptr)); TF_LITE_ENSURE(context, aux_inputs_weights_or_none); const bool has_aux_input = (fw_aux_input_weights != nullptr); // Check all the parameters of tensor match within themselves and match the // input configuration. TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); TF_LITE_ENSURE_EQ(context, input->dims->size, 3); const bool time_major = params->time_major; const int batch_size = (time_major) ? input->dims->data[1] : input->dims->data[0]; const int max_time = (time_major) ? input->dims->data[0] : input->dims->data[1]; const int fw_num_units = fw_input_weights->dims->data[0]; const int bw_num_units = bw_input_weights->dims->data[0]; TF_LITE_ENSURE_EQ(context, input->dims->data[2], fw_input_weights->dims->data[1]); TF_LITE_ENSURE_EQ(context, input->dims->data[2], bw_input_weights->dims->data[1]); TF_LITE_ENSURE_EQ(context, fw_input_weights->dims->data[0], fw_bias->dims->data[0]); TF_LITE_ENSURE_EQ(context, bw_input_weights->dims->data[0], bw_bias->dims->data[0]); TF_LITE_ENSURE_EQ(context, fw_recurrent_weights->dims->data[0], fw_bias->dims->data[0]); TF_LITE_ENSURE_EQ(context, bw_recurrent_weights->dims->data[1], bw_bias->dims->data[0]); TF_LITE_ENSURE_EQ(context, NumDimensions(fw_hidden_state), 2); TF_LITE_ENSURE_EQ(context, fw_hidden_state->dims->data[0], batch_size); TF_LITE_ENSURE_EQ(context, fw_hidden_state->dims->data[1], fw_num_units); TF_LITE_ENSURE_EQ(context, NumDimensions(bw_hidden_state), 2); TF_LITE_ENSURE_EQ(context, bw_hidden_state->dims->data[0], batch_size); TF_LITE_ENSURE_EQ(context, bw_hidden_state->dims->data[1], bw_num_units); if (has_aux_input) { // Check that aux_input has the same dimensions (except last) as the input. TF_LITE_ASSERT_EQ(aux_input->dims->data[0], input->dims->data[0]); TF_LITE_ASSERT_EQ(aux_input->dims->data[1], input->dims->data[1]); // Check that aux_input_weights has the same dimensions (except last) as // the input_weights. 
TF_LITE_ASSERT_EQ(fw_aux_input_weights->dims->data[0], fw_num_units); TF_LITE_ASSERT_EQ(bw_aux_input_weights->dims->data[0], bw_num_units); TF_LITE_ASSERT_EQ(aux_input->dims->data[2], fw_aux_input_weights->dims->data[1]); TF_LITE_ASSERT_EQ(aux_input->dims->data[2], bw_aux_input_weights->dims->data[1]); } if (IsHybridOp(input, fw_input_weights)) { OpData* op_data = reinterpret_cast<OpData*>(node->user_data); op_data->fw_compute_row_sums = true; op_data->bw_compute_row_sums = true; TfLiteIntArrayFree(node->temporaries); if (has_aux_input) { node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors); } else { // No need to create a temporary tensor for the non-existent aux_input. node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors - 1); } node->temporaries->data[kInputQuantized] = op_data->scratch_tensor_index + kInputQuantized; TfLiteTensor* input_quantized = GetTemporary(context, node, kInputQuantized); input_quantized->type = fw_input_weights->type; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[kFwHiddenStateQuantized] = op_data->scratch_tensor_index + kFwHiddenStateQuantized; TfLiteTensor* fw_hidden_state_quantized = GetTemporary(context, node, kFwHiddenStateQuantized); fw_hidden_state_quantized->type = fw_input_weights->type; fw_hidden_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(fw_hidden_state_quantized->dims, fw_hidden_state->dims)) { TfLiteIntArray* fw_hidden_state_quantized_size = TfLiteIntArrayCopy(fw_hidden_state->dims); TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, fw_hidden_state_quantized, fw_hidden_state_quantized_size)); } node->temporaries->data[kBwHiddenStateQuantized] = op_data->scratch_tensor_index + kBwHiddenStateQuantized; TfLiteTensor* bw_hidden_state_quantized = GetTemporary(context, node, kBwHiddenStateQuantized); bw_hidden_state_quantized->type = fw_input_weights->type; bw_hidden_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(bw_hidden_state_quantized->dims, bw_hidden_state->dims)) { TfLiteIntArray* bw_hidden_state_quantized_size = TfLiteIntArrayCopy(bw_hidden_state->dims); TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, bw_hidden_state_quantized, bw_hidden_state_quantized_size)); } // Allocate temporary tensors to store scaling factors of quantization. 
node->temporaries->data[kScalingFactors] = op_data->scratch_tensor_index + kScalingFactors; TfLiteTensor* scaling_factors = GetTemporary(context, node, kScalingFactors); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; int scaling_dims[1] = {batch_size}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[kAccumScratch] = op_data->scratch_tensor_index + kAccumScratch; TfLiteTensor* accum_scratch = GetTemporary(context, node, kAccumScratch); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {std::max(fw_num_units, bw_num_units), batch_size}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2); accum_scratch_size->data[0] = accum_scratch_dims[0]; accum_scratch_size->data[1] = accum_scratch_dims[1]; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch, accum_scratch_size)); } node->temporaries->data[kZeroPoints] = op_data->scratch_tensor_index + kZeroPoints; TfLiteTensor* zero_points = GetTemporary(context, node, /*index=*/kZeroPoints); zero_points->type = kTfLiteInt32; zero_points->allocation_type = kTfLiteArenaRw; int zero_points_dims[1] = {batch_size}; if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) { TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1); zero_points_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points, zero_points_size)); } const int num_row_sums = has_aux_input ? 
3 : 2; node->temporaries->data[kFwRowSums] = op_data->scratch_tensor_index + kFwRowSums; TfLiteTensor* fw_row_sums = GetTemporary(context, node, /*index=*/kFwRowSums); fw_row_sums->type = kTfLiteInt32; fw_row_sums->allocation_type = kTfLiteArenaRwPersistent; int fw_row_sums_dims[2] = {num_row_sums, fw_num_units}; if (!TfLiteIntArrayEqualsArray(fw_row_sums->dims, 2, fw_row_sums_dims)) { TfLiteIntArray* fw_row_sums_size = TfLiteIntArrayCreate(2); fw_row_sums_size->data[0] = fw_row_sums_dims[0]; fw_row_sums_size->data[1] = fw_row_sums_dims[1]; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, fw_row_sums, fw_row_sums_size)); } node->temporaries->data[kBwRowSums] = op_data->scratch_tensor_index + kBwRowSums; TfLiteTensor* bw_row_sums = GetTemporary(context, node, /*index=*/kBwRowSums); bw_row_sums->type = kTfLiteInt32; bw_row_sums->allocation_type = kTfLiteArenaRwPersistent; int bw_row_sums_dims[2] = {num_row_sums, bw_num_units}; if (!TfLiteIntArrayEqualsArray(bw_row_sums->dims, 2, bw_row_sums_dims)) { TfLiteIntArray* bw_row_sums_size = TfLiteIntArrayCreate(2); bw_row_sums_size->data[0] = bw_row_sums_dims[0]; bw_row_sums_size->data[1] = bw_row_sums_dims[1]; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_row_sums, bw_row_sums_size)); } if (has_aux_input) { node->temporaries->data[kAuxInputQuantized] = op_data->scratch_tensor_index + kAuxInputQuantized; TfLiteTensor* aux_input_quantized = GetTemporary(context, node, kAuxInputQuantized); aux_input_quantized->type = fw_input_weights->type; aux_input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(aux_input_quantized->dims, aux_input->dims)) { TfLiteIntArray* aux_input_quantized_size = TfLiteIntArrayCopy(aux_input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, aux_input_quantized, aux_input_quantized_size)); } } } // Resize outputs. TfLiteTensor* fw_output = GetOutput(context, node, kFwOutputTensor); TfLiteIntArray* fw_output_size_array = TfLiteIntArrayCreate(3); fw_output_size_array->data[0] = (time_major) ? max_time : batch_size; fw_output_size_array->data[1] = (time_major) ? batch_size : max_time; fw_output_size_array->data[2] = params->merge_outputs ? fw_num_units + bw_num_units : fw_num_units; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, fw_output, fw_output_size_array)); if (!params->merge_outputs) { TfLiteTensor* bw_output = GetOutput(context, node, kBwOutputTensor); TfLiteIntArray* bw_output_size_array = TfLiteIntArrayCreate(3); bw_output_size_array->data[0] = batch_size; bw_output_size_array->data[1] = max_time; bw_output_size_array->data[2] = bw_num_units; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_output, bw_output_size_array)); } return kTfLiteOk; }
1881
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::bidirectional_sequence_rnn::Prepare
tflite::ops::builtin::bidirectional_sequence_rnn::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const auto* params = reinterpret_cast<TfLiteBidirectionalSequenceRNNParams*>( node->builtin_data); // Check we have all the inputs and outputs we need. TF_LITE_ENSURE_EQ(context, node->inputs->size, 12); TF_LITE_ENSURE_EQ(context, node->outputs->size, params->merge_outputs ? 1 : 2); const TfLiteTensor* input = GetInput(context, node, kInputTensor); const TfLiteTensor* fw_input_weights = GetInput(context, node, kFwWeightsTensor); const TfLiteTensor* fw_recurrent_weights = GetInput(context, node, kFwRecurrentWeightsTensor); const TfLiteTensor* fw_bias = GetInput(context, node, kFwBiasTensor); const TfLiteTensor* fw_hidden_state = GetInput(context, node, kFwHiddenStateTensor); const TfLiteTensor* bw_input_weights = GetInput(context, node, kBwWeightsTensor); const TfLiteTensor* bw_recurrent_weights = GetInput(context, node, kBwRecurrentWeightsTensor); const TfLiteTensor* bw_bias = GetInput(context, node, kBwBiasTensor); const TfLiteTensor* bw_hidden_state = GetInput(context, node, kBwHiddenStateTensor); const TfLiteTensor* aux_input = GetOptionalInputTensor(context, node, kAuxInputTensor); const TfLiteTensor* fw_aux_input_weights = GetOptionalInputTensor(context, node, kFwAuxWeightsTensor); const TfLiteTensor* bw_aux_input_weights = GetOptionalInputTensor(context, node, kBwAuxWeightsTensor); const bool aux_inputs_weights_or_none = ((fw_aux_input_weights != nullptr) && (bw_aux_input_weights != nullptr)) || ((fw_aux_input_weights == nullptr) && (bw_aux_input_weights == nullptr)); TF_LITE_ENSURE(context, aux_inputs_weights_or_none); const bool has_aux_input = (fw_aux_input_weights != nullptr); // Check all the parameters of tensor match within themselves and match the // input configuration. TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); TF_LITE_ENSURE_EQ(context, input->dims->size, 3); const bool time_major = params->time_major; const int batch_size = (time_major) ? input->dims->data[1] : input->dims->data[0]; const int max_time = (time_major) ? input->dims->data[0] : input->dims->data[1]; const int fw_num_units = fw_input_weights->dims->data[0]; const int bw_num_units = bw_input_weights->dims->data[0]; TF_LITE_ENSURE_EQ(context, input->dims->data[2], fw_input_weights->dims->data[1]); TF_LITE_ENSURE_EQ(context, input->dims->data[2], bw_input_weights->dims->data[1]); TF_LITE_ENSURE_EQ(context, fw_input_weights->dims->data[0], fw_bias->dims->data[0]); TF_LITE_ENSURE_EQ(context, bw_input_weights->dims->data[0], bw_bias->dims->data[0]); TF_LITE_ENSURE_EQ(context, fw_recurrent_weights->dims->data[0], fw_bias->dims->data[0]); TF_LITE_ENSURE_EQ(context, bw_recurrent_weights->dims->data[1], bw_bias->dims->data[0]); TF_LITE_ENSURE_EQ(context, NumDimensions(fw_hidden_state), 2); TF_LITE_ENSURE_EQ(context, fw_hidden_state->dims->data[0], batch_size); TF_LITE_ENSURE_EQ(context, fw_hidden_state->dims->data[1], fw_num_units); TF_LITE_ENSURE_EQ(context, NumDimensions(bw_hidden_state), 2); TF_LITE_ENSURE_EQ(context, bw_hidden_state->dims->data[0], batch_size); TF_LITE_ENSURE_EQ(context, bw_hidden_state->dims->data[1], bw_num_units); if (has_aux_input) { // Check that aux_input has the same dimensions (except last) as the input. TF_LITE_ASSERT_EQ(aux_input->dims->data[0], input->dims->data[0]); TF_LITE_ASSERT_EQ(aux_input->dims->data[1], input->dims->data[1]); // Check that aux_input_weights has the same dimensions (except last) as // the input_weights. 
TF_LITE_ASSERT_EQ(fw_aux_input_weights->dims->data[0], fw_num_units); TF_LITE_ASSERT_EQ(bw_aux_input_weights->dims->data[0], bw_num_units); TF_LITE_ASSERT_EQ(aux_input->dims->data[2], fw_aux_input_weights->dims->data[1]); TF_LITE_ASSERT_EQ(aux_input->dims->data[2], bw_aux_input_weights->dims->data[1]); } if (IsHybridOp(input, fw_input_weights)) { OpData* op_data = reinterpret_cast<OpData*>(node->user_data); op_data->fw_compute_row_sums = true; op_data->bw_compute_row_sums = true; TfLiteIntArrayFree(node->temporaries); if (has_aux_input) { node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors); } else { // No need to create a temporary tensor for the non-existent aux_input. node->temporaries = TfLiteIntArrayCreate(kNumTemporaryTensors - 1); } node->temporaries->data[kInputQuantized] = op_data->scratch_tensor_index + kInputQuantized; TfLiteTensor* input_quantized = GetTemporary(context, node, kInputQuantized); input_quantized->type = fw_input_weights->type; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[kFwHiddenStateQuantized] = op_data->scratch_tensor_index + kFwHiddenStateQuantized; TfLiteTensor* fw_hidden_state_quantized = GetTemporary(context, node, kFwHiddenStateQuantized); fw_hidden_state_quantized->type = fw_input_weights->type; fw_hidden_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(fw_hidden_state_quantized->dims, fw_hidden_state->dims)) { TfLiteIntArray* fw_hidden_state_quantized_size = TfLiteIntArrayCopy(fw_hidden_state->dims); TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, fw_hidden_state_quantized, fw_hidden_state_quantized_size)); } node->temporaries->data[kBwHiddenStateQuantized] = op_data->scratch_tensor_index + kBwHiddenStateQuantized; TfLiteTensor* bw_hidden_state_quantized = GetTemporary(context, node, kBwHiddenStateQuantized); bw_hidden_state_quantized->type = fw_input_weights->type; bw_hidden_state_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(bw_hidden_state_quantized->dims, bw_hidden_state->dims)) { TfLiteIntArray* bw_hidden_state_quantized_size = TfLiteIntArrayCopy(bw_hidden_state->dims); TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, bw_hidden_state_quantized, bw_hidden_state_quantized_size)); } // Allocate temporary tensors to store scaling factors of quantization. 
node->temporaries->data[kScalingFactors] = op_data->scratch_tensor_index + kScalingFactors; TfLiteTensor* scaling_factors = GetTemporary(context, node, kScalingFactors); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; int scaling_dims[1] = {batch_size}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[kAccumScratch] = op_data->scratch_tensor_index + kAccumScratch; TfLiteTensor* accum_scratch = GetTemporary(context, node, kAccumScratch); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; int accum_scratch_dims[2] = {std::max(fw_num_units, bw_num_units), batch_size}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2); accum_scratch_size->data[0] = accum_scratch_dims[0]; accum_scratch_size->data[1] = accum_scratch_dims[1]; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch, accum_scratch_size)); } node->temporaries->data[kZeroPoints] = op_data->scratch_tensor_index + kZeroPoints; TfLiteTensor* zero_points = GetTemporary(context, node, /*index=*/kZeroPoints); zero_points->type = kTfLiteInt32; zero_points->allocation_type = kTfLiteArenaRw; int zero_points_dims[1] = {batch_size}; if (!TfLiteIntArrayEqualsArray(zero_points->dims, 1, zero_points_dims)) { TfLiteIntArray* zero_points_size = TfLiteIntArrayCreate(1); zero_points_size->data[0] = batch_size; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, zero_points, zero_points_size)); } const int num_row_sums = has_aux_input ? 
3 : 2; node->temporaries->data[kFwRowSums] = op_data->scratch_tensor_index + kFwRowSums; TfLiteTensor* fw_row_sums = GetTemporary(context, node, /*index=*/kFwRowSums); fw_row_sums->type = kTfLiteInt32; fw_row_sums->allocation_type = kTfLiteArenaRwPersistent; int fw_row_sums_dims[2] = {num_row_sums, fw_num_units}; if (!TfLiteIntArrayEqualsArray(fw_row_sums->dims, 2, fw_row_sums_dims)) { TfLiteIntArray* fw_row_sums_size = TfLiteIntArrayCreate(2); fw_row_sums_size->data[0] = fw_row_sums_dims[0]; fw_row_sums_size->data[1] = fw_row_sums_dims[1]; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, fw_row_sums, fw_row_sums_size)); } node->temporaries->data[kBwRowSums] = op_data->scratch_tensor_index + kBwRowSums; TfLiteTensor* bw_row_sums = GetTemporary(context, node, /*index=*/kBwRowSums); bw_row_sums->type = kTfLiteInt32; bw_row_sums->allocation_type = kTfLiteArenaRwPersistent; int bw_row_sums_dims[2] = {num_row_sums, bw_num_units}; if (!TfLiteIntArrayEqualsArray(bw_row_sums->dims, 2, bw_row_sums_dims)) { TfLiteIntArray* bw_row_sums_size = TfLiteIntArrayCreate(2); bw_row_sums_size->data[0] = bw_row_sums_dims[0]; bw_row_sums_size->data[1] = bw_row_sums_dims[1]; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_row_sums, bw_row_sums_size)); } if (has_aux_input) { node->temporaries->data[kAuxInputQuantized] = op_data->scratch_tensor_index + kAuxInputQuantized; TfLiteTensor* aux_input_quantized = GetTemporary(context, node, kAuxInputQuantized); aux_input_quantized->type = fw_input_weights->type; aux_input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(aux_input_quantized->dims, aux_input->dims)) { TfLiteIntArray* aux_input_quantized_size = TfLiteIntArrayCopy(aux_input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, aux_input_quantized, aux_input_quantized_size)); } } } // Resize outputs. TfLiteTensor* fw_output = GetOutput(context, node, kFwOutputTensor); TfLiteIntArray* fw_output_size_array = TfLiteIntArrayCreate(3); fw_output_size_array->data[0] = (time_major) ? max_time : batch_size; fw_output_size_array->data[1] = (time_major) ? batch_size : max_time; fw_output_size_array->data[2] = params->merge_outputs ? fw_num_units + bw_num_units : fw_num_units; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, fw_output, fw_output_size_array)); if (!params->merge_outputs) { TfLiteTensor* bw_output = GetOutput(context, node, kBwOutputTensor); TfLiteIntArray* bw_output_size_array = TfLiteIntArrayCreate(3); bw_output_size_array->data[0] = batch_size; bw_output_size_array->data[1] = max_time; bw_output_size_array->data[2] = bw_num_units; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, bw_output, bw_output_size_array)); } return kTfLiteOk; }
1881
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::cast::Eval
tflite::ops::builtin::cast::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const int num_elements = NumElements(input); TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output)); switch (input->type) { case kTfLiteInt64: return copyToTensor(context, input->data.i64, output, num_elements); case kTfLiteInt32: return copyToTensor(context, input->data.i32, output, num_elements); case kTfLiteUInt8: return copyToTensor(context, input->data.uint8, output, num_elements); case kTfLiteFloat32: return copyToTensor(context, GetTensorData<float>(input), output, num_elements); case kTfLiteBool: return copyToTensor(context, input->data.b, output, num_elements); case kTfLiteComplex64: return copyToTensor( context, reinterpret_cast<std::complex<float>*>(input->data.c64), output, num_elements); default: // Unsupported type. TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Cast"); } return kTfLiteOk; }
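The `cast::Eval` body above dereferences the results of `GetInput`/`GetOutput` directly. Below is a hedged sketch of how the same prologue could be guarded, in the spirit of the fix commit; it assumes the surrounding cast kernel's constants (`kInputTensor`, `kOutputTensor`) and the standard TFLite kernel helpers, and it is an illustration rather than the exact patched code.

```cpp
// Sketch: same prologue as cast::Eval above, with null checks added before
// the tensors are used. Assumes the surrounding kernel's includes, namespace,
// and tensor index constants.
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);
  TfLiteTensor* output = GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);

  const int num_elements = NumElements(input);
  TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output));
  // ... the per-type copyToTensor switch from the original function follows ...
  return kTfLiteOk;
}
```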
212
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::cast::Eval
tflite::ops::builtin::cast::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const int num_elements = NumElements(input); TF_LITE_ENSURE_EQ(context, num_elements, NumElements(output)); switch (input->type) { case kTfLiteInt64: return copyToTensor(context, input->data.i64, output, num_elements); case kTfLiteInt32: return copyToTensor(context, input->data.i32, output, num_elements); case kTfLiteUInt8: return copyToTensor(context, input->data.uint8, output, num_elements); case kTfLiteFloat32: return copyToTensor(context, GetTensorData<float>(input), output, num_elements); case kTfLiteBool: return copyToTensor(context, input->data.b, output, num_elements); case kTfLiteComplex64: return copyToTensor( context, reinterpret_cast<std::complex<float>*>(input->data.c64), output, num_elements); default: // Unsupported type. TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Cast"); } return kTfLiteOk; }
212
True
1
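The commit message and the vulnerable `cast::Eval` body in the record above describe the fix only in prose. Below is a minimal, hedged sketch of what the inserted guards look like in a kernel of this shape; it is not the verbatim TensorFlow patch, and the include paths and the `kInputTensor`/`kOutputTensor` constants are assumptions made purely for illustration.

```cpp
// Hedged sketch of the nullptr guards described in the commit message above,
// applied to a cast-style kernel. Not the verbatim TensorFlow patch.
#include "tensorflow/lite/c/common.h"             // TfLiteContext, TfLiteTensor, TF_LITE_ENSURE
#include "tensorflow/lite/kernels/kernel_util.h"  // GetInput, GetOutput, NumElements

namespace {

constexpr int kInputTensor = 0;   // assumed index constants for the sketch
constexpr int kOutputTensor = 0;

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = tflite::GetInput(context, node, kInputTensor);
  TF_LITE_ENSURE(context, input != nullptr);   // reject a tensor resolved through a bogus (-1) index
  TfLiteTensor* output = tflite::GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);  // same guard before the output is dereferenced
  TF_LITE_ENSURE_EQ(context, tflite::NumElements(input), tflite::NumElements(output));
  // ... per-type copy logic as in the original kernel ...
  return kTfLiteOk;
}

}  // namespace
```

The guard has to sit immediately after each `GetInput`/`GetOutput` call, before any dereference, so that a model exercising the `-1` index trick fails the kernel with `kTfLiteError` instead of reading or writing out of bounds.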
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::cast::Prepare
tflite::ops::builtin::cast::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): these two checks would make the new implementation // incompatible with some existing models, where params is not specified. It // is OK not to have them because toco would have set input and output types // to match the parameters. // auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data); // TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type); // TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
80
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::cast::Prepare
tflite::ops::builtin::cast::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // TODO(ahentz): these two checks would make the new implementation // incompatible with some existing models, where params is not specified. It // is OK not to have them because toco would have set input and output types // to match the parameters. // auto* params = reinterpret_cast<TfLiteCastParams*>(node->builtin_data); // TF_LITE_ENSURE_EQ(context, input->type, params->in_data_type); // TF_LITE_ENSURE_EQ(context, output->type, params->out_data_type); return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
80
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::ceil::Eval
tflite::ops::builtin::ceil::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (input->type != kTfLiteFloat32) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Ceil"); } optimized_ops::Ceil(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; }
94
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::ceil::Eval
tflite::ops::builtin::ceil::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (input->type != kTfLiteFloat32) { TF_LITE_UNSUPPORTED_TYPE(context, input->type, "Ceil"); } optimized_ops::Ceil(GetTensorShape(input), GetTensorData<float>(input), GetTensorShape(output), GetTensorData<float>(output)); return kTfLiteOk; }
94
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::ceil::Prepare
tflite::ops::builtin::ceil::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); output->type = input->type; TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims); return context->ResizeTensor(context, output, output_size); }
105
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::ceil::Prepare
tflite::ops::builtin::ceil::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); TF_LITE_ENSURE_TYPES_EQ(context, input->type, kTfLiteFloat32); output->type = input->type; TfLiteIntArray* output_size = TfLiteIntArrayCopy(input->dims); return context->ResizeTensor(context, output, output_size); }
105
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::ComparisonPrepareCommon
tflite::ops::builtin::comparisons::ComparisonPrepareCommon( TfLiteContext * context , TfLiteNode * node , bool is_string_allowed)
['context', 'node', 'is_string_allowed']
TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node, bool is_string_allowed) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Don't support string. if (!is_string_allowed) { TF_LITE_ENSURE(context, input1->type != kTfLiteString); } // Currently only support tensors have the same type. TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = kTfLiteBool; bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
181
True
1
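The CVE description repeated in these records suggests a custom `Verifier` over the flatbuffer as a stop-gap while upgrading. A hedged sketch of such a pre-load check is shown below; this strict variant simply rejects every negative tensor index, since maintaining a per-operator allow-list of genuinely optional inputs is exactly the error-prone part the advisory warns about. The header path and the generated `Model`/`SubGraph`/`Operator` accessors are assumptions based on the usual `schema_generated.h` layout, not an API confirmed by this dataset.

```cpp
// Hedged sketch of a pre-load check for the "-1 tensor index" pattern from
// the CVE description. Assumes the standard flatbuffer-generated accessors.
#include <cstdint>

#include "tensorflow/lite/schema/schema_generated.h"  // tflite::GetModel, tflite::Model

// Returns true only if no operator references a negative tensor index.
// A production verifier would additionally allow -1 for the specific
// optional inputs of specific ops; this sketch deliberately skips that.
bool ModelUsesOnlyValidTensorIndices(const void* buffer) {
  const tflite::Model* model = tflite::GetModel(buffer);
  if (model == nullptr || model->subgraphs() == nullptr) return false;
  for (const tflite::SubGraph* subgraph : *model->subgraphs()) {
    if (subgraph->operators() == nullptr) continue;
    for (const tflite::Operator* op : *subgraph->operators()) {
      if (op->inputs() != nullptr) {
        for (int32_t idx : *op->inputs()) {
          if (idx < 0) return false;  // -1 is only legitimate for optional inputs
        }
      }
      if (op->outputs() != nullptr) {
        for (int32_t idx : *op->outputs()) {
          if (idx < 0) return false;  // outputs must never be optional
        }
      }
    }
  }
  return true;
}
```

As the description itself notes, this kind of filter is a workaround at best; the recommended remediation is to move to the patched releases.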
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
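The description above hinges on the double-indexing pattern: an operator stores tensor indices, those indices select entries in the subgraph's tensor array, and `-1` is reserved for optional tensors. Below is a minimal, self-contained C++ sketch of that pattern using simplified stand-in types (not the actual TensorFlow Lite data structures), showing why an unvalidated `-1` index would land just outside the array and how a bounds check rejects it.

// Simplified sketch of the double-indexing scheme described above.
// Tensor and Node are stand-ins, not the real TensorFlow Lite types.
#include <cstdio>
#include <vector>

struct Tensor { int payload; };

struct Node {
  std::vector<int> inputs;  // indices into the subgraph's tensor array; -1 marks an optional tensor
};

// Guarded lookup: an unvalidated version would compute &tensors[-1] for an
// "optional" input, addressing memory just before the array (the limited
// out-of-bounds gadget the advisory describes). Here any index outside
// [0, tensors.size()) is rejected instead.
const Tensor* GetTensorChecked(const std::vector<Tensor>& tensors,
                               const Node& node, int i) {
  const int idx = node.inputs[i];
  if (idx < 0 || idx >= static_cast<int>(tensors.size())) return nullptr;
  return &tensors[idx];
}

int main() {
  std::vector<Tensor> tensors = {{10}, {20}, {30}};
  Node node{{1, -1}};  // second input flagged as optional via -1
  const Tensor* ok = GetTensorChecked(tensors, node, 0);
  const Tensor* bad = GetTensorChecked(tensors, node, 1);
  std::printf("index 1 -> %d, index -1 -> %s\n", ok->payload,
              bad ? "valid" : "rejected");
  return 0;
}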
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::ComparisonPrepareCommon
tflite::ops::builtin::comparisons::ComparisonPrepareCommon( TfLiteContext * context , TfLiteNode * node , bool is_string_allowed)
['context', 'node', 'is_string_allowed']
TfLiteStatus ComparisonPrepareCommon(TfLiteContext* context, TfLiteNode* node, bool is_string_allowed) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 2); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); // Don't support string. if (!is_string_allowed) { TF_LITE_ENSURE(context, input1->type != kTfLiteString); } // Currently only support tensors have the same type. TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type); output->type = kTfLiteBool; bool requires_broadcast = !HaveSameShapes(input1, input2); TfLiteIntArray* output_size = nullptr; if (requires_broadcast) { TF_LITE_ENSURE_OK(context, CalculateShapeForBroadcast( context, input1, input2, &output_size)); } else { output_size = TfLiteIntArrayCopy(input1->dims); } return context->ResizeTensor(context, output, output_size); }
181
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
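The record above classifies the flaw as CWE-125 (out-of-bounds read). As a generic illustration of that weakness class, independent of the TensorFlow Lite code itself, here is a short C++ example in which an index coming from untrusted input is bounds-checked before the read; dropping the check is exactly what turns the lookup into an out-of-bounds read.

// Toy illustration of CWE-125: an attacker-controlled index must be
// validated before indexing a fixed-size buffer.
#include <cstdio>

int ReadElement(const int* buf, int len, int index) {
  if (index < 0 || index >= len) {
    std::fprintf(stderr, "rejected out-of-bounds index %d\n", index);
    return -1;  // sentinel for the sketch; real code would report an error
  }
  return buf[index];
}

int main() {
  int data[4] = {10, 20, 30, 40};
  std::printf("%d\n", ReadElement(data, 4, 2));   // in bounds
  std::printf("%d\n", ReadElement(data, 4, -1));  // would read before data without the check
  return 0;
}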
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::EqualEval
tflite::ops::builtin::comparisons::EqualEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
251
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
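This record maps the same CVE to CWE-787 (out-of-bounds write). For completeness, a small generic C++ sketch of that weakness class: the unchecked variant of the helper below would corrupt adjacent memory, while the bounds check simply refuses the write.

// Toy illustration of CWE-787: a write through an unvalidated index would
// corrupt whatever sits next to the buffer; the check prevents it.
#include <cstdio>

bool WriteElement(int* buf, int len, int index, int value) {
  if (index < 0 || index >= len) return false;  // refuse out-of-bounds writes
  buf[index] = value;
  return true;
}

int main() {
  int data[4] = {0, 0, 0, 0};
  std::printf("index 3: %s\n", WriteElement(data, 4, 3, 7) ? "written" : "rejected");
  std::printf("index 4: %s\n", WriteElement(data, 4, 4, 7) ? "written" : "rejected");
  return 0;
}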
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::EqualEval
tflite::ops::builtin::comparisons::EqualEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus EqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::EqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::EqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
251
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
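The commit message above describes inserting `nullptr` checks after every `GetInput`/`GetOutput` call. The sketch below shows roughly that shape for a comparison-style Prepare function, using TensorFlow Lite's existing `TF_LITE_ENSURE*` macros; the tensor-index constants follow the usual per-kernel convention, and this is not the literal diff from the referenced commit. Later TensorFlow Lite versions also expose `GetInputSafe`/`GetOutputSafe` helpers that fold this check into the lookup itself.

// Rough shape of the hardening described in the commit message: every
// tensor obtained from GetInput/GetOutput is checked against nullptr
// before it is dereferenced, so a malformed model fails gracefully.
#include "tensorflow/lite/c/common.h"
#include "tensorflow/lite/kernels/kernel_util.h"

namespace {
constexpr int kInputTensor1 = 0;
constexpr int kInputTensor2 = 1;
constexpr int kOutputTensor = 0;
}  // namespace

TfLiteStatus PrepareWithNullChecks(TfLiteContext* context, TfLiteNode* node) {
  TF_LITE_ENSURE_EQ(context, tflite::NumInputs(node), 2);
  TF_LITE_ENSURE_EQ(context, tflite::NumOutputs(node), 1);

  const TfLiteTensor* input1 = tflite::GetInput(context, node, kInputTensor1);
  TF_LITE_ENSURE(context, input1 != nullptr);  // bail out instead of dereferencing
  const TfLiteTensor* input2 = tflite::GetInput(context, node, kInputTensor2);
  TF_LITE_ENSURE(context, input2 != nullptr);
  TfLiteTensor* output = tflite::GetOutput(context, node, kOutputTensor);
  TF_LITE_ENSURE(context, output != nullptr);

  TF_LITE_ENSURE_TYPES_EQ(context, input1->type, input2->type);
  output->type = kTfLiteBool;
  return kTfLiteOk;
}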
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::GreaterEqualEval
tflite::ops::builtin::comparisons::GreaterEqualEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
208
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
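The advisory text above suggests, as a stop-gap, verifying at model-load time that only operators which genuinely accept optional inputs use the `-1` index. The following is a simplified C++ sketch of that idea; the Subgraph/Operator structs and the `AllowsOptionalInputs` allow-list are illustrative stand-ins, not TensorFlow Lite's actual schema or `Verifier` API, and as the advisory notes this approach is error-prone compared to upgrading.

// Illustrative load-time validation in the spirit of the suggested
// workaround: every tensor index an operator references must be in range,
// and -1 is tolerated only for opcodes on an explicit allow-list.
#include <cstdio>
#include <vector>

struct Operator {
  int opcode;
  std::vector<int> inputs;   // tensor indices; -1 may mean "optional input"
  std::vector<int> outputs;  // tensor indices; -1 is never acceptable here
};

struct Subgraph {
  int num_tensors;
  std::vector<Operator> operators;
};

// Hypothetical allow-list of opcodes that really take optional inputs.
bool AllowsOptionalInputs(int opcode) { return opcode == 42; }

bool VerifySubgraph(const Subgraph& sg) {
  for (const Operator& op : sg.operators) {
    for (int idx : op.inputs) {
      if (idx == -1 && AllowsOptionalInputs(op.opcode)) continue;
      if (idx < 0 || idx >= sg.num_tensors) return false;
    }
    for (int idx : op.outputs) {
      if (idx < 0 || idx >= sg.num_tensors) return false;
    }
  }
  return true;
}

int main() {
  // Opcode 7 is not on the allow-list, so its -1 input gets the model rejected.
  Subgraph sg{3, {{7, {0, -1}, {2}}}};
  std::printf("model %s\n", VerifySubgraph(sg) ? "accepted" : "rejected");
  return 0;
}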
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::GreaterEqualEval
tflite::ops::builtin::comparisons::GreaterEqualEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus GreaterEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
208
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::GreaterEval
tflite::ops::builtin::comparisons::GreaterEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
208
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::GreaterEval
tflite::ops::builtin::comparisons::GreaterEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus GreaterEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::GreaterFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::GreaterFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
208
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::LessEqualEval
tflite::ops::builtin::comparisons::LessEqualEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
208
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::LessEqualEval
tflite::ops::builtin::comparisons::LessEqualEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus LessEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessEqualFn>( input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
208
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::LessEval
tflite::ops::builtin::comparisons::LessEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
208
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::LessEval
tflite::ops::builtin::comparisons::LessEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus LessEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteFloat32: Comparison<float, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::LessFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::LessFn>(input1, input2, output, requires_broadcast); break; default: context->ReportError(context, "Does not support type %d, requires float|int|uint8", input1->type); return kTfLiteError; } return kTfLiteOk; }
208
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::NotEqualEval
tflite::ops::builtin::comparisons::NotEqualEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
251
True
1
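The advisory text reproduced in these records suggests, as a temporary workaround, a custom verifier that only lets operators which genuinely accept optional tensors use the special `-1` index. The snippet below is a self-contained illustration of that validation rule; the function name and its parameters are hypothetical and are not TensorFlow Lite API.

```cpp
#include <cstdio>
#include <vector>

// Hypothetical check in the spirit of the advisory's suggested workaround:
// the special -1 ("optional tensor") index is accepted only when the operator
// is known to take optional inputs, and every other index must fall inside the
// subgraph's tensor array.
bool TensorIndicesAreValid(const std::vector<int>& indices, int num_tensors,
                           bool op_accepts_optional_inputs) {
  for (int index : indices) {
    if (index == -1) {
      if (!op_accepts_optional_inputs) return false;  // -1 not allowed here
      continue;                                       // legitimately optional
    }
    if (index < 0 || index >= num_tensors) return false;  // out of bounds
  }
  return true;
}

int main() {
  // A comparison operator takes no optional tensors, so -1 must be rejected.
  std::printf("%d\n", TensorIndicesAreValid({0, -1}, 4, false));  // prints 0
  // The same indices are acceptable for an operator with optional inputs.
  std::printf("%d\n", TensorIndicesAreValid({0, -1}, 4, true));   // prints 1
  return 0;
}
```

As the advisory notes, maintaining such an allow-list per operator is error-prone, which is why upgrading to the patched releases is the recommended fix.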
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::comparisons::NotEqualEval
tflite::ops::builtin::comparisons::NotEqualEval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus NotEqualEval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input1 = GetInput(context, node, kInputTensor1); const TfLiteTensor* input2 = GetInput(context, node, kInputTensor2); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); bool requires_broadcast = !HaveSameShapes(input1, input2); switch (input1->type) { case kTfLiteBool: Comparison<bool, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteFloat32: Comparison<float, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt32: Comparison<int32_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteInt64: Comparison<int64_t, reference_ops::NotEqualFn>(input1, input2, output, requires_broadcast); break; case kTfLiteUInt8: ComparisonQuantized<uint8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteInt8: ComparisonQuantized<int8_t, reference_ops::NotEqualFn>( input1, input2, output, requires_broadcast); break; case kTfLiteString: ComparisonString(reference_ops::StringRefNotEqualFn, input1, input2, output, requires_broadcast); break; default: context->ReportError( context, "Does not support type %d, requires bool|float|int|uint8|string", input1->type); return kTfLiteError; } return kTfLiteOk; }
251
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::concatenation::Eval
tflite::ops::builtin::concatenation::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data); int axis = params->axis; TfLiteTensor* output = GetOutput(context, node, 0); if (axis < 0) axis += output->dims->size; // TODO(ahentz): Creating 'all_inputs' below is not very efficient. We should // allocate and populate these during Prepare(). // TODO(ycling): Activation function parameter is ignored. For now we dont have // a model with a Concatenation with fused activation function. #define TF_LITE_CONCATENATION(scalar) \ { \ VectorOfTensors<scalar> all_inputs(*context, *node->inputs); \ tflite::ConcatenationParams op_params; \ op_params.axis = axis; \ op_params.inputs_count = node->inputs->size; \ if (kernel_type == kReference) { \ reference_ops::Concatenation(op_params, all_inputs.shapes(), \ all_inputs.data(), GetTensorShape(output), \ GetTensorData<scalar>(output)); \ } else { \ optimized_ops::Concatenation(op_params, all_inputs.shapes(), \ all_inputs.data(), GetTensorShape(output), \ GetTensorData<scalar>(output)); \ } \ } #define TF_LITE_CONCATENATION_QUANTIZED() \ { \ VectorOfQuantizedTensors all_inputs(*context, *node->inputs); \ tflite::ConcatenationParams op_params; \ op_params.axis = axis; \ op_params.input_zeropoint = all_inputs.zero_point(); \ op_params.input_scale = all_inputs.scale(); \ op_params.inputs_count = node->inputs->size; \ op_params.output_zeropoint = output->params.zero_point; \ op_params.output_scale = output->params.scale; \ if (kernel_type == kReference) { \ reference_ops::ConcatenationWithScaling( \ op_params, all_inputs.shapes(), all_inputs.data(), \ GetTensorShape(output), GetTensorData<uint8>(output)); \ } else { \ optimized_ops::ConcatenationWithScaling( \ op_params, all_inputs.shapes(), all_inputs.data(), \ GetTensorShape(output), GetTensorData<uint8>(output)); \ } \ } switch (output->type) { // Already know in/outtypes are same. case kTfLiteFloat32: TF_LITE_CONCATENATION(float); break; case kTfLiteInt32: TF_LITE_CONCATENATION(int32); break; case kTfLiteUInt8: TF_LITE_CONCATENATION_QUANTIZED(); break; case kTfLiteInt8: TF_LITE_CONCATENATION(int8_t); break; case kTfLiteInt64: TF_LITE_CONCATENATION(int64_t); break; case kTfLiteInt16: TF_LITE_CONCATENATION(int16_t); break; default: context->ReportError(context, "Type '%s' is not supported currently.", TfLiteTypeGetName(output->type)); return kTfLiteError; } #undef TF_LITE_CONCATENATION_QUANTIZED #undef TF_LITE_CONCATENATION return kTfLiteOk; }
152
True
1
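For context on why the `-1` index is dangerous in kernels like the concatenation `Eval` above: tensor accessors resolve an operator's tensor indices through the subgraph's tensor array (the double indexing the advisory describes), so a `-1` lands one element before the start of that array. The snippet below uses mock types to make the pattern concrete; it is an illustration only, not the real `GetInput`/`GetOutput` implementation.

```cpp
#include <cstdio>
#include <vector>

// Mock stand-ins for the context/node structures, used only to illustrate the
// double-indexing pattern described in the advisory text above.
struct MockTensor { int id; };
struct MockContext { std::vector<MockTensor> tensors; };
struct MockNode { std::vector<int> inputs; };

// Unchecked lookup in the style the advisory describes: an index of -1 stored
// in the model would address one element before tensors.data() (out of bounds).
MockTensor* GetInputUnchecked(MockContext* ctx, const MockNode* node, int i) {
  return &ctx->tensors[node->inputs[i]];  // undefined behaviour when the index is -1
}

// Checked variant mirroring the patched behaviour: surface nullptr so the
// kernel can bail out instead of touching out-of-bounds memory.
MockTensor* GetInputChecked(MockContext* ctx, const MockNode* node, int i) {
  const int index = node->inputs[i];
  if (index < 0 || index >= static_cast<int>(ctx->tensors.size())) return nullptr;
  return &ctx->tensors[index];
}

int main() {
  MockContext ctx{{{0}, {1}, {2}}};
  MockNode node{{1, -1}};  // the second input uses the special -1 index
  std::printf("%d\n", GetInputChecked(&ctx, &node, 1) == nullptr);  // prints 1
  return 0;
}
```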
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
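As a rough illustration of the workaround mentioned at the end of the advisory text above, the allow-list idea can be sketched in a few lines of C++. Everything here (the OperatorRecord struct, the allow-list set, the function name) is a hypothetical stand-in rather than the real flatbuffer schema or the TensorFlow Lite Verifier API; it only shows the shape of the check.

#include <set>
#include <vector>

// Hypothetical stand-in for one operator entry in a flatbuffer model:
// tensor indices are plain ints, with -1 marking an optional/absent tensor.
struct OperatorRecord {
  int builtin_code;
  std::vector<int> inputs;
  std::vector<int> outputs;
};

// Reject any model in which an operator that is not on the allow-list uses
// the -1 sentinel, or in which any output tensor index is -1.
bool VerifyOptionalTensorUsage(const std::vector<OperatorRecord>& ops,
                               const std::set<int>& ops_allowing_optional) {
  for (const OperatorRecord& op : ops) {
    const bool may_use_optional =
        ops_allowing_optional.count(op.builtin_code) != 0;
    for (int idx : op.inputs) {
      if (idx == -1 && !may_use_optional) return false;
    }
    for (int idx : op.outputs) {
      if (idx == -1) return false;
    }
  }
  return true;
}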
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::concatenation::Eval
tflite::ops::builtin::concatenation::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data); int axis = params->axis; TfLiteTensor* output = GetOutput(context, node, 0); if (axis < 0) axis += output->dims->size; // TODO(ahentz): Creating 'all_inputs' below is not very efficient. We should // allocate and populate these during Prepare(). // TODO(ycling): Activation function parameter is ignored. For now we dont have // a model with a Concatenation with fused activation function. #define TF_LITE_CONCATENATION(scalar) \ { \ VectorOfTensors<scalar> all_inputs(*context, *node->inputs); \ tflite::ConcatenationParams op_params; \ op_params.axis = axis; \ op_params.inputs_count = node->inputs->size; \ if (kernel_type == kReference) { \ reference_ops::Concatenation(op_params, all_inputs.shapes(), \ all_inputs.data(), GetTensorShape(output), \ GetTensorData<scalar>(output)); \ } else { \ optimized_ops::Concatenation(op_params, all_inputs.shapes(), \ all_inputs.data(), GetTensorShape(output), \ GetTensorData<scalar>(output)); \ } \ } #define TF_LITE_CONCATENATION_QUANTIZED() \ { \ VectorOfQuantizedTensors all_inputs(*context, *node->inputs); \ tflite::ConcatenationParams op_params; \ op_params.axis = axis; \ op_params.input_zeropoint = all_inputs.zero_point(); \ op_params.input_scale = all_inputs.scale(); \ op_params.inputs_count = node->inputs->size; \ op_params.output_zeropoint = output->params.zero_point; \ op_params.output_scale = output->params.scale; \ if (kernel_type == kReference) { \ reference_ops::ConcatenationWithScaling( \ op_params, all_inputs.shapes(), all_inputs.data(), \ GetTensorShape(output), GetTensorData<uint8>(output)); \ } else { \ optimized_ops::ConcatenationWithScaling( \ op_params, all_inputs.shapes(), all_inputs.data(), \ GetTensorShape(output), GetTensorData<uint8>(output)); \ } \ } switch (output->type) { // Already know in/outtypes are same. case kTfLiteFloat32: TF_LITE_CONCATENATION(float); break; case kTfLiteInt32: TF_LITE_CONCATENATION(int32); break; case kTfLiteUInt8: TF_LITE_CONCATENATION_QUANTIZED(); break; case kTfLiteInt8: TF_LITE_CONCATENATION(int8_t); break; case kTfLiteInt64: TF_LITE_CONCATENATION(int64_t); break; case kTfLiteInt16: TF_LITE_CONCATENATION(int16_t); break; default: context->ReportError(context, "Type '%s' is not supported currently.", TfLiteTypeGetName(output->type)); return kTfLiteError; } #undef TF_LITE_CONCATENATION_QUANTIZED #undef TF_LITE_CONCATENATION return kTfLiteOk; }
152
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
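A minimal, generic C++ illustration of the pattern this CWE entry describes (deliberately unrelated to any specific TensorFlow kernel): an unvalidated index, such as the -1 sentinel discussed in this record, would read before the start of the buffer unless it is checked first.

#include <cstdio>

int main() {
  int buffer[4] = {10, 20, 30, 40};
  int index = -1;  // e.g. a sentinel index taken from untrusted input
  // Accessing buffer[index] with index == -1 would read memory before the
  // array (undefined behavior and a potential information leak), so the
  // index has to be validated before the read.
  if (index < 0 || index >= 4) {
    std::printf("rejected out-of-range index %d\n", index);
    return 1;
  }
  std::printf("buffer[%d] = %d\n", index, buffer[index]);
  return 0;
}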
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::concatenation::Prepare
tflite::ops::builtin::concatenation::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data); int axis = params->axis; int num_inputs = node->inputs->size; // The number of dimensions of the input tensors must match, and all // dimensions except 'axis' must be equal. const TfLiteTensor* t0 = GetInput(context, node, 0); TfLiteType input_type = t0->type; if (axis < 0) axis += t0->dims->size; TF_LITE_ENSURE(context, axis >= 0); TF_LITE_ENSURE(context, axis < t0->dims->size); // TODO(ahentz): These are limitations of our implementation that could be // removed with a bit of effort. TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); TF_LITE_ENSURE(context, input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 || input_type == kTfLiteInt8 || input_type == kTfLiteInt16 || input_type == kTfLiteInt32 || input_type == kTfLiteInt64); // Output dimensions will match input dimensions, except 'axis', which // will be the sum of inputs int sum_axis = t0->dims->data[axis]; for (int i = 1; i < num_inputs; ++i) { const TfLiteTensor* t = GetInput(context, node, i); TF_LITE_ENSURE_EQ(context, t->dims->size, t0->dims->size); TF_LITE_ENSURE_EQ(context, t->type, input_type); for (int d = 0; d < t0->dims->size; ++d) { if (d == axis) { sum_axis += t->dims->data[axis]; } else { TF_LITE_ENSURE_EQ(context, t->dims->data[d], t0->dims->data[d]); } } } TfLiteIntArray* output_size = TfLiteIntArrayCreate(t0->dims->size); for (int d = 0; d < t0->dims->size; ++d) { output_size->data[d] = (d == axis) ? sum_axis : t0->dims->data[d]; } TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_type); if (input_type == kTfLiteInt8) { // Make sure there is no re-scaling needed for Int8 quantized kernel. This // is a restriction we introduced to Int8 kernels. VectorOfTensors<int8_t> all_inputs(*context, *node->inputs); for (int i = 0; i < node->inputs->size; ++i) { const TfLiteTensor* t = GetInput(context, node, i); TF_LITE_ENSURE_EQ(context, t->params.scale, output->params.scale); TF_LITE_ENSURE_EQ(context, t->params.zero_point, output->params.zero_point); } } return context->ResizeTensor(context, output, output_size); }
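To make the shape arithmetic in the Prepare function above easier to follow, here is a small stand-alone C++ sketch (a hypothetical helper, not TensorFlow Lite API): the output keeps every dimension of the first input except the concatenation axis, which becomes the sum of all inputs' sizes along that axis.

#include <cstdio>
#include <vector>

// Hypothetical helper mirroring the loop in Prepare: inputs are assumed to
// already agree on every dimension except `axis`.
std::vector<int> ConcatOutputShape(const std::vector<std::vector<int>>& shapes,
                                   int axis) {
  std::vector<int> out = shapes[0];
  for (size_t i = 1; i < shapes.size(); ++i) {
    out[axis] += shapes[i][axis];
  }
  return out;
}

int main() {
  // Concatenating shapes [2, 3] and [2, 5] along axis 1 yields [2, 8].
  const std::vector<std::vector<int>> shapes = {{2, 3}, {2, 5}};
  const std::vector<int> out = ConcatOutputShape(shapes, 1);
  std::printf("[%d, %d]\n", out[0], out[1]);
  return 0;
}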
459
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
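For symmetry with the out-of-bounds read example earlier in this file, here is a minimal generic C++ illustration of the write variant described above; again this is not TensorFlow code, just the bare pattern and its guard.

#include <cstdio>

int main() {
  int counts[4] = {0, 0, 0, 0};
  int index = 7;  // attacker-influenced index larger than the buffer
  // Writing counts[index] without a check would corrupt adjacent memory
  // (an out-of-bounds write); validating the index first turns the bug
  // into a handled error.
  if (index < 0 || index >= 4) {
    std::fprintf(stderr, "rejected out-of-range index %d\n", index);
    return 1;
  }
  counts[index] += 1;
  std::printf("counts[%d] = %d\n", index, counts[index]);
  return 0;
}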
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::concatenation::Prepare
tflite::ops::builtin::concatenation::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteConcatenationParams*>(node->builtin_data); int axis = params->axis; int num_inputs = node->inputs->size; // The number of dimensions of the input tensors must match, and all // dimensions except 'axis' must be equal. const TfLiteTensor* t0 = GetInput(context, node, 0); TfLiteType input_type = t0->type; if (axis < 0) axis += t0->dims->size; TF_LITE_ENSURE(context, axis >= 0); TF_LITE_ENSURE(context, axis < t0->dims->size); // TODO(ahentz): These are limitations of our implementation that could be // removed with a bit of effort. TF_LITE_ENSURE_EQ(context, params->activation, kTfLiteActNone); TF_LITE_ENSURE(context, input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 || input_type == kTfLiteInt8 || input_type == kTfLiteInt16 || input_type == kTfLiteInt32 || input_type == kTfLiteInt64); // Output dimensions will match input dimensions, except 'axis', which // will be the sum of inputs int sum_axis = t0->dims->data[axis]; for (int i = 1; i < num_inputs; ++i) { const TfLiteTensor* t = GetInput(context, node, i); TF_LITE_ENSURE_EQ(context, t->dims->size, t0->dims->size); TF_LITE_ENSURE_EQ(context, t->type, input_type); for (int d = 0; d < t0->dims->size; ++d) { if (d == axis) { sum_axis += t->dims->data[axis]; } else { TF_LITE_ENSURE_EQ(context, t->dims->data[d], t0->dims->data[d]); } } } TfLiteIntArray* output_size = TfLiteIntArrayCreate(t0->dims->size); for (int d = 0; d < t0->dims->size; ++d) { output_size->data[d] = (d == axis) ? sum_axis : t0->dims->data[d]; } TfLiteTensor* output = GetOutput(context, node, 0); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_type); if (input_type == kTfLiteInt8) { // Make sure there is no re-scaling needed for Int8 quantized kernel. This // is a restriction we introduced to Int8 kernels. VectorOfTensors<int8_t> all_inputs(*context, *node->inputs); for (int i = 0; i < node->inputs->size; ++i) { const TfLiteTensor* t = GetInput(context, node, i); TF_LITE_ENSURE_EQ(context, t->params.scale, output->params.scale); TF_LITE_ENSURE_EQ(context, t->params.zero_point, output->params.zero_point); } } return context->ResizeTensor(context, output, output_size); }
459
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::AllocateTemporaryTensorsIfRequired
tflite::ops::builtin::conv::AllocateTemporaryTensorsIfRequired( TfLiteContext * context , TfLiteNode * node , bool is_hybrid , bool is_per_channel , KernelType kernel_type)
['context', 'node', 'is_hybrid', 'is_per_channel', 'kernel_type']
static TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context, TfLiteNode* node, bool is_hybrid, bool is_per_channel, KernelType kernel_type) { auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE(context, node->inputs->size >= 2); const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* filter = GetInput(context, node, 1); // If we're using the optimized multithreaded EigenTensor implementation of // convolution, it expects the filter weights to be transposed compared to // the normal TF Lite buffer format. Typical TF Lite weights are // [filter_count, filter_height, filter_width, input_depth], but for the float // implementation we need them as [filter_height, filter_width, input_depth, // filter_count]. We get to that format by transposing, and create a temporary // buffer to store the results. // This path is only used for float processing, so only create the buffer if // we're running with that data type. data->need_hwcn_weights = input->type == kTfLiteFloat32 && data->supports_multithreaded_kernel; // We don't always need to allocate im2col. It is only used in some versions // of the optimized Conv. This test just mimics something that happens inside // optimized_ops.h, in order to avoid a DCHECK(!im2col_data). data->need_im2col = IsIm2ColRequired(input, params, filter, data, is_hybrid, kernel_type); int temporaries_count = 0; if (data->need_im2col) { data->im2col_index = temporaries_count; if (data->im2col_id == kTensorNotAllocated) { context->AddTensors(context, 1, &data->im2col_id); } ++temporaries_count; } if (data->need_hwcn_weights) { data->hwcn_weights_index = temporaries_count; if (data->hwcn_weights_id == kTensorNotAllocated) { context->AddTensors(context, 1, &data->hwcn_weights_id); } ++temporaries_count; } if (is_hybrid) { // Allocate tensor to store the on-the-fly quantized inputs. data->input_quantized_index = temporaries_count; if (data->input_quantized_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->input_quantized_id)); } ++temporaries_count; // Allocate tensor to store the quantization params computed during // on-the-fly input quantization. data->scaling_factors_index = temporaries_count; if (data->scaling_factors_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->scaling_factors_id)); } ++temporaries_count; // Allocate tensor to store the accumulators for the matrix multiply. data->accum_scratch_index = temporaries_count; if (data->accum_scratch_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->accum_scratch_id)); } ++temporaries_count; if (is_per_channel) { data->input_offset_index = temporaries_count; if (data->input_offset_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->input_offset_id)); } ++temporaries_count; data->row_sums_index = temporaries_count; if (data->row_sums_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK(context, context->AddTensors(context, 1, &data->row_sums_id)); } ++temporaries_count; } } TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(temporaries_count); return kTfLiteOk; }
433
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::AllocateTemporaryTensorsIfRequired
tflite::ops::builtin::conv::AllocateTemporaryTensorsIfRequired( TfLiteContext * context , TfLiteNode * node , bool is_hybrid , bool is_per_channel , KernelType kernel_type)
['context', 'node', 'is_hybrid', 'is_per_channel', 'kernel_type']
static TfLiteStatus AllocateTemporaryTensorsIfRequired(TfLiteContext* context, TfLiteNode* node, bool is_hybrid, bool is_per_channel, KernelType kernel_type) { auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TF_LITE_ENSURE(context, node->inputs->size >= 2); const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* filter = GetInput(context, node, 1); // If we're using the optimized multithreaded EigenTensor implementation of // convolution, it expects the filter weights to be transposed compared to // the normal TF Lite buffer format. Typical TF Lite weights are // [filter_count, filter_height, filter_width, input_depth], but for the float // implementation we need them as [filter_height, filter_width, input_depth, // filter_count]. We get to that format by transposing, and create a temporary // buffer to store the results. // This path is only used for float processing, so only create the buffer if // we're running with that data type. data->need_hwcn_weights = input->type == kTfLiteFloat32 && data->supports_multithreaded_kernel; // We don't always need to allocate im2col. It is only used in some versions // of the optimized Conv. This test just mimics something that happens inside // optimized_ops.h, in order to avoid a DCHECK(!im2col_data). data->need_im2col = IsIm2ColRequired(input, params, filter, data, is_hybrid, kernel_type); int temporaries_count = 0; if (data->need_im2col) { data->im2col_index = temporaries_count; if (data->im2col_id == kTensorNotAllocated) { context->AddTensors(context, 1, &data->im2col_id); } ++temporaries_count; } if (data->need_hwcn_weights) { data->hwcn_weights_index = temporaries_count; if (data->hwcn_weights_id == kTensorNotAllocated) { context->AddTensors(context, 1, &data->hwcn_weights_id); } ++temporaries_count; } if (is_hybrid) { // Allocate tensor to store the on-the-fly quantized inputs. data->input_quantized_index = temporaries_count; if (data->input_quantized_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->input_quantized_id)); } ++temporaries_count; // Allocate tensor to store the quantization params computed during // on-the-fly input quantization. data->scaling_factors_index = temporaries_count; if (data->scaling_factors_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->scaling_factors_id)); } ++temporaries_count; // Allocate tensor to store the accumulators for the matrix multiply. data->accum_scratch_index = temporaries_count; if (data->accum_scratch_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->accum_scratch_id)); } ++temporaries_count; if (is_per_channel) { data->input_offset_index = temporaries_count; if (data->input_offset_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK( context, context->AddTensors(context, 1, &data->input_offset_id)); } ++temporaries_count; data->row_sums_index = temporaries_count; if (data->row_sums_id == kTensorNotAllocated) { TF_LITE_ENSURE_OK(context, context->AddTensors(context, 1, &data->row_sums_id)); } ++temporaries_count; } } TfLiteIntArrayFree(node->temporaries); node->temporaries = TfLiteIntArrayCreate(temporaries_count); return kTfLiteOk; }
433
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
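The commit message above describes inserting nullptr checks after each tensor-fetching call. As a hedged sketch (not a verbatim copy of the patch), the conv::Eval dispatcher recorded a few lines below could gain a guard of roughly this shape, reusing the GetInput, TF_LITE_ENSURE and TF_LITE_KERNEL_LOG helpers that already appear in these kernels:

TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  // New guard: GetInput may return nullptr (for example when the model used
  // the -1 sentinel as a tensor index), so bail out with an error instead of
  // dereferencing a null pointer in the switch below.
  TF_LITE_ENSURE(context, input != nullptr);
  switch (input->type) {
    case kTfLiteFloat32:
      return EvalImpl<kernel_type, kTfLiteFloat32>(context, node);
    case kTfLiteUInt8:
      return EvalImpl<kernel_type, kTfLiteUInt8>(context, node);
    case kTfLiteInt8:
      return EvalImpl<kernel_type, kTfLiteInt8>(context, node);
    case kTfLiteInt16:
      return EvalImpl<kernel_type, kTfLiteInt16>(context, node);
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}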
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::Eval
tflite::ops::builtin::conv::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { case kTfLiteFloat32: return EvalImpl<kernel_type, kTfLiteFloat32>(context, node); case kTfLiteUInt8: return EvalImpl<kernel_type, kTfLiteUInt8>(context, node); case kTfLiteInt8: return EvalImpl<kernel_type, kTfLiteInt8>(context, node); case kTfLiteInt16: return EvalImpl<kernel_type, kTfLiteInt16>(context, node); default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
117
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::Eval
tflite::ops::builtin::conv::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, 0); switch (input->type) { case kTfLiteFloat32: return EvalImpl<kernel_type, kTfLiteFloat32>(context, node); case kTfLiteUInt8: return EvalImpl<kernel_type, kTfLiteUInt8>(context, node); case kTfLiteInt8: return EvalImpl<kernel_type, kTfLiteInt8>(context, node); case kTfLiteInt16: return EvalImpl<kernel_type, kTfLiteInt16>(context, node); default: TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } }
117
True
1
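In the vulnerable conv::Eval recorded above, the result of GetInput(context, node, 0) is dereferenced (input->type) with no nullptr check, which is exactly the pattern the commit message in this record targets. The following is a minimal, hedged sketch of that style of guard using the TF_LITE_ENSURE macro; the exact wording and placement in the upstream patch may differ.

// Sketch only: guard the tensor lookup before dereferencing it, in the spirit
// of the nullptr checks described in the commit message above.
template <KernelType kernel_type>
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  // Abort with kTfLiteError instead of dereferencing a null tensor pointer.
  TF_LITE_ENSURE(context, input != nullptr);
  switch (input->type) {
    case kTfLiteFloat32:
      return EvalImpl<kernel_type, kTfLiteFloat32>(context, node);
    // ... remaining type cases are unchanged from the recorded function ...
    default:
      TF_LITE_KERNEL_LOG(context, "Type %s not currently supported.",
                         TfLiteTypeGetName(input->type));
      return kTfLiteError;
  }
}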
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::EvalHybrid
tflite::ops::builtin::conv::EvalHybrid( TfLiteContext * context , TfLiteNode * node , TfLiteConvParams * params , OpData * data , const TfLiteTensor * input , const TfLiteTensor * filter , const TfLiteTensor * bias , TfLiteTensor * im2col , TfLiteTensor * accum_scratch , TfLiteTensor * output)
['context', 'node', 'params', 'data', 'input', 'filter', 'bias', 'im2col', 'accum_scratch', 'output']
void EvalHybrid(TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* im2col, TfLiteTensor* accum_scratch, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); const float* input_ptr = GetTensorData<float>(input); int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>( GetTemporary(context, node, data->input_quantized_index)); float* scaling_factors_ptr = GetTensorData<float>( GetTemporary(context, node, data->scaling_factors_index)); // Per-batch input quantization for higher accuracy. { ruy::profiler::ScopeLabel label("ConvHybridQuantizeInputs"); for (int b = 0; b < batch_size; ++b) { float unused_min, unused_max; const int offset = b * input_size; tensor_utils::SymmetricQuantizeFloats( input_ptr + offset, input_size, quantized_input_ptr_batch + offset, &unused_min, &unused_max, &scaling_factors_ptr[b]); scaling_factors_ptr[b] *= filter->params.scale; } } switch (kernel_type) { case kReference: case kGenericOptimized: case kMultithreadOptimized: case kCblasOptimized: { // There is only one implementation for hybrid kernel. ConvParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.dilation_width_factor = 1; op_params.dilation_height_factor = 1; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; optimized_ops::HybridConv( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), GetTensorData<int8_t>(filter), GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(accum_scratch), GetTensorData<int32_t>(accum_scratch), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), GetTensorData<int8_t>(im2col), CpuBackendContext::GetFromContext(context)); break; } } }
412
True
1
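The EvalHybrid function recorded above feeds the results of GetTemporary(context, node, ...) straight into GetTensorData with no nullptr check. Below is a hedged sketch of a guarded prologue; it assumes the helper's return type is widened to TfLiteStatus so that TF_LITE_ENSURE can bail out early, which may differ from the exact upstream change.

// Sketch only: check every scratch tensor before taking its data pointer.
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node,
                        TfLiteConvParams* params, OpData* data,
                        const TfLiteTensor* input, const TfLiteTensor* filter,
                        const TfLiteTensor* bias, TfLiteTensor* im2col,
                        TfLiteTensor* accum_scratch, TfLiteTensor* output) {
  TfLiteTensor* input_quantized =
      GetTemporary(context, node, data->input_quantized_index);
  TF_LITE_ENSURE(context, input_quantized != nullptr);
  TfLiteTensor* scaling_factors =
      GetTemporary(context, node, data->scaling_factors_index);
  TF_LITE_ENSURE(context, scaling_factors != nullptr);
  int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>(input_quantized);
  float* scaling_factors_ptr = GetTensorData<float>(scaling_factors);
  // ... per-batch quantization loop and HybridConv dispatch proceed as in the
  // recorded function ...
  return kTfLiteOk;
}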
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::EvalHybrid
tflite::ops::builtin::conv::EvalHybrid( TfLiteContext * context , TfLiteNode * node , TfLiteConvParams * params , OpData * data , const TfLiteTensor * input , const TfLiteTensor * filter , const TfLiteTensor * bias , TfLiteTensor * im2col , TfLiteTensor * accum_scratch , TfLiteTensor * output)
['context', 'node', 'params', 'data', 'input', 'filter', 'bias', 'im2col', 'accum_scratch', 'output']
void EvalHybrid(TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* im2col, TfLiteTensor* accum_scratch, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); const float* input_ptr = GetTensorData<float>(input); int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>( GetTemporary(context, node, data->input_quantized_index)); float* scaling_factors_ptr = GetTensorData<float>( GetTemporary(context, node, data->scaling_factors_index)); // Per-batch input quantization for higher accuracy. { ruy::profiler::ScopeLabel label("ConvHybridQuantizeInputs"); for (int b = 0; b < batch_size; ++b) { float unused_min, unused_max; const int offset = b * input_size; tensor_utils::SymmetricQuantizeFloats( input_ptr + offset, input_size, quantized_input_ptr_batch + offset, &unused_min, &unused_max, &scaling_factors_ptr[b]); scaling_factors_ptr[b] *= filter->params.scale; } } switch (kernel_type) { case kReference: case kGenericOptimized: case kMultithreadOptimized: case kCblasOptimized: { // There is only one implementation for hybrid kernel. ConvParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.dilation_width_factor = 1; op_params.dilation_height_factor = 1; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; optimized_ops::HybridConv( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), GetTensorData<int8_t>(filter), GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(accum_scratch), GetTensorData<int32_t>(accum_scratch), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), GetTensorData<int8_t>(im2col), CpuBackendContext::GetFromContext(context)); break; } } }
412
True
1
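The advisory text in the record above describes the root cause: each operator stores indices into the subgraph's tensor array, and the -1 sentinel reserved for optional tensors is accepted anywhere, so a crafted model can address memory just before that array. The snippet below is a hypothetical, self-contained illustration of that double indexing and of a defensive lookup; it is not the actual TensorFlow Lite resolution code, and every name in it is invented for the example.

// Hypothetical illustration of the indexing scheme described in the CVE text.
// `tensors` stands in for the subgraph's tensor array and `op_inputs` for an
// operator's list of tensor indices; -1 marks an optional tensor.
#include <cstdio>
#include <vector>

struct Tensor {
  std::vector<float> data;
};

// Vulnerable pattern: a -1 index silently addresses memory before tensors[0].
const Tensor* LookupTensorUnsafe(const std::vector<Tensor>& tensors,
                                 const std::vector<int>& op_inputs, int i) {
  return &tensors[op_inputs[i]];
}

// Defensive pattern: the sentinel is only honoured for inputs declared
// optional, and every other index is bounds-checked before use.
const Tensor* LookupTensorSafe(const std::vector<Tensor>& tensors,
                               const std::vector<int>& op_inputs, int i,
                               bool input_is_optional) {
  const int idx = op_inputs[i];
  if (idx == -1) {
    if (!input_is_optional) {
      std::fprintf(stderr, "operand %d may not be optional\n", i);
    }
    return nullptr;  // caller must handle the missing tensor
  }
  if (idx < 0 || static_cast<size_t>(idx) >= tensors.size()) {
    std::fprintf(stderr, "tensor index %d out of range\n", idx);
    return nullptr;
  }
  return &tensors[idx];
}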
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::EvalHybridPerChannel
tflite::ops::builtin::conv::EvalHybridPerChannel( TfLiteContext * context , TfLiteNode * node , TfLiteConvParams * params , OpData * data , const TfLiteTensor * input , const TfLiteTensor * filter , const TfLiteTensor * bias , TfLiteTensor * im2col , TfLiteTensor * output)
['context', 'node', 'params', 'data', 'input', 'filter', 'bias', 'im2col', 'output']
void EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* im2col, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>( GetTemporary(context, node, data->input_quantized_index)); float* scaling_factors_ptr = GetTensorData<float>( GetTemporary(context, node, data->scaling_factors_index)); int32_t* input_offset_ptr = GetTensorData<int32_t>( GetTemporary(context, node, data->input_offset_index)); for (int b = 0; b < batch_size; ++b) { const int offset = b * input_size; tensor_utils::AsymmetricQuantizeFloats( GetTensorData<float>(input) + offset, input_size, quantized_input_ptr_batch + offset, &scaling_factors_ptr[b], &input_offset_ptr[b]); } int8_t* im2col_ptr = nullptr; int8_t* filter_ptr = nullptr; if (im2col != nullptr) { im2col_ptr = im2col->data.int8; } filter_ptr = filter->data.int8; const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); ConvParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.dilation_width_factor = 1; op_params.dilation_height_factor = 1; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; switch (kernel_type) { case kReference: reference_ops::HybridConvPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), filter_ptr, GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), im2col_ptr, affine_quantization->scale->data, input_offset_ptr); break; case kGenericOptimized: case kMultithreadOptimized: case kCblasOptimized: { TfLiteTensor* row_sums = GetTemporary(context, node, data->row_sums_index); TfLiteTensor* scratch = GetTemporary(context, node, data->accum_scratch_index); optimized_ops::HybridConvPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), filter_ptr, GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), im2col_ptr, affine_quantization->scale->data, input_offset_ptr, GetTensorShape(scratch), GetTensorData<int32>(scratch), GetTensorData<int32_t>(row_sums), &data->compute_hybrid_row_sums, CpuBackendContext::GetFromContext(context)); data->compute_hybrid_row_sums = false; break; } } }
558
True
1
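The record above repeats the advisory's suggested workaround: attach a custom verifier at model-loading time. A hedged sketch follows; it assumes the tflite::TfLiteVerifier / FlatBufferModel::VerifyAndBuildFromFile interface and the generated flatbuffer accessors, and it deliberately omits the per-operator allow-list of optional inputs, which is the part the advisory calls error-prone.

// Sketch only: an extra verifier that rejects models whose operators use an
// out-of-range tensor index, or the -1 sentinel for an output tensor.
#include "tensorflow/lite/model.h"
#include "tensorflow/lite/schema/schema_generated.h"

class TensorIndexVerifier : public tflite::TfLiteVerifier {
 public:
  bool Verify(const char* data, int length,
              tflite::ErrorReporter* reporter) override {
    const tflite::Model* model = tflite::GetModel(data);
    if (model == nullptr || model->subgraphs() == nullptr) return false;
    for (const tflite::SubGraph* subgraph : *model->subgraphs()) {
      const int num_tensors =
          subgraph->tensors() ? static_cast<int>(subgraph->tensors()->size()) : 0;
      if (subgraph->operators() == nullptr) continue;
      for (const tflite::Operator* op : *subgraph->operators()) {
        if (op->outputs() != nullptr) {
          for (int idx : *op->outputs()) {
            if (idx < 0 || idx >= num_tensors) return false;  // no -1 outputs
          }
        }
        if (op->inputs() != nullptr) {
          for (int idx : *op->inputs()) {
            // A complete verifier would also allow-list which operators may
            // legitimately use -1 here; that list is omitted in this sketch.
            if (idx < -1 || idx >= num_tensors) return false;
          }
        }
      }
    }
    return true;
  }
};

// Assumed usage (file name is illustrative):
//   TensorIndexVerifier verifier;
//   auto model = tflite::FlatBufferModel::VerifyAndBuildFromFile(
//       "model.tflite", &verifier);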
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::EvalHybridPerChannel
tflite::ops::builtin::conv::EvalHybridPerChannel( TfLiteContext * context , TfLiteNode * node , TfLiteConvParams * params , OpData * data , const TfLiteTensor * input , const TfLiteTensor * filter , const TfLiteTensor * bias , TfLiteTensor * im2col , TfLiteTensor * output)
['context', 'node', 'params', 'data', 'input', 'filter', 'bias', 'im2col', 'output']
void EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* im2col, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>( GetTemporary(context, node, data->input_quantized_index)); float* scaling_factors_ptr = GetTensorData<float>( GetTemporary(context, node, data->scaling_factors_index)); int32_t* input_offset_ptr = GetTensorData<int32_t>( GetTemporary(context, node, data->input_offset_index)); for (int b = 0; b < batch_size; ++b) { const int offset = b * input_size; tensor_utils::AsymmetricQuantizeFloats( GetTensorData<float>(input) + offset, input_size, quantized_input_ptr_batch + offset, &scaling_factors_ptr[b], &input_offset_ptr[b]); } int8_t* im2col_ptr = nullptr; int8_t* filter_ptr = nullptr; if (im2col != nullptr) { im2col_ptr = im2col->data.int8; } filter_ptr = filter->data.int8; const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); ConvParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.dilation_width_factor = 1; op_params.dilation_height_factor = 1; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; switch (kernel_type) { case kReference: reference_ops::HybridConvPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), filter_ptr, GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), im2col_ptr, affine_quantization->scale->data, input_offset_ptr); break; case kGenericOptimized: case kMultithreadOptimized: case kCblasOptimized: { TfLiteTensor* row_sums = GetTemporary(context, node, data->row_sums_index); TfLiteTensor* scratch = GetTemporary(context, node, data->accum_scratch_index); optimized_ops::HybridConvPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), filter_ptr, GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), im2col_ptr, affine_quantization->scale->data, input_offset_ptr, GetTensorShape(scratch), GetTensorData<int32>(scratch), GetTensorData<int32_t>(row_sums), &data->compute_hybrid_row_sums, CpuBackendContext::GetFromContext(context)); data->compute_hybrid_row_sums = false; break; } } }
558
True
1
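EvalHybridPerChannel, recorded above, dereferences several temporaries obtained via GetTemporary (input_quantized, scaling_factors, input_offsets, plus row_sums and the accumulation scratch in the optimized path) with no nullptr check. A hedged fragment in the spirit of the commit message is below; as with EvalHybrid it assumes the helper can return TfLiteStatus so the macro can bail out.

// Sketch only: guard the temporaries EvalHybridPerChannel dereferences.
TfLiteTensor* input_offsets =
    GetTemporary(context, node, data->input_offset_index);
TF_LITE_ENSURE(context, input_offsets != nullptr);
TfLiteTensor* row_sums = GetTemporary(context, node, data->row_sums_index);
TF_LITE_ENSURE(context, row_sums != nullptr);
TfLiteTensor* scratch =
    GetTemporary(context, node, data->accum_scratch_index);
TF_LITE_ENSURE(context, scratch != nullptr);
int32_t* input_offset_ptr = GetTensorData<int32_t>(input_offsets);
// row_sums and scratch are handed to optimized_ops::HybridConvPerChannel only
// after the checks above have confirmed they exist.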
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::EvalImpl
tflite::ops::builtin::conv::EvalImpl( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* filter = GetInput(context, node, 1); bool has_bias = node->inputs->size == 3; const TfLiteTensor* bias = has_bias ? GetInput(context, node, 2) : nullptr; TfLiteTensor* im2col = data->need_im2col ? &context->tensors[node->temporaries->data[data->im2col_index]] : nullptr; TfLiteTensor* hwcn_weights = data->need_hwcn_weights ? &context->tensors[node->temporaries->data[data->hwcn_weights_index]] : nullptr; if (data->need_hwcn_weights && !data->have_weights_been_transposed) { TransposeFloatTensor(filter, hwcn_weights); data->have_weights_been_transposed = true; } TFLITE_DCHECK_EQ(input_type, input->type); switch (input_type) { // Already know in/outtypes are same. case kTfLiteFloat32: if (filter->type == kTfLiteUInt8 || filter->type == kTfLiteInt8) { if (data->is_hybrid_per_channel) { EvalHybridPerChannel<kernel_type>(context, node, params, data, input, filter, bias, im2col, output); } else { TfLiteTensor* accum_scratch = &context->tensors[node->temporaries ->data[data->accum_scratch_index]]; EvalHybrid<kernel_type>(context, node, params, data, input, filter, bias, im2col, accum_scratch, output); } } else { EvalFloat<kernel_type>(context, node, params, data, input, filter, bias, im2col, hwcn_weights, output); } break; case kTfLiteUInt8: EvalQuantized<kernel_type>(context, node, params, data, input, filter, bias, im2col, output); break; case kTfLiteInt8: EvalQuantizedPerChannel<kernel_type>(context, node, params, data, input, filter, bias, output, im2col); break; case kTfLiteInt16: EvalQuantizedPerChannel16x8<kernel_type>( context, node, params, data, input, filter, bias, output, im2col); break; default: TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
448
True
1
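EvalImpl in the record above already returns TfLiteStatus, so the nullptr checks described in the commit message can sit directly in its prologue. The bias tensor stays optional: the recorded code already passes a possibly-null bias to helpers that accept it. A minimal sketch follows; exact upstream wording may differ.

// Sketch only: guarded prologue for EvalImpl, per the commit message above.
TfLiteTensor* output = GetOutput(context, node, 0);
TF_LITE_ENSURE(context, output != nullptr);
const TfLiteTensor* input = GetInput(context, node, 0);
TF_LITE_ENSURE(context, input != nullptr);
const TfLiteTensor* filter = GetInput(context, node, 1);
TF_LITE_ENSURE(context, filter != nullptr);
const bool has_bias = node->inputs->size == 3;
// bias remains optional and is checked only where it would be dereferenced.
const TfLiteTensor* bias = has_bias ? GetInput(context, node, 2) : nullptr;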
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
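The check pattern the commit message describes, sketched for a generic kernel. This is a minimal sketch, not the literal patch: it assumes the TensorFlow Lite kernel headers that provide the same `GetInput`/`GetOutput` helpers and `TF_LITE_ENSURE` macro already used in the functions below, and the exact per-kernel diffs vary.

// Guard every accessor result before dereferencing it, so a missing
// (optional / -1) tensor fails gracefully with kTfLiteError instead of
// touching memory outside the tensor array.
TfLiteStatus PrepareWithNullChecks(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* input = GetInput(context, node, 0);
  TF_LITE_ENSURE(context, input != nullptr);
  const TfLiteTensor* filter = GetInput(context, node, 1);
  TF_LITE_ENSURE(context, filter != nullptr);
  TfLiteTensor* output = GetOutput(context, node, 0);
  TF_LITE_ENSURE(context, output != nullptr);
  // ... kernel-specific shape and type checks continue here ...
  return kTfLiteOk;
}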
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::EvalImpl
tflite::ops::builtin::conv::EvalImpl( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus EvalImpl(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* filter = GetInput(context, node, 1); bool has_bias = node->inputs->size == 3; const TfLiteTensor* bias = has_bias ? GetInput(context, node, 2) : nullptr; TfLiteTensor* im2col = data->need_im2col ? &context->tensors[node->temporaries->data[data->im2col_index]] : nullptr; TfLiteTensor* hwcn_weights = data->need_hwcn_weights ? &context->tensors[node->temporaries->data[data->hwcn_weights_index]] : nullptr; if (data->need_hwcn_weights && !data->have_weights_been_transposed) { TransposeFloatTensor(filter, hwcn_weights); data->have_weights_been_transposed = true; } TFLITE_DCHECK_EQ(input_type, input->type); switch (input_type) { // Already know in/outtypes are same. case kTfLiteFloat32: if (filter->type == kTfLiteUInt8 || filter->type == kTfLiteInt8) { if (data->is_hybrid_per_channel) { EvalHybridPerChannel<kernel_type>(context, node, params, data, input, filter, bias, im2col, output); } else { TfLiteTensor* accum_scratch = &context->tensors[node->temporaries ->data[data->accum_scratch_index]]; EvalHybrid<kernel_type>(context, node, params, data, input, filter, bias, im2col, accum_scratch, output); } } else { EvalFloat<kernel_type>(context, node, params, data, input, filter, bias, im2col, hwcn_weights, output); } break; case kTfLiteUInt8: EvalQuantized<kernel_type>(context, node, params, data, input, filter, bias, im2col, output); break; case kTfLiteInt8: EvalQuantizedPerChannel<kernel_type>(context, node, params, data, input, filter, bias, output, im2col); break; case kTfLiteInt16: EvalQuantizedPerChannel16x8<kernel_type>( context, node, params, data, input, filter, bias, output, im2col); break; default: TF_LITE_KERNEL_LOG(context, "Type %s currently not supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } return kTfLiteOk; }
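For context on why the unchecked `GetInput`/`GetOutput` calls in `EvalImpl` above are the risky part: the accessors resolve the operator's tensor index straight into the subgraph-owned tensor array, so the `-1` optional-tensor sentinel lands exactly one `TfLiteTensor` before the start of that array. A simplified model of the pre-patch accessor behaviour, not the literal TensorFlow Lite source:

// With node->inputs->data[index] == -1 the returned pointer is out of
// bounds, which is the limited read/write gadget the advisory describes.
const TfLiteTensor* GetInputSketch(const TfLiteContext* context,
                                   const TfLiteNode* node, int index) {
  const int tensor_index = node->inputs->data[index];  // may be -1 ("optional")
  return &context->tensors[tensor_index];              // -1 => element before the array
}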
448
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::Prepare
tflite::ops::builtin::conv::Prepare( KernelType kernel_type , TfLiteContext * context , TfLiteNode * node)
['kernel_type', 'context', 'node']
TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); bool has_bias = node->inputs->size == 3; // Check number of inputs/outputs TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* filter = GetInput(context, node, 1); // Check dimensionality of input, filter TF_LITE_ENSURE_EQ(context, input->dims->size, 4); TF_LITE_ENSURE_EQ(context, filter->dims->size, 4); // Check input channels matching filter TF_LITE_ENSURE_EQ(context, input->dims->data[3], filter->dims->data[3]); // Check types. (We assume that UINT8 refers to quantized tensors) TfLiteType input_type = input->type; TF_LITE_ENSURE(context, input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 || input_type == kTfLiteInt8 || input_type == kTfLiteInt16); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_type); const TfLiteTensor* bias = nullptr; // TODO(ahentz): At this point the optimized versions require 'bias'. We can // either change that or document that convolution requires it. TF_LITE_ENSURE(context, has_bias); if (has_bias) { bias = GetInput(context, node, 2); if (input_type == kTfLiteUInt8 || input_type == kTfLiteInt8) { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, bias->params.zero_point, 0); } else if (input_type == kTfLiteInt16) { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, kTfLiteInt64); TF_LITE_ENSURE_EQ(context, bias->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, input_type); } TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0)); } const bool is_hybrid = (input->type == kTfLiteFloat32 && (filter->type == kTfLiteUInt8 || filter->type == kTfLiteInt8)); if (is_hybrid && filter->type == kTfLiteInt8 && filter->quantization.type == kTfLiteAffineQuantization && filter->quantization.params && reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params) ->scale && reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params) ->scale->size > 1) { const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); const float scale = affine_quantization->scale->data[0]; for (int i = 1; i < affine_quantization->scale->size; i++) { if (affine_quantization->scale->data[i] != scale) { data->is_hybrid_per_channel = true; break; } } } // The multi-threaded kernel supports neither dilation nor hybrid kernels, and // is incompatible with mutable input filters that might change between evals. 
data->supports_multithreaded_kernel = (kernel_type == kMultithreadOptimized) && (context->recommended_num_threads != 1) && !is_hybrid && (params->dilation_width_factor == 1) && (params->dilation_height_factor == 1) && (filter->allocation_type != kTfLiteArenaRw) && !IsDynamicTensor(filter); TF_LITE_ENSURE_STATUS(AllocateTemporaryTensorsIfRequired( context, node, is_hybrid, data->is_hybrid_per_channel, kernel_type)); int channels_in = filter->dims->data[3]; int channels_out = filter->dims->data[0]; int width = input->dims->data[2]; int height = input->dims->data[1]; int filter_width = filter->dims->data[2]; int filter_height = filter->dims->data[1]; int batches = input->dims->data[0]; // Matching GetWindowedOutputSize in TensorFlow. auto padding = params->padding; int out_width, out_height; data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, params->dilation_height_factor, params->dilation_width_factor, height, width, filter_height, filter_width, padding, &out_height, &out_width); TF_LITE_ENSURE(context, has_bias); // Note that full fixed-point inference requires that all tensors have their // parameters set. This is usually done during quantized training or // calibration. if (input_type != kTfLiteFloat32) { TF_LITE_ENSURE_EQ(context, filter->quantization.type, kTfLiteAffineQuantization); const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE(context, (affine_quantization->scale->size == 1 || affine_quantization->scale->size == channels_out)); data->per_channel_output_multiplier.resize(channels_out); data->per_channel_output_shift.resize(channels_out); TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( context, input, filter, bias, output, params->activation, &data->output_multiplier, &data->output_shift, &data->output_activation_min, &data->output_activation_max, data->per_channel_output_multiplier.data(), data->per_channel_output_shift.data(), channels_out)); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = batches; output_size->data[1] = out_height; output_size->data[2] = out_width; output_size->data[3] = channels_out; auto output_status = context->ResizeTensor(context, output, output_size); if (output_status != kTfLiteOk) return output_status; if (data->need_im2col) { node->temporaries->data[data->im2col_index] = data->im2col_id; TfLiteIntArray* im2col_size = TfLiteIntArrayCreate(4); int input_depth = input->dims->data[3]; im2col_size->data[0] = output_size->data[0]; im2col_size->data[1] = output_size->data[1]; im2col_size->data[2] = output_size->data[2]; im2col_size->data[3] = input_depth * filter_height * filter_width; TfLiteTensor* im2col = &context->tensors[node->temporaries->data[data->im2col_index]]; im2col->type = input->type; if (is_hybrid) { im2col->type = filter->type; } im2col->allocation_type = kTfLiteArenaRw; auto im2col_status = context->ResizeTensor(context, im2col, im2col_size); if (im2col_status != kTfLiteOk) return im2col_status; } if (data->need_hwcn_weights) { node->temporaries->data[data->hwcn_weights_index] = data->hwcn_weights_id; TfLiteIntArray* hwcn_weights_size = TfLiteIntArrayCreate(2); // Because we're treating the filter weights as a matrix when we do the // transpose, we allocate the buffer with a two-dimensional shape, where one // dimension is the number of elements in each filter, and the second is the // 
total number of filters. int input_depth = input->dims->data[3]; hwcn_weights_size->data[0] = (filter_height * filter_width * input_depth); hwcn_weights_size->data[1] = channels_out; TfLiteTensor* hwcn_weights = &context->tensors[node->temporaries->data[data->hwcn_weights_index]]; hwcn_weights->type = input_type; hwcn_weights->allocation_type = kTfLiteArenaRwPersistent; auto hwcn_weights_status = context->ResizeTensor(context, hwcn_weights, hwcn_weights_size); if (hwcn_weights_status != kTfLiteOk) return hwcn_weights_status; // TODO(petewarden): If Resize() is called when the size hasn't actually // changed, this will do extra redundant work. data->have_weights_been_transposed = false; } if (is_hybrid) { node->temporaries->data[data->input_quantized_index] = data->input_quantized_id; TfLiteTensor* input_quantized = GetTemporary(context, node, data->input_quantized_index); input_quantized->type = kTfLiteInt8; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[data->scaling_factors_index] = data->scaling_factors_id; TfLiteTensor* scaling_factors = GetTemporary(context, node, data->scaling_factors_index); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; // Only one scale factor per batch is typically necessary. See optimized // implementation for why we need to allocate for the height of the inputs // flattened to 2D. const int height = NumElements(input) / channels_in; int scaling_dims[1] = {height}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = height; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[data->accum_scratch_index] = data->accum_scratch_id; TfLiteTensor* accum_scratch = GetTemporary(context, node, data->accum_scratch_index); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; const int scratch_width = batches * out_height * out_width; int accum_scratch_dims[2] = {channels_out, scratch_width}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2); accum_scratch_size->data[0] = channels_out; accum_scratch_size->data[1] = scratch_width; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch, accum_scratch_size)); } if (data->is_hybrid_per_channel) { const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); TF_LITE_ENSURE_EQ( context, affine_quantization->scale->size, filter->dims->data[affine_quantization->quantized_dimension]); node->temporaries->data[data->input_offset_index] = data->input_offset_id; TfLiteTensor* input_offsets = GetTemporary(context, node, data->input_offset_index); input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; // See above comment for the need to allocate for height of inputs. 
const int height = NumElements(input) / channels_in; const int input_offset_dims[1] = {height}; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, input_offset_dims)) { TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); input_offsets_size->data[0] = input_offset_dims[0]; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, input_offsets_size)); } node->temporaries->data[data->row_sums_index] = data->row_sums_id; TfLiteTensor* row_sums = GetTemporary(context, node, data->row_sums_index); row_sums->type = kTfLiteInt32; row_sums->allocation_type = kTfLiteArenaRwPersistent; // See above comment for the need to allocate for height of inputs. const int row_sums_dims[1] = {channels_out}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1); row_sums_size->data[0] = row_sums_dims[0]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } } } return kTfLiteOk; }
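The commit message above distinguishes required tensors from genuinely optional ones; in `Prepare`-style code that distinction looks roughly like the sketch below. The helper name `CheckConvInputs` is hypothetical, and the assumption that `GetOptionalInputTensor` already maps the `-1` sentinel to `nullptr` is based on that helper's documented intent, so details may differ per release.

// Illustrative fragment: a required input must never be nullptr, while an
// optional input (e.g. a bias) may legitimately be absent and only needs a
// check where it would otherwise be dereferenced unconditionally.
TfLiteStatus CheckConvInputs(TfLiteContext* context, TfLiteNode* node) {
  const TfLiteTensor* filter = GetInput(context, node, 1);
  TF_LITE_ENSURE(context, filter != nullptr);  // required input
  const TfLiteTensor* bias = GetOptionalInputTensor(context, node, 2);
  if (bias != nullptr) {
    // Optional input: only validated when actually present.
    TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0));
  }
  return kTfLiteOk;
}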
1867
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::Prepare
tflite::ops::builtin::conv::Prepare( KernelType kernel_type , TfLiteContext * context , TfLiteNode * node)
['kernel_type', 'context', 'node']
TfLiteStatus Prepare(KernelType kernel_type, TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteConvParams*>(node->builtin_data); OpData* data = reinterpret_cast<OpData*>(node->user_data); bool has_bias = node->inputs->size == 3; // Check number of inputs/outputs TF_LITE_ENSURE(context, has_bias || node->inputs->size == 2); TF_LITE_ENSURE_EQ(context, node->outputs->size, 1); TfLiteTensor* output = GetOutput(context, node, 0); const TfLiteTensor* input = GetInput(context, node, 0); const TfLiteTensor* filter = GetInput(context, node, 1); // Check dimensionality of input, filter TF_LITE_ENSURE_EQ(context, input->dims->size, 4); TF_LITE_ENSURE_EQ(context, filter->dims->size, 4); // Check input channels matching filter TF_LITE_ENSURE_EQ(context, input->dims->data[3], filter->dims->data[3]); // Check types. (We assume that UINT8 refers to quantized tensors) TfLiteType input_type = input->type; TF_LITE_ENSURE(context, input_type == kTfLiteFloat32 || input_type == kTfLiteUInt8 || input_type == kTfLiteInt8 || input_type == kTfLiteInt16); TF_LITE_ENSURE_TYPES_EQ(context, output->type, input_type); const TfLiteTensor* bias = nullptr; // TODO(ahentz): At this point the optimized versions require 'bias'. We can // either change that or document that convolution requires it. TF_LITE_ENSURE(context, has_bias); if (has_bias) { bias = GetInput(context, node, 2); if (input_type == kTfLiteUInt8 || input_type == kTfLiteInt8) { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, kTfLiteInt32); TF_LITE_ENSURE_EQ(context, bias->params.zero_point, 0); } else if (input_type == kTfLiteInt16) { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, kTfLiteInt64); TF_LITE_ENSURE_EQ(context, bias->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, input->params.zero_point, 0); TF_LITE_ENSURE_EQ(context, output->params.zero_point, 0); } else { TF_LITE_ENSURE_TYPES_EQ(context, bias->type, input_type); } TF_LITE_ENSURE_EQ(context, NumElements(bias), SizeOfDimension(filter, 0)); } const bool is_hybrid = (input->type == kTfLiteFloat32 && (filter->type == kTfLiteUInt8 || filter->type == kTfLiteInt8)); if (is_hybrid && filter->type == kTfLiteInt8 && filter->quantization.type == kTfLiteAffineQuantization && filter->quantization.params && reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params) ->scale && reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params) ->scale->size > 1) { const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); const float scale = affine_quantization->scale->data[0]; for (int i = 1; i < affine_quantization->scale->size; i++) { if (affine_quantization->scale->data[i] != scale) { data->is_hybrid_per_channel = true; break; } } } // The multi-threaded kernel supports neither dilation nor hybrid kernels, and // is incompatible with mutable input filters that might change between evals. 
data->supports_multithreaded_kernel = (kernel_type == kMultithreadOptimized) && (context->recommended_num_threads != 1) && !is_hybrid && (params->dilation_width_factor == 1) && (params->dilation_height_factor == 1) && (filter->allocation_type != kTfLiteArenaRw) && !IsDynamicTensor(filter); TF_LITE_ENSURE_STATUS(AllocateTemporaryTensorsIfRequired( context, node, is_hybrid, data->is_hybrid_per_channel, kernel_type)); int channels_in = filter->dims->data[3]; int channels_out = filter->dims->data[0]; int width = input->dims->data[2]; int height = input->dims->data[1]; int filter_width = filter->dims->data[2]; int filter_height = filter->dims->data[1]; int batches = input->dims->data[0]; // Matching GetWindowedOutputSize in TensorFlow. auto padding = params->padding; int out_width, out_height; data->padding = ComputePaddingHeightWidth( params->stride_height, params->stride_width, params->dilation_height_factor, params->dilation_width_factor, height, width, filter_height, filter_width, padding, &out_height, &out_width); TF_LITE_ENSURE(context, has_bias); // Note that full fixed-point inference requires that all tensors have their // parameters set. This is usually done during quantized training or // calibration. if (input_type != kTfLiteFloat32) { TF_LITE_ENSURE_EQ(context, filter->quantization.type, kTfLiteAffineQuantization); const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); TF_LITE_ENSURE(context, affine_quantization); TF_LITE_ENSURE(context, affine_quantization->scale); TF_LITE_ENSURE(context, (affine_quantization->scale->size == 1 || affine_quantization->scale->size == channels_out)); data->per_channel_output_multiplier.resize(channels_out); data->per_channel_output_shift.resize(channels_out); TF_LITE_ENSURE_STATUS(tflite::PopulateConvolutionQuantizationParams( context, input, filter, bias, output, params->activation, &data->output_multiplier, &data->output_shift, &data->output_activation_min, &data->output_activation_max, data->per_channel_output_multiplier.data(), data->per_channel_output_shift.data(), channels_out)); } TfLiteIntArray* output_size = TfLiteIntArrayCreate(4); output_size->data[0] = batches; output_size->data[1] = out_height; output_size->data[2] = out_width; output_size->data[3] = channels_out; auto output_status = context->ResizeTensor(context, output, output_size); if (output_status != kTfLiteOk) return output_status; if (data->need_im2col) { node->temporaries->data[data->im2col_index] = data->im2col_id; TfLiteIntArray* im2col_size = TfLiteIntArrayCreate(4); int input_depth = input->dims->data[3]; im2col_size->data[0] = output_size->data[0]; im2col_size->data[1] = output_size->data[1]; im2col_size->data[2] = output_size->data[2]; im2col_size->data[3] = input_depth * filter_height * filter_width; TfLiteTensor* im2col = &context->tensors[node->temporaries->data[data->im2col_index]]; im2col->type = input->type; if (is_hybrid) { im2col->type = filter->type; } im2col->allocation_type = kTfLiteArenaRw; auto im2col_status = context->ResizeTensor(context, im2col, im2col_size); if (im2col_status != kTfLiteOk) return im2col_status; } if (data->need_hwcn_weights) { node->temporaries->data[data->hwcn_weights_index] = data->hwcn_weights_id; TfLiteIntArray* hwcn_weights_size = TfLiteIntArrayCreate(2); // Because we're treating the filter weights as a matrix when we do the // transpose, we allocate the buffer with a two-dimensional shape, where one // dimension is the number of elements in each filter, and the second is the // 
total number of filters. int input_depth = input->dims->data[3]; hwcn_weights_size->data[0] = (filter_height * filter_width * input_depth); hwcn_weights_size->data[1] = channels_out; TfLiteTensor* hwcn_weights = &context->tensors[node->temporaries->data[data->hwcn_weights_index]]; hwcn_weights->type = input_type; hwcn_weights->allocation_type = kTfLiteArenaRwPersistent; auto hwcn_weights_status = context->ResizeTensor(context, hwcn_weights, hwcn_weights_size); if (hwcn_weights_status != kTfLiteOk) return hwcn_weights_status; // TODO(petewarden): If Resize() is called when the size hasn't actually // changed, this will do extra redundant work. data->have_weights_been_transposed = false; } if (is_hybrid) { node->temporaries->data[data->input_quantized_index] = data->input_quantized_id; TfLiteTensor* input_quantized = GetTemporary(context, node, data->input_quantized_index); input_quantized->type = kTfLiteInt8; input_quantized->allocation_type = kTfLiteArenaRw; if (!TfLiteIntArrayEqual(input_quantized->dims, input->dims)) { TfLiteIntArray* input_quantized_size = TfLiteIntArrayCopy(input->dims); TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_quantized, input_quantized_size)); } node->temporaries->data[data->scaling_factors_index] = data->scaling_factors_id; TfLiteTensor* scaling_factors = GetTemporary(context, node, data->scaling_factors_index); scaling_factors->type = kTfLiteFloat32; scaling_factors->allocation_type = kTfLiteArenaRw; // Only one scale factor per batch is typically necessary. See optimized // implementation for why we need to allocate for the height of the inputs // flattened to 2D. const int height = NumElements(input) / channels_in; int scaling_dims[1] = {height}; if (!TfLiteIntArrayEqualsArray(scaling_factors->dims, 1, scaling_dims)) { TfLiteIntArray* scaling_factors_size = TfLiteIntArrayCreate(1); scaling_factors_size->data[0] = height; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, scaling_factors, scaling_factors_size)); } node->temporaries->data[data->accum_scratch_index] = data->accum_scratch_id; TfLiteTensor* accum_scratch = GetTemporary(context, node, data->accum_scratch_index); accum_scratch->type = kTfLiteInt32; accum_scratch->allocation_type = kTfLiteArenaRw; const int scratch_width = batches * out_height * out_width; int accum_scratch_dims[2] = {channels_out, scratch_width}; if (!TfLiteIntArrayEqualsArray(accum_scratch->dims, 2, accum_scratch_dims)) { TfLiteIntArray* accum_scratch_size = TfLiteIntArrayCreate(2); accum_scratch_size->data[0] = channels_out; accum_scratch_size->data[1] = scratch_width; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, accum_scratch, accum_scratch_size)); } if (data->is_hybrid_per_channel) { const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>( filter->quantization.params); TF_LITE_ENSURE_EQ( context, affine_quantization->scale->size, filter->dims->data[affine_quantization->quantized_dimension]); node->temporaries->data[data->input_offset_index] = data->input_offset_id; TfLiteTensor* input_offsets = GetTemporary(context, node, data->input_offset_index); input_offsets->type = kTfLiteInt32; input_offsets->allocation_type = kTfLiteArenaRw; // See above comment for the need to allocate for height of inputs. 
const int height = NumElements(input) / channels_in; const int input_offset_dims[1] = {height}; if (!TfLiteIntArrayEqualsArray(input_offsets->dims, 1, input_offset_dims)) { TfLiteIntArray* input_offsets_size = TfLiteIntArrayCreate(1); input_offsets_size->data[0] = input_offset_dims[0]; TF_LITE_ENSURE_OK(context, context->ResizeTensor(context, input_offsets, input_offsets_size)); } node->temporaries->data[data->row_sums_index] = data->row_sums_id; TfLiteTensor* row_sums = GetTemporary(context, node, data->row_sums_index); row_sums->type = kTfLiteInt32; row_sums->allocation_type = kTfLiteArenaRwPersistent; // See above comment for the need to allocate for height of inputs. const int row_sums_dims[1] = {channels_out}; if (!TfLiteIntArrayEqualsArray(row_sums->dims, 1, row_sums_dims)) { TfLiteIntArray* row_sums_size = TfLiteIntArrayCreate(1); row_sums_size->data[0] = row_sums_dims[0]; TF_LITE_ENSURE_OK( context, context->ResizeTensor(context, row_sums, row_sums_size)); } } } return kTfLiteOk; }
1867
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::depth_to_space::Eval
tflite::ops::builtin::depth_to_space::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); #define TF_LITE_DEPTH_TO_SPACE(type, scalar) \ tflite::DepthToSpaceParams op_params; \ op_params.block_size = params->block_size; \ type::DepthToSpace(op_params, GetTensorShape(input), \ GetTensorData<scalar>(input), GetTensorShape(output), \ GetTensorData<scalar>(output)) switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, float); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, float); } break; case kTfLiteUInt8: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, uint8_t); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, uint8_t); } break; case kTfLiteInt8: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, int8_t); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, int8_t); } break; case kTfLiteInt32: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, int32_t); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, int32_t); } break; case kTfLiteInt64: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, int64_t); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, int64_t); } break; default: context->ReportError(context, "Type '%s' not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } #undef TF_LITE_DEPTH_TO_SPACE return kTfLiteOk; }
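A further hardening style used in later TensorFlow Lite releases replaces the raw accessors at the top of an `Eval` like the one above with status-returning ones. The `GetInputSafe`/`GetOutputSafe` signatures below are an assumption based on the current `kernel_util.h` and are not part of the affected releases; `kInputTensor`, `kOutputTensor`, and `TF_LITE_ENSURE_OK` are the same names already used in the kernels above.

// Sketch: the two accessor lines at the top of Eval, rewritten in the
// status-returning style so an invalid tensor index aborts the op cleanly.
const TfLiteTensor* input;
TF_LITE_ENSURE_OK(context, GetInputSafe(context, node, kInputTensor, &input));
TfLiteTensor* output;
TF_LITE_ENSURE_OK(context, GetOutputSafe(context, node, kOutputTensor, &output));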
236
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is error-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::depth_to_space::Eval
tflite::ops::builtin::depth_to_space::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { auto* params = reinterpret_cast<TfLiteDepthToSpaceParams*>(node->builtin_data); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); #define TF_LITE_DEPTH_TO_SPACE(type, scalar) \ tflite::DepthToSpaceParams op_params; \ op_params.block_size = params->block_size; \ type::DepthToSpace(op_params, GetTensorShape(input), \ GetTensorData<scalar>(input), GetTensorShape(output), \ GetTensorData<scalar>(output)) switch (input->type) { // Already know in/out types are same. case kTfLiteFloat32: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, float); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, float); } break; case kTfLiteUInt8: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, uint8_t); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, uint8_t); } break; case kTfLiteInt8: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, int8_t); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, int8_t); } break; case kTfLiteInt32: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, int32_t); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, int32_t); } break; case kTfLiteInt64: if (kernel_type == kReference) { TF_LITE_DEPTH_TO_SPACE(reference_ops, int64_t); } else { TF_LITE_DEPTH_TO_SPACE(optimized_ops, int64_t); } break; default: context->ReportError(context, "Type '%s' not currently supported.", TfLiteTypeGetName(input->type)); return kTfLiteError; } #undef TF_LITE_DEPTH_TO_SPACE return kTfLiteOk; }
236
True
1