cve_id
stringlengths
13
16
obtain_all_privilege
stringclasses
3 values
obtain_user_privilege
stringclasses
2 values
obtain_other_privilege
stringclasses
2 values
user_interaction_required
stringclasses
3 values
cvss2_vector_string
stringclasses
106 values
cvss2_access_vector
stringclasses
4 values
cvss2_access_complexity
stringclasses
4 values
cvss2_authentication
stringclasses
3 values
cvss2_confidentiality_impact
stringclasses
4 values
cvss2_integrity_impact
stringclasses
4 values
cvss2_availability_impact
stringclasses
4 values
cvss2_base_score
stringclasses
50 values
cvss3_vector_string
stringclasses
226 values
cvss3_attack_vector
stringclasses
5 values
cvss3_attack_complexity
stringclasses
3 values
cvss3_privileges_required
stringclasses
4 values
cvss3_user_interaction
stringclasses
3 values
cvss3_scope
stringclasses
3 values
cvss3_confidentiality_impact
stringclasses
4 values
cvss3_integrity_impact
stringclasses
4 values
cvss3_availability_impact
stringclasses
4 values
cvss3_base_score
stringclasses
55 values
cvss3_base_severity
stringclasses
5 values
exploitability_score
stringclasses
22 values
impact_score
stringclasses
15 values
ac_insuf_info
stringclasses
3 values
reference_json
stringlengths
221
23.3k
problemtype_json
stringclasses
200 values
severity
stringclasses
4 values
cve_nodes
stringlengths
2
33.1k
cve_description
stringlengths
64
1.99k
cve_last_modified_date
stringlengths
17
17
cve_published_date
stringlengths
17
17
cwe_name
stringclasses
125 values
cwe_description
stringclasses
124 values
cwe_extended_description
stringclasses
95 values
cwe_url
stringclasses
124 values
cwe_is_category
int64
0
1
commit_author
stringlengths
0
34
commit_author_date
stringlengths
25
25
commit_msg
stringlengths
0
13.3k
commit_hash
stringlengths
40
40
commit_is_merge
stringclasses
1 value
repo_name
stringclasses
467 values
repo_description
stringclasses
459 values
repo_date_created
stringclasses
467 values
repo_date_last_push
stringclasses
467 values
repo_homepage
stringclasses
294 values
repo_owner
stringclasses
470 values
repo_stars
stringclasses
406 values
repo_forks
stringclasses
352 values
function_name
stringlengths
3
120
function_signature
stringlengths
6
640
function_parameters
stringlengths
2
302
function
stringlengths
12
114k
function_token_count
stringlengths
1
5
function_before_change
stringclasses
1 value
labels
int64
1
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 
'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::where::Prepare
tflite::ops::builtin::where::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* cond_tensor = GetInput(context, node, kInputConditionTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (cond_tensor->type != kTfLiteBool) { context->ReportError(context, "Condition tensor must be of type bool, but saw '%s'.", TfLiteTypeGetName(cond_tensor->type)); return kTfLiteError; } // As output will be a 2D tensor of indices, use int64 to be consistent with // tensorflow. output->type = kTfLiteInt64; // Exit early if cond is a non-const tensor. Set output tensor to dynamic so // output size can be determined in Eval. if (!IsConstantTensor(cond_tensor)) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputTensor(context, cond_tensor, output); }
126
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 
'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::where::Prepare
tflite::ops::builtin::where::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* cond_tensor = GetInput(context, node, kInputConditionTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); if (cond_tensor->type != kTfLiteBool) { context->ReportError(context, "Condition tensor must be of type bool, but saw '%s'.", TfLiteTypeGetName(cond_tensor->type)); return kTfLiteError; } // As output will be a 2D tensor of indices, use int64 to be consistent with // tensorflow. output->type = kTfLiteInt64; // Exit early if cond is a non-const tensor. Set output tensor to dynamic so // output size can be determined in Eval. if (!IsConstantTensor(cond_tensor)) { SetTensorToDynamic(output); return kTfLiteOk; } return ResizeOutputTensor(context, cond_tensor, output); }
126
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 
'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::zeros_like::Eval
tflite::ops::builtin::zeros_like::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const int num_elements = NumElements(input); switch (input->type) { case kTfLiteInt64: memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t)); break; case kTfLiteInt32: memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t)); break; case kTfLiteFloat32: memset(GetTensorData<float>(output), 0, num_elements * sizeof(float)); break; default: context->ReportError(context, "ZerosLike only currently supports int64, int32, " "and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; }
153
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 
'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::zeros_like::Eval
tflite::ops::builtin::zeros_like::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); const int num_elements = NumElements(input); switch (input->type) { case kTfLiteInt64: memset(GetTensorData<int64_t>(output), 0, num_elements * sizeof(int64_t)); break; case kTfLiteInt32: memset(GetTensorData<int32_t>(output), 0, num_elements * sizeof(int32_t)); break; case kTfLiteFloat32: memset(GetTensorData<float>(output), 0, num_elements * sizeof(float)); break; default: context->ReportError(context, "ZerosLike only currently supports int64, int32, " "and float32, got %d.", input->type); return kTfLiteError; } return kTfLiteOk; }
153
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 
'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::zeros_like::Prepare
tflite::ops::builtin::zeros_like::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
88
True
1
CVE-2020-15211
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:N
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
NONE
5.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:L/I:L/A:N
NETWORK
HIGH
NONE
NONE
UNCHANGED
LOW
LOW
NONE
4.8
MEDIUM
2.2
2.5
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'name': 'https://github.com/tensorflow/tensorflow/commit/e11f55585f614645b360563072ffeb5c3eeff162', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'name': 'https://github.com/tensorflow/tensorflow/commit/cd31fd0ce0449a9e0f83dcad08d6ed7f1d6bef3f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'name': 'https://github.com/tensorflow/tensorflow/commit/46d5b0852528ddfd614ded79bccc75589f801bd9', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/00302787b788c5ff04cb6f62aed5a74d936e86c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cvpc-8phh-8f45', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'name': 'https://github.com/tensorflow/tensorflow/commit/fff2c8326280c07733828f990548979bdc893859', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'name': 'https://github.com/tensorflow/tensorflow/commit/1970c2158b1ffa416d159d03c3370b9a462aee35', 'refsource': 
'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In TensorFlow Lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, saved models in the flatbuffer format use a double indexing scheme: a model has a set of subgraphs, each subgraph has a set of operators and each operator has a set of input/output tensors. The flatbuffer format uses indices for the tensors, indexing into an array of tensors that is owned by the subgraph. This results in a pattern of double array indexing when trying to get the data of each tensor. However, some operators can have some tensors be optional. To handle this scenario, the flatbuffer model uses a negative `-1` value as index for these tensors. This results in special casing during validation at model loading time. Unfortunately, this means that the `-1` index is a valid tensor index for any operator, including those that don't expect optional inputs and including for output tensors. Thus, this allows writing and reading from outside the bounds of heap allocated arrays, although only at a specific offset from the start of these arrays. This results in both read and write gadgets, albeit very limited in scope. The issue is patched in several commits (46d5b0852, 00302787b7, e11f5558, cd31fd0ce, 1970c21, and fff2c83), and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1. A potential workaround would be to add a custom `Verifier` to the model loading code to ensure that only operators which accept optional inputs use the `-1` special value and only for the tensors that they expect to be optional. Since this allow-list type approach is erro-prone, we advise upgrading to the patched code."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 13:56:43-07:00
[tflite]: Insert `nullptr` checks when obtaining tensors. As part of ongoing refactoring, `tflite::GetInput`, `tflite::GetOutput`, `tflite::GetTemporary` and `tflite::GetIntermediates` will return `nullptr` in some cases. Hence, we insert the `nullptr` checks on all usages. We also insert `nullptr` checks on usages of `tflite::GetVariableInput` and `tflite::GetOptionalInputTensor` but only in the cases where there is no obvious check that `nullptr` is acceptable (that is, we only insert the check for the output of these two functions if the tensor is accessed as if it is always not `nullptr`). PiperOrigin-RevId: 332521299 Change-Id: I29af455bcb48d0b92e58132d951a3badbd772d56
1970c2158b1ffa416d159d03c3370b9a462aee35
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::zeros_like::Prepare
tflite::ops::builtin::zeros_like::Prepare( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Prepare(TfLiteContext* context, TfLiteNode* node) { TF_LITE_ENSURE_EQ(context, NumInputs(node), 1); TF_LITE_ENSURE_EQ(context, NumOutputs(node), 1); const TfLiteTensor* input = GetInput(context, node, kInputTensor); TfLiteTensor* output = GetOutput(context, node, kOutputTensor); output->type = input->type; return context->ResizeTensor(context, output, TfLiteIntArrayCopy(input->dims)); }
88
True
1
CVE-2020-15209
False
False
False
False
AV:N/AC:M/Au:N/C:N/I:N/A:P
NETWORK
MEDIUM
NONE
NONE
NONE
PARTIAL
4.3
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:U/C:N/I:N/A:H
NETWORK
HIGH
NONE
NONE
UNCHANGED
NONE
NONE
HIGH
5.9
MEDIUM
2.2
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qh32-6jjc-qprm', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qh32-6jjc-qprm', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/0b5662bc2be13a8c8f044d925d87fb6e56247cd8', 'name': 'https://github.com/tensorflow/tensorflow/commit/0b5662bc2be13a8c8f044d925d87fb6e56247cd8', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:lite:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In tensorflow-lite before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, a crafted TFLite model can force a node to have as input a tensor backed by a `nullptr` buffer. This can be achieved by changing a buffer index in the flatbuffer serialization to convert a read-only tensor to a read-write one. The runtime assumes that these buffers are written to before a possible read, hence they are initialized with `nullptr`. However, by changing the buffer index for a tensor and implicitly converting that tensor to be a read-write one, as there is nothing in the model that writes to it, we get a null pointer dereference. The issue is patched in commit 0b5662bc, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-09-16T15:45Z
2020-09-25T19:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2020-09-18 14:12:45-07:00
[tflite] Ensure input tensors don't have `nullptr` buffers. A crafted TFLite model can force a node to have as input a tensor backed by a `nullptr` buffer. That is, by carefully changing the buffer index in the flatbuffer serialization, we can force the TFLite interpreter to consider a read-only tensor to be a read-write one and assume that there is an operator that has this tensor as output, writing to it and allocating memory before the tensor is used as input. If this does not happen, we get memory corruption. PiperOrigin-RevId: 332524692 Change-Id: I57ef175152a29020af9ab041dc959e5631dce40f
0b5662bc2be13a8c8f044d925d87fb6e56247cd8
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::TEST
tflite::TEST( BasicFlatBufferModel , TestHandleMalformedModel)
['BasicFlatBufferModel', 'TestHandleMalformedModel']
TEST(BasicFlatBufferModel, TestHandleMalformedModel) { const auto model_paths = { // These models use the same tensor as both input and ouput of a node "tensorflow/lite/testdata/add_shared_tensors.bin", }; for (const auto& model_path : model_paths) { std::unique_ptr<tflite::FlatBufferModel> model = FlatBufferModel::BuildFromFile(model_path); ASSERT_NE(model, nullptr); tflite::ops::builtin::BuiltinOpResolver resolver; InterpreterBuilder builder(*model, resolver); std::unique_ptr<Interpreter> interpreter; ASSERT_EQ(builder(&interpreter), kTfLiteOk); ASSERT_NE(interpreter, nullptr); ASSERT_NE(interpreter->AllocateTensors(), kTfLiteOk); } }
107
True
1
CVE-2020-15206
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
HIGH
7.5
HIGH
3.9
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-w5gh-2wr2-pm6g', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-w5gh-2wr2-pm6g', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/adf095206f25471e864a8e63a0f1caef53a0e3a6', 'name': 'https://github.com/tensorflow/tensorflow/commit/adf095206f25471e864a8e63a0f1caef53a0e3a6', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory', 'Vendor Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, changing the TensorFlow's `SavedModel` protocol buffer and altering the name of required keys results in segfaults and data corruption while loading the model. This can cause a denial of service in products using `tensorflow-serving` or other inference-as-a-service installments. Fixed were added in commits f760f88b4267d981e13f4b302c437ae800445968 and fcfef195637c6e365577829c4d67681695956e7d (both going into TensorFlow 2.2.0 and 2.3.0 but not yet backported to earlier versions). However, this was not enough, as #41097 reports a different failure mode. The issue is patched in commit adf095206f25471e864a8e63a0f1caef53a0e3a6, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1."}]
2021-09-16T15:45Z
2020-09-25T19:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2020-09-18 15:10:57-07:00
Validate `NodeDef`s from `FunctionDefLibrary` of a `GraphDef`. We already validated `NodeDef`s from a `GraphDef` but missed validating those from the `FunctionDefLibrary`. Thus, some maliciously crafted models could evade detection and cause denial of service due to a `CHECK`-fail. PiperOrigin-RevId: 332536309 Change-Id: I052efe919ff1fe2f90815e286a1aa4c54c7b94ff
adf095206f25471e864a8e63a0f1caef53a0e3a6
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::ValidateSavedTensors
tensorflow::ValidateSavedTensors( const GraphDef & graph_def)
['graph_def']
static Status ValidateSavedTensors(const GraphDef& graph_def) { for (const auto& node : graph_def.node()) { const auto node_iterator = node.attr().find("value"); if (node_iterator != node.attr().end()) { AttrValue node_value = node_iterator->second; if (node_value.has_tensor()) { const PartialTensorShape node_shape(node_value.tensor().tensor_shape()); if (node_shape.num_elements() < 0) { return errors::FailedPrecondition( "Saved model contains node \"", node.name(), "\" (op \"", node.op(), "\") which initializes from a tensor with ", node_shape.num_elements(), " elements"); } } } else if (node.op() == "Const") { return errors::FailedPrecondition( "Saved model contains node \"", node.name(), "\" which is a constant tensor but no value has been provided"); } } return Status::OK(); }
167
True
1
CVE-2020-15204
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
LOW
5.3
MEDIUM
3.9
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-q8gv-q7wr-9jf8', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-q8gv-q7wr-9jf8', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/9a133d73ae4b4664d22bd1aa6d654fec13c52ee1', 'name': 'https://github.com/tensorflow/tensorflow/commit/9a133d73ae4b4664d22bd1aa6d654fec13c52ee1', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In eager mode, TensorFlow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1 does not set the session state. Hence, calling `tf.raw_ops.GetSessionHandle` or `tf.raw_ops.GetSessionHandleV2` results in a null pointer dereference In linked snippet, in eager mode, `ctx->session_state()` returns `nullptr`. Since code immediately dereferences this, we get a segmentation fault. The issue is patched in commit 9a133d73ae4b4664d22bd1aa6d654fec13c52ee1, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-09-16T15:45Z
2020-09-25T19:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2020-09-18 16:23:20-07:00
Prevent segfault in `GetSessionHandle{,V2}`. In eager mode, session state is null. PiperOrigin-RevId: 332548597 Change-Id: If094812c2e094044220b9ba28f7d7601be042f38
9a133d73ae4b4664d22bd1aa6d654fec13c52ee1
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::GetSessionHandleOp::Compute
tensorflow::GetSessionHandleOp::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext* ctx) override { const Tensor& val = ctx->input(0); int64 id = ctx->session_state()->GetNewId(); TensorStore::TensorAndKey tk{val, id, requested_device()}; OP_REQUIRES_OK(ctx, ctx->tensor_store()->AddTensor(name(), tk)); Tensor* handle = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({}), &handle)); if (ctx->expected_output_dtype(0) == DT_RESOURCE) { ResourceHandle resource_handle = MakeResourceHandle<Tensor>( ctx, SessionState::kTensorHandleResourceTypeName, tk.GetHandle(name())); resource_handle.set_maybe_type_name( SessionState::kTensorHandleResourceTypeName); handle->scalar<ResourceHandle>()() = resource_handle; } else { // Legacy behavior in V1. handle->flat<tstring>().setConstant(tk.GetHandle(name())); } }
178
True
1
CVE-2020-15203
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
HIGH
7.5
HIGH
3.9
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/33be22c65d86256e6826666662e40dbdfe70ee83', 'name': 'https://github.com/tensorflow/tensorflow/commit/33be22c65d86256e6826666662e40dbdfe70ee83', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xmq7-7fxm-rr79', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xmq7-7fxm-rr79', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-134'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, by controlling the `fill` argument of tf.strings.as_string, a malicious attacker is able to trigger a format string vulnerability due to the way the internal format use in a `printf` call is constructed. This may result in segmentation fault. The issue is patched in commit 33be22c65d86256e6826666662e40dbdfe70ee83, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Use of Externally-Controlled Format String
The software uses a function that accepts a format string as an argument, but the format string originates from an external source.
When an attacker can modify an externally-controlled format string, this can lead to buffer overflows, denial of service, or data representation problems. It should be noted that in some circumstances, such as internationalization, the set of format strings is externally controlled by design. If the source of these format strings is trusted (e.g. only contained in library files that are only modifiable by the system administrator), then the external control might not itself pose a vulnerability.
https://cwe.mitre.org/data/definitions/134.html
0
Mihai Maruseac
2020-09-18 16:54:17-07:00
Prevent format string vulnerability in `tf.strings.as_string`. The `printf` format specifier only allows `#`, `0`, `-`, `+` and space as flag characters. Others are interpreted as width/precision/length modifier or conversion specifiers. If a character does not fit into any of these sets `printf` just displays it. Also add a test suite for `tf.strings.as_string`. Also fix the issue where the flag character was used only if width was specified. PiperOrigin-RevId: 332553548 Change-Id: Ie57cf2a7c14d1a36097642794c14329db669bbba
33be22c65d86256e6826666662e40dbdfe70ee83
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::AsStringOp::AsStringOp
tensorflow::AsStringOp::AsStringOp( OpKernelConstruction * ctx)
['ctx']
explicit AsStringOp(OpKernelConstruction* ctx) : OpKernel(ctx) { int32 precision; bool scientific; bool shortest; int32 width; string fill_string; DataType dtype; OP_REQUIRES_OK(ctx, ctx->GetAttr("T", &dtype)); OP_REQUIRES_OK(ctx, ctx->GetAttr("precision", &precision)); OP_REQUIRES_OK(ctx, ctx->GetAttr("scientific", &scientific)); OP_REQUIRES_OK(ctx, ctx->GetAttr("shortest", &shortest)); OP_REQUIRES_OK(ctx, ctx->GetAttr("width", &width)); OP_REQUIRES_OK(ctx, ctx->GetAttr("fill", &fill_string)); switch (dtype) { case DT_FLOAT: case DT_DOUBLE: case DT_COMPLEX64: case DT_COMPLEX128: break; default: OP_REQUIRES(ctx, !(scientific || shortest), errors::InvalidArgument("scientific and shortest format " "not supported for datatype ", DataTypeString(dtype))); OP_REQUIRES(ctx, precision < 0, errors::InvalidArgument("precision not supported " "for datatype ", DataTypeString(dtype))); } OP_REQUIRES( ctx, fill_string.size() <= 1, errors::InvalidArgument("Fill string must be one or fewer characters")); OP_REQUIRES(ctx, !(scientific && shortest), errors::InvalidArgument( "Cannot select both scientific and shortest notation")); format_ = "%"; if (width > -1) { strings::Appendf(&format_, "%s%d", fill_string.c_str(), width); } if (precision > -1) { strings::Appendf(&format_, ".%d", precision); } switch (dtype) { case DT_INT8: case DT_INT16: case DT_INT32: strings::Appendf(&format_, "d"); break; case DT_INT64: strings::Appendf(&format_, "lld"); break; case DT_FLOAT: case DT_DOUBLE: case DT_COMPLEX64: case DT_COMPLEX128: if (shortest) { strings::Appendf(&format_, "g"); } else if (scientific) { strings::Appendf(&format_, "e"); } else { strings::Appendf(&format_, "f"); } break; case DT_BOOL: break; default: bool type_not_supported = true; OP_REQUIRES(ctx, !type_not_supported, errors::InvalidArgument("Type not supported: ", DataTypeString(dtype))); } if (dtype == DT_COMPLEX64 || dtype == DT_COMPLEX128) { format_ = strings::Printf("(%s,%s)", format_.c_str(), format_.c_str()); } }
448
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:21:24-07:00
Prevent `int64` to `int` truncation in `Shard` API usage. The function argument in `Shard` must be a function of two `int64` arguments. However, we are passing in a function with two `int` arguments. Thus, for large workloads, these arguments get truncated from positive `int64` values to negative `int` ones, resulting in a buffer out of bounds write. PiperOrigin-RevId: 332557334 Change-Id: I236c9a2e7f53580e520571da8ba941a3aa9fa0b5
27b417360cbd671ef55915e4bb6bb06af8b8a832
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::RandomGammaOp::Compute
tensorflow::RandomGammaOp::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext* ctx) override { const Tensor& shape_t = ctx->input(0); const Tensor& alpha_t = ctx->input(1); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(shape_t.shape()) && (shape_t.dtype() == DataType::DT_INT32 || shape_t.dtype() == DataType::DT_INT64), errors::InvalidArgument( "shape must be a vector of {int32,int64}, got shape: ", shape_t.DebugString())); TensorShape samples_shape; if (shape_t.dtype() == DataType::DT_INT32) { auto vec = shape_t.flat<int32>(); OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(vec.data(), vec.size(), &samples_shape)); } else if (shape_t.dtype() == DataType::DT_INT64) { auto vec = shape_t.flat<int64>(); OP_REQUIRES_OK(ctx, TensorShapeUtils::MakeShape(vec.data(), vec.size(), &samples_shape)); } const int64 samples_per_alpha = samples_shape.num_elements(); samples_shape.AppendShape(alpha_t.shape()); // Allocate output samples. Tensor* samples_t = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, samples_shape, &samples_t)); if (samples_shape.num_elements() == 0) return; using random::PhiloxRandom; typedef random::NormalDistribution<PhiloxRandom, double> Normal; typedef random::UniformDistribution<PhiloxRandom, double> Uniform; #define UNIFORM(X) \ if (uniform_remaining == 0) { \ uniform_remaining = Uniform::kResultElementCount; \ uniform_result = uniform(&gen); \ } \ uniform_remaining--; \ double X = uniform_result[uniform_remaining] // Each attempt is 95+% successful, and requires 1-2 normal + 1 uniform static constexpr int kReservedSamplesPerOutput = 256; const auto alpha_flat = alpha_t.flat<T>().data(); const int64 num_alphas = alpha_t.NumElements(); OP_REQUIRES(ctx, num_alphas > 0, errors::InvalidArgument( "Input alpha should have non-zero element count, got: ", num_alphas)); auto samples_flat = samples_t->flat<T>().data(); PhiloxRandom rng = generator_.ReserveRandomOutputs( samples_per_alpha * num_alphas, kReservedSamplesPerOutput); // We partition work first across alphas then across samples-per-alpha to // 
avoid a couple flops which can be done on a per-alpha basis. auto DoWork = [samples_per_alpha, num_alphas, &rng, samples_flat, alpha_flat](int start_output, int limit_output) { using Eigen::numext::exp; using Eigen::numext::log; using Eigen::numext::log1p; using Eigen::numext::pow; // Capturing "rng" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "rng" by reference and explicitly do a copy assignment. Normal normal; Uniform uniform; typename Normal::ResultType norm_result; typename Uniform::ResultType uniform_result; for (int64 output_idx = start_output; output_idx < limit_output; /* output_idx incremented within inner loop below */) { int64 alpha_idx = output_idx / samples_per_alpha; // Instead of +alpha_idx for each sample, we offset the pointer once. T* const samples_alpha_offset = samples_flat + alpha_idx; // Several calculations can be done on a per-alpha basis. const double alpha = static_cast<double>(alpha_flat[alpha_idx]); DISABLE_FLOAT_EQUALITY_WARNING if (alpha == static_cast<double>(1.0)) { ENABLE_FLOAT_EQUALITY_WARNING // Sample from an exponential distribution. for (int64 sample_idx = output_idx % samples_per_alpha; sample_idx < samples_per_alpha && output_idx < limit_output; sample_idx++, output_idx++) { // As we want data stable regardless of sharding // (including eventually on GPU), we skip on a per-sample basis. PhiloxRandom gen = rng; gen.Skip(kReservedSamplesPerOutput * output_idx); int16 uniform_remaining = 0; UNIFORM(u); const double res = -log1p(-u); samples_alpha_offset[sample_idx * num_alphas] = static_cast<T>(res); } // for (sample_idx) } else { // if alpha != 1.0 // Transformation-rejection from pairs of uniform and normal random // variables. http://dl.acm.org/citation.cfm?id=358414 // // The algorithm has an acceptance rate of ~95% for small alpha (~1), // and higher accept rates for higher alpha, so runtime is // O(NumAlphas * NumSamples * k) with k ~ 1 / 0.95. 
// // For alpha<1, we add one to d=alpha-1/3, and multiply the final // result by uniform()^(1/alpha) const bool alpha_less_than_one = alpha < 1; const double d = alpha + (alpha_less_than_one ? 2.0 / 3 : -1.0 / 3); const double c = 1.0 / 3 / sqrt(d); // Compute the rest of the samples for the current alpha value. for (int64 sample_idx = output_idx % samples_per_alpha; sample_idx < samples_per_alpha && output_idx < limit_output; sample_idx++, output_idx++) { // Since each sample may use a variable number of normal/uniform // samples, and we want data stable regardless of sharding // (including eventually on GPU), we skip on a per-sample basis. PhiloxRandom gen = rng; gen.Skip(kReservedSamplesPerOutput * output_idx); int16 norm_remaining = 0; int16 uniform_remaining = 0; // Keep trying until we don't reject a sample. In practice, we will // only reject ~5% at worst, for low alpha near 1. while (true) { if (norm_remaining == 0) { norm_remaining = Normal::kResultElementCount; norm_result = normal(&gen); } norm_remaining--; const double x = norm_result[norm_remaining]; double v = 1 + c * x; if (v <= 0) { continue; } v = v * v * v; UNIFORM(u); // The first option in the if is a "squeeze" short-circuit to // dodge the two logs. Magic constant sourced from the paper // linked above. Upward of .91 of the area covered by the log // inequality is covered by the squeeze as well (larger coverage // for smaller values of alpha). if ((u < 1 - 0.0331 * (x * x) * (x * x)) || (log(u) < 0.5 * x * x + d * (1 - v + log(v)))) { double res = d * v; if (alpha_less_than_one) { UNIFORM(b); res *= pow(b, 1 / alpha); } samples_alpha_offset[sample_idx * num_alphas] = static_cast<T>(res); break; } } // while: true } // for: sample_idx } // if (alpha == 1.0) } // for: output_idx }; // DoWork #undef UNIFORM // Two calls to log only occur for ~10% of samples reaching the log line. // 2 x 100 (64-bit cycles per log) x 0.10 = ~20. // Other ops: sqrt, +, *, /, %... 
something like 15 of these, at 3-6 cycles // each = ~60. // All of this /0.95 due to the rejection possibility = ~85. static const int kElementCost = 85 + 2 * Normal::kElementCost + Uniform::kElementCost + 3 * PhiloxRandom::kElementCost; auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_alphas * samples_per_alpha, kElementCost, DoWork); }
893
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::BoostedTreesExampleDebugOutputsOp::Compute
tensorflow::BoostedTreesExampleDebugOutputsOp::Compute( OpKernelContext * const context)
['context']
void Compute(OpKernelContext* const context) override { core::RefCountPtr<BoostedTreesEnsembleResource> resource; // Get the resource. OP_REQUIRES_OK(context, LookupResource(context, HandleFromInput(context, 0), &resource)); // Get the inputs. OpInputList bucketized_features_list; OP_REQUIRES_OK(context, context->input_list("bucketized_features", &bucketized_features_list)); std::vector<tensorflow::TTypes<int32>::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); const int batch_size = bucketized_features[0].dimension(0); // We need to get the feature ids used for splitting and the logits after // each split. We will use these to calculate the changes in the prediction // (contributions) for an arbitrary activation function (done in Python) and // attribute them to the associated feature ids. We will store these in // a proto below. Tensor* output_debug_info_t = nullptr; OP_REQUIRES_OK( context, context->allocate_output("examples_debug_outputs_serialized", {batch_size}, &output_debug_info_t)); // Will contain serialized protos, per example. auto output_debug_info = output_debug_info_t->flat<tstring>(); const int32 last_tree = resource->num_trees() - 1; // For each given example, traverse through all trees keeping track of the // features used to split and the associated logits at each point along the // path. Note: feature_ids has one less value than logits_path because the // first value of each logit path will be the bias. auto do_work = [&resource, &bucketized_features, &output_debug_info, last_tree](int32 start, int32 end) { for (int32 i = start; i < end; ++i) { // Proto to store debug outputs, per example. boosted_trees::DebugOutput example_debug_info; // Initial bias prediction. E.g., prediction based off training mean. 
const auto& tree_logits = resource->node_value(0, 0); DCHECK_EQ(tree_logits.size(), 1); float tree_logit = resource->GetTreeWeight(0) * tree_logits[0]; example_debug_info.add_logits_path(tree_logit); int32 node_id = 0; int32 tree_id = 0; int32 feature_id; float past_trees_logit = 0; // Sum of leaf logits from prior trees. // Go through each tree and populate proto. while (tree_id <= last_tree) { if (resource->is_leaf(tree_id, node_id)) { // Move onto other trees. // Accumulate tree_logits only if the leaf is non-root, but do so // for bias tree. if (tree_id == 0 || node_id > 0) { past_trees_logit += tree_logit; } ++tree_id; node_id = 0; } else { // Add to proto. // Feature id used to split. feature_id = resource->feature_id(tree_id, node_id); example_debug_info.add_feature_ids(feature_id); // Get logit after split. node_id = resource->next_node(tree_id, node_id, i, bucketized_features); const auto& tree_logits = resource->node_value(tree_id, node_id); DCHECK_EQ(tree_logits.size(), 1); tree_logit = resource->GetTreeWeight(tree_id) * tree_logits[0]; // Output logit incorporates sum of leaf logits from prior trees. example_debug_info.add_logits_path(tree_logit + past_trees_logit); } } // Set output as serialized proto containing debug info. string serialized = example_debug_info.SerializeAsString(); output_debug_info(i) = serialized; } }; // 10 is the magic number. The actual number might depend on (the number of // layers in the trees) and (cpu cycles spent on each layer), but this // value would work for many cases. May be tuned later. const int64 cost = (last_tree + 1) * 10; thread::ThreadPool* const worker_threads = context->device()->tensorflow_cpu_worker_threads()->workers; Shard(worker_threads->NumThreads(), worker_threads, batch_size, /*cost_per_unit=*/cost, do_work); }
455
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::BoostedTreesPredictOp::Compute
tensorflow::BoostedTreesPredictOp::Compute( OpKernelContext * const context)
['context']
void Compute(OpKernelContext* const context) override { core::RefCountPtr<BoostedTreesEnsembleResource> resource; // Get the resource. OP_REQUIRES_OK(context, LookupResource(context, HandleFromInput(context, 0), &resource)); // Get the inputs. OpInputList bucketized_features_list; OP_REQUIRES_OK(context, context->input_list("bucketized_features", &bucketized_features_list)); std::vector<tensorflow::TTypes<int32>::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); const int batch_size = bucketized_features[0].dimension(0); // Allocate outputs. Tensor* output_logits_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output( "logits", {batch_size, logits_dimension_}, &output_logits_t)); auto output_logits = output_logits_t->matrix<float>(); // Return zero logits if it's an empty ensemble. if (resource->num_trees() <= 0) { output_logits.setZero(); return; } const int32 last_tree = resource->num_trees() - 1; auto do_work = [&resource, &bucketized_features, &output_logits, last_tree, this](int32 start, int32 end) { for (int32 i = start; i < end; ++i) { std::vector<float> tree_logits(logits_dimension_, 0.0); int32 tree_id = 0; int32 node_id = 0; while (true) { if (resource->is_leaf(tree_id, node_id)) { const float tree_weight = resource->GetTreeWeight(tree_id); const auto& leaf_logits = resource->node_value(tree_id, node_id); DCHECK_EQ(leaf_logits.size(), logits_dimension_); for (int32 j = 0; j < logits_dimension_; ++j) { tree_logits[j] += tree_weight * leaf_logits[j]; } // Stop if it was the last tree. if (tree_id == last_tree) { break; } // Move onto other trees. ++tree_id; node_id = 0; } else { node_id = resource->next_node(tree_id, node_id, i, bucketized_features); } } for (int32 j = 0; j < logits_dimension_; ++j) { output_logits(i, j) = tree_logits[j]; } } }; // 10 is the magic number. 
The actual number might depend on (the number of // layers in the trees) and (cpu cycles spent on each layer), but this // value would work for many cases. May be tuned later. const int64 cost = (last_tree + 1) * 10; thread::ThreadPool* const worker_threads = context->device()->tensorflow_cpu_worker_threads()->workers; Shard(worker_threads->NumThreads(), worker_threads, batch_size, /*cost_per_unit=*/cost, do_work); }
436
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::BoostedTreesTrainingPredictOp::Compute
tensorflow::BoostedTreesTrainingPredictOp::Compute( OpKernelContext * const context)
['context']
void Compute(OpKernelContext* const context) override { core::RefCountPtr<BoostedTreesEnsembleResource> resource; // Get the resource. OP_REQUIRES_OK(context, LookupResource(context, HandleFromInput(context, 0), &resource)); // Get the inputs. OpInputList bucketized_features_list; OP_REQUIRES_OK(context, context->input_list("bucketized_features", &bucketized_features_list)); std::vector<tensorflow::TTypes<int32>::ConstMatrix> bucketized_features; bucketized_features.reserve(bucketized_features_list.size()); ConvertVectorsToMatrices(bucketized_features_list, bucketized_features); const int batch_size = bucketized_features[0].dimension(0); const Tensor* cached_tree_ids_t; OP_REQUIRES_OK(context, context->input("cached_tree_ids", &cached_tree_ids_t)); const auto cached_tree_ids = cached_tree_ids_t->vec<int32>(); const Tensor* cached_node_ids_t; OP_REQUIRES_OK(context, context->input("cached_node_ids", &cached_node_ids_t)); const auto cached_node_ids = cached_node_ids_t->vec<int32>(); // Allocate outputs. Tensor* output_partial_logits_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("partial_logits", {batch_size, logits_dimension_}, &output_partial_logits_t)); auto output_partial_logits = output_partial_logits_t->matrix<float>(); Tensor* output_tree_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("tree_ids", {batch_size}, &output_tree_ids_t)); auto output_tree_ids = output_tree_ids_t->vec<int32>(); Tensor* output_node_ids_t = nullptr; OP_REQUIRES_OK(context, context->allocate_output("node_ids", {batch_size}, &output_node_ids_t)); auto output_node_ids = output_node_ids_t->vec<int32>(); // Indicate that the latest tree was used. const int32 latest_tree = resource->num_trees() - 1; if (latest_tree < 0) { // Ensemble was empty. Output the very first node. output_node_ids.setZero(); output_tree_ids = cached_tree_ids; // All the predictions are zeros. 
output_partial_logits.setZero(); } else { output_tree_ids.setConstant(latest_tree); auto do_work = [&resource, &bucketized_features, &cached_tree_ids, &cached_node_ids, &output_partial_logits, &output_node_ids, latest_tree, this](int32 start, int32 end) { for (int32 i = start; i < end; ++i) { int32 tree_id = cached_tree_ids(i); int32 node_id = cached_node_ids(i); std::vector<float> partial_tree_logits(logits_dimension_, 0.0); if (node_id >= 0) { // If the tree was pruned, returns the node id into which the // current_node_id was pruned, as well the correction of the cached // logit prediction. resource->GetPostPruneCorrection(tree_id, node_id, &node_id, &partial_tree_logits); // Logic in the loop adds the cached node value again if it is a // leaf. If it is not a leaf anymore we need to subtract the old // node's value. The following logic handles both of these cases. const auto& node_logits = resource->node_value(tree_id, node_id); if (!node_logits.empty()) { DCHECK_EQ(node_logits.size(), logits_dimension_); for (int32 j = 0; j < logits_dimension_; ++j) { partial_tree_logits[j] -= node_logits[j]; } } } else { // No cache exists, start from the very first node. node_id = 0; } std::vector<float> partial_all_logits(logits_dimension_, 0.0); while (true) { if (resource->is_leaf(tree_id, node_id)) { const auto& leaf_logits = resource->node_value(tree_id, node_id); DCHECK_EQ(leaf_logits.size(), logits_dimension_); // Tree is done const float tree_weight = resource->GetTreeWeight(tree_id); for (int32 j = 0; j < logits_dimension_; ++j) { partial_all_logits[j] += tree_weight * (partial_tree_logits[j] + leaf_logits[j]); partial_tree_logits[j] = 0; } // Stop if it was the latest tree. if (tree_id == latest_tree) { break; } // Move onto other trees. 
++tree_id; node_id = 0; } else { node_id = resource->next_node(tree_id, node_id, i, bucketized_features); } } output_node_ids(i) = node_id; for (int32 j = 0; j < logits_dimension_; ++j) { output_partial_logits(i, j) = partial_all_logits[j]; } } }; // 30 is the magic number. The actual value might be a function of (the // number of layers) * (cpu cycles spent on each layer), but this value // would work for many cases. May be tuned later. const int64 cost = 30; thread::ThreadPool* const worker_threads = context->device()->tensorflow_cpu_worker_threads()->workers; Shard(worker_threads->NumThreads(), worker_threads, batch_size, /*cost_per_unit=*/cost, do_work); } }
727
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::CropAndResize<CPUDevice,T>::operator ( )
tensorflow::functor::CropAndResize<CPUDevice,T>::operator ( )( const OpKernelContext * context , typename TTypes<T,4> :: ConstTensor image , typename TTypes<float,2> :: ConstTensor boxes , typename TTypes<int32,1> :: ConstTensor box_index , const string & method_name , float extrapolation_value , typename TTypes<float,4> :: Tensor crops)
['context', 'image', 'boxes', 'box_index', 'method_name', 'extrapolation_value', 'crops']
bool operator()(const OpKernelContext* context, typename TTypes<T, 4>::ConstTensor image, typename TTypes<float, 2>::ConstTensor boxes, typename TTypes<int32, 1>::ConstTensor box_index, const string& method_name, float extrapolation_value, typename TTypes<float, 4>::Tensor crops) { const int batch_size = image.dimension(0); const int image_height = image.dimension(1); const int image_width = image.dimension(2); const int num_boxes = crops.dimension(0); const int crop_height = crops.dimension(1); const int crop_width = crops.dimension(2); const int depth = crops.dimension(3); // Sharding across boxes. auto CropAndResizePerBox = [&](int start_box, int limit_box) { for (int b = start_box; b < limit_box; ++b) { const float y1 = boxes(b, 0); const float x1 = boxes(b, 1); const float y2 = boxes(b, 2); const float x2 = boxes(b, 3); const int32 b_in = box_index(b); if (!FastBoundsCheck(b_in, batch_size)) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; for (int y = 0; y < crop_height; ++y) { const float in_y = (crop_height > 1) ? y1 * (image_height - 1) + y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); if (in_y < 0 || in_y > image_height - 1) { for (int x = 0; x < crop_width; ++x) { for (int d = 0; d < depth; ++d) { crops(b, y, x, d) = extrapolation_value; } } continue; } if (method_name == "bilinear") { const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; for (int x = 0; x < crop_width; ++x) { const float in_x = (crop_width > 1) ? 
x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { for (int d = 0; d < depth; ++d) { crops(b, y, x, d) = extrapolation_value; } continue; } const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; for (int d = 0; d < depth; ++d) { const float top_left(static_cast<float>( image(b_in, top_y_index, left_x_index, d))); const float top_right(static_cast<float>( image(b_in, top_y_index, right_x_index, d))); const float bottom_left(static_cast<float>( image(b_in, bottom_y_index, left_x_index, d))); const float bottom_right(static_cast<float>( image(b_in, bottom_y_index, right_x_index, d))); const float top = top_left + (top_right - top_left) * x_lerp; const float bottom = bottom_left + (bottom_right - bottom_left) * x_lerp; crops(b, y, x, d) = top + (bottom - top) * y_lerp; } } } else { // method == "nearest" for (int x = 0; x < crop_width; ++x) { const float in_x = (crop_width > 1) ? x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { for (int d = 0; d < depth; ++d) { crops(b, y, x, d) = extrapolation_value; } continue; } const int closest_x_index = roundf(in_x); const int closest_y_index = roundf(in_y); for (int d = 0; d < depth; ++d) { crops(b, y, x, d) = static_cast<float>( image(b_in, closest_y_index, closest_x_index, d)); } } } } } }; // A rough estimation of the cost for each cropped box. 
double cost_per_pixel = depth * (Eigen::TensorOpCost::AddCost<float>() * 6 + Eigen::TensorOpCost::MulCost<float>() * 3 + Eigen::TensorOpCost::CastCost<T, float>() * 4) + (Eigen::TensorOpCost::AddCost<float>() * 2 + Eigen::TensorOpCost::AddCost<float>() * 3); if (method_name == "nearest") { cost_per_pixel = depth * Eigen::TensorOpCost::CastCost<T, float>() + Eigen::TensorOpCost::AddCost<float>() * 4 + Eigen::TensorOpCost::MulCost<float>() * 4; } const double cost_per_box = crop_height * crop_width * cost_per_pixel; const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_boxes, cost_per_box, CropAndResizePerBox); return true; }
1067
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::CropAndResizeBackpropImage<CPUDevice,T>::operator ( )
tensorflow::functor::CropAndResizeBackpropImage<CPUDevice,T>::operator ( )( const OpKernelContext * context , typename TTypes<float,4> :: ConstTensor grads , typename TTypes<float,2> :: ConstTensor boxes , typename TTypes<int32,1> :: ConstTensor box_index , typename TTypes<T,4> :: Tensor grads_image , const string & method_name)
['context', 'grads', 'boxes', 'box_index', 'grads_image', 'method_name']
bool operator()(const OpKernelContext* context, typename TTypes<float, 4>::ConstTensor grads, typename TTypes<float, 2>::ConstTensor boxes, typename TTypes<int32, 1>::ConstTensor box_index, typename TTypes<T, 4>::Tensor grads_image, const string& method_name) { const int batch_size = grads_image.dimension(0); const int image_height = grads_image.dimension(1); const int image_width = grads_image.dimension(2); const int num_boxes = grads.dimension(0); const int crop_height = grads.dimension(1); const int crop_width = grads.dimension(2); const int depth = grads.dimension(3); grads_image.setZero(); auto CropAndResizeBackImgPerBox = [&](int start_box, int limit_box) { for (int b = start_box; b < limit_box; ++b) { const float y1 = boxes(b, 0); const float x1 = boxes(b, 1); const float y2 = boxes(b, 2); const float x2 = boxes(b, 3); const int32 b_in = box_index(b); if (!FastBoundsCheck(b_in, batch_size)) { continue; } const float height_scale = (crop_height > 1) ? (y2 - y1) * (image_height - 1) / (crop_height - 1) : 0; const float width_scale = (crop_width > 1) ? (x2 - x1) * (image_width - 1) / (crop_width - 1) : 0; for (int y = 0; y < crop_height; ++y) { const float in_y = (crop_height > 1) ? y1 * (image_height - 1) + y * height_scale : 0.5 * (y1 + y2) * (image_height - 1); if (in_y < 0 || in_y > image_height - 1) { continue; } const int top_y_index = floorf(in_y); const int bottom_y_index = ceilf(in_y); const float y_lerp = in_y - top_y_index; for (int x = 0; x < crop_width; ++x) { const float in_x = (crop_width > 1) ? 
x1 * (image_width - 1) + x * width_scale : 0.5 * (x1 + x2) * (image_width - 1); if (in_x < 0 || in_x > image_width - 1) { continue; } if (method_name == "bilinear") { const int left_x_index = floorf(in_x); const int right_x_index = ceilf(in_x); const float x_lerp = in_x - left_x_index; for (int d = 0; d < depth; ++d) { const float dtop = (1 - y_lerp) * grads(b, y, x, d); grads_image(b_in, top_y_index, left_x_index, d) += static_cast<T>((1 - x_lerp) * dtop); grads_image(b_in, top_y_index, right_x_index, d) += static_cast<T>(x_lerp * dtop); const float dbottom = y_lerp * grads(b, y, x, d); grads_image(b_in, bottom_y_index, left_x_index, d) += static_cast<T>((1 - x_lerp) * dbottom); grads_image(b_in, bottom_y_index, right_x_index, d) += static_cast<T>(x_lerp * dbottom); } } else { // method_name == "nearest" for (int d = 0; d < depth; ++d) { int closest_x_index = roundf(in_x); int closest_y_index = roundf(in_y); grads_image(b_in, closest_y_index, closest_x_index, d) += static_cast<T>(grads(b, y, x, d)); } } } } } }; // A rough estimation of the cost for each cropped box. // Including calculation cost in the depth loop and pixel loop. const double cost_per_pixel = (method_name == "bilinear" ? depth * (Eigen::TensorOpCost::AddCost<float>() * 7 + Eigen::TensorOpCost::MulCost<float>() * 6 + Eigen::TensorOpCost::CastCost<T, float>() * 4) + Eigen::TensorOpCost::AddCost<float>() * 4 : depth * (Eigen::TensorOpCost::AddCost<float>() + Eigen::TensorOpCost::CastCost<T, float>()) + Eigen::TensorOpCost::AddCost<float>() * 3); const double cost_per_box = crop_height * crop_width * cost_per_pixel; const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_boxes, cost_per_box, CropAndResizeBackImgPerBox); return true; }
870
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::LaunchBatchBandedTriangularSolve::Launch
tensorflow::LaunchBatchBandedTriangularSolve::Launch( OpKernelContext * context , const Tensor & in_x , const Tensor & in_y , bool adjoint , bool lower , const MatMulBCast & bcast , Tensor * out)
['context', 'in_x', 'in_y', 'adjoint', 'lower', 'bcast', 'out']
static void Launch(OpKernelContext* context, const Tensor& in_x, const Tensor& in_y, bool adjoint, bool lower, const MatMulBCast& bcast, Tensor* out) { // Number of banded matrix triangular solves i.e. size of the batch. const int64 batch_size = bcast.output_batch_size(); const int64 cost_per_unit = in_x.dim_size(1) * in_x.dim_size(2) * in_y.dim_size(2); auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); using Matrix = Eigen::Matrix<Scalar, Eigen::Dynamic, Eigen::Dynamic, Eigen::RowMajor>; using ConstMatrixMap = Eigen::Map<const Matrix>; using RealScalar = typename Eigen::NumTraits<Scalar>::Real; // Check diagonal before doing any solves. This is the first row in the // lower case and else is the last row. auto matrix = ConstMatrixMap(in_x.flat<Scalar>().data(), in_x.dim_size(1), in_x.dim_size(2)); RealScalar min_abs_pivot; if (lower) { min_abs_pivot = matrix.row(0).cwiseAbs().minCoeff(); } else { min_abs_pivot = matrix.row(in_x.dim_size(1) - 1).cwiseAbs().minCoeff(); } OP_REQUIRES(context, min_abs_pivot > RealScalar(0), errors::InvalidArgument("Input matrix is not invertible.")); Shard(worker_threads.num_threads, worker_threads.workers, batch_size, cost_per_unit, [&in_x, &in_y, adjoint, lower, &bcast, out](int start, int limit) { SequentialBandedTriangularSolveKernel<Scalar>::Run( in_x, in_y, lower, adjoint, bcast, out, start, limit); }); }
300
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::NthElementFunctor<CPUDevice,T>::operator ( )
tensorflow::functor::NthElementFunctor<CPUDevice,T>::operator ( )( OpKernelContext * context , const Tensor & input_tensor , Tensor & output_tensor , int n , bool reverse)
['context', 'input_tensor', 'output_tensor', 'n', 'reverse']
void operator()(OpKernelContext* context, const Tensor& input_tensor, Tensor& output_tensor, int n, bool reverse) { const T* input = input_tensor.flat<T>().data(); T* output = output_tensor.flat<T>().data(); // Assume input_shape is [d1,d2,...dk], and output_shape is [d1,d2...dk-1], // then num_rows = d1*d2...dk-1, last_dim = dk. const int num_rows = output_tensor.NumElements(); const int last_dim = input_tensor.dim_size(input_tensor.dims() - 1); // Allocate each row to different shard. auto SubNthElement = [&, input, output, last_dim, n](int start, int limit) { // std::nth_element would rearrange the array, so we need a new buffer. std::vector<T> buf(last_dim); for (int b = start; b < limit; ++b) { // Copy from one row of elements to buffer const T* input_start = input + b * last_dim; const T* input_end = input + (b + 1) * last_dim; std::copy(input_start, input_end, buf.begin()); std::nth_element(buf.begin(), buf.begin() + n, buf.end()); // The element placed in the nth position is exactly the element that // would occur in this position if the range was fully sorted. output[b] = buf[n]; } }; auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); // The average time complexity of partition-based nth_element (BFPRT) is // O(n), although the worst time complexity could be O(n^2). Here, 20 is a // empirical factor of cost_per_unit. Shard(worker_threads.num_threads, worker_threads.workers, num_rows, 20 * last_dim, SubNthElement); }
249
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::TruncatedNormalFunctor<CPUDevice,T>::operator ( )
tensorflow::functor::TruncatedNormalFunctor<CPUDevice,T>::operator ( )( OpKernelContext * ctx , const CPUDevice & d , int64 num_batches , int64 samples_per_batch , int64 num_elements , typename TTypes<T> :: ConstFlat means , typename TTypes<T> :: ConstFlat stddevs , typename TTypes<T> :: ConstFlat minvals , typename TTypes<T> :: ConstFlat maxvals , const random :: PhiloxRandom & gen , typename TTypes<T> :: Flat output)
['ctx', 'd', 'num_batches', 'samples_per_batch', 'num_elements', 'means', 'stddevs', 'minvals', 'maxvals', 'gen', 'output']
void operator()(OpKernelContext* ctx, const CPUDevice& d, int64 num_batches, int64 samples_per_batch, int64 num_elements, typename TTypes<T>::ConstFlat means, typename TTypes<T>::ConstFlat stddevs, typename TTypes<T>::ConstFlat minvals, typename TTypes<T>::ConstFlat maxvals, const random::PhiloxRandom& gen, typename TTypes<T>::Flat output) { // The randn rejection sampling is used when the mean and at least this many // standard deviations are inside the bounds. // The uniform proposal samplers become less efficient as the bounds are // further from the mean, the reverse is true for the randn sampler. // This number was chosen by empirical benchmarking. If modified, the // benchmarks in parameterized_truncated_normal_op_test should also be // changed. const T kStdDevsInsideBoundsToUseRandnSampler = T(1.3); auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); auto do_work = [samples_per_batch, num_elements, &ctx, &means, &stddevs, &minvals, &maxvals, &gen, &output, kStdDevsInsideBoundsToUseRandnSampler](int start_batch, int limit_batch) { // Capturing "gen" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "gen" by reference and explicitly do a copy assignment here. random::PhiloxRandom gen_copy = gen; // Skip takes units of 128 bytes. +3 is so rounding doesn't lead to // us using the same state in different batches. // The sample from each iteration uses 2 random numbers. gen_copy.Skip(start_batch * 2 * kMaxIterations * (samples_per_batch + 3) / 4); using Uniform = random::UniformDistribution<random::PhiloxRandom, T>; Uniform dist; using Normal = random::NormalDistribution<random::PhiloxRandom, T>; Normal normal_dist; // Vectorized intermediate calculations for uniform rejection sampling. // We always generate at most 4 samples. 
Eigen::array<T, 4> z; Eigen::array<T, 4> g; for (int64 b = start_batch; b < limit_batch; ++b) { // We are passed a flat array for each of the parameter tensors. // The input is either a scalar broadcasted to all batches or a vector // with length num_batches, but the scalar becomes an array of length 1. T mean = means((means.dimension(0) == 1) ? 0 : b); T stddev = stddevs((stddevs.dimension(0) == 1) ? 0 : b); T minval = minvals((minvals.dimension(0) == 1) ? 0 : b); T maxval = maxvals((maxvals.dimension(0) == 1) ? 0 : b); // The last batch can be short, if we adjusted num_batches and // samples_per_batch. const int64 limit_sample = std::min((b + 1) * samples_per_batch, num_elements); int64 sample = b * samples_per_batch; // On GPU, this check will just fill samples with NAN if it fails. OP_REQUIRES(ctx, stddev > T(0) && minval < maxval && (Eigen::numext::isfinite(minval) || Eigen::numext::isfinite(maxval)), errors::InvalidArgument("Invalid parameters")); int num_iterations = 0; // If possible, make one-sided bound be the lower bound, or make both // bounds positive. Otherwise, the bounds are on either side of the // mean. if ((Eigen::numext::isinf(minval) && minval < T(0)) || maxval < mean) { // Reverse all calculations. normMin and normMax will be flipped. std::swap(minval, maxval); stddev = -stddev; } // Calculate normalized samples, then convert them. const T normMin = (minval - mean) / stddev; const T normMax = (maxval - mean) / stddev; // Determine the method to use. 
const T sqrtFactor = Eigen::numext::sqrt((normMin * normMin) + T(4)); const T cutoff = T(2) * Eigen::numext::exp(T(0.5) + (normMin * (normMin - sqrtFactor)) / T(4)) / (normMin + sqrtFactor); const T diff = normMax - normMin; if (((normMin < -kStdDevsInsideBoundsToUseRandnSampler) && (normMax >= T(0.))) || ((normMax > kStdDevsInsideBoundsToUseRandnSampler) && (normMin <= T(0.)))) { // If the bounds are a least 3 standard deviations from the mean // on at least one side then we rejection sample by sampling // from the normal distribution and rejecting samples outside // the bounds. // Under this condition the acceptance rate per iteration should // always be ~ 50%. This sampler is more efficient (and more // numerically stable when one or both bounds is far from the mean). while (sample < limit_sample) { const auto randn_sample = normal_dist(&gen_copy); const int size = randn_sample.size(); for (int i = 0; i < size; i++) { if ((randn_sample[i] >= normMin) && (randn_sample[i] <= normMax)) { output(sample) = randn_sample[i] * stddev + mean; sample++; if (sample >= limit_sample) { break; } num_iterations = 0; } else { num_iterations++; if (num_iterations > kMaxIterations) { // This should never occur because this sampler should // (by the selection criteria above) be used if at least 3 // standard deviations of one side of the distribution // is within the limits (so acceptance probability per // iterations >~ 1/2 per iteration). LOG(ERROR) << "TruncatedNormal randn rejection sampler " << "exceeded maximum iterations for " << "normMin=" << normMin << " normMax=" << normMax << " kMaxIterations=" << kMaxIterations; ctx->SetStatus(errors::Internal( "TruncatedNormal randn rejection sampler failed to accept" " a sample.")); return; } } } } } else if (diff < cutoff) { // Sample from a uniform distribution on [normMin, normMax]. const T plusFactor = (normMin < T(0)) ? 
T(0) : normMin * normMin; while (sample < limit_sample) { const auto rand = dist(&gen_copy); const int size = rand.size(); // NOTE(ringwalt): These loops seem to only generate packed AVX // instructions for float32. for (int i = 0; i < size; i++) { z[i] = rand[i] * diff + normMin; } for (int i = 0; i < size; i++) { g[i] = (plusFactor - z[i] * z[i]) / T(2.0); } const auto u = dist(&gen_copy); for (int i = 0; i < size; i++) { auto accept = u[i] <= Eigen::numext::exp(g[i]); if (accept || num_iterations + 1 >= kMaxIterations) { // Accept the sample z. // If we run out of iterations, just use the current uniform // sample, but emit a warning. // TODO(jjhunt) For small entropies (relative to the bounds), // this sampler is poor and may take many iterations since // the proposal distribution is the uniform distribution // U(lower_bound, upper_bound). if (!accept) { LOG(ERROR) << "TruncatedNormal uniform rejection sampler " << "exceeded max iterations. Sample may contain " << "outliers."; ctx->SetStatus(errors::Internal( "TruncatedNormal uniform rejection sampler failed to " " accept a sample.")); return; } output(sample) = z[i] * stddev + mean; sample++; if (sample >= limit_sample) { break; } num_iterations = 0; } else { num_iterations++; } } } } else { // Sample from an exponential distribution with alpha maximizing // acceptance probability, offset by normMin from the origin. // Accept only if less than normMax. const T alpha = (normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) / T(2); while (sample < limit_sample) { auto rand = dist(&gen_copy); const int size = rand.size(); int i = 0; while (i < size) { const T z = -Eigen::numext::log(rand[i]) / alpha + normMin; i++; const T x = normMin < alpha ? 
alpha - z : normMin - alpha; const T g = Eigen::numext::exp(-x * x / T(2.0)); const T u = rand[i]; i++; auto accept = (u <= g && z < normMax); if (accept || num_iterations + 1 >= kMaxIterations) { if (!accept) { LOG(ERROR) << "TruncatedNormal exponential distribution " << "rejection sampler exceeds max iterations. " << "Sample may contain outliers."; ctx->SetStatus(errors::Internal( "TruncatedNormal exponential distribution rejection" " sampler failed to accept a sample.")); return; } output(sample) = z * stddev + mean; sample++; if (sample >= limit_sample) { break; } num_iterations = 0; } else { num_iterations++; } } } } } }; // The cost of the initial calculations for the batch. const int64 batchInitCost = // normMin, normMax (Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>()) * 2 // sqrtFactor + Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() + Eigen::internal::functor_traits< Eigen::internal::scalar_sqrt_op<T>>::Cost // cutoff + Eigen::TensorOpCost::MulCost<T>() * 4 + Eigen::internal::functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost // diff + Eigen::TensorOpCost::AddCost<T>(); const int64 uniformSampleCost = random::PhiloxRandom::kElementCost + random::UniformDistribution<random::PhiloxRandom, T>::kElementCost; // The cost of a single uniform sampling round. const int64 uniformRejectionSamplingCost = uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() * 2 + Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost + Eigen::internal::functor_traits< Eigen::internal::scalar_exp_op<T>>::Cost + Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>(); // Estimate the cost for an entire batch. // Assume we use uniform sampling, and accept the 2nd sample on average. 
const int64 batchCost = batchInitCost + uniformRejectionSamplingCost * 2 * samples_per_batch; Shard(worker_threads.num_threads, worker_threads.workers, num_batches, batchCost, do_work); }
1496
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::TruncatedNormalFunctorV2<CPUDevice,T>::operator ( )
tensorflow::functor::TruncatedNormalFunctorV2<CPUDevice,T>::operator ( )( OpKernelContext * ctx , const CPUDevice & d , int64 num_batches , int64 samples_per_batch , int64 num_elements , const BCastList<4> & bcast , typename TTypes<T> :: ConstFlat means , typename TTypes<T> :: ConstFlat stddevs , typename TTypes<T> :: ConstFlat minvals , typename TTypes<T> :: ConstFlat maxvals , const random :: PhiloxRandom & gen , typename TTypes<T> :: Flat output)
['ctx', 'd', 'num_batches', 'samples_per_batch', 'num_elements', 'bcast', 'means', 'stddevs', 'minvals', 'maxvals', 'gen', 'output']
void operator()(OpKernelContext* ctx, const CPUDevice& d, int64 num_batches, int64 samples_per_batch, int64 num_elements, const BCastList<4>& bcast, typename TTypes<T>::ConstFlat means, typename TTypes<T>::ConstFlat stddevs, typename TTypes<T>::ConstFlat minvals, typename TTypes<T>::ConstFlat maxvals, const random::PhiloxRandom& gen, typename TTypes<T>::Flat output) { // The randn rejection sampling is used when the mean and at least this many // standard deviations are inside the bounds. // The uniform proposal samplers become less efficient as the bounds are // further from the mean, the reverse is true for the randn sampler. // This number was chosen by empirical benchmarking. If modified, the // benchmarks in parameterized_truncated_normal_op_test should also be // changed. const T kStdDevsInsideBoundsToUseRandnSampler = T(1.3); auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); auto do_work = [num_batches, samples_per_batch, &ctx, &bcast, &means, &stddevs, &minvals, &maxvals, &gen, &output, kStdDevsInsideBoundsToUseRandnSampler](int start_output, int limit_output) { // Capturing "gen" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "gen" by reference and explicitly do a copy assignment here. random::PhiloxRandom gen_copy = gen; using Uniform = random::UniformDistribution<random::PhiloxRandom, T>; Uniform dist; using Normal = random::NormalDistribution<random::PhiloxRandom, T>; Normal normal_dist; // Skip takes units of 128 bits. The Uniform::kResultElementCount - 1 // is so rounding doesn't lead to // us using the same state in different workloads. // The sample from each iteration uses 2 random numbers. gen_copy.Skip((start_output * 2 * kMaxIterations + Uniform::kResultElementCount - 1) / Uniform::kResultElementCount); // Vectorized intermediate calculations for uniform rejection sampling. // We always generate at most 4 samples. 
Eigen::array<T, Uniform::kResultElementCount> z; Eigen::array<T, Uniform::kResultElementCount> g; const bool should_bcast = bcast.IsBroadcastingRequired(); const auto& means_batch_indices = bcast.batch_indices(0); const auto& stddevs_batch_indices = bcast.batch_indices(1); const auto& minvals_batch_indices = bcast.batch_indices(2); const auto& maxvals_batch_indices = bcast.batch_indices(3); auto output_flat = output.data(); // We partition work across batches and then across samples // per batch member, to avoid extra work. for (int64 output_idx = start_output; output_idx < limit_output; // output_idx is incremented with the inner loops below. ) { int64 batch_idx = output_idx / samples_per_batch; // The output layout is [samples_per_batch, num_batches]. Thus // the output address is sample_idx * num_batches + batch_idx. // Below, code will index at output_batch_offset[sample_idx * // num_batches] matching this. T* const output_batch_offset = output_flat + batch_idx; // Generate batch counts from BCast, as it has the right indices to loop // over. T mean, stddev, minval, maxval; if (should_bcast) { mean = means(means_batch_indices[batch_idx]); stddev = stddevs(stddevs_batch_indices[batch_idx]); minval = minvals(minvals_batch_indices[batch_idx]); maxval = maxvals(maxvals_batch_indices[batch_idx]); } else { mean = means(batch_idx); stddev = stddevs(batch_idx); minval = minvals(batch_idx); maxval = maxvals(batch_idx); } // On GPU, this check will just fill samples with NAN if it fails. OP_REQUIRES(ctx, stddev > T(0) && minval < maxval && (Eigen::numext::isfinite(minval) || Eigen::numext::isfinite(maxval)), errors::InvalidArgument("Invalid parameters")); int num_iterations = 0; // If possible, make one-sided bound be the lower bound, or make both // bounds positive. Otherwise, the bounds are on either side of the // mean. if ((Eigen::numext::isinf(minval) && minval < T(0)) || maxval < mean) { // Reverse all calculations. normMin and normMax will be flipped. 
std::swap(minval, maxval); stddev = -stddev; } // Calculate normalized samples, then convert them. const T normMin = (minval - mean) / stddev; const T normMax = (maxval - mean) / stddev; // Determine the method to use. const T sqrtFactor = Eigen::numext::sqrt((normMin * normMin) + T(4)); const T cutoff = T(2) * Eigen::numext::exp(T(0.5) + (normMin * (normMin - sqrtFactor)) / T(4)) / (normMin + sqrtFactor); const T diff = normMax - normMin; if (((normMin < -kStdDevsInsideBoundsToUseRandnSampler) && (normMax >= T(0.))) || ((normMax > kStdDevsInsideBoundsToUseRandnSampler) && (normMin <= T(0.)))) { // If the bounds are a least 3 standard deviations from the mean // on at least one side then we rejection sample by sampling // from the normal distribution and rejecting samples outside // the bounds. // Under this condition the acceptance rate per iteration should // always be ~ 50%. This sampler is more efficient (and more // numerically stable when one or both bounds is far from the mean). for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output;) { const auto randn_sample = normal_dist(&gen_copy); const int size = randn_sample.size(); for (int i = 0; i < size; ++i) { if ((randn_sample[i] >= normMin) && (randn_sample[i] <= normMax)) { output_batch_offset[sample_idx * num_batches] = randn_sample[i] * stddev + mean; ++sample_idx; ++output_idx; if (sample_idx >= samples_per_batch || output_idx >= limit_output) { break; } num_iterations = 0; } else { ++num_iterations; if (num_iterations > kMaxIterations) { // This should never occur because this sampler should // (by the selection criteria above) be used if at least 3 // standard deviations of one side of the distribution // is within the limits (so acceptance probability per // iterations >~ 1/2 per iteration). 
LOG(ERROR) << "TruncatedNormal randn rejection sampler " << "exceeded maximum iterations for " << "normMin=" << normMin << " normMax=" << normMax << " kMaxIterations=" << kMaxIterations; ctx->SetStatus(errors::Internal( "TruncatedNormal randn rejection sampler failed to accept" " a sample.")); return; } } } } } else if (diff < cutoff) { // Sample from a uniform distribution on [normMin, normMax]. const T plusFactor = (normMin < T(0)) ? T(0) : normMin * normMin; for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output;) { const auto rand = dist(&gen_copy); const int size = rand.size(); // NOTE(ringwalt): These loops seem to only generate packed AVX // instructions for float32. for (int i = 0; i < size; i++) { z[i] = rand[i] * diff + normMin; g[i] = (plusFactor - z[i] * z[i]) / T(2.0); } const auto u = dist(&gen_copy); for (int i = 0; i < size; i++) { auto accept = u[i] <= Eigen::numext::exp(g[i]); if (accept || num_iterations + 1 >= kMaxIterations) { // Accept the sample z. // If we run out of iterations, just use the current uniform // sample, but emit a warning. // TODO(jjhunt) For small entropies (relative to the bounds), // this sampler is poor and may take many iterations since // the proposal distribution is the uniform distribution // U(lower_bound, upper_bound). if (!accept) { LOG(ERROR) << "TruncatedNormal uniform rejection sampler " << "exceeded max iterations. Sample may contain " << "outliers."; ctx->SetStatus(errors::Internal( "TruncatedNormal uniform rejection sampler failed to " " accept a sample.")); return; } output_batch_offset[sample_idx * num_batches] = z[i] * stddev + mean; ++sample_idx; ++output_idx; if (sample_idx >= samples_per_batch || output_idx >= limit_output) { break; } num_iterations = 0; } else { num_iterations++; } } } } else { // Sample from an exponential distribution with alpha maximizing // acceptance probability, offset by normMin from the origin. 
// Accept only if less than normMax. const T alpha = (normMin + Eigen::numext::sqrt((normMin * normMin) + T(4))) / T(2); for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output;) { auto rand = dist(&gen_copy); const int size = rand.size(); int i = 0; while (i < size) { const T z = -Eigen::numext::log(rand[i]) / alpha + normMin; i++; const T x = normMin < alpha ? alpha - z : normMin - alpha; const T g = Eigen::numext::exp(-x * x / T(2.0)); const T u = rand[i]; i++; auto accept = (u <= g && z < normMax); if (accept || num_iterations + 1 >= kMaxIterations) { if (!accept) { LOG(ERROR) << "TruncatedNormal exponential distribution " << "rejection sampler exceeds max iterations. " << "Sample may contain outliers."; ctx->SetStatus(errors::Internal( "TruncatedNormal exponential distribution rejection" " sampler failed to accept a sample.")); return; } output_batch_offset[sample_idx * num_batches] = z * stddev + mean; ++sample_idx; ++output_idx; if (sample_idx >= samples_per_batch || output_idx >= limit_output) { break; } num_iterations = 0; } else { num_iterations++; } } } } } }; // The cost of the initial calculations for the batch. const int64 batchInitCost = // normMin, normMax (Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>()) * 2 // sqrtFactor + Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() + Eigen::internal::functor_traits< Eigen::internal::scalar_sqrt_op<T>>::Cost // cutoff + Eigen::TensorOpCost::MulCost<T>() * 4 + Eigen::internal::functor_traits<Eigen::internal::scalar_exp_op<T>>::Cost // diff + Eigen::TensorOpCost::AddCost<T>(); const int64 uniformSampleCost = random::PhiloxRandom::kElementCost + random::UniformDistribution<random::PhiloxRandom, T>::kElementCost; // The cost of a single uniform sampling round. 
const int64 uniformRejectionSamplingCost = uniformSampleCost + Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>() + Eigen::TensorOpCost::MulCost<T>() * 2 + Eigen::TensorOpCost::AddCost<T>() + uniformSampleCost + Eigen::internal::functor_traits< Eigen::internal::scalar_exp_op<T>>::Cost + Eigen::TensorOpCost::MulCost<T>() + Eigen::TensorOpCost::AddCost<T>(); // Estimate the cost for an entire batch. // Assume we use uniform sampling, and accept the 2nd sample on average. const int64 batchCost = batchInitCost + uniformRejectionSamplingCost * 2; Shard(worker_threads.num_threads, worker_threads.workers, num_elements, batchCost, do_work); }
1617
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::RandomBinomialFunctor<CPUDevice,T,U>::operator ( )
tensorflow::functor::RandomBinomialFunctor<CPUDevice,T,U>::operator ( )( OpKernelContext * ctx , const CPUDevice & d , int64 num_batches , int64 samples_per_batch , int64 num_elements , const BCast & bcast , typename TTypes<T> :: ConstFlat counts , typename TTypes<T> :: ConstFlat probs , const random :: PhiloxRandom & gen , typename TTypes<U> :: Flat output)
['ctx', 'd', 'num_batches', 'samples_per_batch', 'num_elements', 'bcast', 'counts', 'probs', 'gen', 'output']
void operator()(OpKernelContext* ctx, const CPUDevice& d, int64 num_batches, int64 samples_per_batch, int64 num_elements, const BCast& bcast, typename TTypes<T>::ConstFlat counts, typename TTypes<T>::ConstFlat probs, const random::PhiloxRandom& gen, typename TTypes<U>::Flat output) { auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); // The output layout is [B1, ... Bk, H1, ... Hm]. We have [B1, ... Bk] for // the sample shape and [H1, ... Hm] for the batch shape of the samples. // We have B1 * ... * Bk samples per batch member we need. auto DoWork = [num_batches, samples_per_batch, &bcast, &counts, &probs, &gen, &output](int start_output, int limit_output) { // Vectorized intermediate calculations for uniform rejection sampling. // We always generate at most 4 samples. Eigen::array<T, 4> z; Eigen::array<T, 4> g; const bool should_bcast = bcast.IsBroadcastingRequired(); const auto& counts_batch_indices = bcast.x_batch_indices(); const auto& probs_batch_indices = bcast.y_batch_indices(); auto output_flat = output.data(); // We partition work across batches (count, prob) and then across samples // per batch member, to avoid extra work. for (int64 output_idx = start_output; output_idx < limit_output; // output_idx is incremented with the inner loops below. ) { int64 batch_idx = output_idx / samples_per_batch; U* const output_batch_offset = output_flat + batch_idx; // Generate batch counts from BCast, as it has the right indices to loop // over. T count, prob; if (should_bcast) { count = counts(counts_batch_indices[batch_idx]); prob = probs(probs_batch_indices[batch_idx]); } else { count = counts(batch_idx); prob = probs(batch_idx); } // Calculate normalized samples, then convert them. // Determine the method to use. 
double dcount = static_cast<double>(count); if (dcount <= 0.0 || prob <= T(0.0)) { for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { output_batch_offset[sample_idx * num_batches] = static_cast<U>(0.0); } } else if (prob >= T(1.0)) { for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { output_batch_offset[sample_idx * num_batches] = static_cast<U>(dcount); } } else if (prob <= T(0.5)) { double dp = static_cast<double>(prob); if (count * prob >= T(10)) { for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { random::PhiloxRandom gen_copy = gen; gen_copy.Skip(256 * output_idx); output_batch_offset[sample_idx * num_batches] = static_cast<U>(btrs(dcount, dp, &gen_copy)); } } else { for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { random::PhiloxRandom gen_copy = gen; // For binomial inversion, we have mean <= 10, variance <= 10. // This means on average we need at most 10 number of samples, // and for 10 standard deviations, we need 42 samples. We reserve // that much. 
gen_copy.Skip(42 * output_idx); output_batch_offset[sample_idx * num_batches] = static_cast<U>(binomial_inversion(dcount, dp, &gen_copy)); } } } else if (prob > T(0.5)) { T q = T(1) - prob; double dcount = static_cast<double>(count); double dq = static_cast<double>(q); if (count * q >= T(10)) { for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { random::PhiloxRandom gen_copy = gen; gen_copy.Skip(256 * output_idx); output_batch_offset[sample_idx * num_batches] = static_cast<U>(dcount - btrs(dcount, dq, &gen_copy)); } } else { for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { random::PhiloxRandom gen_copy = gen; // For binomial inversion, we have mean <= 10, variance <= 10. // This means on average we need at most 10 number of samples, // and for 10 standard deviations, we need 42 samples. We reserve // that much. gen_copy.Skip(42 * output_idx); output_batch_offset[sample_idx * num_batches] = static_cast<U>( dcount - binomial_inversion(dcount, dq, &gen_copy)); } } } else { // prob is NaN // TODO(srvasude): What should happen if prob is NaN but the output // type is an integer (which doesn't have a sentinel for NaN)? Fail // the whole batch sample? Return a specialized sentinel like -1? for (int64 sample_idx = output_idx % samples_per_batch; sample_idx < samples_per_batch && output_idx < limit_output; ++sample_idx, ++output_idx) { output_batch_offset[sample_idx * num_batches] = static_cast<U>(NAN); } } } }; // This will depend on count * p (or count * q). // For n * p < 10, on average, O(n * p) calls to uniform are // needed, with that // many multiplies. ~10 uniform calls on average with ~200 cost op calls. // // Very roughly, for rate >= 10, the four calls to log // occur for ~72 percent of samples. 
// 4 x 100 (64-bit cycles per log) * 0.72 = ~288 // Additionally, there are ~10 other ops (+, *, /, ...) at 3-6 cycles each: // 40 * .72 = ~25. // // Finally, there are several other ops that are done every loop along with // 2 uniform generations along with 5 other ops at 3-6 cycles each. // ~15 / .89 = ~16 // // In total this (rate >= 10) should be ~329 + 2 * Uniform::kElementCost. // We assume that half the tensor has rate < 10, so on average 6 // uniform's // will be needed. We will upper bound the other op cost by the one for // rate > 10. static const int kElementCost = 329 + 6 * Uniform::kElementCost + 6 * random::PhiloxRandom::kElementCost; Shard(worker_threads.num_threads, worker_threads.workers, num_elements, kElementCost, DoWork); }
821
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::PoissonFunctor<CPUDevice,T,U>::operator ( )
tensorflow::functor::PoissonFunctor<CPUDevice,T,U>::operator ( )( OpKernelContext * ctx , const CPUDevice & d , const T * rate_flat , int num_rate , int num_samples , const random :: PhiloxRandom & rng , U * samples_flat)
['ctx', 'd', 'rate_flat', 'num_rate', 'num_samples', 'rng', 'samples_flat']
void operator()(OpKernelContext* ctx, const CPUDevice& d, const T* rate_flat, int num_rate, int num_samples, const random::PhiloxRandom& rng, U* samples_flat) { // Two different algorithms are employed, depending on the size of // rate. // If rate < 10, we use an algorithm attributed to Knuth: // Seminumerical Algorithms. Art of Computer Programming, Volume 2. // // This algorithm runs in O(rate) time, and will require O(rate) // uniform variates. // // If rate >= 10 we use a transformation-rejection algorithm from // pairs of uniform random variables due to Hormann. // http://www.sciencedirect.com/science/article/pii/0167668793909974 // // The algorithm has an acceptance rate of ~89% for the smallest rate // (~10), // and higher accept rates for higher rate, so runtime is // O(NumRate * NumSamples * k) with k ~ 1 / 0.89. // // We partition work first across rates then across // samples-per-rate to // avoid a couple flops which can be done on a per-rate basis. typedef random::UniformDistribution<random::PhiloxRandom, CT> Uniform; auto DoWork = [num_samples, num_rate, &rng, samples_flat, rate_flat]( int start_output, int limit_output) { // Capturing "rng" by value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "rng" by reference and explicitly do a copy assignment. Uniform uniform; typename Uniform::ResultType uniform_result; for (int64 output_idx = start_output; output_idx < limit_output; /* output_idx incremented within inner loop below */) { const int64 rate_idx = output_idx / num_samples; // Several calculations can be done on a per-rate basis. const CT rate = CT(rate_flat[rate_idx]); auto samples_rate_output = samples_flat + rate_idx; if (rate < CT(10)) { // Knuth's algorithm for generating Poisson random variates. // Given a Poisson process, the time between events is exponentially // distributed. 
If we have a Poisson process with rate lambda, then, // the time between events is distributed Exp(lambda). If X ~ // Uniform(0, 1), then Y ~ Exp(lambda), where Y = -log(X) / lambda. // Thus to simulate a Poisson draw, we can draw X_i ~ Exp(lambda), // and N ~ Poisson(lambda), where N is the least number such that // \sum_i^N X_i > 1. const CT exp_neg_rate = Eigen::numext::exp(-rate); // Compute the rest of the samples for the current rate value. for (int64 sample_idx = output_idx % num_samples; sample_idx < num_samples && output_idx < limit_output; sample_idx++, output_idx++) { random::PhiloxRandom gen = rng; gen.Skip(kReservedSamplesPerOutput * output_idx); int16 uniform_remaining = 0; CT prod = 1; CT x = 0; // Keep trying until we surpass e^(-rate). This will take // expected time proportional to rate. while (true) { UNIFORM(u); prod = prod * u; if (prod <= exp_neg_rate && x <= CT(Eigen::NumTraits<U>::highest())) { samples_rate_output[sample_idx * num_rate] = U(x); break; } x += 1; } } continue; } // Transformed rejection due to Hormann. // // Given a CDF F(x), and G(x), a dominating distribution chosen such // that it is close to the inverse CDF F^-1(x), compute the following // steps: // // 1) Generate U and V, two independent random variates. Set U = U - 0.5 // (this step isn't strictly necessary, but is done to make some // calculations symmetric and convenient. Henceforth, G is defined on // [-0.5, 0.5]). // // 2) If V <= alpha * F'(G(U)) * G'(U), return floor(G(U)), else return // to step 1. alpha is the acceptance probability of the rejection // algorithm. // // For more details on transformed rejection, see: // http://citeseer.ist.psu.edu/viewdoc/citations;jsessionid=1BEB35946CC807879F55D42512E5490C?doi=10.1.1.48.3054. // // The dominating distribution in this case: // // G(u) = (2 * a / (2 - |u|) + b) * u + c using Eigen::numext::log; const CT log_rate = log(rate); // Constants used to define the dominating distribution. 
Names taken // from Hormann's paper. Constants were chosen to define the tightest // G(u) for the inverse Poisson CDF. const CT b = CT(0.931) + CT(2.53) * Eigen::numext::sqrt(rate); const CT a = CT(-0.059) + CT(0.02483) * b; // This is the inverse acceptance rate. At a minimum (when rate = 10), // this corresponds to ~75% acceptance. As the rate becomes larger, this // approaches ~89%. const CT inv_alpha = CT(1.1239) + CT(1.1328) / (b - CT(3.4)); // Compute the rest of the samples for the current rate value. for (int64 sample_idx = output_idx % num_samples; sample_idx < num_samples && output_idx < limit_output; sample_idx++, output_idx++) { random::PhiloxRandom gen = rng; gen.Skip(kReservedSamplesPerOutput * output_idx); int16 uniform_remaining = 0; while (true) { UNIFORM(u); u -= CT(0.5); UNIFORM(v); CT u_shifted = CT(0.5) - Eigen::numext::abs(u); CT k = Eigen::numext::floor((CT(2) * a / u_shifted + b) * u + rate + CT(0.43)); if (k > CT(Eigen::NumTraits<U>::highest())) { // retry in case of overflow. continue; } // When alpha * f(G(U)) * G'(U) is close to 1, it is possible to // find a rectangle (-u_r, u_r) x (0, v_r) under the curve, such // that if v <= v_r and |u| <= u_r, then we can accept. // Here v_r = 0.9227 - 3.6224 / (b - 2) and u_r = 0.43. if (u_shifted >= CT(0.07) && v <= CT(0.9277) - CT(3.6224) / (b - CT(2))) { samples_rate_output[sample_idx * num_rate] = U(k); break; } if (k < 0 || (u_shifted < CT(0.013) && v > u_shifted)) { continue; } // The expression below is equivalent to the computation of step 2) // in transformed rejection (v <= alpha * F'(G(u)) * G'(u)). CT s = log(v * inv_alpha / (a / (u_shifted * u_shifted) + b)); CT t = -rate + k * log_rate - Eigen::numext::lgamma(k + 1); if (s <= t) { samples_rate_output[sample_idx * num_rate] = U(k); break; } } } } }; // This will depend on rate. // For rate < 10, on average, O(rate) calls to uniform are // needed, with that // many multiplies. ~10 uniform calls on average with ~25 cost op calls. 
// // Very roughly, for rate >= 10, the single call to log + call to // lgamma // occur for ~60 percent of samples. // 2 x 100 (64-bit cycles per log) * 0.62 = ~124 // Additionally, there are ~10 other ops (+, *, /, ...) at 3-6 cycles each: // 40 * .62 = ~25. // // Finally, there are several other ops that are done every loop along with // 2 uniform generations along with 5 other ops at 3-6 cycles each. // ~15 / .89 = ~16 // // In total this should be ~165 + 2 * Uniform::kElementCost. // We assume that half the tensor has rate < 10, so on average 6 // uniform's // will be needed. We will upper bound the other op cost by the one for // rate > 10. static const int kElementCost = 165 + 6 * Uniform::kElementCost + 6 * random::PhiloxRandom::kElementCost; auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_rate * num_samples, kElementCost, DoWork); }
704
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::StatelessRandomGammaOp::Fill
tensorflow::StatelessRandomGammaOp::Fill( OpKernelContext * ctx , random :: PhiloxRandom random , Tensor * output)
['ctx', 'random', 'output']
void Fill(OpKernelContext* ctx, random::PhiloxRandom random, Tensor* output) override { const Tensor& alpha_t = ctx->input(2); TensorShape samples_shape = output->shape(); OP_REQUIRES(ctx, TensorShapeUtils::EndsWith(samples_shape, alpha_t.shape()), errors::InvalidArgument( "Shape passed in must end with broadcasted shape.")); typedef random::NormalDistribution<random::PhiloxRandom, double> Normal; typedef random::UniformDistribution<random::PhiloxRandom, double> Uniform; #define UNIFORM(X) \ if (uniform_remaining == 0) { \ uniform_remaining = Uniform::kResultElementCount; \ uniform_result = uniform(&gen); \ } \ uniform_remaining--; \ double X = uniform_result[uniform_remaining] // Each attempt is 95+% successful, and requires 1-2 normal + 1 uniform static constexpr int kReservedSamplesPerOutput = 256; const int64 num_alphas = alpha_t.NumElements(); OP_REQUIRES(ctx, num_alphas > 0, errors::InvalidArgument( "Input alpha should have non-zero element count, got: ", num_alphas)); const int64 samples_per_alpha = samples_shape.num_elements() / num_alphas; const auto alpha_flat = alpha_t.flat<T>().data(); auto samples_flat = output->flat<T>().data(); // We partition work first across alphas then across samples-per-alpha to // avoid a couple flops which can be done on a per-alpha basis. auto DoWork = [samples_per_alpha, num_alphas, &random, samples_flat, alpha_flat](int start_output, int limit_output) { // Capturing "random" by-value would only make a copy for the _shared_ // lambda. Since we want to let each worker have its own copy, we pass // "random" by reference and explicitly do a copy assignment. 
using Eigen::numext::exp; using Eigen::numext::log; using Eigen::numext::log1p; using Eigen::numext::pow; Normal normal; Uniform uniform; typename Normal::ResultType norm_result; typename Uniform::ResultType uniform_result; for (int64 output_idx = start_output; output_idx < limit_output; /* output_idx incremented within inner loop below */) { int64 alpha_idx = output_idx / samples_per_alpha; // Instead of +alpha_idx for each sample, we offset the pointer once. T* const samples_alpha_offset = samples_flat + alpha_idx; // Several calculations can be done on a per-alpha basis. const double alpha = static_cast<double>(alpha_flat[alpha_idx]); DISABLE_FLOAT_EQUALITY_WARNING if (alpha == static_cast<double>(1.0)) { ENABLE_FLOAT_EQUALITY_WARNING // Sample from an exponential distribution. for (int64 sample_idx = output_idx % samples_per_alpha; sample_idx < samples_per_alpha && output_idx < limit_output; sample_idx++, output_idx++) { // As we want data stable regardless of sharding // (including eventually on GPU), we skip on a per-sample basis. random::PhiloxRandom gen = random; gen.Skip(kReservedSamplesPerOutput * output_idx); int16 uniform_remaining = 0; UNIFORM(u); const double res = -log1p(-u); samples_alpha_offset[sample_idx * num_alphas] = static_cast<T>(res); } // for (sample_idx) } else { // if alpha != 1.0 // Transformation-rejection from pairs of uniform and normal random // variables. http://dl.acm.org/citation.cfm?id=358414 // // The algorithm has an acceptance rate of ~95% for small alpha (~1), // and higher accept rates for higher alpha, so runtime is // O(NumAlphas * NumSamples * k) with k ~ 1 / 0.95. // // For alpha<1, we add one to d=alpha-1/3, and multiply the final // result by uniform()^(1/alpha) const bool alpha_less_than_one = alpha < 1; const double d = alpha + (alpha_less_than_one ? 2.0 / 3 : -1.0 / 3); const double c = 1.0 / 3 / sqrt(d); // Compute the rest of the samples for the current alpha value. 
for (int64 sample_idx = output_idx % samples_per_alpha; sample_idx < samples_per_alpha && output_idx < limit_output; sample_idx++, output_idx++) { // Since each sample may use a variable number of normal/uniform // samples, and we want data stable regardless of sharding // (including eventually on GPU), we skip on a per-sample basis. random::PhiloxRandom gen = random; gen.Skip(kReservedSamplesPerOutput * output_idx); int16 norm_remaining = 0; int16 uniform_remaining = 0; // Keep trying until we don't reject a sample. In practice, we will // only reject ~5% at worst, for low alpha near 1. while (true) { if (norm_remaining == 0) { norm_remaining = Normal::kResultElementCount; norm_result = normal(&gen); } norm_remaining--; const double x = norm_result[norm_remaining]; double v = 1 + c * x; if (v <= 0) { continue; } v = v * v * v; UNIFORM(u); // The first option in the if is a "squeeze" short-circuit to // dodge the two logs. Magic constant sourced from the paper // linked above. Upward of .91 of the area covered by the log // inequality is covered by the squeeze as well (larger coverage // for smaller values of alpha). if ((u < 1 - 0.0331 * (x * x) * (x * x)) || (log(u) < 0.5 * x * x + d * (1 - v + log(v)))) { double res = d * v; if (alpha_less_than_one) { UNIFORM(b); res *= pow(b, 1 / alpha); } samples_alpha_offset[sample_idx * num_alphas] = static_cast<T>(res); break; } } // while: true } // for: sample_idx } // if (alpha == 1.0) } // for: output_idx }; // DoWork #undef UNIFORM // Two calls to log only occur for ~10% of samples reaching the log line. // 2 x 100 (64-bit cycles per log) x 0.10 = ~20. // Other ops: sqrt, +, *, /, %... something like 15 of these, at 3-6 cycles // each = ~60. // All of this /0.95 due to the rejection possibility = ~85. 
static const int kElementCost = 85 + 2 * Normal::kElementCost + Uniform::kElementCost + 3 * random::PhiloxRandom::kElementCost; auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_alphas * samples_per_alpha, kElementCost, DoWork); }
714
True
1
CVE-2020-15202
False
False
False
False
AV:N/AC:M/Au:N/C:P/I:P/A:P
NETWORK
MEDIUM
NONE
PARTIAL
PARTIAL
PARTIAL
6.8
CVSS:3.1/AV:N/AC:H/PR:N/UI:N/S:C/C:H/I:H/A:H
NETWORK
HIGH
NONE
NONE
CHANGED
HIGH
HIGH
HIGH
9.0
CRITICAL
2.2
6.0
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'name': 'https://github.com/tensorflow/tensorflow/commit/27b417360cbd671ef55915e4bb6bb06af8b8a832', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-h6fg-mjxg-hqq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'name': 'https://github.com/tensorflow/tensorflow/commit/ca8c013b5e97b1373b3bb1c97ea655e69f31a575', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'NVD-CWE-Other'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `Shard` API in TensorFlow expects the last argument to be a function taking two `int64` (i.e., `long long`) arguments. However, there are several places in TensorFlow where a lambda taking `int` or `int32` arguments is being used. In these cases, if the amount of work to be parallelized is large enough, integer truncation occurs. Depending on how the two arguments of the lambda are used, this can result in segfaults, read/write outside of heap allocated arrays, stack overflows, or data corruption. The issue is patched in commits 27b417360cbd671ef55915e4bb6bb06af8b8a832 and ca8c013b5e97b1373b3bb1c97ea655e69f31a575, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:26Z
2020-09-25T19:15Z
Other
NVD is only using a subset of CWE for mapping instead of the entire CWE, and the weakness type is not covered by that subset.
Insufficient Information
https://nvd.nist.gov/vuln/categories
0
Mihai Maruseac
2020-09-18 17:49:02-07:00
Prevent integer truncation from 64 to 32 bits. The `tensorflow::Shard` functions last argument must be a 2 argument function where both arguments are `int64` (`long long`, 64 bits). However, there are usages where code passes in a function where arguments are `int` or `int32` (32 bits). In these cases, it is possible that the integer truncation would later cause a segfault or other unexpected behavior. PiperOrigin-RevId: 332560414 Change-Id: Ief649406babc8d4f60b3e7a9d573cbcc5ce5b767
ca8c013b5e97b1373b3bb1c97ea655e69f31a575
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::TopKFunctor<CPUDevice,T>::Compute
tensorflow::functor::TopKFunctor<CPUDevice,T>::Compute( OpKernelContext * context , bool sorted , int k , const typename TTypes<T,2> :: ConstTensor & input , const int64 num_rows , const int64 num_cols , typename TTypes<T,2> :: Tensor values , typename TTypes<int,2> :: Tensor indices)
['context', 'sorted', 'k', 'input', 'num_rows', 'num_cols', 'values', 'indices']
Compute(OpKernelContext* context, bool sorted, int k, const typename TTypes<T, 2>::ConstTensor& input, const int64 num_rows, const int64 num_cols, typename TTypes<T, 2>::Tensor values, typename TTypes<int, 2>::Tensor indices) { const CPUDevice& d = context->eigen_device<CPUDevice>(); // Special case for k == 1. if (k == 1) { #ifdef EIGEN_HAS_INDEX_LIST typename Eigen::IndexList<Eigen::type2index<1>> reduce_on_cols; typename Eigen::IndexList<int, Eigen::type2index<1>> rows_by_one; rows_by_one.set(0, num_rows); #else Eigen::array<int, 1> reduce_on_cols = {1}; Eigen::array<int, 2> rows_by_one = {static_cast<int>(num_rows), 1}; #endif values.device(d) = input.maximum(/*dims=*/reduce_on_cols).eval().reshape(rows_by_one); // Get the indices of the maximum values. for (int r = 0; r < num_rows; ++r) { indices(r, 0) = 0; for (int c = 0; c < num_cols; ++c) { if (values(r, 0) == input(r, c)) { indices(r, 0) = c; break; } } values(r, 0) = input(r, indices(r, 0)); } return Status::OK(); } auto SortIndices = [&](int start_batch, int limit_batch) { for (int32 b = start_batch; b < limit_batch; ++b) { const T* input_data = &input(b, 0); const auto stable_comp = [input_data](const int32 a, const int32 b) { if (input_data[b] < input_data[a]) { return true; } else if (input_data[b] > input_data[a]) { return false; } else { return a < b; } }; const auto comp = [input_data](const int32 a, const int32 b) { return input_data[b] < input_data[a]; }; // TODO(ebrevdo): For large k < num_cols, instead of using // TopN, it may be faster to create a temporary vector of // values 0..num_cols - 1 and then use std::partial_sort_copy // of this into indices. Choosing the appropriate minimum k or // ratio of k/num_cols will require some experimentation. if (k == num_cols) { auto* begin = &indices(b, 0); auto* end = &indices(b, k); // Set the initial array of indices 0 ... k - 1. 
std::iota(begin, end, 0); // We want an in-place sort, but we can cheat because we're sorting // indices that started out sorted. First, do a std::sort, which // is notably faster than std::stable_sort. std::sort(begin, end, comp); // Then, for runs of adjacent elements that were equal, sort the // indices in those runs in increasing order. for (auto* run_begin = begin; run_begin != end;) { auto* run_end = run_begin + 1; if (run_end == end) break; if (input_data[*run_begin] == input_data[*run_end]) { while (++run_end != end) { if (input_data[*run_begin] != input_data[*run_end]) break; } std::sort(run_begin, run_end); } run_begin = run_end; } } else { // Use the TopN heap object to sort. gtl::TopN<int32, decltype(stable_comp)> filter(k, stable_comp); filter.reserve(num_cols); for (int32 c = 0; c < num_cols; ++c) { filter.push(c); } int32 i = 0; if (sorted) { std::unique_ptr<std::vector<int32>> top_k(filter.Extract()); for (auto top_k_it = top_k->begin(); top_k_it != top_k->end(); ++top_k_it, ++i) { indices(b, i) = *top_k_it; } } else { for (auto top_k_it = filter.unsorted_begin(); top_k_it != filter.unsorted_end(); ++top_k_it, ++i) { indices(b, i) = *top_k_it; } } } // Now that the indices are sorted, copy the values over in // sorted order. std::transform(&indices(b, 0), &indices(b, k), &values(b, 0), [b, &input](const int32 loc) { return input(b, loc); }); } // for (int32 b = ... }; // Guesstimate of cost; 4*N*log(K) where N == num_cols. // If K == N, assume the cost is N*log(K + 1). const double cmp_cost = 3 * Eigen::TensorOpCost::AddCost<int32>() + Eigen::TensorOpCost::AddCost<T>(); const double base_cost = cmp_cost * static_cast<double>(num_cols * Eigen::numext::log2(static_cast<float>(k + 1))); const double sort_cost = (k == num_cols) ? base_cost : 4 * base_cost; const double copy_cost = 2 * k * Eigen::TensorOpCost::AddCost<T>(); const double total_cost = sort_cost + copy_cost; const int64 final_cost = (total_cost >= static_cast<double>(kint64max)) ? 
kint64max : static_cast<int64>(total_cost); auto worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); Shard(worker_threads.num_threads, worker_threads.workers, num_rows, final_cost, SortIndices); return Status::OK(); }
927
True
1
CVE-2020-15194
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
LOW
5.3
MEDIUM
3.9
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/390611e0d45c5793c7066110af37c8514e6a6c54', 'name': 'https://github.com/tensorflow/tensorflow/commit/390611e0d45c5793c7066110af37c8514e6a6c54', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Release Notes', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9mqp-7v2h-2382', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9mqp-7v2h-2382', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}, {'lang': 'en', 'value': 'CWE-617'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `SparseFillEmptyRowsGrad` implementation has incomplete validation of the shapes of its arguments. Although `reverse_index_map_t` and `grad_values_t` are accessed in a similar pattern, only `reverse_index_map_t` is validated to be of proper shape. Hence, malicious users can pass a bad `grad_values_t` to trigger an assertion failure in `vec`, causing denial of service in serving installations. The issue is patched in commit 390611e0d45c5793c7066110af37c8514e6a6c54, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1."'}]
2021-08-17T13:21Z
2020-09-25T19:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2020-09-18 18:43:38-07:00
Fix heap buffer overflow in `tf.raw_ops.SparseFillEmptyRowsGrad`. Also add tests as they were lacking PiperOrigin-RevId: 332566071 Change-Id: I44277578e26ff5fb3fdb0dcbba6e91b2ec3e7859
390611e0d45c5793c7066110af37c8514e6a6c54
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseFillEmptyRowsGradOp::Compute
tensorflow::SparseFillEmptyRowsGradOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor* reverse_index_map_t; const Tensor* grad_values_t; OP_REQUIRES_OK(context, context->input("reverse_index_map", &reverse_index_map_t)); OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t)); const CPUDevice& d = context->eigen_device<CPUDevice>(); OP_REQUIRES( context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec<int64>(); const auto grad_values = grad_values_t->vec<T>(); const int64 N = reverse_index_map_t->shape().dim_size(0); const int64 N_full = grad_values_t->shape().dim_size(0); Tensor* d_values_t; OP_REQUIRES_OK(context, context->allocate_output( "d_values", TensorShape({N}), &d_values_t)); auto d_values = d_values_t->vec<T>(); Tensor* d_default_value_t; OP_REQUIRES_OK(context, context->allocate_output("d_default_value", TensorShape({}), &d_default_value_t)); T& d_default_value = d_default_value_t->scalar<T>()(); d_default_value = T(); Tensor visited_t; OP_REQUIRES_OK(context, context->allocate_temp( DT_BOOL, TensorShape({N_full}), &visited_t)); auto visited = visited_t.vec<bool>(); visited.device(d) = visited.constant(false); for (int i = 0; i < N; ++i) { // Locate the index of the output of the forward prop associated // with this location in the input of the forward prop. Copy // the gradient into it. Mark it as visited. d_values(i) = grad_values(reverse_index_map(i)); visited(reverse_index_map(i)) = true; } for (int j = 0; j < N_full; ++j) { // The default value gradient gets the accumulated remainder of // the backprop values (since the default value was used to fill // in these slots in the forward calculation). if (!visited(j)) { d_default_value += grad_values(j); } } }
359
True
1
CVE-2020-15194
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
LOW
5.3
MEDIUM
3.9
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/390611e0d45c5793c7066110af37c8514e6a6c54', 'name': 'https://github.com/tensorflow/tensorflow/commit/390611e0d45c5793c7066110af37c8514e6a6c54', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Release Notes', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9mqp-7v2h-2382', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9mqp-7v2h-2382', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}, {'lang': 'en', 'value': 'CWE-617'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `SparseFillEmptyRowsGrad` implementation has incomplete validation of the shapes of its arguments. Although `reverse_index_map_t` and `grad_values_t` are accessed in a similar pattern, only `reverse_index_map_t` is validated to be of proper shape. Hence, malicious users can pass a bad `grad_values_t` to trigger an assertion failure in `vec`, causing denial of service in serving installations. The issue is patched in commit 390611e0d45c5793c7066110af37c8514e6a6c54, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1."'}]
2021-08-17T13:21Z
2020-09-25T19:15Z
Reachable Assertion
The product contains an assert() or similar statement that can be triggered by an attacker, which leads to an application exit or other behavior that is more severe than necessary.
While assertion is good for catching logic errors and reducing the chances of reaching more serious vulnerability conditions, it can still lead to a denial of service. For example, if a server handles multiple simultaneous connections, and an assert() occurs in one single connection that causes all other connections to be dropped, this is a reachable assertion that leads to a denial of service.
https://cwe.mitre.org/data/definitions/617.html
0
Mihai Maruseac
2020-09-18 18:43:38-07:00
Fix heap buffer overflow in `tf.raw_ops.SparseFillEmptyRowsGrad`. Also add tests as they were lacking PiperOrigin-RevId: 332566071 Change-Id: I44277578e26ff5fb3fdb0dcbba6e91b2ec3e7859
390611e0d45c5793c7066110af37c8514e6a6c54
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseFillEmptyRowsGradOp::Compute
tensorflow::SparseFillEmptyRowsGradOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor* reverse_index_map_t; const Tensor* grad_values_t; OP_REQUIRES_OK(context, context->input("reverse_index_map", &reverse_index_map_t)); OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t)); const CPUDevice& d = context->eigen_device<CPUDevice>(); OP_REQUIRES( context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec<int64>(); const auto grad_values = grad_values_t->vec<T>(); const int64 N = reverse_index_map_t->shape().dim_size(0); const int64 N_full = grad_values_t->shape().dim_size(0); Tensor* d_values_t; OP_REQUIRES_OK(context, context->allocate_output( "d_values", TensorShape({N}), &d_values_t)); auto d_values = d_values_t->vec<T>(); Tensor* d_default_value_t; OP_REQUIRES_OK(context, context->allocate_output("d_default_value", TensorShape({}), &d_default_value_t)); T& d_default_value = d_default_value_t->scalar<T>()(); d_default_value = T(); Tensor visited_t; OP_REQUIRES_OK(context, context->allocate_temp( DT_BOOL, TensorShape({N_full}), &visited_t)); auto visited = visited_t.vec<bool>(); visited.device(d) = visited.constant(false); for (int i = 0; i < N; ++i) { // Locate the index of the output of the forward prop associated // with this location in the input of the forward prop. Copy // the gradient into it. Mark it as visited. d_values(i) = grad_values(reverse_index_map(i)); visited(reverse_index_map(i)) = true; } for (int j = 0; j < N_full; ++j) { // The default value gradient gets the accumulated remainder of // the backprop values (since the default value was used to fill // in these slots in the forward calculation). if (!visited(j)) { d_default_value += grad_values(j); } } }
359
True
1
CVE-2020-15195
False
False
False
False
AV:N/AC:L/Au:S/C:P/I:P/A:P
NETWORK
LOW
SINGLE
PARTIAL
PARTIAL
PARTIAL
6.5
CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
NETWORK
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
8.8
HIGH
2.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/390611e0d45c5793c7066110af37c8514e6a6c54', 'name': 'https://github.com/tensorflow/tensorflow/commit/390611e0d45c5793c7066110af37c8514e6a6c54', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-63xm-rx5p-xvqr', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-63xm-rx5p-xvqr', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the implementation of `SparseFillEmptyRowsGrad` uses a double indexing pattern. It is possible for `reverse_index_map(i)` to be an index outside of bounds of `grad_values`, thus resulting in a heap buffer overflow. The issue is patched in commit 390611e0d45c5793c7066110af37c8514e6a6c54, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:20Z
2020-09-25T19:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2020-09-18 18:43:38-07:00
Fix heap buffer overflow in `tf.raw_ops.SparseFillEmptyRowsGrad`. Also add tests as they were lacking PiperOrigin-RevId: 332566071 Change-Id: I44277578e26ff5fb3fdb0dcbba6e91b2ec3e7859
390611e0d45c5793c7066110af37c8514e6a6c54
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseFillEmptyRowsGradOp::Compute
tensorflow::SparseFillEmptyRowsGradOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor* reverse_index_map_t; const Tensor* grad_values_t; OP_REQUIRES_OK(context, context->input("reverse_index_map", &reverse_index_map_t)); OP_REQUIRES_OK(context, context->input("grad_values", &grad_values_t)); const CPUDevice& d = context->eigen_device<CPUDevice>(); OP_REQUIRES( context, TensorShapeUtils::IsVector(reverse_index_map_t->shape()), errors::InvalidArgument("reverse_index_map must be a vector, saw: ", reverse_index_map_t->shape().DebugString())); const auto reverse_index_map = reverse_index_map_t->vec<int64>(); const auto grad_values = grad_values_t->vec<T>(); const int64 N = reverse_index_map_t->shape().dim_size(0); const int64 N_full = grad_values_t->shape().dim_size(0); Tensor* d_values_t; OP_REQUIRES_OK(context, context->allocate_output( "d_values", TensorShape({N}), &d_values_t)); auto d_values = d_values_t->vec<T>(); Tensor* d_default_value_t; OP_REQUIRES_OK(context, context->allocate_output("d_default_value", TensorShape({}), &d_default_value_t)); T& d_default_value = d_default_value_t->scalar<T>()(); d_default_value = T(); Tensor visited_t; OP_REQUIRES_OK(context, context->allocate_temp( DT_BOOL, TensorShape({N_full}), &visited_t)); auto visited = visited_t.vec<bool>(); visited.device(d) = visited.constant(false); for (int i = 0; i < N; ++i) { // Locate the index of the output of the forward prop associated // with this location in the input of the forward prop. Copy // the gradient into it. Mark it as visited. d_values(i) = grad_values(reverse_index_map(i)); visited(reverse_index_map(i)) = true; } for (int j = 0; j < N_full; ++j) { // The default value gradient gets the accumulated remainder of // the backprop values (since the default value was used to fill // in these slots in the forward calculation). if (!visited(j)) { d_default_value += grad_values(j); } } }
359
True
1
CVE-2020-15191
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
LOW
5.3
MEDIUM
3.9
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-q8qj-fc9q-cphr', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-q8qj-fc9q-cphr', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'name': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-252'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.2.0:*:*:*:-:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.3.0:*:*:*:-:*:*:*', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 2.2.1 and 2.3.1, if a user passes an invalid argument to `dlpack.to_dlpack` the expected validations will cause variables to bind to `nullptr` while setting a `status` variable to the error condition. However, this `status` argument is not properly checked. Hence, code following these methods will bind references to null pointers. This is undefined behavior and reported as an error if compiling with `-fsanitize=null`. The issue is patched in commit 22e07fb204386768e5bcbea563641ea11f96ceb8 and is released in TensorFlow versions 2.2.1, or 2.3.1.'}]
2021-11-18T17:18Z
2020-09-25T19:15Z
Unchecked Return Value
The software does not check the return value from a method or function, which can prevent it from detecting unexpected states and conditions.
Two common programmer assumptions are "this function call can never fail" and "it doesn't matter if this function call fails". If an attacker can force the function to fail or otherwise return a value that is not expected, then the subsequent program logic could lead to a vulnerability, because the software is not in a state that the programmer assumes. For example, if the program calls a function to drop privileges but does not check the return code to ensure that privileges were successfully dropped, then the program will continue to operate with the higher privileges.
https://cwe.mitre.org/data/definitions/252.html
0
Mihai Maruseac
2020-09-18 19:14:17-07:00
Fix multiple vulnerabilities in `tf.experimental.dlpack.to_dlpack`. We have a use after free caused by memory coruption, a segmentation fault caused by memory corruption, several memory leaks and an undefined behavior when taking the reference of a nullptr. PiperOrigin-RevId: 332568894 Change-Id: Ife0fc05e103b35325094ae5d822ee5fdea764572
22e07fb204386768e5bcbea563641ea11f96ceb8
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::TFE_HandleToDLPack
tensorflow::TFE_HandleToDLPack( TFE_TensorHandle * h , TF_Status * status)
['h', 'status']
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) { const Tensor* tensor = GetTensorFromHandle(h, status); TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype()); TensorReference tensor_ref(*tensor); // This will call buf_->Ref() auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref); tf_dlm_tensor_ctx->reference = tensor_ref; DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor; dlm_tensor->manager_ctx = tf_dlm_tensor_ctx; dlm_tensor->deleter = &DLManagedTensorDeleter; dlm_tensor->dl_tensor.ctx = GetDlContext(h, status); int ndim = tensor->dims(); dlm_tensor->dl_tensor.ndim = ndim; dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status); dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status); std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape; std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides; shape_arr->resize(ndim); stride_arr->resize(ndim, 1); for (int i = 0; i < ndim; i++) { (*shape_arr)[i] = tensor->dim_size(i); } for (int i = ndim - 2; i >= 0; --i) { (*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1]; } dlm_tensor->dl_tensor.shape = &(*shape_arr)[0]; // There are two ways to represent compact row-major data // 1) nullptr indicates tensor is compact and row-majored. // 2) fill in the strides array as the real case for compact row-major data. // Here we choose option 2, since some frameworks didn't handle the strides // argument properly. dlm_tensor->dl_tensor.strides = &(*stride_arr)[0]; dlm_tensor->dl_tensor.byte_offset = 0; // TF doesn't handle the strides and byte_offsets here return static_cast<void*>(dlm_tensor); }
309
True
1
CVE-2020-15192
False
False
False
False
AV:N/AC:L/Au:S/C:N/I:N/A:P
NETWORK
LOW
SINGLE
NONE
NONE
PARTIAL
4.0
CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
LOW
NONE
UNCHANGED
NONE
NONE
LOW
4.3
MEDIUM
2.8
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-8fxw-76px-3rxv', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-8fxw-76px-3rxv', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'name': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.2.0:*:*:*:-:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.3.0:*:*:*:-:*:*:*', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 2.2.1 and 2.3.1, if a user passes a list of strings to `dlpack.to_dlpack` there is a memory leak following an expected validation failure. The issue occurs because the `status` argument during validation failures is not properly checked. Since each of the above methods can return an error status, the `status` value must be checked before continuing. The issue is patched in commit 22e07fb204386768e5bcbea563641ea11f96ceb8 and is released in TensorFlow versions 2.2.1, or 2.3.1.'}]
2021-11-18T17:18Z
2020-09-25T19:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2020-09-18 19:14:17-07:00
Fix multiple vulnerabilities in `tf.experimental.dlpack.to_dlpack`. We have a use after free caused by memory coruption, a segmentation fault caused by memory corruption, several memory leaks and an undefined behavior when taking the reference of a nullptr. PiperOrigin-RevId: 332568894 Change-Id: Ife0fc05e103b35325094ae5d822ee5fdea764572
22e07fb204386768e5bcbea563641ea11f96ceb8
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::TFE_HandleToDLPack
tensorflow::TFE_HandleToDLPack( TFE_TensorHandle * h , TF_Status * status)
['h', 'status']
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) { const Tensor* tensor = GetTensorFromHandle(h, status); TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype()); TensorReference tensor_ref(*tensor); // This will call buf_->Ref() auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref); tf_dlm_tensor_ctx->reference = tensor_ref; DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor; dlm_tensor->manager_ctx = tf_dlm_tensor_ctx; dlm_tensor->deleter = &DLManagedTensorDeleter; dlm_tensor->dl_tensor.ctx = GetDlContext(h, status); int ndim = tensor->dims(); dlm_tensor->dl_tensor.ndim = ndim; dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status); dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status); std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape; std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides; shape_arr->resize(ndim); stride_arr->resize(ndim, 1); for (int i = 0; i < ndim; i++) { (*shape_arr)[i] = tensor->dim_size(i); } for (int i = ndim - 2; i >= 0; --i) { (*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1]; } dlm_tensor->dl_tensor.shape = &(*shape_arr)[0]; // There are two ways to represent compact row-major data // 1) nullptr indicates tensor is compact and row-majored. // 2) fill in the strides array as the real case for compact row-major data. // Here we choose option 2, since some frameworks didn't handle the strides // argument properly. dlm_tensor->dl_tensor.strides = &(*stride_arr)[0]; dlm_tensor->dl_tensor.byte_offset = 0; // TF doesn't handle the strides and byte_offsets here return static_cast<void*>(dlm_tensor); }
309
True
1
CVE-2020-15193
False
False
False
False
AV:N/AC:L/Au:S/C:N/I:P/A:P
NETWORK
LOW
SINGLE
NONE
PARTIAL
PARTIAL
5.5
CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:H/A:L
NETWORK
LOW
LOW
NONE
UNCHANGED
NONE
HIGH
LOW
7.1
HIGH
2.8
4.2
False
[{'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rjjg-hgv6-h69v', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rjjg-hgv6-h69v', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'name': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-908'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.2.0:*:*:*:-:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.3.0:*:*:*:-:*:*:*', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 2.2.1 and 2.3.1, the implementation of `dlpack.to_dlpack` can be made to use uninitialized memory resulting in further memory corruption. This is because the pybind11 glue code assumes that the argument is a tensor. However, there is nothing stopping users from passing in a Python object instead of a tensor. The uninitialized memory address is due to a `reinterpret_cast` Since the `PyObject` is a Python object, not a TensorFlow Tensor, the cast to `EagerTensor` fails. The issue is patched in commit 22e07fb204386768e5bcbea563641ea11f96ceb8 and is released in TensorFlow versions 2.2.1, or 2.3.1.'}]
2021-11-18T17:20Z
2020-09-25T19:15Z
Use of Uninitialized Resource
The software uses or accesses a resource that has not been initialized.
When a resource has not been properly initialized, the software may behave unexpectedly. This may lead to a crash or invalid memory access, but the consequences vary depending on the type of resource and how it is used within the software.
https://cwe.mitre.org/data/definitions/908.html
0
Mihai Maruseac
2020-09-18 19:14:17-07:00
Fix multiple vulnerabilities in `tf.experimental.dlpack.to_dlpack`. We have a use after free caused by memory coruption, a segmentation fault caused by memory corruption, several memory leaks and an undefined behavior when taking the reference of a nullptr. PiperOrigin-RevId: 332568894 Change-Id: Ife0fc05e103b35325094ae5d822ee5fdea764572
22e07fb204386768e5bcbea563641ea11f96ceb8
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::TFE_HandleToDLPack
tensorflow::TFE_HandleToDLPack( TFE_TensorHandle * h , TF_Status * status)
['h', 'status']
void* TFE_HandleToDLPack(TFE_TensorHandle* h, TF_Status* status) { const Tensor* tensor = GetTensorFromHandle(h, status); TF_DataType data_type = static_cast<TF_DataType>(tensor->dtype()); TensorReference tensor_ref(*tensor); // This will call buf_->Ref() auto* tf_dlm_tensor_ctx = new TfDlManagedTensorCtx(tensor_ref); tf_dlm_tensor_ctx->reference = tensor_ref; DLManagedTensor* dlm_tensor = &tf_dlm_tensor_ctx->tensor; dlm_tensor->manager_ctx = tf_dlm_tensor_ctx; dlm_tensor->deleter = &DLManagedTensorDeleter; dlm_tensor->dl_tensor.ctx = GetDlContext(h, status); int ndim = tensor->dims(); dlm_tensor->dl_tensor.ndim = ndim; dlm_tensor->dl_tensor.data = TFE_TensorHandleDevicePointer(h, status); dlm_tensor->dl_tensor.dtype = GetDlDataType(data_type, status); std::vector<int64_t>* shape_arr = &tf_dlm_tensor_ctx->shape; std::vector<int64_t>* stride_arr = &tf_dlm_tensor_ctx->strides; shape_arr->resize(ndim); stride_arr->resize(ndim, 1); for (int i = 0; i < ndim; i++) { (*shape_arr)[i] = tensor->dim_size(i); } for (int i = ndim - 2; i >= 0; --i) { (*stride_arr)[i] = (*shape_arr)[i + 1] * (*stride_arr)[i + 1]; } dlm_tensor->dl_tensor.shape = &(*shape_arr)[0]; // There are two ways to represent compact row-major data // 1) nullptr indicates tensor is compact and row-majored. // 2) fill in the strides array as the real case for compact row-major data. // Here we choose option 2, since some frameworks didn't handle the strides // argument properly. dlm_tensor->dl_tensor.strides = &(*stride_arr)[0]; dlm_tensor->dl_tensor.byte_offset = 0; // TF doesn't handle the strides and byte_offsets here return static_cast<void*>(dlm_tensor); }
309
True
1
CVE-2020-15191
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
LOW
5.3
MEDIUM
3.9
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-q8qj-fc9q-cphr', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-q8qj-fc9q-cphr', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'name': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-252'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.2.0:*:*:*:-:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.3.0:*:*:*:-:*:*:*', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 2.2.1 and 2.3.1, if a user passes an invalid argument to `dlpack.to_dlpack` the expected validations will cause variables to bind to `nullptr` while setting a `status` variable to the error condition. However, this `status` argument is not properly checked. Hence, code following these methods will bind references to null pointers. This is undefined behavior and reported as an error if compiling with `-fsanitize=null`. The issue is patched in commit 22e07fb204386768e5bcbea563641ea11f96ceb8 and is released in TensorFlow versions 2.2.1, or 2.3.1.'}]
2021-11-18T17:18Z
2020-09-25T19:15Z
Unchecked Return Value
The software does not check the return value from a method or function, which can prevent it from detecting unexpected states and conditions.
Two common programmer assumptions are "this function call can never fail" and "it doesn't matter if this function call fails". If an attacker can force the function to fail or otherwise return a value that is not expected, then the subsequent program logic could lead to a vulnerability, because the software is not in a state that the programmer assumes. For example, if the program calls a function to drop privileges but does not check the return code to ensure that privileges were successfully dropped, then the program will continue to operate with the higher privileges.
https://cwe.mitre.org/data/definitions/252.html
0
Mihai Maruseac
2020-09-18 19:14:17-07:00
Fix multiple vulnerabilities in `tf.experimental.dlpack.to_dlpack`. We have a use after free caused by memory coruption, a segmentation fault caused by memory corruption, several memory leaks and an undefined behavior when taking the reference of a nullptr. PiperOrigin-RevId: 332568894 Change-Id: Ife0fc05e103b35325094ae5d822ee5fdea764572
22e07fb204386768e5bcbea563641ea11f96ceb8
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::PYBIND11_MODULE
tensorflow::PYBIND11_MODULE( _pywrap_tfe , m)
['_pywrap_tfe', 'm']
PYBIND11_MODULE(_pywrap_tfe, m) { py::class_<TFE_Executor> TFE_Executor_class(m, "TFE_Executor"); py::class_<TFE_ContextOptions> TFE_ContextOptions_class(m, "TFE_ContextOptions"); py::class_<TFE_MonitoringCounter0> TFE_MonitoringCounter0_class( m, "TFE_MonitoringCounter0"); py::class_<TFE_MonitoringCounter1> TFE_MonitoringCounter1_class( m, "TFE_MonitoringCounter1"); py::class_<TFE_MonitoringCounter2> TFE_MonitoringCounter2_class( m, "TFE_MonitoringCounter2"); py::class_<TFE_MonitoringStringGauge0> TFE_MonitoringStringGauge0_class( m, "TFE_MonitoringStringGauge0"); py::class_<TFE_MonitoringStringGauge1> TFE_MonitoringStringGauge1_class( m, "TFE_MonitoringStringGauge1"); py::class_<TFE_MonitoringStringGauge2> TFE_MonitoringStringGauge2_class( m, "TFE_MonitoringStringGauge2"); py::class_<TFE_MonitoringIntGauge0> TFE_MonitoringIntGauge0_class( m, "TFE_MonitoringIntGauge0"); py::class_<TFE_MonitoringIntGauge1> TFE_MonitoringIntGauge1_class( m, "TFE_MonitoringIntGauge1"); py::class_<TFE_MonitoringIntGauge2> TFE_MonitoringIntGauge2_class( m, "TFE_MonitoringIntGauge2"); py::class_<TFE_MonitoringBoolGauge0> TFE_MonitoringBoolGauge0_class( m, "TFE_MonitoringBoolGauge0"); py::class_<TFE_MonitoringBoolGauge1> TFE_MonitoringBoolGauge1_class( m, "TFE_MonitoringBoolGauge1"); py::class_<TFE_MonitoringBoolGauge2> TFE_MonitoringBoolGauge2_class( m, "TFE_MonitoringBoolGauge2"); py::class_<TFE_MonitoringCounterCell> TFE_MonitoringCounterCell_class( m, "TFE_MonitoringCounterCell"); py::class_<TFE_MonitoringIntGaugeCell> TFE_MonitoringIntGaugeCell_class( m, "TFE_MonitoringIntGaugeCell"); py::class_<TFE_MonitoringStringGaugeCell> TFE_MonitoringStringGaugeCell_class( m, "TFE_MonitoringStringGaugeCell"); py::class_<TFE_MonitoringBoolGaugeCell> TFE_MonitoringBoolGaugeCell_class( m, "TFE_MonitoringBoolGaugeCell"); py::class_<TFE_MonitoringSamplerCell> TFE_MonitoringSamplerCell_class( m, "TFE_MonitoringSamplerCell"); py::class_<TFE_MonitoringBuckets> TFE_MonitoringBuckets_class( m, 
"TFE_MonitoringBuckets"); py::class_<TFE_MonitoringSampler0> TFE_MonitoringSampler0_class( m, "TFE_MonitoringSampler0"); py::class_<TFE_MonitoringSampler1> TFE_MonitoringSampler1_class( m, "TFE_MonitoringSampler1"); py::class_<TFE_MonitoringSampler2> TFE_MonitoringSampler2_class( m, "TFE_MonitoringSampler2"); py::class_<TFE_CancellationManager> TFE_CancellationManager_class( m, "TFE_CancellationManager"); py::class_<TF_DeviceList> TF_DeviceList_class(m, "TF_DeviceList"); py::class_<TF_Function> TF_Function_class(m, "TF_Function"); m.def("TFE_Py_RegisterExceptionClass", [](const py::handle& e) { return tensorflow::PyoOrThrow(TFE_Py_RegisterExceptionClass(e.ptr())); }); m.def("TFE_Py_RegisterFallbackExceptionClass", [](const py::handle& e) { return tensorflow::PyoOrThrow( TFE_Py_RegisterFallbackExceptionClass(e.ptr())); }); m.def( "TFE_GetTotalMemoryUsage", [](py::handle& ctx, const char* device_name) { tensorflow::EagerContext* context = tensorflow::ContextFromInterface( reinterpret_cast<tensorflow::ImmediateExecutionContext*>( tensorflow::InputTFE_Context(ctx))); tensorflow::DeviceNameUtils::ParsedName input_device_name; if (!tensorflow::DeviceNameUtils::ParseFullOrLocalName( device_name, &input_device_name)) { tensorflow::ThrowValueError( absl::StrFormat("Failed parsing device name: '%s'", device_name) .c_str()); } std::vector<tensorflow::Device*> devices = context->local_device_mgr()->ListDevices(); tensorflow::Device* matched_device = nullptr; for (int device_idx = 0; device_idx < devices.size(); device_idx++) { tensorflow::Device* device = devices[device_idx]; if (tensorflow::DeviceNameUtils::AreCompatibleDevNames( input_device_name, device->parsed_name())) { if (device->device_type() == tensorflow::DEVICE_CPU) { tensorflow::ThrowValueError( "CPU does not support getting allocator information"); } if (matched_device != nullptr) { tensorflow::ThrowValueError( absl::StrFormat( "Multiple devices matching the provided string " "'%s': '%s' and " "'%s' ", 
device_name, matched_device->name(), device->name()) .c_str()); } matched_device = device; } } if (matched_device == nullptr) { tensorflow::ThrowValueError( absl::StrFormat("No matching devices found for '%s'", device_name) .c_str()); } tensorflow::AllocatorAttributes attrs; tensorflow::Allocator* allocator = matched_device->GetAllocator(attrs); if (absl::optional<tensorflow::AllocatorStats> stats = allocator->GetStats()) { return stats->bytes_in_use; } tensorflow::ThrowTypeError( absl::StrFormat("Allocator stats not available for device '%s'", matched_device->name()) .c_str()); }); // XLA Eager Logic m.def("TF_SetXlaEnableLazyCompilation", &TF_SetXlaEnableLazyCompilation); m.def("TF_SetTfXlaCpuGlobalJit", &TF_SetTfXlaCpuGlobalJit); m.def("TF_SetXlaAutoJitMode", &TF_SetXlaAutoJitMode); m.def("TF_SetXlaConstantFoldingDisabled", &TF_SetXlaConstantFoldingDisabled); m.def("TF_GetXlaConstantFoldingDisabled", &TF_GetXlaConstantFoldingDisabled); m.def("TF_SetXlaMinClusterSize", &TF_SetXlaMinClusterSize); m.def("TF_GetCompilerIr", &tensorflow::TFE_GetCompilerIr); // MLIR Logic m.def("TF_IsMlirBridgeEnabled", [] { return tensorflow::GetMlirCommonFlags()->tf_mlir_enable_mlir_bridge; }); m.def("TF_EnableMlirBridge", [](bool enabled) { tensorflow::GetMlirCommonFlags()->tf_mlir_enable_mlir_bridge = enabled; }); m.def("TF_EnableXlaDevices", [] { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; }); // // TFE_Context Logic m.def( "TFE_NewContext", [](const TFE_ContextOptions* opts) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_Context* context = TFE_NewContext(opts, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return tensorflow::PyoOrThrow(tensorflow::OutputTFE_Context(context)); }, py::return_value_policy::reference); m.def("TFE_DeleteContext", [](py::handle& o) { TFE_DeleteContext(tensorflow::InputTFE_Context(o)); }); m.def( "TFE_ContextListDevices", [](py::handle& o) { 
tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_ContextListDevices(tensorflow::InputTFE_Context(o), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_HostAddressSpace", [](py::handle& o, TF_Buffer& buf) { TFE_HostAddressSpace(tensorflow::InputTFE_Context(o), &buf); }); m.def("TFE_ContextAddFunction", [](py::handle& ctx, TF_Function* func) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAddFunction(tensorflow::InputTFE_Context(ctx), func, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextAddFunctionDef", [](py::handle& ctx, const char* serialized_function_def, size_t size) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAddFunctionDef(tensorflow::InputTFE_Context(ctx), serialized_function_def, size, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextGetFunctionDef", [](py::handle& ctx, const char* function_name, TF_Buffer& buf) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextGetFunctionDef(tensorflow::InputTFE_Context(ctx), function_name, &buf, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextRemoveFunction", [](py::handle& ctx, const char* name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextRemoveFunction(tensorflow::InputTFE_Context(ctx), name, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextHasFunction", [](py::handle& ctx, const char* name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_ContextHasFunction(tensorflow::InputTFE_Context(ctx), name); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return 
output;
  });
  // Run-metadata collection toggles for the eager context.
  m.def("TFE_ContextEnableRunMetadata", [](py::handle& ctx) {
    TFE_ContextEnableRunMetadata(tensorflow::InputTFE_Context(ctx));
  });
  // Fix: this binding previously called TFE_ContextEnableRunMetadata, so
  // "disable" from Python silently re-enabled run-metadata collection.
  m.def("TFE_ContextDisableRunMetadata", [](py::handle& ctx) {
    TFE_ContextDisableRunMetadata(tensorflow::InputTFE_Context(ctx));
  });
  m.def("TFE_ContextEnableGraphCollection", [](py::handle& ctx) {
    TFE_ContextEnableGraphCollection(tensorflow::InputTFE_Context(ctx));
  });
  m.def("TFE_ContextDisableGraphCollection", [](py::handle& ctx) {
    TFE_ContextDisableGraphCollection(tensorflow::InputTFE_Context(ctx));
  });
  // Serializes collected run metadata into `buf`; raises on error status.
  m.def("TFE_ContextExportRunMetadata", [](py::handle& ctx, TF_Buffer& buf) {
    tensorflow::Safe_TF_StatusPtr status =
        tensorflow::make_safe(TF_NewStatus());
    TFE_ContextExportRunMetadata(tensorflow::InputTFE_Context(ctx), &buf,
                                 status.get());
    tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get());
  });
  m.def("TFE_ContextClearCaches", [](py::handle& o) {
    TFE_ContextClearCaches(tensorflow::InputTFE_Context(o));
  });
  m.def("TFE_GetContextId", [](py::handle& ctx) {
    return TFE_GetContextId(tensorflow::InputTFE_Context(ctx));
  });
  m.def("TFE_ContextGetDevicePlacementPolicy", [](py::handle& ctx) {
    return TFE_ContextGetDevicePlacementPolicy(
        tensorflow::InputTFE_Context(ctx));
  });
  m.def("TFE_ContextSetThreadLocalDevicePlacementPolicy",
        [](py::handle& ctx, TFE_ContextDevicePlacementPolicy policy) {
          TFE_ContextSetThreadLocalDevicePlacementPolicy(
              tensorflow::InputTFE_Context(ctx), policy);
        });
  // Installs a serialized ServerDef proto on the context; raises on error.
  m.def("TFE_ContextSetServerDef", [](py::handle& ctx, int keep_alive_secs,
                                      py::bytes proto) {
    tensorflow::Safe_TF_StatusPtr status =
        tensorflow::make_safe(TF_NewStatus());
    tensorflow::Safe_TF_BufferPtr buf =
        tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr()));
    TFE_ContextSetServerDef(tensorflow::InputTFE_Context(ctx), keep_alive_secs,
                            buf.get()->data, buf.get()->length, status.get());
    tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get());
  });
  m.def("TFE_ContextUpdateServerDef",
        [](py::handle& ctx, int keep_alive_secs, py::bytes
proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); Py_BEGIN_ALLOW_THREADS; TFE_ContextUpdateServerDef(tensorflow::InputTFE_Context(ctx), keep_alive_secs, buf.get()->data, buf.get()->length, status.get()); Py_END_ALLOW_THREADS; tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextCheckAlive", [](py::handle& ctx, const char* worker_name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); bool output = TFE_ContextCheckAlive(tensorflow::InputTFE_Context(ctx), worker_name, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TFE_ContextSyncExecutors", [](py::handle& ctx) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAsyncWait(tensorflow::InputTFE_Context(ctx), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextClearExecutors", [](py::handle& ctx) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAsyncWait(tensorflow::InputTFE_Context(ctx), status.get()); // NOTE: different from TFE_ContextSyncExecutors that raises potential // errors, deliberately ignore executor statuses in cleanup. 
}); m.def("TFE_ContextSetSoftDevicePlacement", [](py::handle& ctx, bool enable) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextSetSoftDevicePlacement(tensorflow::InputTFE_Context(ctx), enable, status.get()); }); m.def("TFE_ContextSetLogDevicePlacement", [](py::handle& ctx, bool enable) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextSetSoftDevicePlacement(tensorflow::InputTFE_Context(ctx), enable, status.get()); }); // TFE_Executor logic m.def( "TFE_NewExecutor", [](const bool is_async) { TFE_Executor* exc = TFE_NewExecutor(is_async); return exc; }, py::return_value_policy::reference); m.def("TFE_DeleteExecutor", &TFE_DeleteExecutor); m.def("TFE_ExecutorIsAsync", &TFE_ExecutorIsAsync); m.def("TFE_ExecutorWaitForAllPendingNodes", [](TFE_Executor& exc) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); // NOTE: release Python GIL for pending PyFunc ops to be executed properly. 
Py_BEGIN_ALLOW_THREADS; TFE_ExecutorWaitForAllPendingNodes(&exc, status.get()); Py_END_ALLOW_THREADS; tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ExecutorClearError", &TFE_ExecutorClearError); m.def("TFE_ContextSetExecutorForThread", [](py::handle& ctx, TFE_Executor& exc) { TFE_ContextSetExecutorForThread(tensorflow::InputTFE_Context(ctx), &exc); }); m.def( "TFE_ContextGetExecutorForThread", [](py::handle& o) { return TFE_ContextGetExecutorForThread(tensorflow::InputTFE_Context(o)); }, py::return_value_policy::reference); m.def("TFE_OpNameGetAttrType", [](py::handle& ctx, const char* op_or_function_name, const char* attr_name) { int temp = 0; unsigned char* is_list = reinterpret_cast<unsigned char*>(&temp); tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_OpNameGetAttrType(tensorflow::InputTFE_Context(ctx), op_or_function_name, attr_name, is_list, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); #if PY_MAJOR_VERSION < 3 PyObject* output_pyo = PyInt_FromLong(output); #else PyObject* output_pyo = PyLong_FromLong(output); #endif if (*is_list == 1) { PyObject* list = PyList_New(1); PyList_SetItem(list, 0, output_pyo); return tensorflow::PyoOrThrow(list); } return tensorflow::PyoOrThrow(output_pyo); }); m.def("TFE_Py_InitEagerTensor", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_InitEagerTensor(o.ptr())); }); m.def("TFE_Py_PackEagerTensors", [](const py::handle& context, const py::handle& handles) { return tensorflow::TFE_Py_PackEagerTensors_wrapper(context, handles); }); m.def("TFE_Py_SetEagerTensorProfiler", &TFE_Py_SetEagerTensorProfiler); m.def("TFE_Py_RegisterJVPFunction", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterJVPFunction(o.ptr())); }); m.def("TFE_Py_RegisterGradientFunction", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterGradientFunction(o.ptr())); }); m.def("TFE_Py_Execute", [](const 
py::handle& context, const char* device_name, const char* op_name, const py::handle& inputs, const py::handle& attrs, const py::handle& num_outputs) { return tensorflow::TFE_Py_ExecuteCancelable_wrapper( context, device_name, op_name, inputs, attrs.ptr(), nullptr, num_outputs); }); m.def( "TFE_Py_ExecuteCancelable", [](const py::handle& context, const char* device_name, const char* op_name, const py::handle& inputs, const py::handle& attrs, TFE_CancellationManager& cancellation_manager, const py::handle& num_outputs) { return tensorflow::TFE_Py_ExecuteCancelable_wrapper( context, device_name, op_name, inputs, attrs.ptr(), &cancellation_manager, num_outputs); }); m.def("TFE_Py_FastPathExecute", [](const py::args args) { // TFE_Py_FastPathExecute requires error checking prior to returning. return tensorflow::PyoOrThrow(TFE_Py_FastPathExecute_C(args.ptr())); }); m.def("TFE_Py_RecordGradient", [](const py::handle& op_name, const py::handle& inputs, const py::handle& attrs, const py::handle& results, const py::handle& forward_pass_name_scope) { return tensorflow::PyoOrThrow(TFE_Py_RecordGradient( op_name.ptr(), inputs.ptr(), attrs.ptr(), results.ptr(), forward_pass_name_scope.ptr())); }); m.def("TFE_Py_UID", []() { return tensorflow::PyoOrThrow(TFE_Py_UID()); }); // TFE_Py_Tape Logic m.def("TFE_Py_TapeSetNew", [](const py::handle& persistent, const py::handle& watch_accessed_variables) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetNew(persistent.ptr(), watch_accessed_variables.ptr())); }); m.def("TFE_Py_TapeSetAdd", [](const py::handle& tape) { TFE_Py_TapeSetAdd(tape.ptr()); }); m.def("TFE_Py_TapeSetRemove", [](const py::handle& tape) { TFE_Py_TapeSetRemove(tape.ptr()); }); m.def("TFE_Py_TapeSetStopOnThread", &TFE_Py_TapeSetStopOnThread); m.def("TFE_Py_TapeSetRestartOnThread", &TFE_Py_TapeSetRestartOnThread); m.def("TFE_Py_TapeSetIsStopped", []() { return tensorflow::PyoOrThrow(TFE_Py_TapeSetIsStopped()); }); m.def("TFE_Py_TapeSetIsEmpty", []() { return 
tensorflow::PyoOrThrow(TFE_Py_TapeSetIsEmpty()); }); m.def("TFE_Py_TapeSetShouldRecordBackprop", [](const py::handle& tensors) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetShouldRecordBackprop(tensors.ptr())); }); m.def("TFE_Py_TapeSetPossibleGradientTypes", [](const py::handle& tensors) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetPossibleGradientTypes(tensors.ptr())); }); m.def("TFE_Py_TapeSetDeleteTrace", &TFE_Py_TapeSetDeleteTrace); m.def("TFE_Py_TapeSetRecordOperation", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function, const py::handle& forward_function) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperation( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr(), forward_function.ptr())); }); m.def( "TFE_Py_TapeSetRecordOperationBackprop", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperationBackprop( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr())); }); m.def( "TFE_Py_TapeSetRecordOperationForwardprop", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function, const py::handle& forwardprop_output_indices) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperationForwardprop( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr(), forwardprop_output_indices.ptr())); }); m.def("TFE_Py_TapeGradient", [](const py::handle& tape, const py::handle& target, const py::handle& sources, const py::handle& output_gradients, const py::handle& sources_raw, const py::handle& unconnected_gradients) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); PyObject* output = TFE_Py_TapeGradient( tape.ptr(), target.ptr(), sources.ptr(), 
output_gradients.ptr(), sources_raw.ptr(), unconnected_gradients.ptr(), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return tensorflow::PyoOrThrow(output); }); m.def("TFE_Py_TapeVariableAccessed", [](const py::handle& variable) { TFE_Py_TapeVariableAccessed(variable.ptr()); }); m.def("TFE_Py_TapeWatch", [](const py::handle& tape, const py::handle& tensor) { TFE_Py_TapeWatch(tape.ptr(), tensor.ptr()); }); m.def("TFE_Py_TapeWatchVariable", [](const py::handle& tape, const py::handle& variable) { TFE_Py_TapeWatchVariable(tape.ptr(), variable.ptr()); }); m.def("TFE_Py_TapeWatchedVariables", [](const py::handle& tape) { return tensorflow::PyoOrThrow(TFE_Py_TapeWatchedVariables(tape.ptr())); }); // TFE_Py_VariableWatcher logic. m.def("TFE_Py_VariableWatcherNew", []() { return tensorflow::PyoOrThrow(TFE_Py_VariableWatcherNew()); }); m.def("TFE_Py_VariableWatcherRemove", [](const py::handle& variable_watcher) { TFE_Py_VariableWatcherRemove(variable_watcher.ptr()); }); m.def("TFE_Py_VariableWatcherVariableAccessed", [](const py::handle& variable) { TFE_Py_VariableWatcherVariableAccessed(variable.ptr()); }); m.def("TFE_Py_VariableWatcherWatchedVariables", [](const py::handle& variable_watcher) { return tensorflow::PyoOrThrow( TFE_Py_VariableWatcherWatchedVariables(variable_watcher.ptr())); }); // TFE_Py_ForwardAccumulator logic. 
m.def("TFE_Py_ForwardAccumulatorNew", [](bool use_batch) { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorNew(use_batch)); }); m.def("TFE_Py_ForwardAccumulatorSetAdd", [](const py::handle& accumulator) { return tensorflow::PyoOrThrow( TFE_Py_ForwardAccumulatorSetAdd(accumulator.ptr())); }); m.def("TFE_Py_ForwardAccumulatorSetRemove", [](const py::handle& accumulator) { TFE_Py_ForwardAccumulatorSetRemove(accumulator.ptr()); }); m.def("TFE_Py_ForwardAccumulatorWatch", [](const py::handle& accumulator, const py::handle& tensor, const py::handle& tangent) { TFE_Py_ForwardAccumulatorWatch(accumulator.ptr(), tensor.ptr(), tangent.ptr()); }); m.def("TFE_Py_ForwardAccumulatorJVP", [](const py::handle& accumulator, const py::handle& tensor) { return tensorflow::PyoOrThrow( TFE_Py_ForwardAccumulatorJVP(accumulator.ptr(), tensor.ptr())); }); m.def("TFE_Py_ForwardAccumulatorPushState", []() { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorPushState()); }); m.def("TFE_Py_ForwardAccumulatorPopState", []() { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorPopState()); }); m.def("TFE_Py_PackJVPs", [](const py::handle& tensors) { return tensorflow::PyoOrThrow(TFE_Py_PackJVPs(tensors.ptr())); }); // TFE_ContextOptions Logic m.def("TFE_NewContextOptions", &TFE_NewContextOptions, py::return_value_policy::reference); m.def("TFE_ContextOptionsSetConfig", [](TFE_ContextOptions* options, py::bytes proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); TFE_ContextOptionsSetConfig(options, buf.get()->data, buf.get()->length, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextOptionsSetDevicePlacementPolicy", &TFE_ContextOptionsSetDevicePlacementPolicy); m.def("TFE_ContextOptionsSetLazyRemoteInputsCopy", &TFE_ContextOptionsSetLazyRemoteInputsCopy); m.def("TFE_ContextOptionsSetTfrt", 
&TFE_ContextOptionsSetTfrt); m.def("TFE_ContextOptionsSetAsync", &TFE_ContextOptionsSetAsync); m.def("TFE_DeleteContextOptions", &TFE_DeleteContextOptions, py::return_value_policy::reference); // TFE_Py_TensorShape Logic m.def("TFE_Py_TensorShapeSlice", [](const py::handle& tensors, int slice_dim) { return tensorflow::PyoOrThrow( TFE_Py_TensorShapeSlice(tensors.ptr(), slice_dim)); }); m.def("TFE_Py_TensorShapeOnDevice", [](const py::handle& tensors, int slice_dim) { return tensorflow::PyoOrThrow(TFE_Py_TensorShapeOnDevice(tensors.ptr())); }); m.def("TFE_Py_EnableInteractivePythonLogging", &TFE_Py_EnableInteractivePythonLogging); // Additional Context Logic m.def("TFE_Py_SetEagerContext", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_SetEagerContext(o.ptr())); }); m.def("TFE_ContextStartStep", [](py::handle& o) { TFE_ContextStartStep(tensorflow::InputTFE_Context(o.ptr())); }); m.def("TFE_ContextEndStep", [](py::handle& o) { TFE_ContextEndStep(tensorflow::InputTFE_Context(o.ptr())); }); m.def("TFE_Py_RegisterVSpace", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterVSpace(o.ptr())); }); m.def("TFE_Py_EncodeArg", [](const py::handle& o, bool include_tensor_ranks_only) { return tensorflow::PyoOrThrow( TFE_Py_EncodeArg(o.ptr(), include_tensor_ranks_only)); }); m.def("TFE_EnableCollectiveOps", [](const py::handle& ctx, py::bytes proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); TFE_EnableCollectiveOps(tensorflow::InputTFE_Context(ctx), buf.get()->data, buf.get()->length, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_AbortCollectiveOps", [](const py::handle& ctx, int code, const char* message) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TF_SetStatus(status.get(), static_cast<TF_Code>(code), message); 
TFE_AbortCollectiveOps(tensorflow::InputTFE_Context(ctx), status.get()); }); m.def("TFE_CollectiveOpsCheckPeerHealth", [](const py::handle& ctx, const char* task) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_CollectiveOpsCheckPeerHealth(tensorflow::InputTFE_Context(ctx), task, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TF_ListPhysicalDevices", &tensorflow::TF_ListPhysicalDevices); m.def("TF_GetDeviceDetails", &tensorflow::TF_GetDeviceDetails); m.def("TF_DeleteDeviceList", &TF_DeleteDeviceList, py::return_value_policy::reference); m.def("TF_DeviceListCount", &TF_DeviceListCount); m.def("TF_DeviceListName", [](const TF_DeviceList* list, int index) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TF_DeviceListName(list, index, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TF_DeviceListType", [](const TF_DeviceList* list, int index) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TF_DeviceListType(list, index, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TF_PickUnusedPortOrDie", &TF_PickUnusedPortOrDie); // TFE_MonitoringCounter Logic m.def("TFE_MonitoringCounterCellIncrementBy", &TFE_MonitoringCounterCellIncrementBy); m.def("TFE_MonitoringCounterCellValue", &TFE_MonitoringCounterCellValue); m.def( "TFE_MonitoringNewCounter0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter0", &TFE_MonitoringDeleteCounter0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter0", 
&TFE_MonitoringGetCellCounter0, py::return_value_policy::reference); m.def( "TFE_MonitoringNewCounter1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter1", &TFE_MonitoringDeleteCounter1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter1", &TFE_MonitoringGetCellCounter1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewCounter2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter2", &TFE_MonitoringDeleteCounter2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter2", &TFE_MonitoringGetCellCounter2, py::return_value_policy::reference); // TFE_MonitoringIntGauge Logic m.def("TFE_MonitoringIntGaugeCellSet", &TFE_MonitoringIntGaugeCellSet); m.def("TFE_MonitoringIntGaugeCellValue", &TFE_MonitoringIntGaugeCellValue); m.def( "TFE_MonitoringNewIntGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge0", &TFE_MonitoringDeleteIntGauge0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge0", &TFE_MonitoringGetCellIntGauge0, 
py::return_value_policy::reference); m.def( "TFE_MonitoringNewIntGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge1", &TFE_MonitoringDeleteIntGauge1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge1", &TFE_MonitoringGetCellIntGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewIntGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge2", &TFE_MonitoringDeleteIntGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge2", &TFE_MonitoringGetCellIntGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringStringGaugeCellSet", &TFE_MonitoringStringGaugeCellSet); m.def("TFE_MonitoringStringGaugeCellValue", &TFE_MonitoringStringGaugeCellValue); m.def( "TFE_MonitoringNewStringGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); // TFE_MonitoringStringGauge Logic m.def("TFE_MonitoringDeleteStringGauge0", &TFE_MonitoringDeleteStringGauge0); m.def("TFE_MonitoringGetCellStringGauge0", &TFE_MonitoringGetCellStringGauge0, 
py::return_value_policy::reference); m.def( "TFE_MonitoringNewStringGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteStringGauge1", &TFE_MonitoringDeleteStringGauge1); m.def("TFE_MonitoringGetCellStringGauge1", &TFE_MonitoringGetCellStringGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewStringGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge2( name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteStringGauge2", &TFE_MonitoringDeleteStringGauge2); m.def("TFE_MonitoringGetCellStringGauge2", &TFE_MonitoringGetCellStringGauge2, py::return_value_policy::reference); // TFE_MonitoringBoolGauge Logic m.def("TFE_MonitoringBoolGaugeCellSet", &TFE_MonitoringBoolGaugeCellSet); m.def("TFE_MonitoringBoolGaugeCellValue", &TFE_MonitoringBoolGaugeCellValue); m.def( "TFE_MonitoringNewBoolGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge0", &TFE_MonitoringDeleteBoolGauge0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge0", &TFE_MonitoringGetCellBoolGauge0, py::return_value_policy::reference); m.def( 
"TFE_MonitoringNewBoolGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge1", &TFE_MonitoringDeleteBoolGauge1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge1", &TFE_MonitoringGetCellBoolGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewBoolGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge2", &TFE_MonitoringDeleteBoolGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge2", &TFE_MonitoringGetCellBoolGauge2, py::return_value_policy::reference); // TFE_MonitoringSampler Logic m.def("TFE_MonitoringSamplerCellAdd", &TFE_MonitoringSamplerCellAdd); m.def("TFE_MonitoringSamplerCellValue", &TFE_MonitoringSamplerCellValue); m.def("TFE_MonitoringNewExponentialBuckets", &TFE_MonitoringNewExponentialBuckets, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBuckets", &TFE_MonitoringDeleteBuckets, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler0", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler0(name, buckets, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, 
py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler0", &TFE_MonitoringDeleteSampler0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler0", &TFE_MonitoringGetCellSampler0, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler1", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler1(name, buckets, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler1", &TFE_MonitoringDeleteSampler1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler1", &TFE_MonitoringGetCellSampler1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler2", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler2(name, buckets, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler2", &TFE_MonitoringDeleteSampler2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler2", &TFE_MonitoringGetCellSampler2, py::return_value_policy::reference); // TFE_CancellationManager Logic m.def("TFE_NewCancellationManager", &TFE_NewCancellationManager, py::return_value_policy::reference); m.def("TFE_CancellationManagerIsCancelled", &TFE_CancellationManagerIsCancelled); m.def("TFE_CancellationManagerStartCancel", &TFE_CancellationManagerStartCancel); m.def("TFE_DeleteCancellationManager", &TFE_DeleteCancellationManager, py::return_value_policy::reference); m.def("TFE_ClearScalarCache", 
&tensorflow::TFE_ClearScalarCache); // Util buffer helper functions m.def("TF_NewBufferFromString", &TF_NewBufferFromString, py::return_value_policy::reference); // DLPack functions m.def("TFE_ToDlpackCapsule", [](py::handle& o) { PyObject* eager_tensor_pyobject_ptr = o.ptr(); TFE_TensorHandle* thandle = EagerTensor_Handle(eager_tensor_pyobject_ptr); tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); void* dlm_ptr = tensorflow::TFE_HandleToDLPack(thandle, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); py::capsule capsule( dlm_ptr, tensorflow::kDlTensorCapsuleName, [](PyObject* capsule) { if (PyCapsule_IsValid(capsule, tensorflow::kDlTensorCapsuleName)) { void* dlm_rptr = PyCapsule_GetPointer(capsule, tensorflow::kDlTensorCapsuleName); if (dlm_rptr) { tensorflow::TFE_CallDLManagedTensorDeleter(dlm_rptr); PyCapsule_SetDestructor(capsule, nullptr); } } }); return capsule; }); m.def("TFE_FromDlpackCapsule", [](const py::capsule& pycapsule, const py::handle& context) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); if (absl::string_view(pycapsule.name()) != tensorflow::kDlTensorCapsuleName) { status->status = tensorflow::errors::InvalidArgument( "DLPack tensor must be a capsule with name \"dltensor\", got \"%s\". 
" "Note that a DLPack tensor may be consumed at most once.", absl::string_view(pycapsule.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } TFE_TensorHandle* thandle = tensorflow::TFE_HandleFromDLPack( pycapsule, status.get(), tensorflow::InputTFE_Context(context)); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); PyCapsule_SetName(pycapsule.ptr(), "used_dltensor"); PyCapsule_SetDestructor(pycapsule.ptr(), nullptr); PyObject* pyhandle = EagerTensorFromHandle(thandle); return tensorflow::PyoOrThrow(pyhandle); }); m.def("TFE_Py_RegisterCustomDevice", [](const py::handle& context, const py::capsule& device, const char* device_name, const py::capsule& device_info) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); if (absl::string_view(device.name()) != "TFE_CustomDevice") { status->status = tensorflow::errors::InvalidArgument( "Expected a capsule named 'TFE_CustomDevice' for the `device` " "argument, got ", absl::string_view(device.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } if (absl::string_view(device_info.name()) != "TFE_CustomDevice_DeviceInfo") { status->status = tensorflow::errors::InvalidArgument( "Expected a capsule named 'TFE_CustomDevice_DeviceInfo' for " "the `device_info` argument, got ", absl::string_view(device_info.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } // TFE_RegisterCustomDevice takes ownership PyCapsule_SetDestructor(device_info.ptr(), nullptr); TFE_RegisterCustomDevice( tensorflow::InputTFE_Context(context), *reinterpret_cast<TFE_CustomDevice*>( PyCapsule_GetPointer(device.ptr(), "TFE_CustomDevice")), device_name, PyCapsule_GetPointer(device_info.ptr(), "TFE_CustomDevice_DeviceInfo"), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); py::class_<EagerContextThreadLocalDataWrapper>(m, "EagerContextThreadLocalData") .def(py::init<py::handle, py::handle, py::handle>(), py::arg("py_eager_context"), 
py::arg("is_eager"), py::arg("device_spec")) .def_property("is_eager", &EagerContextThreadLocalDataWrapper::get_is_eager, &EagerContextThreadLocalDataWrapper::set_is_eager) .def_property( "invoking_op_callbacks", &EagerContextThreadLocalDataWrapper::get_invoking_op_callbacks, &EagerContextThreadLocalDataWrapper::set_invoking_op_callbacks) .def_property("device_name", &EagerContextThreadLocalDataWrapper::get_device_name, &EagerContextThreadLocalDataWrapper::set_device_name) .def_property("scope_name", &EagerContextThreadLocalDataWrapper::get_scope_name, &EagerContextThreadLocalDataWrapper::set_scope_name) .def_property("device_spec", &EagerContextThreadLocalDataWrapper::get_device_spec, &EagerContextThreadLocalDataWrapper::set_device_spec) .def_property( "function_call_options", &EagerContextThreadLocalDataWrapper::get_function_call_options, &EagerContextThreadLocalDataWrapper::set_function_call_options) .def_property("executor", &EagerContextThreadLocalDataWrapper::get_executor, &EagerContextThreadLocalDataWrapper::set_executor) .def_property("op_callbacks", &EagerContextThreadLocalDataWrapper::get_op_callbacks, &EagerContextThreadLocalDataWrapper::set_op_callbacks); // C API Enum py::enum_<TFE_ContextDevicePlacementPolicy>( m, "TFE_ContextDevicePlacementPolicy") .value("TFE_DEVICE_PLACEMENT_EXPLICIT", TFE_DEVICE_PLACEMENT_EXPLICIT) .value("TFE_DEVICE_PLACEMENT_WARN", TFE_DEVICE_PLACEMENT_WARN) .value("TFE_DEVICE_PLACEMENT_SILENT", TFE_DEVICE_PLACEMENT_SILENT) .value("TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32", TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32) .export_values(); py::enum_<TF_AttrType>(m, "TF_AttrType") .value("TF_ATTR_STRING", TF_ATTR_STRING) .value("TF_ATTR_INT", TF_ATTR_INT) .value("TF_ATTR_FLOAT", TF_ATTR_FLOAT) .value("TF_ATTR_BOOL", TF_ATTR_BOOL) .value("TF_ATTR_TYPE", TF_ATTR_TYPE) .value("TF_ATTR_SHAPE", TF_ATTR_SHAPE) .value("TF_ATTR_TENSOR", TF_ATTR_TENSOR) .value("TF_ATTR_PLACEHOLDER", TF_ATTR_PLACEHOLDER) .value("TF_ATTR_FUNC", TF_ATTR_FUNC) 
.export_values(); };
7857
True
1
CVE-2020-15192
False
False
False
False
AV:N/AC:L/Au:S/C:N/I:N/A:P
NETWORK
LOW
SINGLE
NONE
NONE
PARTIAL
4.0
CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
LOW
NONE
UNCHANGED
NONE
NONE
LOW
4.3
MEDIUM
2.8
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-8fxw-76px-3rxv', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-8fxw-76px-3rxv', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'name': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.2.0:*:*:*:-:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.3.0:*:*:*:-:*:*:*', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 2.2.1 and 2.3.1, if a user passes a list of strings to `dlpack.to_dlpack` there is a memory leak following an expected validation failure. The issue occurs because the `status` argument during validation failures is not properly checked. Since each of the above methods can return an error status, the `status` value must be checked before continuing. The issue is patched in commit 22e07fb204386768e5bcbea563641ea11f96ceb8 and is released in TensorFlow versions 2.2.1, or 2.3.1.'}]
2021-11-18T17:18Z
2020-09-25T19:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2020-09-18 19:14:17-07:00
Fix multiple vulnerabilities in `tf.experimental.dlpack.to_dlpack`. We have a use after free caused by memory corruption, a segmentation fault caused by memory corruption, several memory leaks and an undefined behavior when taking the reference of a nullptr. PiperOrigin-RevId: 332568894 Change-Id: Ife0fc05e103b35325094ae5d822ee5fdea764572
22e07fb204386768e5bcbea563641ea11f96ceb8
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::PYBIND11_MODULE
tensorflow::PYBIND11_MODULE( _pywrap_tfe , m)
['_pywrap_tfe', 'm']
PYBIND11_MODULE(_pywrap_tfe, m) { py::class_<TFE_Executor> TFE_Executor_class(m, "TFE_Executor"); py::class_<TFE_ContextOptions> TFE_ContextOptions_class(m, "TFE_ContextOptions"); py::class_<TFE_MonitoringCounter0> TFE_MonitoringCounter0_class( m, "TFE_MonitoringCounter0"); py::class_<TFE_MonitoringCounter1> TFE_MonitoringCounter1_class( m, "TFE_MonitoringCounter1"); py::class_<TFE_MonitoringCounter2> TFE_MonitoringCounter2_class( m, "TFE_MonitoringCounter2"); py::class_<TFE_MonitoringStringGauge0> TFE_MonitoringStringGauge0_class( m, "TFE_MonitoringStringGauge0"); py::class_<TFE_MonitoringStringGauge1> TFE_MonitoringStringGauge1_class( m, "TFE_MonitoringStringGauge1"); py::class_<TFE_MonitoringStringGauge2> TFE_MonitoringStringGauge2_class( m, "TFE_MonitoringStringGauge2"); py::class_<TFE_MonitoringIntGauge0> TFE_MonitoringIntGauge0_class( m, "TFE_MonitoringIntGauge0"); py::class_<TFE_MonitoringIntGauge1> TFE_MonitoringIntGauge1_class( m, "TFE_MonitoringIntGauge1"); py::class_<TFE_MonitoringIntGauge2> TFE_MonitoringIntGauge2_class( m, "TFE_MonitoringIntGauge2"); py::class_<TFE_MonitoringBoolGauge0> TFE_MonitoringBoolGauge0_class( m, "TFE_MonitoringBoolGauge0"); py::class_<TFE_MonitoringBoolGauge1> TFE_MonitoringBoolGauge1_class( m, "TFE_MonitoringBoolGauge1"); py::class_<TFE_MonitoringBoolGauge2> TFE_MonitoringBoolGauge2_class( m, "TFE_MonitoringBoolGauge2"); py::class_<TFE_MonitoringCounterCell> TFE_MonitoringCounterCell_class( m, "TFE_MonitoringCounterCell"); py::class_<TFE_MonitoringIntGaugeCell> TFE_MonitoringIntGaugeCell_class( m, "TFE_MonitoringIntGaugeCell"); py::class_<TFE_MonitoringStringGaugeCell> TFE_MonitoringStringGaugeCell_class( m, "TFE_MonitoringStringGaugeCell"); py::class_<TFE_MonitoringBoolGaugeCell> TFE_MonitoringBoolGaugeCell_class( m, "TFE_MonitoringBoolGaugeCell"); py::class_<TFE_MonitoringSamplerCell> TFE_MonitoringSamplerCell_class( m, "TFE_MonitoringSamplerCell"); py::class_<TFE_MonitoringBuckets> TFE_MonitoringBuckets_class( m, 
"TFE_MonitoringBuckets"); py::class_<TFE_MonitoringSampler0> TFE_MonitoringSampler0_class( m, "TFE_MonitoringSampler0"); py::class_<TFE_MonitoringSampler1> TFE_MonitoringSampler1_class( m, "TFE_MonitoringSampler1"); py::class_<TFE_MonitoringSampler2> TFE_MonitoringSampler2_class( m, "TFE_MonitoringSampler2"); py::class_<TFE_CancellationManager> TFE_CancellationManager_class( m, "TFE_CancellationManager"); py::class_<TF_DeviceList> TF_DeviceList_class(m, "TF_DeviceList"); py::class_<TF_Function> TF_Function_class(m, "TF_Function"); m.def("TFE_Py_RegisterExceptionClass", [](const py::handle& e) { return tensorflow::PyoOrThrow(TFE_Py_RegisterExceptionClass(e.ptr())); }); m.def("TFE_Py_RegisterFallbackExceptionClass", [](const py::handle& e) { return tensorflow::PyoOrThrow( TFE_Py_RegisterFallbackExceptionClass(e.ptr())); }); m.def( "TFE_GetTotalMemoryUsage", [](py::handle& ctx, const char* device_name) { tensorflow::EagerContext* context = tensorflow::ContextFromInterface( reinterpret_cast<tensorflow::ImmediateExecutionContext*>( tensorflow::InputTFE_Context(ctx))); tensorflow::DeviceNameUtils::ParsedName input_device_name; if (!tensorflow::DeviceNameUtils::ParseFullOrLocalName( device_name, &input_device_name)) { tensorflow::ThrowValueError( absl::StrFormat("Failed parsing device name: '%s'", device_name) .c_str()); } std::vector<tensorflow::Device*> devices = context->local_device_mgr()->ListDevices(); tensorflow::Device* matched_device = nullptr; for (int device_idx = 0; device_idx < devices.size(); device_idx++) { tensorflow::Device* device = devices[device_idx]; if (tensorflow::DeviceNameUtils::AreCompatibleDevNames( input_device_name, device->parsed_name())) { if (device->device_type() == tensorflow::DEVICE_CPU) { tensorflow::ThrowValueError( "CPU does not support getting allocator information"); } if (matched_device != nullptr) { tensorflow::ThrowValueError( absl::StrFormat( "Multiple devices matching the provided string " "'%s': '%s' and " "'%s' ", 
device_name, matched_device->name(), device->name()) .c_str()); } matched_device = device; } } if (matched_device == nullptr) { tensorflow::ThrowValueError( absl::StrFormat("No matching devices found for '%s'", device_name) .c_str()); } tensorflow::AllocatorAttributes attrs; tensorflow::Allocator* allocator = matched_device->GetAllocator(attrs); if (absl::optional<tensorflow::AllocatorStats> stats = allocator->GetStats()) { return stats->bytes_in_use; } tensorflow::ThrowTypeError( absl::StrFormat("Allocator stats not available for device '%s'", matched_device->name()) .c_str()); }); // XLA Eager Logic m.def("TF_SetXlaEnableLazyCompilation", &TF_SetXlaEnableLazyCompilation); m.def("TF_SetTfXlaCpuGlobalJit", &TF_SetTfXlaCpuGlobalJit); m.def("TF_SetXlaAutoJitMode", &TF_SetXlaAutoJitMode); m.def("TF_SetXlaConstantFoldingDisabled", &TF_SetXlaConstantFoldingDisabled); m.def("TF_GetXlaConstantFoldingDisabled", &TF_GetXlaConstantFoldingDisabled); m.def("TF_SetXlaMinClusterSize", &TF_SetXlaMinClusterSize); m.def("TF_GetCompilerIr", &tensorflow::TFE_GetCompilerIr); // MLIR Logic m.def("TF_IsMlirBridgeEnabled", [] { return tensorflow::GetMlirCommonFlags()->tf_mlir_enable_mlir_bridge; }); m.def("TF_EnableMlirBridge", [](bool enabled) { tensorflow::GetMlirCommonFlags()->tf_mlir_enable_mlir_bridge = enabled; }); m.def("TF_EnableXlaDevices", [] { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; }); // // TFE_Context Logic m.def( "TFE_NewContext", [](const TFE_ContextOptions* opts) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_Context* context = TFE_NewContext(opts, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return tensorflow::PyoOrThrow(tensorflow::OutputTFE_Context(context)); }, py::return_value_policy::reference); m.def("TFE_DeleteContext", [](py::handle& o) { TFE_DeleteContext(tensorflow::InputTFE_Context(o)); }); m.def( "TFE_ContextListDevices", [](py::handle& o) { 
tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_ContextListDevices(tensorflow::InputTFE_Context(o), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_HostAddressSpace", [](py::handle& o, TF_Buffer& buf) { TFE_HostAddressSpace(tensorflow::InputTFE_Context(o), &buf); }); m.def("TFE_ContextAddFunction", [](py::handle& ctx, TF_Function* func) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAddFunction(tensorflow::InputTFE_Context(ctx), func, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextAddFunctionDef", [](py::handle& ctx, const char* serialized_function_def, size_t size) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAddFunctionDef(tensorflow::InputTFE_Context(ctx), serialized_function_def, size, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextGetFunctionDef", [](py::handle& ctx, const char* function_name, TF_Buffer& buf) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextGetFunctionDef(tensorflow::InputTFE_Context(ctx), function_name, &buf, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextRemoveFunction", [](py::handle& ctx, const char* name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextRemoveFunction(tensorflow::InputTFE_Context(ctx), name, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextHasFunction", [](py::handle& ctx, const char* name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_ContextHasFunction(tensorflow::InputTFE_Context(ctx), name); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return 
output; }); m.def("TFE_ContextEnableRunMetadata", [](py::handle& ctx) { TFE_ContextEnableRunMetadata(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextDisableRunMetadata", [](py::handle& ctx) { TFE_ContextEnableRunMetadata(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextEnableGraphCollection", [](py::handle& ctx) { TFE_ContextEnableGraphCollection(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextDisableGraphCollection", [](py::handle& ctx) { TFE_ContextDisableGraphCollection(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextExportRunMetadata", [](py::handle& ctx, TF_Buffer& buf) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextExportRunMetadata(tensorflow::InputTFE_Context(ctx), &buf, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextClearCaches", [](py::handle& o) { TFE_ContextClearCaches(tensorflow::InputTFE_Context(o)); }); m.def("TFE_GetContextId", [](py::handle& ctx) { return TFE_GetContextId(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextGetDevicePlacementPolicy", [](py::handle& ctx) { return TFE_ContextGetDevicePlacementPolicy( tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextSetThreadLocalDevicePlacementPolicy", [](py::handle& ctx, TFE_ContextDevicePlacementPolicy policy) { TFE_ContextSetThreadLocalDevicePlacementPolicy( tensorflow::InputTFE_Context(ctx), policy); }); m.def("TFE_ContextSetServerDef", [](py::handle& ctx, int keep_alive_secs, py::bytes proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); TFE_ContextSetServerDef(tensorflow::InputTFE_Context(ctx), keep_alive_secs, buf.get()->data, buf.get()->length, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextUpdateServerDef", [](py::handle& ctx, int keep_alive_secs, py::bytes 
proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); Py_BEGIN_ALLOW_THREADS; TFE_ContextUpdateServerDef(tensorflow::InputTFE_Context(ctx), keep_alive_secs, buf.get()->data, buf.get()->length, status.get()); Py_END_ALLOW_THREADS; tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextCheckAlive", [](py::handle& ctx, const char* worker_name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); bool output = TFE_ContextCheckAlive(tensorflow::InputTFE_Context(ctx), worker_name, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TFE_ContextSyncExecutors", [](py::handle& ctx) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAsyncWait(tensorflow::InputTFE_Context(ctx), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextClearExecutors", [](py::handle& ctx) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAsyncWait(tensorflow::InputTFE_Context(ctx), status.get()); // NOTE: different from TFE_ContextSyncExecutors that raises potential // errors, deliberately ignore executor statuses in cleanup. 
}); m.def("TFE_ContextSetSoftDevicePlacement", [](py::handle& ctx, bool enable) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextSetSoftDevicePlacement(tensorflow::InputTFE_Context(ctx), enable, status.get()); }); m.def("TFE_ContextSetLogDevicePlacement", [](py::handle& ctx, bool enable) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextSetSoftDevicePlacement(tensorflow::InputTFE_Context(ctx), enable, status.get()); }); // TFE_Executor logic m.def( "TFE_NewExecutor", [](const bool is_async) { TFE_Executor* exc = TFE_NewExecutor(is_async); return exc; }, py::return_value_policy::reference); m.def("TFE_DeleteExecutor", &TFE_DeleteExecutor); m.def("TFE_ExecutorIsAsync", &TFE_ExecutorIsAsync); m.def("TFE_ExecutorWaitForAllPendingNodes", [](TFE_Executor& exc) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); // NOTE: release Python GIL for pending PyFunc ops to be executed properly. 
Py_BEGIN_ALLOW_THREADS; TFE_ExecutorWaitForAllPendingNodes(&exc, status.get()); Py_END_ALLOW_THREADS; tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ExecutorClearError", &TFE_ExecutorClearError); m.def("TFE_ContextSetExecutorForThread", [](py::handle& ctx, TFE_Executor& exc) { TFE_ContextSetExecutorForThread(tensorflow::InputTFE_Context(ctx), &exc); }); m.def( "TFE_ContextGetExecutorForThread", [](py::handle& o) { return TFE_ContextGetExecutorForThread(tensorflow::InputTFE_Context(o)); }, py::return_value_policy::reference); m.def("TFE_OpNameGetAttrType", [](py::handle& ctx, const char* op_or_function_name, const char* attr_name) { int temp = 0; unsigned char* is_list = reinterpret_cast<unsigned char*>(&temp); tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_OpNameGetAttrType(tensorflow::InputTFE_Context(ctx), op_or_function_name, attr_name, is_list, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); #if PY_MAJOR_VERSION < 3 PyObject* output_pyo = PyInt_FromLong(output); #else PyObject* output_pyo = PyLong_FromLong(output); #endif if (*is_list == 1) { PyObject* list = PyList_New(1); PyList_SetItem(list, 0, output_pyo); return tensorflow::PyoOrThrow(list); } return tensorflow::PyoOrThrow(output_pyo); }); m.def("TFE_Py_InitEagerTensor", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_InitEagerTensor(o.ptr())); }); m.def("TFE_Py_PackEagerTensors", [](const py::handle& context, const py::handle& handles) { return tensorflow::TFE_Py_PackEagerTensors_wrapper(context, handles); }); m.def("TFE_Py_SetEagerTensorProfiler", &TFE_Py_SetEagerTensorProfiler); m.def("TFE_Py_RegisterJVPFunction", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterJVPFunction(o.ptr())); }); m.def("TFE_Py_RegisterGradientFunction", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterGradientFunction(o.ptr())); }); m.def("TFE_Py_Execute", [](const 
py::handle& context, const char* device_name, const char* op_name, const py::handle& inputs, const py::handle& attrs, const py::handle& num_outputs) { return tensorflow::TFE_Py_ExecuteCancelable_wrapper( context, device_name, op_name, inputs, attrs.ptr(), nullptr, num_outputs); }); m.def( "TFE_Py_ExecuteCancelable", [](const py::handle& context, const char* device_name, const char* op_name, const py::handle& inputs, const py::handle& attrs, TFE_CancellationManager& cancellation_manager, const py::handle& num_outputs) { return tensorflow::TFE_Py_ExecuteCancelable_wrapper( context, device_name, op_name, inputs, attrs.ptr(), &cancellation_manager, num_outputs); }); m.def("TFE_Py_FastPathExecute", [](const py::args args) { // TFE_Py_FastPathExecute requires error checking prior to returning. return tensorflow::PyoOrThrow(TFE_Py_FastPathExecute_C(args.ptr())); }); m.def("TFE_Py_RecordGradient", [](const py::handle& op_name, const py::handle& inputs, const py::handle& attrs, const py::handle& results, const py::handle& forward_pass_name_scope) { return tensorflow::PyoOrThrow(TFE_Py_RecordGradient( op_name.ptr(), inputs.ptr(), attrs.ptr(), results.ptr(), forward_pass_name_scope.ptr())); }); m.def("TFE_Py_UID", []() { return tensorflow::PyoOrThrow(TFE_Py_UID()); }); // TFE_Py_Tape Logic m.def("TFE_Py_TapeSetNew", [](const py::handle& persistent, const py::handle& watch_accessed_variables) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetNew(persistent.ptr(), watch_accessed_variables.ptr())); }); m.def("TFE_Py_TapeSetAdd", [](const py::handle& tape) { TFE_Py_TapeSetAdd(tape.ptr()); }); m.def("TFE_Py_TapeSetRemove", [](const py::handle& tape) { TFE_Py_TapeSetRemove(tape.ptr()); }); m.def("TFE_Py_TapeSetStopOnThread", &TFE_Py_TapeSetStopOnThread); m.def("TFE_Py_TapeSetRestartOnThread", &TFE_Py_TapeSetRestartOnThread); m.def("TFE_Py_TapeSetIsStopped", []() { return tensorflow::PyoOrThrow(TFE_Py_TapeSetIsStopped()); }); m.def("TFE_Py_TapeSetIsEmpty", []() { return 
tensorflow::PyoOrThrow(TFE_Py_TapeSetIsEmpty()); }); m.def("TFE_Py_TapeSetShouldRecordBackprop", [](const py::handle& tensors) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetShouldRecordBackprop(tensors.ptr())); }); m.def("TFE_Py_TapeSetPossibleGradientTypes", [](const py::handle& tensors) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetPossibleGradientTypes(tensors.ptr())); }); m.def("TFE_Py_TapeSetDeleteTrace", &TFE_Py_TapeSetDeleteTrace); m.def("TFE_Py_TapeSetRecordOperation", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function, const py::handle& forward_function) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperation( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr(), forward_function.ptr())); }); m.def( "TFE_Py_TapeSetRecordOperationBackprop", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperationBackprop( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr())); }); m.def( "TFE_Py_TapeSetRecordOperationForwardprop", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function, const py::handle& forwardprop_output_indices) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperationForwardprop( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr(), forwardprop_output_indices.ptr())); }); m.def("TFE_Py_TapeGradient", [](const py::handle& tape, const py::handle& target, const py::handle& sources, const py::handle& output_gradients, const py::handle& sources_raw, const py::handle& unconnected_gradients) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); PyObject* output = TFE_Py_TapeGradient( tape.ptr(), target.ptr(), sources.ptr(), 
output_gradients.ptr(), sources_raw.ptr(), unconnected_gradients.ptr(), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return tensorflow::PyoOrThrow(output); }); m.def("TFE_Py_TapeVariableAccessed", [](const py::handle& variable) { TFE_Py_TapeVariableAccessed(variable.ptr()); }); m.def("TFE_Py_TapeWatch", [](const py::handle& tape, const py::handle& tensor) { TFE_Py_TapeWatch(tape.ptr(), tensor.ptr()); }); m.def("TFE_Py_TapeWatchVariable", [](const py::handle& tape, const py::handle& variable) { TFE_Py_TapeWatchVariable(tape.ptr(), variable.ptr()); }); m.def("TFE_Py_TapeWatchedVariables", [](const py::handle& tape) { return tensorflow::PyoOrThrow(TFE_Py_TapeWatchedVariables(tape.ptr())); }); // TFE_Py_VariableWatcher logic. m.def("TFE_Py_VariableWatcherNew", []() { return tensorflow::PyoOrThrow(TFE_Py_VariableWatcherNew()); }); m.def("TFE_Py_VariableWatcherRemove", [](const py::handle& variable_watcher) { TFE_Py_VariableWatcherRemove(variable_watcher.ptr()); }); m.def("TFE_Py_VariableWatcherVariableAccessed", [](const py::handle& variable) { TFE_Py_VariableWatcherVariableAccessed(variable.ptr()); }); m.def("TFE_Py_VariableWatcherWatchedVariables", [](const py::handle& variable_watcher) { return tensorflow::PyoOrThrow( TFE_Py_VariableWatcherWatchedVariables(variable_watcher.ptr())); }); // TFE_Py_ForwardAccumulator logic. 
m.def("TFE_Py_ForwardAccumulatorNew", [](bool use_batch) { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorNew(use_batch)); }); m.def("TFE_Py_ForwardAccumulatorSetAdd", [](const py::handle& accumulator) { return tensorflow::PyoOrThrow( TFE_Py_ForwardAccumulatorSetAdd(accumulator.ptr())); }); m.def("TFE_Py_ForwardAccumulatorSetRemove", [](const py::handle& accumulator) { TFE_Py_ForwardAccumulatorSetRemove(accumulator.ptr()); }); m.def("TFE_Py_ForwardAccumulatorWatch", [](const py::handle& accumulator, const py::handle& tensor, const py::handle& tangent) { TFE_Py_ForwardAccumulatorWatch(accumulator.ptr(), tensor.ptr(), tangent.ptr()); }); m.def("TFE_Py_ForwardAccumulatorJVP", [](const py::handle& accumulator, const py::handle& tensor) { return tensorflow::PyoOrThrow( TFE_Py_ForwardAccumulatorJVP(accumulator.ptr(), tensor.ptr())); }); m.def("TFE_Py_ForwardAccumulatorPushState", []() { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorPushState()); }); m.def("TFE_Py_ForwardAccumulatorPopState", []() { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorPopState()); }); m.def("TFE_Py_PackJVPs", [](const py::handle& tensors) { return tensorflow::PyoOrThrow(TFE_Py_PackJVPs(tensors.ptr())); }); // TFE_ContextOptions Logic m.def("TFE_NewContextOptions", &TFE_NewContextOptions, py::return_value_policy::reference); m.def("TFE_ContextOptionsSetConfig", [](TFE_ContextOptions* options, py::bytes proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); TFE_ContextOptionsSetConfig(options, buf.get()->data, buf.get()->length, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextOptionsSetDevicePlacementPolicy", &TFE_ContextOptionsSetDevicePlacementPolicy); m.def("TFE_ContextOptionsSetLazyRemoteInputsCopy", &TFE_ContextOptionsSetLazyRemoteInputsCopy); m.def("TFE_ContextOptionsSetTfrt", 
&TFE_ContextOptionsSetTfrt); m.def("TFE_ContextOptionsSetAsync", &TFE_ContextOptionsSetAsync); m.def("TFE_DeleteContextOptions", &TFE_DeleteContextOptions, py::return_value_policy::reference); // TFE_Py_TensorShape Logic m.def("TFE_Py_TensorShapeSlice", [](const py::handle& tensors, int slice_dim) { return tensorflow::PyoOrThrow( TFE_Py_TensorShapeSlice(tensors.ptr(), slice_dim)); }); m.def("TFE_Py_TensorShapeOnDevice", [](const py::handle& tensors, int slice_dim) { return tensorflow::PyoOrThrow(TFE_Py_TensorShapeOnDevice(tensors.ptr())); }); m.def("TFE_Py_EnableInteractivePythonLogging", &TFE_Py_EnableInteractivePythonLogging); // Additional Context Logic m.def("TFE_Py_SetEagerContext", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_SetEagerContext(o.ptr())); }); m.def("TFE_ContextStartStep", [](py::handle& o) { TFE_ContextStartStep(tensorflow::InputTFE_Context(o.ptr())); }); m.def("TFE_ContextEndStep", [](py::handle& o) { TFE_ContextEndStep(tensorflow::InputTFE_Context(o.ptr())); }); m.def("TFE_Py_RegisterVSpace", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterVSpace(o.ptr())); }); m.def("TFE_Py_EncodeArg", [](const py::handle& o, bool include_tensor_ranks_only) { return tensorflow::PyoOrThrow( TFE_Py_EncodeArg(o.ptr(), include_tensor_ranks_only)); }); m.def("TFE_EnableCollectiveOps", [](const py::handle& ctx, py::bytes proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); TFE_EnableCollectiveOps(tensorflow::InputTFE_Context(ctx), buf.get()->data, buf.get()->length, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_AbortCollectiveOps", [](const py::handle& ctx, int code, const char* message) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TF_SetStatus(status.get(), static_cast<TF_Code>(code), message); 
TFE_AbortCollectiveOps(tensorflow::InputTFE_Context(ctx), status.get()); }); m.def("TFE_CollectiveOpsCheckPeerHealth", [](const py::handle& ctx, const char* task) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_CollectiveOpsCheckPeerHealth(tensorflow::InputTFE_Context(ctx), task, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TF_ListPhysicalDevices", &tensorflow::TF_ListPhysicalDevices); m.def("TF_GetDeviceDetails", &tensorflow::TF_GetDeviceDetails); m.def("TF_DeleteDeviceList", &TF_DeleteDeviceList, py::return_value_policy::reference); m.def("TF_DeviceListCount", &TF_DeviceListCount); m.def("TF_DeviceListName", [](const TF_DeviceList* list, int index) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TF_DeviceListName(list, index, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TF_DeviceListType", [](const TF_DeviceList* list, int index) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TF_DeviceListType(list, index, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TF_PickUnusedPortOrDie", &TF_PickUnusedPortOrDie); // TFE_MonitoringCounter Logic m.def("TFE_MonitoringCounterCellIncrementBy", &TFE_MonitoringCounterCellIncrementBy); m.def("TFE_MonitoringCounterCellValue", &TFE_MonitoringCounterCellValue); m.def( "TFE_MonitoringNewCounter0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter0", &TFE_MonitoringDeleteCounter0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter0", 
&TFE_MonitoringGetCellCounter0, py::return_value_policy::reference); m.def( "TFE_MonitoringNewCounter1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter1", &TFE_MonitoringDeleteCounter1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter1", &TFE_MonitoringGetCellCounter1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewCounter2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter2", &TFE_MonitoringDeleteCounter2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter2", &TFE_MonitoringGetCellCounter2, py::return_value_policy::reference); // TFE_MonitoringIntGauge Logic m.def("TFE_MonitoringIntGaugeCellSet", &TFE_MonitoringIntGaugeCellSet); m.def("TFE_MonitoringIntGaugeCellValue", &TFE_MonitoringIntGaugeCellValue); m.def( "TFE_MonitoringNewIntGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge0", &TFE_MonitoringDeleteIntGauge0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge0", &TFE_MonitoringGetCellIntGauge0, 
py::return_value_policy::reference); m.def( "TFE_MonitoringNewIntGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge1", &TFE_MonitoringDeleteIntGauge1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge1", &TFE_MonitoringGetCellIntGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewIntGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge2", &TFE_MonitoringDeleteIntGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge2", &TFE_MonitoringGetCellIntGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringStringGaugeCellSet", &TFE_MonitoringStringGaugeCellSet); m.def("TFE_MonitoringStringGaugeCellValue", &TFE_MonitoringStringGaugeCellValue); m.def( "TFE_MonitoringNewStringGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); // TFE_MonitoringStringGauge Logic m.def("TFE_MonitoringDeleteStringGauge0", &TFE_MonitoringDeleteStringGauge0); m.def("TFE_MonitoringGetCellStringGauge0", &TFE_MonitoringGetCellStringGauge0, 
py::return_value_policy::reference); m.def( "TFE_MonitoringNewStringGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteStringGauge1", &TFE_MonitoringDeleteStringGauge1); m.def("TFE_MonitoringGetCellStringGauge1", &TFE_MonitoringGetCellStringGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewStringGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge2( name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteStringGauge2", &TFE_MonitoringDeleteStringGauge2); m.def("TFE_MonitoringGetCellStringGauge2", &TFE_MonitoringGetCellStringGauge2, py::return_value_policy::reference); // TFE_MonitoringBoolGauge Logic m.def("TFE_MonitoringBoolGaugeCellSet", &TFE_MonitoringBoolGaugeCellSet); m.def("TFE_MonitoringBoolGaugeCellValue", &TFE_MonitoringBoolGaugeCellValue); m.def( "TFE_MonitoringNewBoolGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge0", &TFE_MonitoringDeleteBoolGauge0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge0", &TFE_MonitoringGetCellBoolGauge0, py::return_value_policy::reference); m.def( 
"TFE_MonitoringNewBoolGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge1", &TFE_MonitoringDeleteBoolGauge1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge1", &TFE_MonitoringGetCellBoolGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewBoolGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge2", &TFE_MonitoringDeleteBoolGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge2", &TFE_MonitoringGetCellBoolGauge2, py::return_value_policy::reference); // TFE_MonitoringSampler Logic m.def("TFE_MonitoringSamplerCellAdd", &TFE_MonitoringSamplerCellAdd); m.def("TFE_MonitoringSamplerCellValue", &TFE_MonitoringSamplerCellValue); m.def("TFE_MonitoringNewExponentialBuckets", &TFE_MonitoringNewExponentialBuckets, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBuckets", &TFE_MonitoringDeleteBuckets, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler0", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler0(name, buckets, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, 
py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler0", &TFE_MonitoringDeleteSampler0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler0", &TFE_MonitoringGetCellSampler0, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler1", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler1(name, buckets, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler1", &TFE_MonitoringDeleteSampler1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler1", &TFE_MonitoringGetCellSampler1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler2", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler2(name, buckets, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler2", &TFE_MonitoringDeleteSampler2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler2", &TFE_MonitoringGetCellSampler2, py::return_value_policy::reference); // TFE_CancellationManager Logic m.def("TFE_NewCancellationManager", &TFE_NewCancellationManager, py::return_value_policy::reference); m.def("TFE_CancellationManagerIsCancelled", &TFE_CancellationManagerIsCancelled); m.def("TFE_CancellationManagerStartCancel", &TFE_CancellationManagerStartCancel); m.def("TFE_DeleteCancellationManager", &TFE_DeleteCancellationManager, py::return_value_policy::reference); m.def("TFE_ClearScalarCache", 
&tensorflow::TFE_ClearScalarCache); // Util buffer helper functions m.def("TF_NewBufferFromString", &TF_NewBufferFromString, py::return_value_policy::reference); // DLPack functions m.def("TFE_ToDlpackCapsule", [](py::handle& o) { PyObject* eager_tensor_pyobject_ptr = o.ptr(); TFE_TensorHandle* thandle = EagerTensor_Handle(eager_tensor_pyobject_ptr); tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); void* dlm_ptr = tensorflow::TFE_HandleToDLPack(thandle, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); py::capsule capsule( dlm_ptr, tensorflow::kDlTensorCapsuleName, [](PyObject* capsule) { if (PyCapsule_IsValid(capsule, tensorflow::kDlTensorCapsuleName)) { void* dlm_rptr = PyCapsule_GetPointer(capsule, tensorflow::kDlTensorCapsuleName); if (dlm_rptr) { tensorflow::TFE_CallDLManagedTensorDeleter(dlm_rptr); PyCapsule_SetDestructor(capsule, nullptr); } } }); return capsule; }); m.def("TFE_FromDlpackCapsule", [](const py::capsule& pycapsule, const py::handle& context) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); if (absl::string_view(pycapsule.name()) != tensorflow::kDlTensorCapsuleName) { status->status = tensorflow::errors::InvalidArgument( "DLPack tensor must be a capsule with name \"dltensor\", got \"%s\". 
" "Note that a DLPack tensor may be consumed at most once.", absl::string_view(pycapsule.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } TFE_TensorHandle* thandle = tensorflow::TFE_HandleFromDLPack( pycapsule, status.get(), tensorflow::InputTFE_Context(context)); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); PyCapsule_SetName(pycapsule.ptr(), "used_dltensor"); PyCapsule_SetDestructor(pycapsule.ptr(), nullptr); PyObject* pyhandle = EagerTensorFromHandle(thandle); return tensorflow::PyoOrThrow(pyhandle); }); m.def("TFE_Py_RegisterCustomDevice", [](const py::handle& context, const py::capsule& device, const char* device_name, const py::capsule& device_info) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); if (absl::string_view(device.name()) != "TFE_CustomDevice") { status->status = tensorflow::errors::InvalidArgument( "Expected a capsule named 'TFE_CustomDevice' for the `device` " "argument, got ", absl::string_view(device.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } if (absl::string_view(device_info.name()) != "TFE_CustomDevice_DeviceInfo") { status->status = tensorflow::errors::InvalidArgument( "Expected a capsule named 'TFE_CustomDevice_DeviceInfo' for " "the `device_info` argument, got ", absl::string_view(device_info.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } // TFE_RegisterCustomDevice takes ownership PyCapsule_SetDestructor(device_info.ptr(), nullptr); TFE_RegisterCustomDevice( tensorflow::InputTFE_Context(context), *reinterpret_cast<TFE_CustomDevice*>( PyCapsule_GetPointer(device.ptr(), "TFE_CustomDevice")), device_name, PyCapsule_GetPointer(device_info.ptr(), "TFE_CustomDevice_DeviceInfo"), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); py::class_<EagerContextThreadLocalDataWrapper>(m, "EagerContextThreadLocalData") .def(py::init<py::handle, py::handle, py::handle>(), py::arg("py_eager_context"), 
py::arg("is_eager"), py::arg("device_spec")) .def_property("is_eager", &EagerContextThreadLocalDataWrapper::get_is_eager, &EagerContextThreadLocalDataWrapper::set_is_eager) .def_property( "invoking_op_callbacks", &EagerContextThreadLocalDataWrapper::get_invoking_op_callbacks, &EagerContextThreadLocalDataWrapper::set_invoking_op_callbacks) .def_property("device_name", &EagerContextThreadLocalDataWrapper::get_device_name, &EagerContextThreadLocalDataWrapper::set_device_name) .def_property("scope_name", &EagerContextThreadLocalDataWrapper::get_scope_name, &EagerContextThreadLocalDataWrapper::set_scope_name) .def_property("device_spec", &EagerContextThreadLocalDataWrapper::get_device_spec, &EagerContextThreadLocalDataWrapper::set_device_spec) .def_property( "function_call_options", &EagerContextThreadLocalDataWrapper::get_function_call_options, &EagerContextThreadLocalDataWrapper::set_function_call_options) .def_property("executor", &EagerContextThreadLocalDataWrapper::get_executor, &EagerContextThreadLocalDataWrapper::set_executor) .def_property("op_callbacks", &EagerContextThreadLocalDataWrapper::get_op_callbacks, &EagerContextThreadLocalDataWrapper::set_op_callbacks); // C API Enum py::enum_<TFE_ContextDevicePlacementPolicy>( m, "TFE_ContextDevicePlacementPolicy") .value("TFE_DEVICE_PLACEMENT_EXPLICIT", TFE_DEVICE_PLACEMENT_EXPLICIT) .value("TFE_DEVICE_PLACEMENT_WARN", TFE_DEVICE_PLACEMENT_WARN) .value("TFE_DEVICE_PLACEMENT_SILENT", TFE_DEVICE_PLACEMENT_SILENT) .value("TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32", TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32) .export_values(); py::enum_<TF_AttrType>(m, "TF_AttrType") .value("TF_ATTR_STRING", TF_ATTR_STRING) .value("TF_ATTR_INT", TF_ATTR_INT) .value("TF_ATTR_FLOAT", TF_ATTR_FLOAT) .value("TF_ATTR_BOOL", TF_ATTR_BOOL) .value("TF_ATTR_TYPE", TF_ATTR_TYPE) .value("TF_ATTR_SHAPE", TF_ATTR_SHAPE) .value("TF_ATTR_TENSOR", TF_ATTR_TENSOR) .value("TF_ATTR_PLACEHOLDER", TF_ATTR_PLACEHOLDER) .value("TF_ATTR_FUNC", TF_ATTR_FUNC) 
.export_values(); };
7857
True
1
CVE-2020-15193
False
False
False
False
AV:N/AC:L/Au:S/C:N/I:P/A:P
NETWORK
LOW
SINGLE
NONE
PARTIAL
PARTIAL
5.5
CVSS:3.1/AV:N/AC:L/PR:L/UI:N/S:U/C:N/I:H/A:L
NETWORK
LOW
LOW
NONE
UNCHANGED
NONE
HIGH
LOW
7.1
HIGH
2.8
4.2
False
[{'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rjjg-hgv6-h69v', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rjjg-hgv6-h69v', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'name': 'https://github.com/tensorflow/tensorflow/commit/22e07fb204386768e5bcbea563641ea11f96ceb8', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-908'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.2.0:*:*:*:-:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.3.0:*:*:*:-:*:*:*', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 2.2.1 and 2.3.1, the implementation of `dlpack.to_dlpack` can be made to use uninitialized memory resulting in further memory corruption. This is because the pybind11 glue code assumes that the argument is a tensor. However, there is nothing stopping users from passing in a Python object instead of a tensor. The uninitialized memory address is due to a `reinterpret_cast` Since the `PyObject` is a Python object, not a TensorFlow Tensor, the cast to `EagerTensor` fails. The issue is patched in commit 22e07fb204386768e5bcbea563641ea11f96ceb8 and is released in TensorFlow versions 2.2.1, or 2.3.1.'}]
2021-11-18T17:20Z
2020-09-25T19:15Z
Use of Uninitialized Resource
The software uses or accesses a resource that has not been initialized.
When a resource has not been properly initialized, the software may behave unexpectedly. This may lead to a crash or invalid memory access, but the consequences vary depending on the type of resource and how it is used within the software.
https://cwe.mitre.org/data/definitions/908.html
0
Mihai Maruseac
2020-09-18 19:14:17-07:00
Fix multiple vulnerabilities in `tf.experimental.dlpack.to_dlpack`. We have a use after free caused by memory coruption, a segmentation fault caused by memory corruption, several memory leaks and an undefined behavior when taking the reference of a nullptr. PiperOrigin-RevId: 332568894 Change-Id: Ife0fc05e103b35325094ae5d822ee5fdea764572
22e07fb204386768e5bcbea563641ea11f96ceb8
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::PYBIND11_MODULE
tensorflow::PYBIND11_MODULE( _pywrap_tfe , m)
['_pywrap_tfe', 'm']
PYBIND11_MODULE(_pywrap_tfe, m) { py::class_<TFE_Executor> TFE_Executor_class(m, "TFE_Executor"); py::class_<TFE_ContextOptions> TFE_ContextOptions_class(m, "TFE_ContextOptions"); py::class_<TFE_MonitoringCounter0> TFE_MonitoringCounter0_class( m, "TFE_MonitoringCounter0"); py::class_<TFE_MonitoringCounter1> TFE_MonitoringCounter1_class( m, "TFE_MonitoringCounter1"); py::class_<TFE_MonitoringCounter2> TFE_MonitoringCounter2_class( m, "TFE_MonitoringCounter2"); py::class_<TFE_MonitoringStringGauge0> TFE_MonitoringStringGauge0_class( m, "TFE_MonitoringStringGauge0"); py::class_<TFE_MonitoringStringGauge1> TFE_MonitoringStringGauge1_class( m, "TFE_MonitoringStringGauge1"); py::class_<TFE_MonitoringStringGauge2> TFE_MonitoringStringGauge2_class( m, "TFE_MonitoringStringGauge2"); py::class_<TFE_MonitoringIntGauge0> TFE_MonitoringIntGauge0_class( m, "TFE_MonitoringIntGauge0"); py::class_<TFE_MonitoringIntGauge1> TFE_MonitoringIntGauge1_class( m, "TFE_MonitoringIntGauge1"); py::class_<TFE_MonitoringIntGauge2> TFE_MonitoringIntGauge2_class( m, "TFE_MonitoringIntGauge2"); py::class_<TFE_MonitoringBoolGauge0> TFE_MonitoringBoolGauge0_class( m, "TFE_MonitoringBoolGauge0"); py::class_<TFE_MonitoringBoolGauge1> TFE_MonitoringBoolGauge1_class( m, "TFE_MonitoringBoolGauge1"); py::class_<TFE_MonitoringBoolGauge2> TFE_MonitoringBoolGauge2_class( m, "TFE_MonitoringBoolGauge2"); py::class_<TFE_MonitoringCounterCell> TFE_MonitoringCounterCell_class( m, "TFE_MonitoringCounterCell"); py::class_<TFE_MonitoringIntGaugeCell> TFE_MonitoringIntGaugeCell_class( m, "TFE_MonitoringIntGaugeCell"); py::class_<TFE_MonitoringStringGaugeCell> TFE_MonitoringStringGaugeCell_class( m, "TFE_MonitoringStringGaugeCell"); py::class_<TFE_MonitoringBoolGaugeCell> TFE_MonitoringBoolGaugeCell_class( m, "TFE_MonitoringBoolGaugeCell"); py::class_<TFE_MonitoringSamplerCell> TFE_MonitoringSamplerCell_class( m, "TFE_MonitoringSamplerCell"); py::class_<TFE_MonitoringBuckets> TFE_MonitoringBuckets_class( m, 
"TFE_MonitoringBuckets"); py::class_<TFE_MonitoringSampler0> TFE_MonitoringSampler0_class( m, "TFE_MonitoringSampler0"); py::class_<TFE_MonitoringSampler1> TFE_MonitoringSampler1_class( m, "TFE_MonitoringSampler1"); py::class_<TFE_MonitoringSampler2> TFE_MonitoringSampler2_class( m, "TFE_MonitoringSampler2"); py::class_<TFE_CancellationManager> TFE_CancellationManager_class( m, "TFE_CancellationManager"); py::class_<TF_DeviceList> TF_DeviceList_class(m, "TF_DeviceList"); py::class_<TF_Function> TF_Function_class(m, "TF_Function"); m.def("TFE_Py_RegisterExceptionClass", [](const py::handle& e) { return tensorflow::PyoOrThrow(TFE_Py_RegisterExceptionClass(e.ptr())); }); m.def("TFE_Py_RegisterFallbackExceptionClass", [](const py::handle& e) { return tensorflow::PyoOrThrow( TFE_Py_RegisterFallbackExceptionClass(e.ptr())); }); m.def( "TFE_GetTotalMemoryUsage", [](py::handle& ctx, const char* device_name) { tensorflow::EagerContext* context = tensorflow::ContextFromInterface( reinterpret_cast<tensorflow::ImmediateExecutionContext*>( tensorflow::InputTFE_Context(ctx))); tensorflow::DeviceNameUtils::ParsedName input_device_name; if (!tensorflow::DeviceNameUtils::ParseFullOrLocalName( device_name, &input_device_name)) { tensorflow::ThrowValueError( absl::StrFormat("Failed parsing device name: '%s'", device_name) .c_str()); } std::vector<tensorflow::Device*> devices = context->local_device_mgr()->ListDevices(); tensorflow::Device* matched_device = nullptr; for (int device_idx = 0; device_idx < devices.size(); device_idx++) { tensorflow::Device* device = devices[device_idx]; if (tensorflow::DeviceNameUtils::AreCompatibleDevNames( input_device_name, device->parsed_name())) { if (device->device_type() == tensorflow::DEVICE_CPU) { tensorflow::ThrowValueError( "CPU does not support getting allocator information"); } if (matched_device != nullptr) { tensorflow::ThrowValueError( absl::StrFormat( "Multiple devices matching the provided string " "'%s': '%s' and " "'%s' ", 
device_name, matched_device->name(), device->name()) .c_str()); } matched_device = device; } } if (matched_device == nullptr) { tensorflow::ThrowValueError( absl::StrFormat("No matching devices found for '%s'", device_name) .c_str()); } tensorflow::AllocatorAttributes attrs; tensorflow::Allocator* allocator = matched_device->GetAllocator(attrs); if (absl::optional<tensorflow::AllocatorStats> stats = allocator->GetStats()) { return stats->bytes_in_use; } tensorflow::ThrowTypeError( absl::StrFormat("Allocator stats not available for device '%s'", matched_device->name()) .c_str()); }); // XLA Eager Logic m.def("TF_SetXlaEnableLazyCompilation", &TF_SetXlaEnableLazyCompilation); m.def("TF_SetTfXlaCpuGlobalJit", &TF_SetTfXlaCpuGlobalJit); m.def("TF_SetXlaAutoJitMode", &TF_SetXlaAutoJitMode); m.def("TF_SetXlaConstantFoldingDisabled", &TF_SetXlaConstantFoldingDisabled); m.def("TF_GetXlaConstantFoldingDisabled", &TF_GetXlaConstantFoldingDisabled); m.def("TF_SetXlaMinClusterSize", &TF_SetXlaMinClusterSize); m.def("TF_GetCompilerIr", &tensorflow::TFE_GetCompilerIr); // MLIR Logic m.def("TF_IsMlirBridgeEnabled", [] { return tensorflow::GetMlirCommonFlags()->tf_mlir_enable_mlir_bridge; }); m.def("TF_EnableMlirBridge", [](bool enabled) { tensorflow::GetMlirCommonFlags()->tf_mlir_enable_mlir_bridge = enabled; }); m.def("TF_EnableXlaDevices", [] { tensorflow::GetXlaDeviceFlags()->tf_xla_enable_xla_devices = true; }); // // TFE_Context Logic m.def( "TFE_NewContext", [](const TFE_ContextOptions* opts) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_Context* context = TFE_NewContext(opts, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return tensorflow::PyoOrThrow(tensorflow::OutputTFE_Context(context)); }, py::return_value_policy::reference); m.def("TFE_DeleteContext", [](py::handle& o) { TFE_DeleteContext(tensorflow::InputTFE_Context(o)); }); m.def( "TFE_ContextListDevices", [](py::handle& o) { 
tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_ContextListDevices(tensorflow::InputTFE_Context(o), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_HostAddressSpace", [](py::handle& o, TF_Buffer& buf) { TFE_HostAddressSpace(tensorflow::InputTFE_Context(o), &buf); }); m.def("TFE_ContextAddFunction", [](py::handle& ctx, TF_Function* func) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAddFunction(tensorflow::InputTFE_Context(ctx), func, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextAddFunctionDef", [](py::handle& ctx, const char* serialized_function_def, size_t size) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAddFunctionDef(tensorflow::InputTFE_Context(ctx), serialized_function_def, size, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextGetFunctionDef", [](py::handle& ctx, const char* function_name, TF_Buffer& buf) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextGetFunctionDef(tensorflow::InputTFE_Context(ctx), function_name, &buf, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextRemoveFunction", [](py::handle& ctx, const char* name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextRemoveFunction(tensorflow::InputTFE_Context(ctx), name, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextHasFunction", [](py::handle& ctx, const char* name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_ContextHasFunction(tensorflow::InputTFE_Context(ctx), name); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return 
output; }); m.def("TFE_ContextEnableRunMetadata", [](py::handle& ctx) { TFE_ContextEnableRunMetadata(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextDisableRunMetadata", [](py::handle& ctx) { TFE_ContextEnableRunMetadata(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextEnableGraphCollection", [](py::handle& ctx) { TFE_ContextEnableGraphCollection(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextDisableGraphCollection", [](py::handle& ctx) { TFE_ContextDisableGraphCollection(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextExportRunMetadata", [](py::handle& ctx, TF_Buffer& buf) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextExportRunMetadata(tensorflow::InputTFE_Context(ctx), &buf, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextClearCaches", [](py::handle& o) { TFE_ContextClearCaches(tensorflow::InputTFE_Context(o)); }); m.def("TFE_GetContextId", [](py::handle& ctx) { return TFE_GetContextId(tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextGetDevicePlacementPolicy", [](py::handle& ctx) { return TFE_ContextGetDevicePlacementPolicy( tensorflow::InputTFE_Context(ctx)); }); m.def("TFE_ContextSetThreadLocalDevicePlacementPolicy", [](py::handle& ctx, TFE_ContextDevicePlacementPolicy policy) { TFE_ContextSetThreadLocalDevicePlacementPolicy( tensorflow::InputTFE_Context(ctx), policy); }); m.def("TFE_ContextSetServerDef", [](py::handle& ctx, int keep_alive_secs, py::bytes proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); TFE_ContextSetServerDef(tensorflow::InputTFE_Context(ctx), keep_alive_secs, buf.get()->data, buf.get()->length, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextUpdateServerDef", [](py::handle& ctx, int keep_alive_secs, py::bytes 
proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); Py_BEGIN_ALLOW_THREADS; TFE_ContextUpdateServerDef(tensorflow::InputTFE_Context(ctx), keep_alive_secs, buf.get()->data, buf.get()->length, status.get()); Py_END_ALLOW_THREADS; tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextCheckAlive", [](py::handle& ctx, const char* worker_name) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); bool output = TFE_ContextCheckAlive(tensorflow::InputTFE_Context(ctx), worker_name, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TFE_ContextSyncExecutors", [](py::handle& ctx) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAsyncWait(tensorflow::InputTFE_Context(ctx), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextClearExecutors", [](py::handle& ctx) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextAsyncWait(tensorflow::InputTFE_Context(ctx), status.get()); // NOTE: different from TFE_ContextSyncExecutors that raises potential // errors, deliberately ignore executor statuses in cleanup. 
}); m.def("TFE_ContextSetSoftDevicePlacement", [](py::handle& ctx, bool enable) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextSetSoftDevicePlacement(tensorflow::InputTFE_Context(ctx), enable, status.get()); }); m.def("TFE_ContextSetLogDevicePlacement", [](py::handle& ctx, bool enable) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_ContextSetSoftDevicePlacement(tensorflow::InputTFE_Context(ctx), enable, status.get()); }); // TFE_Executor logic m.def( "TFE_NewExecutor", [](const bool is_async) { TFE_Executor* exc = TFE_NewExecutor(is_async); return exc; }, py::return_value_policy::reference); m.def("TFE_DeleteExecutor", &TFE_DeleteExecutor); m.def("TFE_ExecutorIsAsync", &TFE_ExecutorIsAsync); m.def("TFE_ExecutorWaitForAllPendingNodes", [](TFE_Executor& exc) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); // NOTE: release Python GIL for pending PyFunc ops to be executed properly. 
Py_BEGIN_ALLOW_THREADS; TFE_ExecutorWaitForAllPendingNodes(&exc, status.get()); Py_END_ALLOW_THREADS; tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ExecutorClearError", &TFE_ExecutorClearError); m.def("TFE_ContextSetExecutorForThread", [](py::handle& ctx, TFE_Executor& exc) { TFE_ContextSetExecutorForThread(tensorflow::InputTFE_Context(ctx), &exc); }); m.def( "TFE_ContextGetExecutorForThread", [](py::handle& o) { return TFE_ContextGetExecutorForThread(tensorflow::InputTFE_Context(o)); }, py::return_value_policy::reference); m.def("TFE_OpNameGetAttrType", [](py::handle& ctx, const char* op_or_function_name, const char* attr_name) { int temp = 0; unsigned char* is_list = reinterpret_cast<unsigned char*>(&temp); tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_OpNameGetAttrType(tensorflow::InputTFE_Context(ctx), op_or_function_name, attr_name, is_list, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); #if PY_MAJOR_VERSION < 3 PyObject* output_pyo = PyInt_FromLong(output); #else PyObject* output_pyo = PyLong_FromLong(output); #endif if (*is_list == 1) { PyObject* list = PyList_New(1); PyList_SetItem(list, 0, output_pyo); return tensorflow::PyoOrThrow(list); } return tensorflow::PyoOrThrow(output_pyo); }); m.def("TFE_Py_InitEagerTensor", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_InitEagerTensor(o.ptr())); }); m.def("TFE_Py_PackEagerTensors", [](const py::handle& context, const py::handle& handles) { return tensorflow::TFE_Py_PackEagerTensors_wrapper(context, handles); }); m.def("TFE_Py_SetEagerTensorProfiler", &TFE_Py_SetEagerTensorProfiler); m.def("TFE_Py_RegisterJVPFunction", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterJVPFunction(o.ptr())); }); m.def("TFE_Py_RegisterGradientFunction", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterGradientFunction(o.ptr())); }); m.def("TFE_Py_Execute", [](const 
py::handle& context, const char* device_name, const char* op_name, const py::handle& inputs, const py::handle& attrs, const py::handle& num_outputs) { return tensorflow::TFE_Py_ExecuteCancelable_wrapper( context, device_name, op_name, inputs, attrs.ptr(), nullptr, num_outputs); }); m.def( "TFE_Py_ExecuteCancelable", [](const py::handle& context, const char* device_name, const char* op_name, const py::handle& inputs, const py::handle& attrs, TFE_CancellationManager& cancellation_manager, const py::handle& num_outputs) { return tensorflow::TFE_Py_ExecuteCancelable_wrapper( context, device_name, op_name, inputs, attrs.ptr(), &cancellation_manager, num_outputs); }); m.def("TFE_Py_FastPathExecute", [](const py::args args) { // TFE_Py_FastPathExecute requires error checking prior to returning. return tensorflow::PyoOrThrow(TFE_Py_FastPathExecute_C(args.ptr())); }); m.def("TFE_Py_RecordGradient", [](const py::handle& op_name, const py::handle& inputs, const py::handle& attrs, const py::handle& results, const py::handle& forward_pass_name_scope) { return tensorflow::PyoOrThrow(TFE_Py_RecordGradient( op_name.ptr(), inputs.ptr(), attrs.ptr(), results.ptr(), forward_pass_name_scope.ptr())); }); m.def("TFE_Py_UID", []() { return tensorflow::PyoOrThrow(TFE_Py_UID()); }); // TFE_Py_Tape Logic m.def("TFE_Py_TapeSetNew", [](const py::handle& persistent, const py::handle& watch_accessed_variables) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetNew(persistent.ptr(), watch_accessed_variables.ptr())); }); m.def("TFE_Py_TapeSetAdd", [](const py::handle& tape) { TFE_Py_TapeSetAdd(tape.ptr()); }); m.def("TFE_Py_TapeSetRemove", [](const py::handle& tape) { TFE_Py_TapeSetRemove(tape.ptr()); }); m.def("TFE_Py_TapeSetStopOnThread", &TFE_Py_TapeSetStopOnThread); m.def("TFE_Py_TapeSetRestartOnThread", &TFE_Py_TapeSetRestartOnThread); m.def("TFE_Py_TapeSetIsStopped", []() { return tensorflow::PyoOrThrow(TFE_Py_TapeSetIsStopped()); }); m.def("TFE_Py_TapeSetIsEmpty", []() { return 
tensorflow::PyoOrThrow(TFE_Py_TapeSetIsEmpty()); }); m.def("TFE_Py_TapeSetShouldRecordBackprop", [](const py::handle& tensors) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetShouldRecordBackprop(tensors.ptr())); }); m.def("TFE_Py_TapeSetPossibleGradientTypes", [](const py::handle& tensors) { return tensorflow::PyoOrThrow( TFE_Py_TapeSetPossibleGradientTypes(tensors.ptr())); }); m.def("TFE_Py_TapeSetDeleteTrace", &TFE_Py_TapeSetDeleteTrace); m.def("TFE_Py_TapeSetRecordOperation", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function, const py::handle& forward_function) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperation( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr(), forward_function.ptr())); }); m.def( "TFE_Py_TapeSetRecordOperationBackprop", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperationBackprop( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr())); }); m.def( "TFE_Py_TapeSetRecordOperationForwardprop", [](const py::handle& op_type, const py::handle& output_tensors, const py::handle& input_tensors, const py::handle& backward_function, const py::handle& forwardprop_output_indices) { return tensorflow::PyoOrThrow(TFE_Py_TapeSetRecordOperationForwardprop( op_type.ptr(), output_tensors.ptr(), input_tensors.ptr(), backward_function.ptr(), forwardprop_output_indices.ptr())); }); m.def("TFE_Py_TapeGradient", [](const py::handle& tape, const py::handle& target, const py::handle& sources, const py::handle& output_gradients, const py::handle& sources_raw, const py::handle& unconnected_gradients) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); PyObject* output = TFE_Py_TapeGradient( tape.ptr(), target.ptr(), sources.ptr(), 
output_gradients.ptr(), sources_raw.ptr(), unconnected_gradients.ptr(), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return tensorflow::PyoOrThrow(output); }); m.def("TFE_Py_TapeVariableAccessed", [](const py::handle& variable) { TFE_Py_TapeVariableAccessed(variable.ptr()); }); m.def("TFE_Py_TapeWatch", [](const py::handle& tape, const py::handle& tensor) { TFE_Py_TapeWatch(tape.ptr(), tensor.ptr()); }); m.def("TFE_Py_TapeWatchVariable", [](const py::handle& tape, const py::handle& variable) { TFE_Py_TapeWatchVariable(tape.ptr(), variable.ptr()); }); m.def("TFE_Py_TapeWatchedVariables", [](const py::handle& tape) { return tensorflow::PyoOrThrow(TFE_Py_TapeWatchedVariables(tape.ptr())); }); // TFE_Py_VariableWatcher logic. m.def("TFE_Py_VariableWatcherNew", []() { return tensorflow::PyoOrThrow(TFE_Py_VariableWatcherNew()); }); m.def("TFE_Py_VariableWatcherRemove", [](const py::handle& variable_watcher) { TFE_Py_VariableWatcherRemove(variable_watcher.ptr()); }); m.def("TFE_Py_VariableWatcherVariableAccessed", [](const py::handle& variable) { TFE_Py_VariableWatcherVariableAccessed(variable.ptr()); }); m.def("TFE_Py_VariableWatcherWatchedVariables", [](const py::handle& variable_watcher) { return tensorflow::PyoOrThrow( TFE_Py_VariableWatcherWatchedVariables(variable_watcher.ptr())); }); // TFE_Py_ForwardAccumulator logic. 
m.def("TFE_Py_ForwardAccumulatorNew", [](bool use_batch) { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorNew(use_batch)); }); m.def("TFE_Py_ForwardAccumulatorSetAdd", [](const py::handle& accumulator) { return tensorflow::PyoOrThrow( TFE_Py_ForwardAccumulatorSetAdd(accumulator.ptr())); }); m.def("TFE_Py_ForwardAccumulatorSetRemove", [](const py::handle& accumulator) { TFE_Py_ForwardAccumulatorSetRemove(accumulator.ptr()); }); m.def("TFE_Py_ForwardAccumulatorWatch", [](const py::handle& accumulator, const py::handle& tensor, const py::handle& tangent) { TFE_Py_ForwardAccumulatorWatch(accumulator.ptr(), tensor.ptr(), tangent.ptr()); }); m.def("TFE_Py_ForwardAccumulatorJVP", [](const py::handle& accumulator, const py::handle& tensor) { return tensorflow::PyoOrThrow( TFE_Py_ForwardAccumulatorJVP(accumulator.ptr(), tensor.ptr())); }); m.def("TFE_Py_ForwardAccumulatorPushState", []() { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorPushState()); }); m.def("TFE_Py_ForwardAccumulatorPopState", []() { return tensorflow::PyoOrThrow(TFE_Py_ForwardAccumulatorPopState()); }); m.def("TFE_Py_PackJVPs", [](const py::handle& tensors) { return tensorflow::PyoOrThrow(TFE_Py_PackJVPs(tensors.ptr())); }); // TFE_ContextOptions Logic m.def("TFE_NewContextOptions", &TFE_NewContextOptions, py::return_value_policy::reference); m.def("TFE_ContextOptionsSetConfig", [](TFE_ContextOptions* options, py::bytes proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); TFE_ContextOptionsSetConfig(options, buf.get()->data, buf.get()->length, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_ContextOptionsSetDevicePlacementPolicy", &TFE_ContextOptionsSetDevicePlacementPolicy); m.def("TFE_ContextOptionsSetLazyRemoteInputsCopy", &TFE_ContextOptionsSetLazyRemoteInputsCopy); m.def("TFE_ContextOptionsSetTfrt", 
&TFE_ContextOptionsSetTfrt); m.def("TFE_ContextOptionsSetAsync", &TFE_ContextOptionsSetAsync); m.def("TFE_DeleteContextOptions", &TFE_DeleteContextOptions, py::return_value_policy::reference); // TFE_Py_TensorShape Logic m.def("TFE_Py_TensorShapeSlice", [](const py::handle& tensors, int slice_dim) { return tensorflow::PyoOrThrow( TFE_Py_TensorShapeSlice(tensors.ptr(), slice_dim)); }); m.def("TFE_Py_TensorShapeOnDevice", [](const py::handle& tensors, int slice_dim) { return tensorflow::PyoOrThrow(TFE_Py_TensorShapeOnDevice(tensors.ptr())); }); m.def("TFE_Py_EnableInteractivePythonLogging", &TFE_Py_EnableInteractivePythonLogging); // Additional Context Logic m.def("TFE_Py_SetEagerContext", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_SetEagerContext(o.ptr())); }); m.def("TFE_ContextStartStep", [](py::handle& o) { TFE_ContextStartStep(tensorflow::InputTFE_Context(o.ptr())); }); m.def("TFE_ContextEndStep", [](py::handle& o) { TFE_ContextEndStep(tensorflow::InputTFE_Context(o.ptr())); }); m.def("TFE_Py_RegisterVSpace", [](const py::handle& o) { return tensorflow::PyoOrThrow(TFE_Py_RegisterVSpace(o.ptr())); }); m.def("TFE_Py_EncodeArg", [](const py::handle& o, bool include_tensor_ranks_only) { return tensorflow::PyoOrThrow( TFE_Py_EncodeArg(o.ptr(), include_tensor_ranks_only)); }); m.def("TFE_EnableCollectiveOps", [](const py::handle& ctx, py::bytes proto) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); tensorflow::Safe_TF_BufferPtr buf = tensorflow::make_safe(tensorflow::ProtoStringToTFBuffer(proto.ptr())); TFE_EnableCollectiveOps(tensorflow::InputTFE_Context(ctx), buf.get()->data, buf.get()->length, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TFE_AbortCollectiveOps", [](const py::handle& ctx, int code, const char* message) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TF_SetStatus(status.get(), static_cast<TF_Code>(code), message); 
TFE_AbortCollectiveOps(tensorflow::InputTFE_Context(ctx), status.get()); }); m.def("TFE_CollectiveOpsCheckPeerHealth", [](const py::handle& ctx, const char* task) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); TFE_CollectiveOpsCheckPeerHealth(tensorflow::InputTFE_Context(ctx), task, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); m.def("TF_ListPhysicalDevices", &tensorflow::TF_ListPhysicalDevices); m.def("TF_GetDeviceDetails", &tensorflow::TF_GetDeviceDetails); m.def("TF_DeleteDeviceList", &TF_DeleteDeviceList, py::return_value_policy::reference); m.def("TF_DeviceListCount", &TF_DeviceListCount); m.def("TF_DeviceListName", [](const TF_DeviceList* list, int index) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TF_DeviceListName(list, index, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TF_DeviceListType", [](const TF_DeviceList* list, int index) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TF_DeviceListType(list, index, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }); m.def("TF_PickUnusedPortOrDie", &TF_PickUnusedPortOrDie); // TFE_MonitoringCounter Logic m.def("TFE_MonitoringCounterCellIncrementBy", &TFE_MonitoringCounterCellIncrementBy); m.def("TFE_MonitoringCounterCellValue", &TFE_MonitoringCounterCellValue); m.def( "TFE_MonitoringNewCounter0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter0", &TFE_MonitoringDeleteCounter0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter0", 
&TFE_MonitoringGetCellCounter0, py::return_value_policy::reference); m.def( "TFE_MonitoringNewCounter1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter1", &TFE_MonitoringDeleteCounter1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter1", &TFE_MonitoringGetCellCounter1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewCounter2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewCounter2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteCounter2", &TFE_MonitoringDeleteCounter2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellCounter2", &TFE_MonitoringGetCellCounter2, py::return_value_policy::reference); // TFE_MonitoringIntGauge Logic m.def("TFE_MonitoringIntGaugeCellSet", &TFE_MonitoringIntGaugeCellSet); m.def("TFE_MonitoringIntGaugeCellValue", &TFE_MonitoringIntGaugeCellValue); m.def( "TFE_MonitoringNewIntGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge0", &TFE_MonitoringDeleteIntGauge0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge0", &TFE_MonitoringGetCellIntGauge0, 
py::return_value_policy::reference); m.def( "TFE_MonitoringNewIntGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge1", &TFE_MonitoringDeleteIntGauge1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge1", &TFE_MonitoringGetCellIntGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewIntGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewIntGauge2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteIntGauge2", &TFE_MonitoringDeleteIntGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellIntGauge2", &TFE_MonitoringGetCellIntGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringStringGaugeCellSet", &TFE_MonitoringStringGaugeCellSet); m.def("TFE_MonitoringStringGaugeCellValue", &TFE_MonitoringStringGaugeCellValue); m.def( "TFE_MonitoringNewStringGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); // TFE_MonitoringStringGauge Logic m.def("TFE_MonitoringDeleteStringGauge0", &TFE_MonitoringDeleteStringGauge0); m.def("TFE_MonitoringGetCellStringGauge0", &TFE_MonitoringGetCellStringGauge0, 
py::return_value_policy::reference); m.def( "TFE_MonitoringNewStringGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteStringGauge1", &TFE_MonitoringDeleteStringGauge1); m.def("TFE_MonitoringGetCellStringGauge1", &TFE_MonitoringGetCellStringGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewStringGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewStringGauge2( name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteStringGauge2", &TFE_MonitoringDeleteStringGauge2); m.def("TFE_MonitoringGetCellStringGauge2", &TFE_MonitoringGetCellStringGauge2, py::return_value_policy::reference); // TFE_MonitoringBoolGauge Logic m.def("TFE_MonitoringBoolGaugeCellSet", &TFE_MonitoringBoolGaugeCellSet); m.def("TFE_MonitoringBoolGaugeCellValue", &TFE_MonitoringBoolGaugeCellValue); m.def( "TFE_MonitoringNewBoolGauge0", [](const char* name, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge0(name, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge0", &TFE_MonitoringDeleteBoolGauge0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge0", &TFE_MonitoringGetCellBoolGauge0, py::return_value_policy::reference); m.def( 
"TFE_MonitoringNewBoolGauge1", [](const char* name, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge1(name, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge1", &TFE_MonitoringDeleteBoolGauge1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge1", &TFE_MonitoringGetCellBoolGauge1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewBoolGauge2", [](const char* name, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewBoolGauge2(name, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBoolGauge2", &TFE_MonitoringDeleteBoolGauge2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellBoolGauge2", &TFE_MonitoringGetCellBoolGauge2, py::return_value_policy::reference); // TFE_MonitoringSampler Logic m.def("TFE_MonitoringSamplerCellAdd", &TFE_MonitoringSamplerCellAdd); m.def("TFE_MonitoringSamplerCellValue", &TFE_MonitoringSamplerCellValue); m.def("TFE_MonitoringNewExponentialBuckets", &TFE_MonitoringNewExponentialBuckets, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteBuckets", &TFE_MonitoringDeleteBuckets, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler0", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler0(name, buckets, status.get(), description); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, 
py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler0", &TFE_MonitoringDeleteSampler0, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler0", &TFE_MonitoringGetCellSampler0, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler1", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description, const char* label1) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler1(name, buckets, status.get(), description, label1); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler1", &TFE_MonitoringDeleteSampler1, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler1", &TFE_MonitoringGetCellSampler1, py::return_value_policy::reference); m.def( "TFE_MonitoringNewSampler2", [](const char* name, TFE_MonitoringBuckets* buckets, const char* description, const char* label1, const char* label2) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); auto output = TFE_MonitoringNewSampler2(name, buckets, status.get(), description, label1, label2); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); return output; }, py::return_value_policy::reference); m.def("TFE_MonitoringDeleteSampler2", &TFE_MonitoringDeleteSampler2, py::return_value_policy::reference); m.def("TFE_MonitoringGetCellSampler2", &TFE_MonitoringGetCellSampler2, py::return_value_policy::reference); // TFE_CancellationManager Logic m.def("TFE_NewCancellationManager", &TFE_NewCancellationManager, py::return_value_policy::reference); m.def("TFE_CancellationManagerIsCancelled", &TFE_CancellationManagerIsCancelled); m.def("TFE_CancellationManagerStartCancel", &TFE_CancellationManagerStartCancel); m.def("TFE_DeleteCancellationManager", &TFE_DeleteCancellationManager, py::return_value_policy::reference); m.def("TFE_ClearScalarCache", 
&tensorflow::TFE_ClearScalarCache); // Util buffer helper functions m.def("TF_NewBufferFromString", &TF_NewBufferFromString, py::return_value_policy::reference); // DLPack functions m.def("TFE_ToDlpackCapsule", [](py::handle& o) { PyObject* eager_tensor_pyobject_ptr = o.ptr(); TFE_TensorHandle* thandle = EagerTensor_Handle(eager_tensor_pyobject_ptr); tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); void* dlm_ptr = tensorflow::TFE_HandleToDLPack(thandle, status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); py::capsule capsule( dlm_ptr, tensorflow::kDlTensorCapsuleName, [](PyObject* capsule) { if (PyCapsule_IsValid(capsule, tensorflow::kDlTensorCapsuleName)) { void* dlm_rptr = PyCapsule_GetPointer(capsule, tensorflow::kDlTensorCapsuleName); if (dlm_rptr) { tensorflow::TFE_CallDLManagedTensorDeleter(dlm_rptr); PyCapsule_SetDestructor(capsule, nullptr); } } }); return capsule; }); m.def("TFE_FromDlpackCapsule", [](const py::capsule& pycapsule, const py::handle& context) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); if (absl::string_view(pycapsule.name()) != tensorflow::kDlTensorCapsuleName) { status->status = tensorflow::errors::InvalidArgument( "DLPack tensor must be a capsule with name \"dltensor\", got \"%s\". 
" "Note that a DLPack tensor may be consumed at most once.", absl::string_view(pycapsule.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } TFE_TensorHandle* thandle = tensorflow::TFE_HandleFromDLPack( pycapsule, status.get(), tensorflow::InputTFE_Context(context)); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); PyCapsule_SetName(pycapsule.ptr(), "used_dltensor"); PyCapsule_SetDestructor(pycapsule.ptr(), nullptr); PyObject* pyhandle = EagerTensorFromHandle(thandle); return tensorflow::PyoOrThrow(pyhandle); }); m.def("TFE_Py_RegisterCustomDevice", [](const py::handle& context, const py::capsule& device, const char* device_name, const py::capsule& device_info) { tensorflow::Safe_TF_StatusPtr status = tensorflow::make_safe(TF_NewStatus()); if (absl::string_view(device.name()) != "TFE_CustomDevice") { status->status = tensorflow::errors::InvalidArgument( "Expected a capsule named 'TFE_CustomDevice' for the `device` " "argument, got ", absl::string_view(device.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } if (absl::string_view(device_info.name()) != "TFE_CustomDevice_DeviceInfo") { status->status = tensorflow::errors::InvalidArgument( "Expected a capsule named 'TFE_CustomDevice_DeviceInfo' for " "the `device_info` argument, got ", absl::string_view(device_info.name())); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); } // TFE_RegisterCustomDevice takes ownership PyCapsule_SetDestructor(device_info.ptr(), nullptr); TFE_RegisterCustomDevice( tensorflow::InputTFE_Context(context), *reinterpret_cast<TFE_CustomDevice*>( PyCapsule_GetPointer(device.ptr(), "TFE_CustomDevice")), device_name, PyCapsule_GetPointer(device_info.ptr(), "TFE_CustomDevice_DeviceInfo"), status.get()); tensorflow::MaybeRaiseRegisteredFromTFStatus(status.get()); }); py::class_<EagerContextThreadLocalDataWrapper>(m, "EagerContextThreadLocalData") .def(py::init<py::handle, py::handle, py::handle>(), py::arg("py_eager_context"), 
py::arg("is_eager"), py::arg("device_spec")) .def_property("is_eager", &EagerContextThreadLocalDataWrapper::get_is_eager, &EagerContextThreadLocalDataWrapper::set_is_eager) .def_property( "invoking_op_callbacks", &EagerContextThreadLocalDataWrapper::get_invoking_op_callbacks, &EagerContextThreadLocalDataWrapper::set_invoking_op_callbacks) .def_property("device_name", &EagerContextThreadLocalDataWrapper::get_device_name, &EagerContextThreadLocalDataWrapper::set_device_name) .def_property("scope_name", &EagerContextThreadLocalDataWrapper::get_scope_name, &EagerContextThreadLocalDataWrapper::set_scope_name) .def_property("device_spec", &EagerContextThreadLocalDataWrapper::get_device_spec, &EagerContextThreadLocalDataWrapper::set_device_spec) .def_property( "function_call_options", &EagerContextThreadLocalDataWrapper::get_function_call_options, &EagerContextThreadLocalDataWrapper::set_function_call_options) .def_property("executor", &EagerContextThreadLocalDataWrapper::get_executor, &EagerContextThreadLocalDataWrapper::set_executor) .def_property("op_callbacks", &EagerContextThreadLocalDataWrapper::get_op_callbacks, &EagerContextThreadLocalDataWrapper::set_op_callbacks); // C API Enum py::enum_<TFE_ContextDevicePlacementPolicy>( m, "TFE_ContextDevicePlacementPolicy") .value("TFE_DEVICE_PLACEMENT_EXPLICIT", TFE_DEVICE_PLACEMENT_EXPLICIT) .value("TFE_DEVICE_PLACEMENT_WARN", TFE_DEVICE_PLACEMENT_WARN) .value("TFE_DEVICE_PLACEMENT_SILENT", TFE_DEVICE_PLACEMENT_SILENT) .value("TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32", TFE_DEVICE_PLACEMENT_SILENT_FOR_INT32) .export_values(); py::enum_<TF_AttrType>(m, "TF_AttrType") .value("TF_ATTR_STRING", TF_ATTR_STRING) .value("TF_ATTR_INT", TF_ATTR_INT) .value("TF_ATTR_FLOAT", TF_ATTR_FLOAT) .value("TF_ATTR_BOOL", TF_ATTR_BOOL) .value("TF_ATTR_TYPE", TF_ATTR_TYPE) .value("TF_ATTR_SHAPE", TF_ATTR_SHAPE) .value("TF_ATTR_TENSOR", TF_ATTR_TENSOR) .value("TF_ATTR_PLACEHOLDER", TF_ATTR_PLACEHOLDER) .value("TF_ATTR_FUNC", TF_ATTR_FUNC) 
.export_values(); };
7857
True
1
CVE-2020-15190
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
LOW
5.3
MEDIUM
3.9
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/da8558533d925694483d2c136a9220d6d49d843c', 'name': 'https://github.com/tensorflow/tensorflow/commit/da8558533d925694483d2c136a9220d6d49d843c', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4g9f-63rx-5cw4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4g9f-63rx-5cw4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}, {'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `tf.raw_ops.Switch` operation takes as input a tensor and a boolean and outputs two tensors. Depending on the boolean value, one of the tensors is exactly the input tensor whereas the other one should be an empty tensor. However, the eager runtime traverses all tensors in the output. Since only one of the tensors is defined, the other one is `nullptr`, hence we are binding a reference to `nullptr`. This is undefined behavior and reported as an error if compiling with `-fsanitize=null`. In this case, this results in a segmentation fault The issue is patched in commit da8558533d925694483d2c136a9220d6d49d843c, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:18Z
2020-09-25T19:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2020-09-18 21:16:05-07:00
Fix undefined behavior in `tf.raw_ops.Switch` in eager mode. PiperOrigin-RevId: 332578058 Change-Id: I9727571d2f21476b10d8aa27c1b7176564b76ac9
da8558533d925694483d2c136a9220d6d49d843c
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::KernelAndDeviceOp::Run
tensorflow::KernelAndDeviceOp::Run( ScopedStepContainer * step_container , const EagerKernelArgs & inputs , std :: vector<EagerKernelRet> * outputs , CancellationManager * cancellation_manager , const absl :: optional<EagerRemoteFunctionParams> & remote_func_params)
['step_container', 'inputs', 'outputs', 'cancellation_manager', 'remote_func_params']
Status KernelAndDeviceOp::Run( ScopedStepContainer* step_container, const EagerKernelArgs& inputs, std::vector<EagerKernelRet>* outputs, CancellationManager* cancellation_manager, const absl::optional<EagerRemoteFunctionParams>& remote_func_params) { OpKernelContext::Params params; params.device = device_; params.frame_iter = FrameAndIter(0, 0); params.inputs = inputs.GetTensorValues(); params.op_kernel = kernel_.get(); params.resource_manager = device_->resource_manager(); params.input_alloc_attrs = &input_alloc_attrs_; params.output_attr_array = output_alloc_attrs_.data(); params.function_library = flr_; params.slice_reader_cache = &slice_reader_cache_; params.rendezvous = rendezvous_; OpExecutionState* op_execution_state = nullptr; CancellationManager default_cancellation_manager; if (cancellation_manager) { params.cancellation_manager = cancellation_manager; } else if (kernel_->is_deferred()) { op_execution_state = new OpExecutionState; params.cancellation_manager = &op_execution_state->cancellation_manager; params.inc_num_deferred_ops_function = [op_execution_state]() { op_execution_state->Ref(); }; params.dec_num_deferred_ops_function = [op_execution_state]() { op_execution_state->Unref(); }; } else { params.cancellation_manager = &default_cancellation_manager; } params.log_memory = log_memory_; params.runner = get_runner(); params.step_container = step_container == nullptr ? &step_container_ : step_container; auto step_container_cleanup = gtl::MakeCleanup([step_container, this] { if (step_container == nullptr) { this->step_container_.CleanUp(); } }); params.collective_executor = collective_executor_ ? collective_executor_->get() : nullptr; OpKernelContext context(&params); { port::ScopedFlushDenormal flush; port::ScopedSetRound round(FE_TONEAREST); // 'AnnotatedTraceMe' will trace both scheduling time on host and execution // time on device of the OpKernel. 
profiler::AnnotatedTraceMe activity( [&] { return kernel_->TraceString(context, /*verbose=*/false); }, profiler::TraceMeLevel::kInfo); device_->Compute(kernel_.get(), &context); } // Clean up execution op_execution_state if deferred ops aren't running. if (op_execution_state != nullptr) { op_execution_state->Unref(); } if (!context.status().ok()) return context.status(); if (outputs != nullptr) { outputs->clear(); for (int i = 0; i < context.num_outputs(); ++i) { outputs->push_back(Tensor(*context.mutable_output(i))); } } return Status::OK(); }
446
True
1
CVE-2020-15190
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:L
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
LOW
5.3
MEDIUM
3.9
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/da8558533d925694483d2c136a9220d6d49d843c', 'name': 'https://github.com/tensorflow/tensorflow/commit/da8558533d925694483d2c136a9220d6d49d843c', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4g9f-63rx-5cw4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4g9f-63rx-5cw4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.3.1', 'refsource': 'MISC', 'tags': ['Third Party Advisory']}, {'url': 'http://lists.opensuse.org/opensuse-security-announce/2020-10/msg00065.html', 'name': 'openSUSE-SU-2020:1766', 'refsource': 'SUSE', 'tags': ['Mailing List', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}, {'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionEndExcluding': '1.15.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:-:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.1', 'cpe_name': []}]}, {'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:o:opensuse:leap:15.2:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In Tensorflow before versions 1.15.4, 2.0.3, 2.1.2, 2.2.1 and 2.3.1, the `tf.raw_ops.Switch` operation takes as input a tensor and a boolean and outputs two tensors. Depending on the boolean value, one of the tensors is exactly the input tensor whereas the other one should be an empty tensor. However, the eager runtime traverses all tensors in the output. Since only one of the tensors is defined, the other one is `nullptr`, hence we are binding a reference to `nullptr`. This is undefined behavior and reported as an error if compiling with `-fsanitize=null`. In this case, this results in a segmentation fault The issue is patched in commit da8558533d925694483d2c136a9220d6d49d843c, and is released in TensorFlow versions 1.15.4, 2.0.3, 2.1.2, 2.2.1, or 2.3.1.'}]
2021-11-18T17:18Z
2020-09-25T19:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2020-09-18 21:16:05-07:00
Fix undefined behavior in `tf.raw_ops.Switch` in eager mode. PiperOrigin-RevId: 332578058 Change-Id: I9727571d2f21476b10d8aa27c1b7176564b76ac9
da8558533d925694483d2c136a9220d6d49d843c
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::KernelAndDeviceOp::Run
tensorflow::KernelAndDeviceOp::Run( ScopedStepContainer * step_container , const EagerKernelArgs & inputs , std :: vector<EagerKernelRet> * outputs , CancellationManager * cancellation_manager , const absl :: optional<EagerRemoteFunctionParams> & remote_func_params)
['step_container', 'inputs', 'outputs', 'cancellation_manager', 'remote_func_params']
Status KernelAndDeviceOp::Run( ScopedStepContainer* step_container, const EagerKernelArgs& inputs, std::vector<EagerKernelRet>* outputs, CancellationManager* cancellation_manager, const absl::optional<EagerRemoteFunctionParams>& remote_func_params) { OpKernelContext::Params params; params.device = device_; params.frame_iter = FrameAndIter(0, 0); params.inputs = inputs.GetTensorValues(); params.op_kernel = kernel_.get(); params.resource_manager = device_->resource_manager(); params.input_alloc_attrs = &input_alloc_attrs_; params.output_attr_array = output_alloc_attrs_.data(); params.function_library = flr_; params.slice_reader_cache = &slice_reader_cache_; params.rendezvous = rendezvous_; OpExecutionState* op_execution_state = nullptr; CancellationManager default_cancellation_manager; if (cancellation_manager) { params.cancellation_manager = cancellation_manager; } else if (kernel_->is_deferred()) { op_execution_state = new OpExecutionState; params.cancellation_manager = &op_execution_state->cancellation_manager; params.inc_num_deferred_ops_function = [op_execution_state]() { op_execution_state->Ref(); }; params.dec_num_deferred_ops_function = [op_execution_state]() { op_execution_state->Unref(); }; } else { params.cancellation_manager = &default_cancellation_manager; } params.log_memory = log_memory_; params.runner = get_runner(); params.step_container = step_container == nullptr ? &step_container_ : step_container; auto step_container_cleanup = gtl::MakeCleanup([step_container, this] { if (step_container == nullptr) { this->step_container_.CleanUp(); } }); params.collective_executor = collective_executor_ ? collective_executor_->get() : nullptr; OpKernelContext context(&params); { port::ScopedFlushDenormal flush; port::ScopedSetRound round(FE_TONEAREST); // 'AnnotatedTraceMe' will trace both scheduling time on host and execution // time on device of the OpKernel. 
profiler::AnnotatedTraceMe activity( [&] { return kernel_->TraceString(context, /*verbose=*/false); }, profiler::TraceMeLevel::kInfo); device_->Compute(kernel_.get(), &context); } // Clean up execution op_execution_state if deferred ops aren't running. if (op_execution_state != nullptr) { op_execution_state->Unref(); } if (!context.status().ok()) return context.status(); if (outputs != nullptr) { outputs->clear(); for (int i = 0; i < context.num_outputs(); ++i) { outputs->push_back(Tensor(*context.mutable_output(i))); } } return Status::OK(); }
446
True
1
CVE-2020-26266
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L
LOCAL
LOW
LOW
NONE
UNCHANGED
LOW
LOW
LOW
5.3
MEDIUM
1.8
3.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'name': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-908'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.5', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In affected versions of TensorFlow under certain cases a saved model can trigger use of uninitialized values during code execution. This is caused by having tensor buffers be filled with the default value of the type but forgetting to default initialize the quantized floating point types in Eigen. This is fixed in versions 1.15.5, 2.0.4, 2.1.3, 2.2.2, 2.3.2, and 2.4.0.'}]
2020-12-14T17:54Z
2020-12-10T23:15Z
Use of Uninitialized Resource
The software uses or accesses a resource that has not been initialized.
When a resource has not been properly initialized, the software may behave unexpectedly. This may lead to a crash or invalid memory access, but the consequences vary depending on the type of resource and how it is used within the software.
https://cwe.mitre.org/data/definitions/908.html
0
Mihai Maruseac
2020-11-24 11:40:42-08:00
Default initialize fixed point Eigen types. In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN. PiperOrigin-RevId: 344101137 Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
Eigen::QInt16::QInt16
Eigen::QInt16::QInt16()
[]
QInt16() {}
5
True
1
CVE-2020-26266
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L
LOCAL
LOW
LOW
NONE
UNCHANGED
LOW
LOW
LOW
5.3
MEDIUM
1.8
3.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'name': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-908'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.5', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In affected versions of TensorFlow under certain cases a saved model can trigger use of uninitialized values during code execution. This is caused by having tensor buffers be filled with the default value of the type but forgetting to default initialize the quantized floating point types in Eigen. This is fixed in versions 1.15.5, 2.0.4, 2.1.3, 2.2.2, 2.3.2, and 2.4.0.'}]
2020-12-14T17:54Z
2020-12-10T23:15Z
Use of Uninitialized Resource
The software uses or accesses a resource that has not been initialized.
When a resource has not been properly initialized, the software may behave unexpectedly. This may lead to a crash or invalid memory access, but the consequences vary depending on the type of resource and how it is used within the software.
https://cwe.mitre.org/data/definitions/908.html
0
Mihai Maruseac
2020-11-24 11:40:42-08:00
Default initialize fixed point Eigen types. In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN. PiperOrigin-RevId: 344101137 Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
Eigen::QInt32::QInt32
Eigen::QInt32::QInt32()
[]
QInt32() {}
5
True
1
CVE-2020-26266
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L
LOCAL
LOW
LOW
NONE
UNCHANGED
LOW
LOW
LOW
5.3
MEDIUM
1.8
3.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'name': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-908'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.5', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In affected versions of TensorFlow under certain cases a saved model can trigger use of uninitialized values during code execution. This is caused by having tensor buffers be filled with the default value of the type but forgetting to default initialize the quantized floating point types in Eigen. This is fixed in versions 1.15.5, 2.0.4, 2.1.3, 2.2.2, 2.3.2, and 2.4.0.'}]
2020-12-14T17:54Z
2020-12-10T23:15Z
Use of Uninitialized Resource
The software uses or accesses a resource that has not been initialized.
When a resource has not been properly initialized, the software may behave unexpectedly. This may lead to a crash or invalid memory access, but the consequences vary depending on the type of resource and how it is used within the software.
https://cwe.mitre.org/data/definitions/908.html
0
Mihai Maruseac
2020-11-24 11:40:42-08:00
Default initialize fixed point Eigen types. In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN. PiperOrigin-RevId: 344101137 Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
Eigen::QInt8::QInt8
Eigen::QInt8::QInt8()
[]
QInt8() {}
5
True
1
CVE-2020-26266
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L
LOCAL
LOW
LOW
NONE
UNCHANGED
LOW
LOW
LOW
5.3
MEDIUM
1.8
3.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'name': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-908'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.5', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In affected versions of TensorFlow under certain cases a saved model can trigger use of uninitialized values during code execution. This is caused by having tensor buffers be filled with the default value of the type but forgetting to default initialize the quantized floating point types in Eigen. This is fixed in versions 1.15.5, 2.0.4, 2.1.3, 2.2.2, 2.3.2, and 2.4.0.'}]
2020-12-14T17:54Z
2020-12-10T23:15Z
Use of Uninitialized Resource
The software uses or accesses a resource that has not been initialized.
When a resource has not been properly initialized, the software may behave unexpectedly. This may lead to a crash or invalid memory access, but the consequences vary depending on the type of resource and how it is used within the software.
https://cwe.mitre.org/data/definitions/908.html
0
Mihai Maruseac
2020-11-24 11:40:42-08:00
Default initialize fixed point Eigen types. In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN. PiperOrigin-RevId: 344101137 Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
Eigen::QUInt16::QUInt16
Eigen::QUInt16::QUInt16()
[]
QUInt16() {}
5
True
1
CVE-2020-26266
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:L/I:L/A:L
LOCAL
LOW
LOW
NONE
UNCHANGED
LOW
LOW
LOW
5.3
MEDIUM
1.8
3.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'name': 'https://github.com/tensorflow/tensorflow/commit/ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qhxx-j73r-qpm2', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-908'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.5', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In affected versions of TensorFlow under certain cases a saved model can trigger use of uninitialized values during code execution. This is caused by having tensor buffers be filled with the default value of the type but forgetting to default initialize the quantized floating point types in Eigen. This is fixed in versions 1.15.5, 2.0.4, 2.1.3, 2.2.2, 2.3.2, and 2.4.0.'}]
2020-12-14T17:54Z
2020-12-10T23:15Z
Use of Uninitialized Resource
The software uses or accesses a resource that has not been initialized.
When a resource has not been properly initialized, the software may behave unexpectedly. This may lead to a crash or invalid memory access, but the consequences vary depending on the type of resource and how it is used within the software.
https://cwe.mitre.org/data/definitions/908.html
0
Mihai Maruseac
2020-11-24 11:40:42-08:00
Default initialize fixed point Eigen types. In certain cases, tensors are filled with default values of the type. But, for these fixed point types, these values were uninitialized. Thus, we would have uninitialized memory access bugs, some of which were caught by MSAN. PiperOrigin-RevId: 344101137 Change-Id: I14555fda74dca3b5f1582da9008901937e3f14e2
ace0c15a22f7f054abcc1f53eabbcb0a1239a9e2
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
Eigen::QUInt8::QUInt8
Eigen::QUInt8::QUInt8()
[]
QUInt8() {}
5
True
1
CVE-2020-26267
False
False
False
False
AV:L/AC:L/Au:S/C:P/I:P/A:P
LOCAL
LOW
SINGLE
PARTIAL
PARTIAL
PARTIAL
4.3
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ebc70b7a592420d3d2f359e4b1694c236b82c7ae', 'name': 'https://github.com/tensorflow/tensorflow/commit/ebc70b7a592420d3d2f359e4b1694c236b82c7ae', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-c9f3-9wfr-wgh7', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-c9f3-9wfr-wgh7', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.5', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In affected versions of TensorFlow the tf.raw_ops.DataFormatVecPermute API does not validate the src_format and dst_format attributes. The code assumes that these two arguments define a permutation of NHWC. This can result in uninitialized memory accesses, read outside of bounds and even crashes. This is fixed in versions 1.15.5, 2.0.4, 2.1.3, 2.2.2, 2.3.2, and 2.4.0.'}]
2021-08-17T13:23Z
2020-12-10T23:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-12-07 11:15:21-08:00
Validate that `DataFormat*` attributes form a permutation. The `src_format` and `dst_format` attributes for the `DataFormatDimMap` and `DataFormatVecPermute` raw ops are supposed to determine a permutation. However, this was not validated and could result in unitialized memory accesses as well as writes outside of bounds and potential crashes. While here, we also test that the format attributes have the needed length, add tests for all validation failure cases, remove unnecessary calls to `strings::StrCat`, and fix a few grammar errors. This will be cherry-picked on the supported release branches. PiperOrigin-RevId: 346135579 Change-Id: I1c76392382c89ad8f072d5bc93d70669851eb404
ebc70b7a592420d3d2f359e4b1694c236b82c7ae
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::DataFormatDimMapOp::DataFormatDimMapOp
tensorflow::DataFormatDimMapOp::DataFormatDimMapOp( OpKernelConstruction * context)
['context']
explicit DataFormatDimMapOp(OpKernelConstruction* context) : OpKernel(context) { string src_format; OP_REQUIRES_OK(context, context->GetAttr("src_format", &src_format)); string dst_format; OP_REQUIRES_OK(context, context->GetAttr("dst_format", &dst_format)); OP_REQUIRES(context, src_format.size() == 4 || src_format.size() == 5, errors::InvalidArgument(strings::StrCat( "Source format must of length 4 or 5, received " "src_format = ", src_format))); OP_REQUIRES( context, dst_format.size() == 4 || dst_format.size() == 5, errors::InvalidArgument(strings::StrCat( "Destination format must of length 4 or 5, received dst_format = ", dst_format))); dst_idx_ = Tensor(DT_INT32, {static_cast<int64>(src_format.size())}); for (int i = 0; i < src_format.size(); ++i) { for (int j = 0; j < dst_format.size(); ++j) { if (dst_format[j] == src_format[i]) { dst_idx_.vec<int>()(i) = j; break; } } } }
211
True
1
CVE-2020-26269
False
False
False
False
AV:N/AC:L/Au:N/C:N/I:N/A:P
NETWORK
LOW
NONE
NONE
NONE
PARTIAL
5.0
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
HIGH
7.5
HIGH
3.9
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9jjw-hf72-3mxw', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9jjw-hf72-3mxw', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/8b5b9dc96666a3a5d27fad7179ff215e3b74b67c', 'name': 'https://github.com/tensorflow/tensorflow/commit/8b5b9dc96666a3a5d27fad7179ff215e3b74b67c', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.4.0:rc0:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.4.0:rc1:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.4.0:rc2:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.4.0:rc3:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.4.0:rc4:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In TensorFlow release candidate versions 2.4.0rc*, the general implementation for matching filesystem paths to globbing pattern is vulnerable to an access out of bounds of the array holding the directories. There are multiple invariants and preconditions that are assumed by the parallel implementation of GetMatchingPaths but are not verified by the PRs introducing it (#40861 and #44310). Thus, we are completely rewriting the implementation to fully specify and validate these. This is patched in version 2.4.0. This issue only impacts master branch and the release candidates for TF version 2.4. The final release of the 2.4 release will be patched.'}]
2021-08-17T13:21Z
2020-12-10T23:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2020-12-07 11:57:01-08:00
Completely rewrite `GetMatchingPaths`. The current parallel implementation is too complex (lambda inside lambda, two levels of parallelism) and has a read outside of bounds issue. The new implementation cleans up artifacts from the previous implementations that were left in the code as it evolves. We add multiple helper functions, and document invariants and preconditions as well as every major step. This way, we fix the security issue and a potential new one which was not caught before PiperOrigin-RevId: 346146220 Change-Id: Iec0f44673f43349797bf9944dffe9b2f779137d8
8b5b9dc96666a3a5d27fad7179ff215e3b74b67c
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::internal::GetMatchingPaths
tensorflow::internal::GetMatchingPaths( FileSystem * fs , Env * env , const string & pattern , std :: vector<string> * results)
['fs', 'env', 'pattern', 'results']
Status GetMatchingPaths(FileSystem* fs, Env* env, const string& pattern, std::vector<string>* results) { results->clear(); if (pattern.empty()) { return Status::OK(); } string fixed_prefix = pattern.substr(0, pattern.find_first_of("*?[\\")); string eval_pattern = pattern; string dir(io::Dirname(fixed_prefix)); // If dir is empty then we need to fix up fixed_prefix and eval_pattern to // include . as the top level directory. if (dir.empty()) { dir = "."; fixed_prefix = io::JoinPath(dir, fixed_prefix); eval_pattern = io::JoinPath(dir, eval_pattern); } bool is_directory = pattern[pattern.size() - 1] == '/'; #ifdef PLATFORM_WINDOWS is_directory = is_directory || pattern[pattern.size() - 1] == '\\'; #endif std::vector<string> dirs; if (!is_directory) { dirs.emplace_back(eval_pattern); } StringPiece tmp_dir(io::Dirname(eval_pattern)); while (tmp_dir.size() > dir.size()) { dirs.emplace_back(string(tmp_dir)); tmp_dir = io::Dirname(tmp_dir); } dirs.emplace_back(dir); std::reverse(dirs.begin(), dirs.end()); // Setup a parallel BFS to explore everything under dir. std::deque<std::pair<string, int>> dir_q; std::deque<std::pair<string, int>> next_dir_q; dir_q.emplace_back(std::make_pair(dirs[0], 0)); Status ret; // Status to return. mutex results_mutex; condition_variable results_cond; mutex next_que_mutex; condition_variable next_que_cond; while (!dir_q.empty()) { next_dir_q.clear(); std::vector<Status> new_rets(dir_q.size()); auto handle_level = [fs, &results, &dir_q, &next_dir_q, &new_rets, &is_directory, &dirs, &results_mutex, &results_cond, &next_que_mutex, &next_que_cond](int i) { string current_dir = dir_q.at(i).first; int dir_index = dir_q.at(i).second; dir_index++; std::vector<string> children; Status s = fs->GetChildren(current_dir, &children); // In case PERMISSION_DENIED is encountered, we bail here. 
if (s.code() == tensorflow::error::PERMISSION_DENIED) { return; } new_rets[i] = s; if (children.empty()) return; // children_dir_status holds is_dir status for children. It can have three // possible values: OK for true; FAILED_PRECONDITION for false; CANCELLED // if we don't calculate IsDirectory (we might do that because there isn't // any point in exploring that child path). std::vector<Status> children_dir_status; // This IsDirectory call can be expensive for some FS. Parallelizing it. children_dir_status.resize(children.size()); auto handle_children = [fs, &current_dir, &children, &dirs, dir_index, is_directory, &children_dir_status](int j) { const string child_path = io::JoinPath(current_dir, children[j]); if (!fs->Match(child_path, dirs[dir_index])) { children_dir_status[j] = Status(tensorflow::error::CANCELLED, "Operation not needed"); } else if (dir_index != dirs.size() - 1) { children_dir_status[j] = fs->IsDirectory(child_path); } else { children_dir_status[j] = is_directory ? fs->IsDirectory(child_path) : Status::OK(); } }; ForEach(0, children.size(), handle_children); for (size_t j = 0; j < children.size(); ++j) { const string child_path = io::JoinPath(current_dir, children[j]); // If the IsDirectory call was cancelled we bail. if (children_dir_status[j].code() == tensorflow::error::CANCELLED) { continue; } if (children_dir_status[j].ok()) { if (dir_index != dirs.size() - 1) { mutex_lock lk(next_que_mutex); next_dir_q.emplace_back(std::make_pair(child_path, dir_index)); next_que_cond.notify_one(); } else { mutex_lock lk(results_mutex); results->emplace_back(child_path); results_cond.notify_one(); } } } }; ForEach(0, dir_q.size(), handle_level); ret.Update(new_rets[dir_q.size() - 1]); std::swap(dir_q, next_dir_q); } return ret; }
796
True
1
CVE-2020-26270
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:L
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
LOW
3.3
LOW
1.8
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-m648-33qf-v3gp', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-m648-33qf-v3gp', 'refsource': 'CONFIRM', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/14755416e364f17fb1870882fa778c7fec7f16e3', 'name': 'https://github.com/tensorflow/tensorflow/commit/14755416e364f17fb1870882fa778c7fec7f16e3', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.5', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In affected versions of TensorFlow running an LSTM/GRU model where the LSTM/GRU layer receives an input with zero-length results in a CHECK failure when using the CUDA backend. This can result in a query-of-death vulnerability, via denial of service, if users can control the input to the layer. This is fixed in versions 1.15.5, 2.0.4, 2.1.3, 2.2.2, 2.3.2, and 2.4.0.'}]
2020-12-14T17:33Z
2020-12-10T23:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2020-12-07 20:31:31-08:00
Prevent CHECK-fail in LSTM/GRU with zero-length input. PiperOrigin-RevId: 346239181 Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f
14755416e364f17fb1870882fa778c7fec7f16e3
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
stream_executor::gpu::CudnnRnnSequenceTensorDescriptor::Create
stream_executor::gpu::CudnnRnnSequenceTensorDescriptor::Create( GpuExecutor * parent , int max_seq_length , int batch_size , int data_size , const absl :: Span<const int> & seq_lengths , bool time_major , cudnnDataType_t data_type)
['parent', 'max_seq_length', 'batch_size', 'data_size', 'seq_lengths', 'time_major', 'data_type']
static port::StatusOr<CudnnRnnSequenceTensorDescriptor> Create( GpuExecutor* parent, int max_seq_length, int batch_size, int data_size, const absl::Span<const int>& seq_lengths, bool time_major, cudnnDataType_t data_type) { CHECK_GT(max_seq_length, 0); int dims[] = {batch_size, data_size, 1}; int strides[] = {dims[1] * dims[2], dims[2], 1}; TensorDescriptor tensor_desc = CreateTensorDescriptor(); RETURN_IF_CUDNN_ERROR(cudnnSetTensorNdDescriptor( /*tensorDesc=*/tensor_desc.get(), /*dataType=*/data_type, /*nbDims=*/sizeof(dims) / sizeof(dims[0]), /*dimA=*/dims, /*strideA=*/strides)); const int* seq_lengths_array = seq_lengths.data(); RNNDataDescriptor data_desc = CreateRNNDataDescriptor(); float padding_fill = 0.0f; cudnnRNNDataLayout_t layout; if (time_major) { layout = CUDNN_RNN_DATA_LAYOUT_SEQ_MAJOR_UNPACKED; } else { layout = CUDNN_RNN_DATA_LAYOUT_BATCH_MAJOR_UNPACKED; } RETURN_IF_CUDNN_ERROR(cudnnSetRNNDataDescriptor( /*RNNDataDesc=*/data_desc.get(), /*dataType*/ data_type, /*layout=*/layout, /*maxSeqLength=*/max_seq_length, /*batchSize=*/batch_size, /*vectorSize=*/data_size, /*seqLengthArray=*/seq_lengths_array, /*paddingFill*/ (void*)&padding_fill)); return CudnnRnnSequenceTensorDescriptor( parent, max_seq_length, batch_size, data_size, data_type, std::move(data_desc), std::move(tensor_desc)); }
220
True
1
CVE-2020-26270
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:L
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
LOW
3.3
LOW
1.8
1.4
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-m648-33qf-v3gp', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-m648-33qf-v3gp', 'refsource': 'CONFIRM', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/14755416e364f17fb1870882fa778c7fec7f16e3', 'name': 'https://github.com/tensorflow/tensorflow/commit/14755416e364f17fb1870882fa778c7fec7f16e3', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.1.0', 'versionEndExcluding': '2.1.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.5', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In affected versions of TensorFlow running an LSTM/GRU model where the LSTM/GRU layer receives an input with zero-length results in a CHECK failure when using the CUDA backend. This can result in a query-of-death vulnerability, via denial of service, if users can control the input to the layer. This is fixed in versions 1.15.5, 2.0.4, 2.1.3, 2.2.2, 2.3.2, and 2.4.0.'}]
2020-12-14T17:33Z
2020-12-10T23:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2020-12-07 20:31:31-08:00
Prevent CHECK-fail in LSTM/GRU with zero-length input. PiperOrigin-RevId: 346239181 Change-Id: I5f233dbc076aab7bb4e31ba24f5abd4eaf99ea4f
14755416e364f17fb1870882fa778c7fec7f16e3
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
stream_executor::gpu::CudnnRnnSequenceTensorDescriptor::Create
stream_executor::gpu::CudnnRnnSequenceTensorDescriptor::Create( GpuExecutor * parent , int max_seq_length , int batch_size , int data_size , cudnnDataType_t data_type)
['parent', 'max_seq_length', 'batch_size', 'data_size', 'data_type']
static port::StatusOr<CudnnRnnSequenceTensorDescriptor> Create( GpuExecutor* parent, int max_seq_length, int batch_size, int data_size, cudnnDataType_t data_type) { CHECK_GT(max_seq_length, 0); int dims[] = {batch_size, data_size, 1}; int strides[] = {dims[1] * dims[2], dims[2], 1}; TensorDescriptor tensor_desc = CreateTensorDescriptor(); RETURN_IF_CUDNN_ERROR(cudnnSetTensorNdDescriptor( /*tensorDesc=*/tensor_desc.get(), /*dataType=*/data_type, /*nbDims=*/sizeof(dims) / sizeof(dims[0]), /*dimA=*/dims, /*strideA=*/strides)); return CudnnRnnSequenceTensorDescriptor(parent, max_seq_length, batch_size, data_size, data_type, nullptr, std::move(tensor_desc)); }
125
True
1
CVE-2021-29518
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ff70c47a396ef1e3cb73c90513da4f5cb71bebba', 'name': 'https://github.com/tensorflow/tensorflow/commit/ff70c47a396ef1e3cb73c90513da4f5cb71bebba', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-62gx-355r-9fhg', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-62gx-355r-9fhg', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. In eager mode (default in TF 2.0 and later), session operations are invalid. However, users could still call the raw ops associated with them and trigger a null pointer dereference. The implementation(https://github.com/tensorflow/tensorflow/blob/eebb96c2830d48597d055d247c0e9aebaea94cd5/tensorflow/core/kernels/session_ops.cc#L104) dereferences the session state pointer without checking if it is valid. Thus, in eager mode, `ctx->session_state()` is nullptr and the call of the member function is undefined behavior. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T16:01Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Amit Patankar
2021-04-13 14:24:00-07:00
Fix `tf.raw_ops.GetSessionTensor` and `tf.raw_ops.DeleteSessionTensor` null pointer dereferences. PiperOrigin-RevId: 368294154 Change-Id: Ie10f07a0a9a1c2b685e08153d48a0ca4b93f9fc9
ff70c47a396ef1e3cb73c90513da4f5cb71bebba
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::DeleteSessionTensorOp::Compute
tensorflow::DeleteSessionTensorOp::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext* ctx) override { const Tensor& handle = ctx->input(0); const string& name = handle.scalar<tstring>()(); OP_REQUIRES_OK(ctx, ctx->session_state()->DeleteTensor(name)); }
53
True
1
CVE-2021-29518
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ff70c47a396ef1e3cb73c90513da4f5cb71bebba', 'name': 'https://github.com/tensorflow/tensorflow/commit/ff70c47a396ef1e3cb73c90513da4f5cb71bebba', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-62gx-355r-9fhg', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-62gx-355r-9fhg', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. In eager mode (default in TF 2.0 and later), session operations are invalid. However, users could still call the raw ops associated with them and trigger a null pointer dereference. The implementation(https://github.com/tensorflow/tensorflow/blob/eebb96c2830d48597d055d247c0e9aebaea94cd5/tensorflow/core/kernels/session_ops.cc#L104) dereferences the session state pointer without checking if it is valid. Thus, in eager mode, `ctx->session_state()` is nullptr and the call of the member function is undefined behavior. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T16:01Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Amit Patankar
2021-04-13 14:24:00-07:00
Fix `tf.raw_ops.GetSessionTensor` and `tf.raw_ops.DeleteSessionTensor` null pointer dereferences. PiperOrigin-RevId: 368294154 Change-Id: Ie10f07a0a9a1c2b685e08153d48a0ca4b93f9fc9
ff70c47a396ef1e3cb73c90513da4f5cb71bebba
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::GetSessionTensorOp::Compute
tensorflow::GetSessionTensorOp::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext* ctx) override { const Tensor& handle = ctx->input(0); const string& name = handle.scalar<tstring>()(); Tensor val; OP_REQUIRES_OK(ctx, ctx->session_state()->GetTensor(name, &val)); ctx->set_output(0, val); }
68
True
1
CVE-2021-29519
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-772j-h9xw-ffp5', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-772j-h9xw-ffp5', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025', 'name': 'https://github.com/tensorflow/tensorflow/commit/b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-843'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The API of `tf.raw_ops.SparseCross` allows combinations which would result in a `CHECK`-failure and denial of service. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/3d782b7d47b1bf2ed32bd4a246d6d6cadc4c903d/tensorflow/core/kernels/sparse_cross_op.cc#L114-L116) is tricked to consider a tensor of type `tstring` which in fact contains integral elements. Fixing the type confusion by preventing mixing `DT_STRING` and `DT_INT64` types solves this issue. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T17:24Z
2021-05-14T20:15Z
Access of Resource Using Incompatible Type ('Type Confusion')
The program allocates or initializes a resource such as a pointer, object, or variable using one type, but it later accesses that resource using a type that is incompatible with the original type.
When the program accesses the resource using an incompatible type, this could trigger logical errors because the resource does not have expected properties. In languages without memory safety, such as C and C++, type confusion can lead to out-of-bounds memory access. While this weakness is frequently associated with unions when parsing data with many different embedded object types in C, it can be present in any application that can interpret the same variable or memory location in multiple ways. This weakness is not unique to C and C++. For example, errors in PHP applications can be triggered by providing array parameters when scalars are expected, or vice versa. Languages such as Perl, which perform automatic conversion of a variable of one type when it is accessed as if it were another type, can also contain these issues.
https://cwe.mitre.org/data/definitions/843.html
0
Amit Patankar
2021-04-15 13:03:19-07:00
Fix `tf.raw_ops.SparseCross` failing CHECK. PiperOrigin-RevId: 368701671 Change-Id: Id805729dd9ba0bda36e4bb309408129b55fb649d
b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseCrossHashedOp::Compute
tensorflow::SparseCrossHashedOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { OpInputList indices_list_in; OP_REQUIRES_OK(context, context->input_list("indices", &indices_list_in)); OpInputList values_list_in; OP_REQUIRES_OK(context, context->input_list("values", &values_list_in)); OpInputList shapes_list_in; OP_REQUIRES_OK(context, context->input_list("shapes", &shapes_list_in)); OpInputList dense_list_in; OP_REQUIRES_OK(context, context->input_list("dense_inputs", &dense_list_in)); OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, dense_list_in)); const Tensor* num_buckets_t; OP_REQUIRES_OK(context, context->input("num_buckets", &num_buckets_t)); const int64 num_buckets = num_buckets_t->scalar<int64>()(); const Tensor* strong_hash_t; OP_REQUIRES_OK(context, context->input("strong_hash", &strong_hash_t)); const bool strong_hash = strong_hash_t->scalar<bool>()(); const Tensor* salt_t; OP_REQUIRES_OK(context, context->input("salt", &salt_t)); const auto salt = salt_t->flat<int64>(); std::vector<int64> key_{salt(0), salt(1)}; std::vector<std::unique_ptr<ColumnInterface<int64>>> columns = GenerateKeyedColumnsFromInput<int64>(indices_list_in, values_list_in, shapes_list_in, dense_list_in, key_); Tensor* indices_out; Tensor* values_out; Tensor* shape_out; const int64 batch_size = CalculateBatchSize(shapes_list_in, dense_list_in); std::vector<int64> output_start_indices(batch_size); OP_REQUIRES_OK( context, CreateOutputTensors(columns, batch_size, context, &indices_out, &values_out, &shape_out, &output_start_indices)); const tstring unused_sep; HashCrosserV2 crosser(columns, num_buckets, 0, unused_sep); OutputUpdater<int64> updater(output_start_indices, indices_out, values_out); auto do_work = [&columns, crosser, updater, strong_hash](int64 begin, int64 end) { for (int b = begin; b < end; b++) { ProductIterator<int64> product_iterator(columns, b); int64 cross_count = 0; while (product_iterator.HasNext()) { const auto permutation = product_iterator.Next(); 
updater.Update(b, cross_count, crosser.Generate(b, permutation, strong_hash)); cross_count++; } } }; auto* worker_threads = context->device()->tensorflow_cpu_worker_threads(); // TODO(zakaria): optimize kCostPerUnit const int kCostPerUnit = 5000 * indices_list_in.size(); Shard(worker_threads->num_threads, worker_threads->workers, batch_size, kCostPerUnit, do_work); }
481
True
1
CVE-2021-29519
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-772j-h9xw-ffp5', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-772j-h9xw-ffp5', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025', 'name': 'https://github.com/tensorflow/tensorflow/commit/b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-843'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The API of `tf.raw_ops.SparseCross` allows combinations which would result in a `CHECK`-failure and denial of service. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/3d782b7d47b1bf2ed32bd4a246d6d6cadc4c903d/tensorflow/core/kernels/sparse_cross_op.cc#L114-L116) is tricked to consider a tensor of type `tstring` which in fact contains integral elements. Fixing the type confusion by preventing mixing `DT_STRING` and `DT_INT64` types solves this issue. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T17:24Z
2021-05-14T20:15Z
Access of Resource Using Incompatible Type ('Type Confusion')
The program allocates or initializes a resource such as a pointer, object, or variable using one type, but it later accesses that resource using a type that is incompatible with the original type.
When the program accesses the resource using an incompatible type, this could trigger logical errors because the resource does not have expected properties. In languages without memory safety, such as C and C++, type confusion can lead to out-of-bounds memory access. While this weakness is frequently associated with unions when parsing data with many different embedded object types in C, it can be present in any application that can interpret the same variable or memory location in multiple ways. This weakness is not unique to C and C++. For example, errors in PHP applications can be triggered by providing array parameters when scalars are expected, or vice versa. Languages such as Perl, which perform automatic conversion of a variable of one type when it is accessed as if it were another type, can also contain these issues.
https://cwe.mitre.org/data/definitions/843.html
0
Amit Patankar
2021-04-15 13:03:19-07:00
Fix `tf.raw_ops.SparseCross` failing CHECK. PiperOrigin-RevId: 368701671 Change-Id: Id805729dd9ba0bda36e4bb309408129b55fb649d
b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseCrossOp::Compute
tensorflow::SparseCrossOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { OpInputList indices_list_in; OP_REQUIRES_OK(context, context->input_list("indices", &indices_list_in)); OpInputList values_list_in; OP_REQUIRES_OK(context, context->input_list("values", &values_list_in)); OpInputList shapes_list_in; OP_REQUIRES_OK(context, context->input_list("shapes", &shapes_list_in)); OpInputList dense_list_in; OP_REQUIRES_OK(context, context->input_list("dense_inputs", &dense_list_in)); OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, dense_list_in)); std::vector<std::unique_ptr<ColumnInterface<InternalType>>> columns = GenerateColumnsFromInput<InternalType>(indices_list_in, values_list_in, shapes_list_in, dense_list_in); const tstring k_feature_separator = "_X_"; typename CrossTraits<HASHED_OUTPUT, InternalType>::Crosser crosser( columns, num_buckets_, hash_key_, k_feature_separator); Tensor* indices_out; Tensor* values_out; Tensor* shape_out; const int64 batch_size = CalculateBatchSize(shapes_list_in, dense_list_in); std::vector<int64> output_start_indices(batch_size); OP_REQUIRES_OK( context, CreateOutputTensors(columns, batch_size, context, &indices_out, &values_out, &shape_out, &output_start_indices)); typename CrossTraits<HASHED_OUTPUT, InternalType>::Updater updater( output_start_indices, indices_out, values_out); auto do_work = [&columns, crosser, updater](int64 begin, int64 end) { for (int b = begin; b < end; b++) { ProductIterator<InternalType> product_iterator(columns, b); int64 cross_count = 0; while (product_iterator.HasNext()) { const auto permutation = product_iterator.Next(); updater.Update(b, cross_count, crosser.Generate(b, permutation, false)); cross_count++; } } }; auto* worker_threads = context->device()->tensorflow_cpu_worker_threads(); // TODO(zakaria): optimize kCostPerUnit const int kCostPerUnit = 5000 * indices_list_in.size(); Shard(worker_threads->num_threads, worker_threads->workers, batch_size, kCostPerUnit, do_work); }
370
True
1
CVE-2021-29519
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-772j-h9xw-ffp5', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-772j-h9xw-ffp5', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025', 'name': 'https://github.com/tensorflow/tensorflow/commit/b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-843'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The API of `tf.raw_ops.SparseCross` allows combinations which would result in a `CHECK`-failure and denial of service. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/3d782b7d47b1bf2ed32bd4a246d6d6cadc4c903d/tensorflow/core/kernels/sparse_cross_op.cc#L114-L116) is tricked to consider a tensor of type `tstring` which in fact contains integral elements. Fixing the type confusion by preventing mixing `DT_STRING` and `DT_INT64` types solves this issue. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T17:24Z
2021-05-14T20:15Z
Access of Resource Using Incompatible Type ('Type Confusion')
The program allocates or initializes a resource such as a pointer, object, or variable using one type, but it later accesses that resource using a type that is incompatible with the original type.
When the program accesses the resource using an incompatible type, this could trigger logical errors because the resource does not have expected properties. In languages without memory safety, such as C and C++, type confusion can lead to out-of-bounds memory access. While this weakness is frequently associated with unions when parsing data with many different embedded object types in C, it can be present in any application that can interpret the same variable or memory location in multiple ways. This weakness is not unique to C and C++. For example, errors in PHP applications can be triggered by providing array parameters when scalars are expected, or vice versa. Languages such as Perl, which perform automatic conversion of a variable of one type when it is accessed as if it were another type, can also contain these issues.
https://cwe.mitre.org/data/definitions/843.html
0
Amit Patankar
2021-04-15 13:03:19-07:00
Fix `tf.raw_ops.SparseCross` failing CHECK. PiperOrigin-RevId: 368701671 Change-Id: Id805729dd9ba0bda36e4bb309408129b55fb649d
b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseCrossV2Op::Compute
tensorflow::SparseCrossV2Op::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { OpInputList indices_list_in; OP_REQUIRES_OK(context, context->input_list("indices", &indices_list_in)); OpInputList values_list_in; OP_REQUIRES_OK(context, context->input_list("values", &values_list_in)); OpInputList shapes_list_in; OP_REQUIRES_OK(context, context->input_list("shapes", &shapes_list_in)); OpInputList dense_list_in; OP_REQUIRES_OK(context, context->input_list("dense_inputs", &dense_list_in)); OP_REQUIRES_OK(context, ValidateInput(indices_list_in, values_list_in, shapes_list_in, dense_list_in)); const Tensor* sep_t; OP_REQUIRES_OK(context, context->input("sep", &sep_t)); const tstring separator = sep_t->scalar<tstring>()(); std::vector<std::unique_ptr<ColumnInterface<tstring>>> columns = GenerateColumnsFromInput<tstring>(indices_list_in, values_list_in, shapes_list_in, dense_list_in); Tensor* indices_out; Tensor* values_out; Tensor* shape_out; const int64 batch_size = CalculateBatchSize(shapes_list_in, dense_list_in); std::vector<int64> output_start_indices(batch_size); OP_REQUIRES_OK( context, CreateOutputTensors(columns, batch_size, context, &indices_out, &values_out, &shape_out, &output_start_indices)); StringCrosser<tstring> crosser(columns, 0, 0, separator); OutputUpdater<tstring> updater(output_start_indices, indices_out, values_out); auto do_work = [&columns, crosser, updater](int64 begin, int64 end) { for (int b = begin; b < end; b++) { ProductIterator<tstring> product_iterator(columns, b); int64 cross_count = 0; while (product_iterator.HasNext()) { const auto permutation = product_iterator.Next(); updater.Update(b, cross_count, crosser.Generate(b, permutation, false)); cross_count++; } } }; auto* worker_threads = context->device()->tensorflow_cpu_worker_threads(); // TODO(zakaria): optimize kCostPerUnit const int kCostPerUnit = 5000 * indices_list_in.size(); Shard(worker_threads->num_threads, worker_threads->workers, batch_size, kCostPerUnit, do_work); }
389
True
1
CVE-2021-29519
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-772j-h9xw-ffp5', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-772j-h9xw-ffp5', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025', 'name': 'https://github.com/tensorflow/tensorflow/commit/b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-843'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The API of `tf.raw_ops.SparseCross` allows combinations which would result in a `CHECK`-failure and denial of service. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/3d782b7d47b1bf2ed32bd4a246d6d6cadc4c903d/tensorflow/core/kernels/sparse_cross_op.cc#L114-L116) is tricked to consider a tensor of type `tstring` which in fact contains integral elements. Fixing the type confusion by preventing mixing `DT_STRING` and `DT_INT64` types solves this issue. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T17:24Z
2021-05-14T20:15Z
Access of Resource Using Incompatible Type ('Type Confusion')
The program allocates or initializes a resource such as a pointer, object, or variable using one type, but it later accesses that resource using a type that is incompatible with the original type.
When the program accesses the resource using an incompatible type, this could trigger logical errors because the resource does not have expected properties. In languages without memory safety, such as C and C++, type confusion can lead to out-of-bounds memory access. While this weakness is frequently associated with unions when parsing data with many different embedded object types in C, it can be present in any application that can interpret the same variable or memory location in multiple ways. This weakness is not unique to C and C++. For example, errors in PHP applications can be triggered by providing array parameters when scalars are expected, or vice versa. Languages such as Perl, which perform automatic conversion of a variable of one type when it is accessed as if it were another type, can also contain these issues.
https://cwe.mitre.org/data/definitions/843.html
0
Amit Patankar
2021-04-15 13:03:19-07:00
Fix `tf.raw_ops.SparseCross` failing CHECK. PiperOrigin-RevId: 368701671 Change-Id: Id805729dd9ba0bda36e4bb309408129b55fb649d
b1cc5e5a50e7cee09f2c6eb48eb40ee9c4125025
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::ValidateInput
tensorflow::ValidateInput( const OpInputList & indices_list_in , const OpInputList & values_list_in , const OpInputList & shapes_list_in , const OpInputList & dense_list_in)
['indices_list_in', 'values_list_in', 'shapes_list_in', 'dense_list_in']
Status ValidateInput(const OpInputList& indices_list_in, const OpInputList& values_list_in, const OpInputList& shapes_list_in, const OpInputList& dense_list_in) { const auto size = indices_list_in.size(); // Validates indices_list_in OpInputList. for (int i = 0; i < size; i++) { if (!TensorShapeUtils::IsMatrix(indices_list_in[i].shape())) { return errors::InvalidArgument( "Input indices should be a matrix but received shape ", indices_list_in[i].shape().DebugString(), " at position ", i); } if (indices_list_in[i].shape().dim_size(1) != 2) { return errors::InvalidArgument("Expected D2 of index to be 2 got ", indices_list_in[i].shape().dim_size(1), " at position ", i); } } // Validates values_list_in OpInputList. if (values_list_in.size() != size) { return errors::InvalidArgument("Expected ", size, " input values, got ", values_list_in.size()); } for (int i = 0; i < size; i++) { if (!TensorShapeUtils::IsVector(values_list_in[i].shape())) { return errors::InvalidArgument( "Input values should be a vector but received shape ", values_list_in[i].shape().DebugString(), " at position ", i); } if (indices_list_in[i].shape().dim_size(0) != values_list_in[i].shape().dim_size(0)) { return errors::InvalidArgument( "Expected size of values to be ", indices_list_in[i].shape().dim_size(0), " got ", values_list_in[i].shape().dim_size(0), " at position ", i); } } // Validates shapes_list_in OpInputList if (shapes_list_in.size() != size) { return errors::InvalidArgument("Expected ", size, " input shapes, got ", shapes_list_in.size()); } for (int i = 0; i < size; i++) { if (!TensorShapeUtils::IsVector(shapes_list_in[i].shape())) { return errors::InvalidArgument( "Input shapes should be a vector but received shape ", shapes_list_in[i].shape().DebugString(), " at position ", i); } if (shapes_list_in[i].vec<int64>().size() != 2) { return errors::InvalidArgument("shape should imply a 2D tensor, but got ", shapes_list_in[i].shape().DebugString(), " at position ", i); } } // Validates 
dense_list_in OpInputList for (int i = 0; i < dense_list_in.size(); ++i) { if (!TensorShapeUtils::IsMatrix(dense_list_in[i].shape())) { return errors::InvalidArgument( "Dense inputs should be a matrix but received shape ", dense_list_in[i].shape().DebugString(), " at position ", i); } } // Validates batch sizes. (Note: we do this after validating the input // shapes, because CalculateBatchSize() depends on inputs having valid // shapes). const auto batch_size = CalculateBatchSize(shapes_list_in, dense_list_in); for (int i = 0; i < size; i++) { if (shapes_list_in[i].vec<int64>()(0) != batch_size) { return errors::InvalidArgument("Expected batch size ", batch_size, " got ", shapes_list_in[i].vec<int64>()(0), " at position ", i); } } for (int i = 0; i < dense_list_in.size(); ++i) { if (dense_list_in[i].dim_size(0) != batch_size) { return errors::InvalidArgument("Expected batch size ", batch_size, " got ", dense_list_in[i].dim_size(0), " at dense tensor ", i); } } return Status::OK(); }
653
True
1
CVE-2021-29608
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'name': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'name': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'name': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-131'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. Due to lack of validation in `tf.raw_ops.RaggedTensorToTensor`, an attacker can exploit an undefined behavior if input arguments are empty. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc#L356-L360) only checks that one of the tensors is not empty, but does not check for the other ones. There are multiple `DCHECK` validations to prevent heap OOB, but these are no-op in release builds, hence they don't prevent anything. The fix will be included in TensorFlow 2.5.0. We will also cherrypick these commits on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-07-26T16:23Z
2021-05-14T20:15Z
Incorrect Calculation of Buffer Size
The software does not correctly calculate the size to be used when allocating a buffer, which could lead to a buffer overflow.
https://cwe.mitre.org/data/definitions/131.html
0
Amit Patankar
2021-04-15 13:28:49-07:00
Fix `tf.raw_ops.RaggedTensorToTensor` failing CHECK. PiperOrigin-RevId: 368706628 Change-Id: I5c9ea4833f38835ee183ca50d63251dc89c9f3bc
b761c9b652af2107cfbc33efd19be0ce41daa33e
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndex
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndex( OpKernelContext * context , int dimension , const vector<INDEX_TYPE> & parent_output_index , INDEX_TYPE output_index_multiplier , INDEX_TYPE output_size , vector<INDEX_TYPE> * result)
['context', 'dimension', 'parent_output_index', 'output_index_multiplier', 'output_size', 'result']
Status CalculateOutputIndex(OpKernelContext* context, int dimension, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const RowPartitionTensor row_partition_tensor = GetRowPartitionTensor(context, dimension); auto partition_type = GetRowPartitionTypeByDimension(dimension); switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: CalculateOutputIndexValueRowID( row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); return tensorflow::Status::OK(); case RowPartitionType::ROW_SPLITS: CalculateOutputIndexRowSplit(row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); return tensorflow::Status::OK(); default: return errors::InvalidArgument( "Unsupported partition type:", RowPartitionTypeToString(partition_type)); } }
126
True
1
CVE-2021-29608
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'name': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'name': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'name': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-131'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. Due to lack of validation in `tf.raw_ops.RaggedTensorToTensor`, an attacker can exploit an undefined behavior if input arguments are empty. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc#L356-L360) only checks that one of the tensors is not empty, but does not check for the other ones. There are multiple `DCHECK` validations to prevent heap OOB, but these are no-op in release builds, hence they don't prevent anything. The fix will be included in TensorFlow 2.5.0. We will also cherrypick these commits on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-07-26T16:23Z
2021-05-14T20:15Z
Incorrect Calculation of Buffer Size
The software does not correctly calculate the size to be used when allocating a buffer, which could lead to a buffer overflow.
https://cwe.mitre.org/data/definitions/131.html
0
Amit Patankar
2021-04-15 13:28:49-07:00
Fix `tf.raw_ops.RaggedTensorToTensor` failing CHECK. PiperOrigin-RevId: 368706628 Change-Id: I5c9ea4833f38835ee183ca50d63251dc89c9f3bc
b761c9b652af2107cfbc33efd19be0ce41daa33e
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndexRowSplit
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndexRowSplit( const RowPartitionTensor & row_split , const vector<INDEX_TYPE> & parent_output_index , INDEX_TYPE output_index_multiplier , INDEX_TYPE output_size , vector<INDEX_TYPE> * result)
['row_split', 'parent_output_index', 'output_index_multiplier', 'output_size', 'result']
void CalculateOutputIndexRowSplit( const RowPartitionTensor& row_split, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { INDEX_TYPE row_split_size = row_split.size(); if (row_split_size > 0) { result->reserve(row_split(row_split_size - 1)); } for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) { INDEX_TYPE row_length = row_split(i + 1) - row_split(i); INDEX_TYPE real_length = std::min(output_size, row_length); INDEX_TYPE parent_output_index_current = parent_output_index[i]; if (parent_output_index_current == -1) { real_length = 0; } for (INDEX_TYPE j = 0; j < real_length; ++j) { result->push_back(parent_output_index_current); parent_output_index_current += output_index_multiplier; } for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) { result->push_back(-1); } } if (row_split_size > 0) { DCHECK_EQ(result->size(), row_split(row_split_size - 1)); } }
202
True
1
CVE-2021-29608
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'name': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'name': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'name': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-131'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. Due to lack of validation in `tf.raw_ops.RaggedTensorToTensor`, an attacker can exploit an undefined behavior if input arguments are empty. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc#L356-L360) only checks that one of the tensors is not empty, but does not check for the other ones. There are multiple `DCHECK` validations to prevent heap OOB, but these are no-op in release builds, hence they don't prevent anything. The fix will be included in TensorFlow 2.5.0. We will also cherrypick these commits on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-07-26T16:23Z
2021-05-14T20:15Z
Incorrect Calculation of Buffer Size
The software does not correctly calculate the size to be used when allocating a buffer, which could lead to a buffer overflow.
https://cwe.mitre.org/data/definitions/131.html
0
Amit Patankar
2021-04-15 13:28:49-07:00
Fix `tf.raw_ops.RaggedTensorToTensor` failing CHECK. PiperOrigin-RevId: 368706628 Change-Id: I5c9ea4833f38835ee183ca50d63251dc89c9f3bc
b761c9b652af2107cfbc33efd19be0ce41daa33e
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndexValueRowID
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndexValueRowID( const RowPartitionTensor & value_rowids , const vector<INDEX_TYPE> & parent_output_index , INDEX_TYPE output_index_multiplier , INDEX_TYPE output_size , vector<INDEX_TYPE> * result)
['value_rowids', 'parent_output_index', 'output_index_multiplier', 'output_size', 'result']
void CalculateOutputIndexValueRowID( const RowPartitionTensor& value_rowids, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const INDEX_TYPE index_size = value_rowids.size(); result->reserve(index_size); if (index_size == 0) { return; } INDEX_TYPE current_output_column = 0; INDEX_TYPE current_value_rowid = value_rowids(0); DCHECK_LT(current_value_rowid, parent_output_index.size()); INDEX_TYPE current_output_index = parent_output_index[current_value_rowid]; result->push_back(current_output_index); for (INDEX_TYPE i = 1; i < index_size; ++i) { INDEX_TYPE next_value_rowid = value_rowids(i); if (next_value_rowid == current_value_rowid) { if (current_output_index >= 0) { ++current_output_column; if (current_output_column < output_size) { current_output_index += output_index_multiplier; } else { current_output_index = -1; } } } else { current_output_column = 0; current_value_rowid = next_value_rowid; DCHECK_LT(next_value_rowid, parent_output_index.size()); current_output_index = parent_output_index[next_value_rowid]; } result->push_back(current_output_index); } DCHECK_EQ(result->size(), value_rowids.size()); }
210
True
1
CVE-2021-29515
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/a7116dd3913c4a4afd2a3a938573aa7c785fdfc6', 'name': 'https://github.com/tensorflow/tensorflow/commit/a7116dd3913c4a4afd2a3a938573aa7c785fdfc6', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-hc6c-75p4-hmq4', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-hc6c-75p4-hmq4', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The implementation of `MatrixDiag*` operations(https://github.com/tensorflow/tensorflow/blob/4c4f420e68f1cfaf8f4b6e8e3eb857e9e4c3ff33/tensorflow/core/kernels/linalg/matrix_diag_op.cc#L195-L197) does not validate that the tensor arguments are non-empty. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T16:28Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2021-04-17 20:55:53-07:00
Validate `MatrixDiagV{2,3}` arguments to prevent breakage. PiperOrigin-RevId: 369056033 Change-Id: Ic2018c297d3dd6f252dc1dd3667f1ed5cb1eaa42
a7116dd3913c4a4afd2a3a938573aa7c785fdfc6
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::MatrixDiagOp::Compute
tensorflow::MatrixDiagOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor& diagonal = context->input(0); // MatrixDiag and MatrixDiagV2 both use this OpKernel. MatrixDiag only has // one input, so we have to check the number of inputs before reading // additional parameters in MatrixDiagV2. int32 lower_diag_index = 0; int32 upper_diag_index = 0; int32 num_rows = -1; int32 num_cols = -1; T padding_value(0); // MatrixDiagOpV2-specific. if (context->num_inputs() > kNumV1Inputs) { auto& diag_index = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsScalar(diag_index.shape()) || TensorShapeUtils::IsVector(diag_index.shape()), errors::InvalidArgument( "diag_index must be a scalar or vector, received shape: ", diag_index.shape().DebugString())); lower_diag_index = diag_index.flat<int32>()(0); upper_diag_index = lower_diag_index; if (TensorShapeUtils::IsVector(diag_index.shape())) { auto diag_index_size = diag_index.dim_size(0); OP_REQUIRES( context, 0 < diag_index_size && diag_index_size <= 2, errors::InvalidArgument( "diag_index must have only one or two elements, received ", diag_index_size, " elements.")); if (diag_index_size > 1) { upper_diag_index = diag_index.flat<int32>()(1); } } num_rows = context->input(2).flat<int32>()(0); num_cols = context->input(3).flat<int32>()(0); padding_value = context->input(4).flat<T>()(0); } // Size validations. 
const TensorShape& diagonal_shape = diagonal.shape(); const int diag_rank = diagonal_shape.dims(); const Eigen::Index num_diags = upper_diag_index - lower_diag_index + 1; OP_REQUIRES(context, TensorShapeUtils::IsVectorOrHigher(diagonal_shape), errors::InvalidArgument( "diagonal must be at least 1-dim, received shape: ", diagonal.shape().DebugString())); OP_REQUIRES( context, lower_diag_index <= upper_diag_index, errors::InvalidArgument( "lower_diag_index must not be larger than upper_diag_index: ", lower_diag_index, " > ", upper_diag_index)); OP_REQUIRES(context, lower_diag_index == upper_diag_index || diagonal_shape.dim_size(diag_rank - 2) == num_diags, errors::InvalidArgument( "The number of diagonals provided in the input does not " "match the lower_diag_index and upper_diag_index range.")); const Eigen::Index max_diag_len = diagonal_shape.dim_size(diag_rank - 1); const int32 min_num_rows = max_diag_len - std::min(upper_diag_index, 0); const int32 min_num_cols = max_diag_len + std::max(lower_diag_index, 0); OP_REQUIRES(context, num_rows == -1 || num_rows >= min_num_rows, errors::InvalidArgument("The number of rows is too small.")); OP_REQUIRES(context, num_cols == -1 || num_cols >= min_num_cols, errors::InvalidArgument("The number of columns is too small.")); // If both num_rows and num_cols are unknown, assume that output is square. // Otherwise, use smallest possible values. if (num_rows == -1 && num_cols == -1) { num_rows = std::max(min_num_rows, min_num_cols); num_cols = num_rows; } else if (num_rows == -1) { num_rows = min_num_rows; } else if (num_cols == -1) { num_cols = min_num_cols; } OP_REQUIRES(context, num_rows == min_num_rows || num_cols == min_num_cols, errors::InvalidArgument( "The number of rows or columns is not consistent with " "the specified d_lower, d_upper, and diagonal.")); TensorShape output_shape = diagonal_shape; if (num_diags == 1) { // Output has rank `rank+1`. 
output_shape.set_dim(diag_rank - 1, num_rows); output_shape.AddDim(num_cols); } else { // Output has rank `rank`. output_shape.set_dim(diag_rank - 2, num_rows); output_shape.set_dim(diag_rank - 1, num_cols); } Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, output_shape, &output)); auto output_reshaped = output->flat_inner_dims<T, 3>(); auto diag_reshaped = diagonal.flat<T>(); functor::MatrixDiag<Device, T>::Compute( context, context->eigen_device<Device>(), diag_reshaped, output_reshaped, lower_diag_index, upper_diag_index, max_diag_len, padding_value, left_align_superdiagonal_, left_align_subdiagonal_); }
683
True
1
CVE-2021-29521
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/c57c0b9f3a4f8684f3489dd9a9ec627ad8b599f5', 'name': 'https://github.com/tensorflow/tensorflow/commit/c57c0b9f3a4f8684f3489dd9a9ec627ad8b599f5', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-hr84-fqvp-48mm', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-hr84-fqvp-48mm', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-131'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. Specifying a negative dense shape in `tf.raw_ops.SparseCountSparseOutput` results in a segmentation fault being thrown out from the standard library as `std::vector` invariants are broken. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/8f7b60ee8c0206a2c99802e3a4d1bb55d2bc0624/tensorflow/core/kernels/count_ops.cc#L199-L213) assumes the first element of the dense shape is always positive and uses it to initialize a `BatchedMap<T>` (i.e., `std::vector<absl::flat_hash_map<int64,T>>`(https://github.com/tensorflow/tensorflow/blob/8f7b60ee8c0206a2c99802e3a4d1bb55d2bc0624/tensorflow/core/kernels/count_ops.cc#L27)) data structure. If the `shape` tensor has more than one element, `num_batches` is the first value in `shape`. Ensuring that the `dense_shape` argument is a valid tensor shape (that is, all elements are non-negative) solves this issue. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2 and TensorFlow 2.3.3.'}]
2021-05-20T17:19Z
2021-05-14T20:15Z
Incorrect Calculation of Buffer Size
The software does not correctly calculate the size to be used when allocating a buffer, which could lead to a buffer overflow.
https://cwe.mitre.org/data/definitions/131.html
0
Amit Patankar
2021-04-19 11:33:50-07:00
Fix the segfault in `tf.raw_ops.SparseCountSparseOutput`. PiperOrigin-RevId: 369264941 Change-Id: I23a96a15b8370c01ee21ba3841e1c7dcbf55e93d
c57c0b9f3a4f8684f3489dd9a9ec627ad8b599f5
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseCount::Compute
tensorflow::SparseCount::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor& indices = context->input(0); const Tensor& values = context->input(1); const Tensor& shape = context->input(2); const Tensor& weights = context->input(3); bool use_weights = weights.NumElements() > 0; OP_REQUIRES(context, TensorShapeUtils::IsMatrix(indices.shape()), errors::InvalidArgument( "Input indices must be a 2-dimensional tensor. Got: ", indices.shape().DebugString())); if (use_weights) { OP_REQUIRES( context, weights.shape() == values.shape(), errors::InvalidArgument( "Weights and values must have the same shape. Weight shape: ", weights.shape().DebugString(), "; values shape: ", values.shape().DebugString())); } OP_REQUIRES(context, shape.NumElements() != 0, errors::InvalidArgument( "The shape argument requires at least one element.")); bool is_1d = shape.NumElements() == 1; int num_batches = is_1d ? 1 : shape.flat<int64>()(0); int num_values = values.NumElements(); OP_REQUIRES(context, num_values == indices.shape().dim_size(0), errors::InvalidArgument( "Number of values must match first dimension of indices.", "Got ", num_values, " values, indices shape: ", indices.shape().DebugString())); const auto indices_values = indices.matrix<int64>(); const auto values_values = values.flat<T>(); const auto weight_values = weights.flat<W>(); auto per_batch_counts = BatchedMap<W>(num_batches); T max_value = 0; for (int idx = 0; idx < num_values; ++idx) { int batch = is_1d ? 
0 : indices_values(idx, 0); if (batch >= num_batches) { OP_REQUIRES(context, batch < num_batches, errors::InvalidArgument( "Indices value along the first dimension must be ", "lower than the first index of the shape.", "Got ", batch, " as batch and ", num_batches, " as the first dimension of the shape.")); } const auto& value = values_values(idx); if (value >= 0 && (maxlength_ <= 0 || value < maxlength_)) { if (binary_output_) { per_batch_counts[batch][value] = 1; } else if (use_weights) { per_batch_counts[batch][value] += weight_values(idx); } else { per_batch_counts[batch][value]++; } if (value > max_value) { max_value = value; } } } int num_output_values = GetOutputSize(max_value, maxlength_, minlength_); OP_REQUIRES_OK(context, OutputSparse<W>(per_batch_counts, num_output_values, is_1d, context)); }
493
True
1
CVE-2021-29527
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/cfa91be9863a91d5105a3b4941096044ab32036b', 'name': 'https://github.com/tensorflow/tensorflow/commit/cfa91be9863a91d5105a3b4941096044ab32036b', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-x4g7-fvjj-prg8', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-x4g7-fvjj-prg8', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-369'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a division by 0 in `tf.raw_ops.QuantizedConv2D`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/00e9a4d67d76703fa1aee33dac582acf317e0e81/tensorflow/core/kernels/quantized_conv_ops.cc#L257-L259) does a division by a quantity that is controlled by the caller. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T15:47Z
2021-05-14T20:15Z
Divide By Zero
The product divides a value by zero.
This weakness typically occurs when an unexpected value is provided to the product, or if an error occurs that is not properly detected. It frequently occurs in calculations involving physical dimensions such as size, length, width, and height.
https://cwe.mitre.org/data/definitions/369.html
0
Mihai Maruseac
2021-04-19 18:58:47-07:00
Fix one FPE and remove two CHECK-fails. PiperOrigin-RevId: 369349640 Change-Id: I1fedbfc2b5bab635c5cb51f103d7c9176f79831a
cfa91be9863a91d5105a3b4941096044ab32036b
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::Im2ColConvFunctor::operator ( )
tensorflow::Im2ColConvFunctor::operator ( )( OpKernelContext * context , const T1 * input_data , int input_batches , int input_height , int input_width , int input_depth , int input_offset , const T2 * filter_data , int filter_height , int filter_width , int filter_count , int filter_offset , int stride , Padding padding , T3 * output_data , int output_height , int output_width , int output_shift , int output_offset , int output_mult)
['context', 'input_data', 'input_batches', 'input_height', 'input_width', 'input_depth', 'input_offset', 'filter_data', 'filter_height', 'filter_width', 'filter_count', 'filter_offset', 'stride', 'padding', 'output_data', 'output_height', 'output_width', 'output_shift', 'output_offset', 'output_mult']
void operator()(OpKernelContext* context, const T1* input_data, int input_batches, int input_height, int input_width, int input_depth, int input_offset, const T2* filter_data, int filter_height, int filter_width, int filter_count, int filter_offset, int stride, Padding padding, T3* output_data, int output_height, int output_width, int output_shift, int output_offset, int output_mult) { if (input_offset < 0) { // Only log the first few occurrences of this warning. static int warning_count = 0; if (warning_count < 10) { ++warning_count; LOG(WARNING) << "For kernel '" << context->op_kernel().name() << "' from input '" << context->op_kernel().requested_input(0) << "': Zero is not representable in the quantized range used by the" << " input. This means QuantizedConv2d has to fall back to a slow" << " implementation, since the border of zero values can't be" << " represented easily. You should try to construct graphs that" << " avoid this situation."; } ReferenceConvFunctor<T1, T2, T3> conv_functor; conv_functor(context, input_data, input_batches, input_height, input_width, input_depth, input_offset, filter_data, filter_height, filter_width, filter_count, filter_offset, stride, padding, output_data, output_height, output_width, output_shift, output_offset, output_mult); return; } CHECK_GT(output_width, 0); CHECK_GT(output_height, 0); int filter_left_offset; int filter_top_offset; if (padding == VALID) { filter_left_offset = ((output_width - 1) * stride + filter_width - input_width + 1) / 2; filter_top_offset = ((output_height - 1) * stride + filter_height - input_height + 1) / 2; } else { filter_left_offset = ((output_width - 1) * stride + filter_width - input_width) / 2; filter_top_offset = ((output_height - 1) * stride + filter_height - input_height) / 2; } // The im2col buffer has # of patches rows, and # of filters cols. 
// It's laid out like this, in row major order in memory: // < filter value count > // ^ +---------------------+ // patch | | // count | | // v +---------------------+ // Each patch row contains a filter_width x filter_height patch of the // input, with the depth channel as the most contiguous in memory, followed // by the width, then the height. This is the standard memory order in the // image world if it helps to visualize it. const int filter_value_count = filter_width * filter_height * input_depth; const int64 patches_per_chunk = kMaxChunkSize / (filter_value_count * sizeof(T1)); const int64 chunk_value_count = (kMaxChunkSize + (sizeof(T1) - 1)) / sizeof(T1); // TODO(petewarden) - Memory allocation can be very slow on Android. Can we // optimize this by keeping the scratch buffer around? // Because memory allocation is very expensive on mobile platforms, try to // allocate a persistent buffer that will be kept around between calls. We // use TensorFlow's resource management to ensure that the memory will be // released when the session is over. Im2ColBufferResource<T1, chunk_value_count>* im2col_buffer_resource; std::function<Status(Im2ColBufferResource<T1, chunk_value_count>**)> creator = [](Im2ColBufferResource<T1, chunk_value_count>** resource) { #ifdef _MSC_VER // MSVC complains about the capture of chunk_value_count which oddly // works fine in conv_ops_using_gemm.cc for example. // Define chunk_value_count inside the lambda for now. const int64 chunk_value_count = (kMaxChunkSize + (sizeof(T1) - 1)) / sizeof(T1); #endif *resource = new Im2ColBufferResource<T1, chunk_value_count>(); return Status::OK(); }; OP_REQUIRES_OK(context, context->resource_manager()->LookupOrCreate( "Conv2d", "im2col_buffer", &im2col_buffer_resource, creator)); // This means that multiple ops can't be run simultaneously on different // threads, because we have a single shared resource. 
The platforms this is // aimed at have intra-op parallelism as their focus though, so it shouldn't // be an issue. mutex_lock lock_buffer(im2col_buffer_resource->mu); core::ScopedUnref unref_buffer(im2col_buffer_resource); T1* im2col_buffer = im2col_buffer_resource->data; const int64 patch_count = (input_batches * output_height * output_width); const int64 chunk_count = (patch_count + (patches_per_chunk - 1)) / patches_per_chunk; for (int64 chunk_index = 0; chunk_index < chunk_count; ++chunk_index) { const int64 patch_index_start = chunk_index * patches_per_chunk; const int64 patch_index_end = std::min(patch_index_start + patches_per_chunk, patch_count); for (int64 patch_index = patch_index_start; patch_index < patch_index_end; ++patch_index) { const int64 batch = patch_index / (output_height * output_width); const int64 out_y = (patch_index / output_width) % output_height; const int64 out_x = patch_index % output_width; const T1* input_batch_start = input_data + (batch * input_height * input_width * input_depth); const int in_y_origin = (out_y * stride) - filter_top_offset; const int in_x_origin = (out_x * stride) - filter_left_offset; const int patch_index_within_chunk = patch_index % patches_per_chunk; T1* im2col_patch_start = im2col_buffer + (patch_index_within_chunk * filter_value_count); for (int filter_y = 0; filter_y < filter_height; ++filter_y) { const int in_y = in_y_origin + filter_y; T1* im2col_row_start = im2col_patch_start + (filter_y * filter_width * input_depth); // If we're off the top or the bottom of the input, fill the // whole row with zeroes. if ((in_y < 0) || (in_y >= input_height)) { // On Android, memset and memcpy are significantly faster than the // more modern std::set and std::copy equivalents. memset(im2col_row_start, input_offset, (filter_width * input_depth)); } else { // What we're doing here is trying to copy and fill the im2col // buffer as efficiently as possible, using functions to set or // duplicate values en masse. 
We know we don't have to worry about // vertical edges because we dealt with that case above, so we // just need to handle filters that overlap the left or right // edges. Here's what that looks like: // // < left_zero_count > < center_copy_count > < right_zero_count > // +------------------+---------------------+--------------------+ // | (filter) | (image) | (filter) | // +------------------+---------------------+--------------------+ // in_x_origin 0 input_width in_x_end // // In reality it's unlikely that a filter patch will be wider // than an input, but this shows all the edge cases. // We use memset() to set the left and right sections to zeroes // and memcpy() to copy over the input data for the center. These // are preferred to std::fill and std::copy because they're much // faster on Android. const int in_x_end = in_x_origin + filter_width; const int left_zero_count = std::max(0, 0 - in_x_origin); const int right_zero_count = std::max(0, in_x_end - input_width); const int center_copy_count = filter_width - (left_zero_count + right_zero_count); if (left_zero_count > 0) { T1* im2col_left_start = im2col_row_start; memset(im2col_left_start, input_offset, (left_zero_count * input_depth)); } if (center_copy_count > 0) { const T1* input_row_start = input_batch_start + (in_y * input_width * input_depth) + (std::max(0, in_x_origin) * input_depth); T1* im2col_center_start = im2col_row_start + (left_zero_count * input_depth); memcpy(im2col_center_start, input_row_start, (center_copy_count * input_depth)); } if (right_zero_count > 0) { T1* im2col_right_start = im2col_row_start + ((left_zero_count + center_copy_count) * input_depth); memset(im2col_right_start, input_offset, (right_zero_count * input_depth)); } } } } // Now we've assembled a set of image patches into a matrix, apply a // GEMM matrix multiply of the patches as rows, times the filter // weights in columns, to get partial results in the output matrix. 
const int how_many_patches = patch_index_end - patch_index_start; const bool transpose_a = false; const bool transpose_b = false; const bool transpose_c = false; const int m = how_many_patches; const int n = filter_count; const int k = filter_value_count; const int lda = filter_value_count; const int ldb = filter_count; const int ldc = filter_count; T3* chunk_output_data = output_data + (patch_index_start * filter_count); if (meta::IsSupportedAndEnabled() && std::is_same<T1, quint8>() && std::is_same<T2, quint8>() && std::is_same<T3, qint32>() && (output_offset == 0) && (output_mult == 1) && (output_shift == 0) && (transpose_c == false) && (k <= 2048)) { meta::QuantizedGemm(context, transpose_a, transpose_b, im2col_buffer, filter_data, chunk_output_data, m, n, k, -input_offset, -filter_offset, lda, ldb, ldc); } else if (std::is_same<T1, quint8>() && std::is_same<T2, quint8>() && std::is_same<T3, qint32>() && (output_offset == 0) && (output_mult == 1) && (output_shift == 0)) { // The gemmlowp optimized library only works for a particular set of // data types, so check if we meet those requirements and fall back to a // slower reference implementation if not. const uint8* im2col_data_as_uint8 = &(im2col_buffer->value); const uint8* filter_data_as_uint8 = &(filter_data->value); int32* output_data_as_int32 = &(chunk_output_data->value); // All of the transpose_* variables are currently compile-time consts, // so we could just hard-code these values too, but that would break if // anybody changed those values in the future (e.g. to match the ability // of MatMul to specify them as attributes). We're using a verbose // approach of deriving the order values from the transpose variables to // be able to catch any changes like that. static const gemmlowp::MapOrder ResultOrder = !transpose_c ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; static const gemmlowp::MapOrder LhsOrder = !transpose_a ? 
gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; static const gemmlowp::MapOrder RhsOrder = !transpose_b ? gemmlowp::MapOrder::RowMajor : gemmlowp::MapOrder::ColMajor; gemmlowp::MatrixMap<const std::uint8_t, LhsOrder> lhs( im2col_data_as_uint8, m, k, lda); gemmlowp::MatrixMap<const std::uint8_t, RhsOrder> rhs( filter_data_as_uint8, k, n, ldb); gemmlowp::MatrixMap<std::int32_t, ResultOrder> result( output_data_as_int32, m, n, ldc); const std::tuple<> empty_pipeline = {}; auto& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); TensorflowGemmContext context(worker_threads.num_threads, worker_threads.workers); gemmlowp::GemmWithOutputPipeline<std::uint8_t, std::int32_t, gemmlowp::DefaultL8R8BitDepthParams>( &context, lhs, rhs, &result, -input_offset, -filter_offset, empty_pipeline); // Since gemmlowp uses assembly to write to the output, msan won't // detect the output buffer as written to, so we mark it manually. TF_ANNOTATE_MEMORY_IS_INITIALIZED(output_data_as_int32, m * n * sizeof(int32)); } else { ReferenceGemm<T1, T2, T3>( transpose_a, transpose_b, transpose_c, m, n, k, im2col_buffer, input_offset, lda, filter_data, filter_offset, ldb, chunk_output_data, output_shift, output_offset, output_mult, ldc); } } }
1447
True
1
CVE-2021-29523
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/69c68ecbb24dff3fa0e46da0d16c821a2dd22d7c', 'name': 'https://github.com/tensorflow/tensorflow/commit/69c68ecbb24dff3fa0e46da0d16c821a2dd22d7c', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-2cpx-427x-q2c6', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-2cpx-427x-q2c6', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-190'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a denial of service via a `CHECK`-fail in `tf.raw_ops.AddManySparseToTensorsMap`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/kernels/sparse_tensors_map_ops.cc#L257) takes the values specified in `sparse_shape` as dimensions for the output shape. The `TensorShape` constructor(https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/framework/tensor_shape.cc#L183-L188) uses a `CHECK` operation which triggers when `InitDims`(https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/framework/tensor_shape.cc#L212-L296) returns a non-OK status. This is a legacy implementation of the constructor and operations should use `BuildTensorShapeBase` or `AddDimWithStatus` to prevent `CHECK`-failures in the presence of overflows. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T17:06Z
2021-05-14T20:15Z
Integer Overflow or Wraparound
The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.
An integer overflow or wraparound occurs when an integer value is incremented to a value that is too large to store in the associated representation. When this occurs, the value may wrap to become a very small or negative number. While this may be intended behavior in circumstances that rely on wrapping, it can have security consequences if the wrap is unexpected. This is especially the case if the integer overflow can be triggered using user-supplied inputs. This becomes security-critical when the result is used to control looping, make a security decision, or determine the offset or size in behaviors such as memory allocation, copying, concatenation, etc.
https://cwe.mitre.org/data/definitions/190.html
0
Amit Patankar
2021-04-20 12:14:41-07:00
Fix overflow CHECK issue with `tf.raw_ops.AddManySparseToTensorsMap`. PiperOrigin-RevId: 369492969 Change-Id: I1d70d6c0c92e3d7a25bc3b3aa2a0c0ac9688bf81
69c68ecbb24dff3fa0e46da0d16c821a2dd22d7c
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::AddManySparseToTensorsMapOp::Compute
tensorflow::AddManySparseToTensorsMapOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor* input_indices; const Tensor* input_values; const Tensor* input_shape; SparseTensorsMap* map; OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices)); OP_REQUIRES_OK(context, context->input("sparse_values", &input_values)); OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape)); OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map)); OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()), errors::InvalidArgument( "Input indices should be a matrix but received shape ", input_indices->shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", input_values->shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()), errors::InvalidArgument( "Input shape should be a vector but received shape ", input_shape->shape().DebugString())); int rank = input_shape->NumElements(); OP_REQUIRES( context, rank > 1, errors::InvalidArgument( "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); TensorShape tensor_input_shape(input_shape->vec<int64>()); gtl::InlinedVector<int64, 8> std_order(rank); std::iota(std_order.begin(), std_order.end(), 0); SparseTensor input_st; OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values, tensor_input_shape, std_order, &input_st)); auto input_shape_t = input_shape->vec<int64>(); const int64 N = input_shape_t(0); Tensor sparse_handles(DT_INT64, TensorShape({N})); auto sparse_handles_t = sparse_handles.vec<int64>(); OP_REQUIRES_OK(context, input_st.IndicesValid()); // We can generate the output shape proto string now, for all // minibatch entries. 
TensorShape output_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( input_shape_t.data() + 1, input_shape->NumElements() - 1, &output_shape)); // Get groups by minibatch dimension std::unordered_set<int64> visited; sparse::GroupIterable minibatch = input_st.group({0}); for (const auto& subset : minibatch) { const int64 b = subset.group()[0]; visited.insert(b); OP_REQUIRES( context, b > -1 && b < N, errors::InvalidArgument( "Received unexpected column 0 value in input SparseTensor: ", b, " < 0 or >= N (= ", N, ")")); const auto indices = subset.indices(); const auto values = subset.values<T>(); const int64 num_entries = values.size(); Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1}); Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries}); auto output_indices_t = output_indices.matrix<int64>(); auto output_values_t = output_values.vec<T>(); for (int i = 0; i < num_entries; ++i) { for (int d = 1; d < rank; ++d) { output_indices_t(i, d - 1) = indices(i, d); } output_values_t(i) = values(i); } SparseTensor st_i; OP_REQUIRES_OK(context, SparseTensor::Create(output_indices, output_values, output_shape, &st_i)); int64 handle; OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle)); sparse_handles_t(b) = handle; } // Fill in any gaps; we must provide an empty ST for batch entries // the grouper didn't find. if (visited.size() < N) { Tensor empty_indices(DT_INT64, {0, rank - 1}); Tensor empty_values(DataTypeToEnum<T>::value, {0}); SparseTensor empty_st; OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values, output_shape, &empty_st)); for (int64 b = 0; b < N; ++b) { // We skipped this batch entry. if (visited.find(b) == visited.end()) { int64 handle; OP_REQUIRES_OK(context, map->AddSparseTensor(context, empty_st, &handle)); sparse_handles_t(b) = handle; } } } context->set_output(0, sparse_handles); }
785
True
1
CVE-2021-29534
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/69c68ecbb24dff3fa0e46da0d16c821a2dd22d7c', 'name': 'https://github.com/tensorflow/tensorflow/commit/69c68ecbb24dff3fa0e46da0d16c821a2dd22d7c', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-6j9c-grc6-5m6g', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-6j9c-grc6-5m6g', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-754'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a denial of service via a `CHECK`-fail in `tf.raw_ops.SparseConcat`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/b432a38fe0e1b4b904a6c222cbce794c39703e87/tensorflow/core/kernels/sparse_concat_op.cc#L76) takes the values specified in `shapes[0]` as dimensions for the output shape. The `TensorShape` constructor(https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/framework/tensor_shape.cc#L183-L188) uses a `CHECK` operation which triggers when `InitDims`(https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/framework/tensor_shape.cc#L212-L296) returns a non-OK status. This is a legacy implementation of the constructor and operations should use `BuildTensorShapeBase` or `AddDimWithStatus` to prevent `CHECK`-failures in the presence of overflows. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-27T17:30Z
2021-05-14T20:15Z
Improper Check for Unusual or Exceptional Conditions
The software does not check or incorrectly checks for unusual or exceptional conditions that are not expected to occur frequently during day to day operation of the software.
The programmer may assume that certain events or conditions will never occur or do not need to be worried about, such as low memory conditions, lack of access to resources due to restrictive permissions, or misbehaving clients or components. However, attackers may intentionally trigger these unusual conditions, thus violating the programmer's assumptions, possibly introducing instability, incorrect behavior, or a vulnerability. Note that this entry is not exclusively about the use of exceptions and exception handling, which are mechanisms for both checking and handling unusual or unexpected conditions.
https://cwe.mitre.org/data/definitions/754.html
0
Amit Patankar
2021-04-20 12:14:41-07:00
Fix overflow CHECK issue with `tf.raw_ops.AddManySparseToTensorsMap`. PiperOrigin-RevId: 369492969 Change-Id: I1d70d6c0c92e3d7a25bc3b3aa2a0c0ac9688bf81
69c68ecbb24dff3fa0e46da0d16c821a2dd22d7c
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::AddManySparseToTensorsMapOp::Compute
tensorflow::AddManySparseToTensorsMapOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor* input_indices; const Tensor* input_values; const Tensor* input_shape; SparseTensorsMap* map; OP_REQUIRES_OK(context, context->input("sparse_indices", &input_indices)); OP_REQUIRES_OK(context, context->input("sparse_values", &input_values)); OP_REQUIRES_OK(context, context->input("sparse_shape", &input_shape)); OP_REQUIRES_OK(context, GetMap(context, true /* is_writing */, &map)); OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices->shape()), errors::InvalidArgument( "Input indices should be a matrix but received shape ", input_indices->shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values->shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", input_values->shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape->shape()), errors::InvalidArgument( "Input shape should be a vector but received shape ", input_shape->shape().DebugString())); int rank = input_shape->NumElements(); OP_REQUIRES( context, rank > 1, errors::InvalidArgument( "Rank of input SparseTensor should be > 1, but saw rank: ", rank)); TensorShape tensor_input_shape(input_shape->vec<int64>()); gtl::InlinedVector<int64, 8> std_order(rank); std::iota(std_order.begin(), std_order.end(), 0); SparseTensor input_st; OP_REQUIRES_OK(context, SparseTensor::Create(*input_indices, *input_values, tensor_input_shape, std_order, &input_st)); auto input_shape_t = input_shape->vec<int64>(); const int64 N = input_shape_t(0); Tensor sparse_handles(DT_INT64, TensorShape({N})); auto sparse_handles_t = sparse_handles.vec<int64>(); OP_REQUIRES_OK(context, input_st.IndicesValid()); // We can generate the output shape proto string now, for all // minibatch entries. 
TensorShape output_shape; OP_REQUIRES_OK(context, TensorShapeUtils::MakeShape( input_shape_t.data() + 1, input_shape->NumElements() - 1, &output_shape)); // Get groups by minibatch dimension std::unordered_set<int64> visited; sparse::GroupIterable minibatch = input_st.group({0}); for (const auto& subset : minibatch) { const int64 b = subset.group()[0]; visited.insert(b); OP_REQUIRES( context, b > -1 && b < N, errors::InvalidArgument( "Received unexpected column 0 value in input SparseTensor: ", b, " < 0 or >= N (= ", N, ")")); const auto indices = subset.indices(); const auto values = subset.values<T>(); const int64 num_entries = values.size(); Tensor output_indices = Tensor(DT_INT64, {num_entries, rank - 1}); Tensor output_values = Tensor(DataTypeToEnum<T>::value, {num_entries}); auto output_indices_t = output_indices.matrix<int64>(); auto output_values_t = output_values.vec<T>(); for (int i = 0; i < num_entries; ++i) { for (int d = 1; d < rank; ++d) { output_indices_t(i, d - 1) = indices(i, d); } output_values_t(i) = values(i); } SparseTensor st_i; OP_REQUIRES_OK(context, SparseTensor::Create(output_indices, output_values, output_shape, &st_i)); int64 handle; OP_REQUIRES_OK(context, map->AddSparseTensor(context, st_i, &handle)); sparse_handles_t(b) = handle; } // Fill in any gaps; we must provide an empty ST for batch entries // the grouper didn't find. if (visited.size() < N) { Tensor empty_indices(DT_INT64, {0, rank - 1}); Tensor empty_values(DataTypeToEnum<T>::value, {0}); SparseTensor empty_st; OP_REQUIRES_OK(context, SparseTensor::Create(empty_indices, empty_values, output_shape, &empty_st)); for (int64 b = 0; b < N; ++b) { // We skipped this batch entry. if (visited.find(b) == visited.end()) { int64 handle; OP_REQUIRES_OK(context, map->AddSparseTensor(context, empty_st, &handle)); sparse_handles_t(b) = handle; } } } context->set_output(0, sparse_handles); }
785
True
1
CVE-2021-29530
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xcwj-wfcm-m23c', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xcwj-wfcm-m23c', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/e6a7c7cc18c3aaad1ae0872cb0a959f5c923d2bd', 'name': 'https://github.com/tensorflow/tensorflow/commit/e6a7c7cc18c3aaad1ae0872cb0a959f5c923d2bd', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a null pointer dereference by providing an invalid `permutation` to `tf.raw_ops.SparseMatrixSparseCholesky`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/080f1d9e257589f78b3ffb75debf584168aa6062/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc#L85-L86) fails to properly validate the input arguments. Although `ValidateInputs` is called and there are checks in the body of this function, the code proceeds to the next line in `ValidateInputs` since `OP_REQUIRES`(https://github.com/tensorflow/tensorflow/blob/080f1d9e257589f78b3ffb75debf584168aa6062/tensorflow/core/framework/op_requires.h#L41-L48) is a macro that only exits the current function. Thus, the first validation condition that fails in `ValidateInputs` will cause an early return from that function. However, the caller will continue execution from the next line. The fix is to either explicitly check `context->status()` or to convert `ValidateInputs` to return a `Status`. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T15:49Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2021-04-20 14:45:33-07:00
Remove `OP_REQUIRES` call from helper function. Since `OP_REQUIRES` macro expands to a `return;` (among other), calling it in a helper function only ends the helper function's execution earlier, but the kernel will still run from start to end. Thus, all the expected validations are actually broken/useless as the code ploughs through the next crash anyway. PiperOrigin-RevId: 369524386 Change-Id: I54f6cf9328445675ccc392e661b04336b229c9da
e6a7c7cc18c3aaad1ae0872cb0a959f5c923d2bd
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::CSRSparseCholeskyCPUOp::Compute
tensorflow::CSRSparseCholeskyCPUOp::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext* ctx) final { // Extract inputs and validate shapes and types. const CSRSparseMatrix* input_matrix; OP_REQUIRES_OK(ctx, ExtractVariantFromInput(ctx, 0, &input_matrix)); const Tensor& input_permutation_indices = ctx->input(1); int64 num_rows; int batch_size; ValidateInputs(ctx, *input_matrix, input_permutation_indices, &batch_size, &num_rows); // Allocate batch pointers. Tensor batch_ptr(cpu_allocator(), DT_INT32, TensorShape({batch_size + 1})); auto batch_ptr_vec = batch_ptr.vec<int32>(); batch_ptr_vec(0) = 0; // Temporary vector of Eigen SparseMatrices to store the Sparse Cholesky // factors. // Note: we use column-compressed (CSC) SparseMatrix because SimplicialLLT // returns the factors in column major format. Since our input should be // symmetric, column major and row major is identical in storage. We just // have to switch to reading the upper triangular part of the input, which // corresponds to the lower triangular part in row major format. std::vector<SparseMatrix> sparse_cholesky_factors(batch_size); // TODO(anudhyan): Tune the cost per unit based on benchmarks. const double nnz_per_row = (input_matrix->total_nnz() / batch_size) / num_rows; const int64 sparse_cholesky_cost_per_batch = nnz_per_row * nnz_per_row * num_rows; // Perform sparse Cholesky factorization of each batch in parallel. auto worker_threads = *(ctx->device()->tensorflow_cpu_worker_threads()); std::atomic<int64> invalid_input_index(-1); Shard(worker_threads.num_threads, worker_threads.workers, batch_size, sparse_cholesky_cost_per_batch, [&](int64 batch_begin, int64 batch_end) { for (int64 batch_index = batch_begin; batch_index < batch_end; ++batch_index) { // Define an Eigen SparseMatrix Map to operate on the // CSRSparseMatrix component without copying the data. 
Eigen::Map<const SparseMatrix> sparse_matrix( num_rows, num_rows, input_matrix->nnz(batch_index), input_matrix->row_pointers_vec(batch_index).data(), input_matrix->col_indices_vec(batch_index).data(), input_matrix->values_vec<T>(batch_index).data()); Eigen::SimplicialLLT<SparseMatrix, Eigen::Upper, Eigen::NaturalOrdering<int>> solver; auto permutation_indices_flat = input_permutation_indices.flat<int32>().data(); // Invert the fill-in reducing ordering and apply it to the input // sparse matrix. Eigen::Map< Eigen::PermutationMatrix<Eigen::Dynamic, Eigen::Dynamic, int>> permutation(permutation_indices_flat + batch_index * num_rows, num_rows); auto permutation_inverse = permutation.inverse(); SparseMatrix permuted_sparse_matrix; permuted_sparse_matrix.template selfadjointView<Eigen::Upper>() = sparse_matrix.template selfadjointView<Eigen::Upper>() .twistedBy(permutation_inverse); // Compute the Cholesky decomposition. solver.compute(permuted_sparse_matrix); if (solver.info() != Eigen::Success) { invalid_input_index = batch_index; return; } // Get the upper triangular factor, which would end up in the // lower triangular part of the output CSRSparseMatrix when // interpreted in row major format. sparse_cholesky_factors[batch_index] = std::move(solver.matrixU()); // For now, batch_ptr contains the number of nonzeros in each // batch. batch_ptr_vec(batch_index + 1) = sparse_cholesky_factors[batch_index].nonZeros(); } }); // Check for invalid input. OP_REQUIRES( ctx, invalid_input_index == -1, errors::InvalidArgument( "Sparse Cholesky factorization failed for batch index ", invalid_input_index.load(), ". The input might not be valid.")); // Compute a cumulative sum to obtain the batch pointers. std::partial_sum(batch_ptr_vec.data(), batch_ptr_vec.data() + batch_size + 1, batch_ptr_vec.data()); // Allocate output Tensors. 
const int64 total_nnz = batch_ptr_vec(batch_size); Tensor output_row_ptr(cpu_allocator(), DT_INT32, TensorShape({(num_rows + 1) * batch_size})); Tensor output_col_ind(cpu_allocator(), DT_INT32, TensorShape({total_nnz})); Tensor output_values(cpu_allocator(), DataTypeToEnum<T>::value, TensorShape({total_nnz})); auto output_row_ptr_ptr = output_row_ptr.flat<int32>().data(); auto output_col_ind_ptr = output_col_ind.flat<int32>().data(); auto output_values_ptr = output_values.flat<T>().data(); // Copy the output matrices from each batch into the CSRSparseMatrix // Tensors. // TODO(b/129906419): Factor out the copy from Eigen SparseMatrix to // CSRSparseMatrix into common utils. This is also used in // SparseMatrixSparseMatMul. Shard(worker_threads.num_threads, worker_threads.workers, batch_size, (3 * total_nnz) / batch_size /* cost per unit */, [&](int64 batch_begin, int64 batch_end) { for (int64 batch_index = batch_begin; batch_index < batch_end; ++batch_index) { const SparseMatrix& cholesky_factor = sparse_cholesky_factors[batch_index]; const int64 nnz = cholesky_factor.nonZeros(); std::copy(cholesky_factor.outerIndexPtr(), cholesky_factor.outerIndexPtr() + num_rows + 1, output_row_ptr_ptr + batch_index * (num_rows + 1)); std::copy(cholesky_factor.innerIndexPtr(), cholesky_factor.innerIndexPtr() + nnz, output_col_ind_ptr + batch_ptr_vec(batch_index)); std::copy(cholesky_factor.valuePtr(), cholesky_factor.valuePtr() + nnz, output_values_ptr + batch_ptr_vec(batch_index)); } }); // Create the CSRSparseMatrix instance from its component Tensors and // prepare the Variant output Tensor. 
CSRSparseMatrix output_csr_matrix; OP_REQUIRES_OK( ctx, CSRSparseMatrix::CreateCSRSparseMatrix( DataTypeToEnum<T>::value, input_matrix->dense_shape(), batch_ptr, output_row_ptr, output_col_ind, output_values, &output_csr_matrix)); Tensor* output_csr_matrix_tensor; AllocatorAttributes cpu_alloc; cpu_alloc.set_on_host(true); OP_REQUIRES_OK( ctx, ctx->allocate_output(0, TensorShape({}), &output_csr_matrix_tensor, cpu_alloc)); output_csr_matrix_tensor->scalar<Variant>()() = std::move(output_csr_matrix); }
848
True
1
CVE-2021-29530
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xcwj-wfcm-m23c', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xcwj-wfcm-m23c', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/e6a7c7cc18c3aaad1ae0872cb0a959f5c923d2bd', 'name': 'https://github.com/tensorflow/tensorflow/commit/e6a7c7cc18c3aaad1ae0872cb0a959f5c923d2bd', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a null pointer dereference by providing an invalid `permutation` to `tf.raw_ops.SparseMatrixSparseCholesky`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/080f1d9e257589f78b3ffb75debf584168aa6062/tensorflow/core/kernels/sparse/sparse_cholesky_op.cc#L85-L86) fails to properly validate the input arguments. Although `ValidateInputs` is called and there are checks in the body of this function, the code proceeds to the next line in `ValidateInputs` since `OP_REQUIRES`(https://github.com/tensorflow/tensorflow/blob/080f1d9e257589f78b3ffb75debf584168aa6062/tensorflow/core/framework/op_requires.h#L41-L48) is a macro that only exits the current function. Thus, the first validation condition that fails in `ValidateInputs` will cause an early return from that function. However, the caller will continue execution from the next line. The fix is to either explicitly check `context->status()` or to convert `ValidateInputs` to return a `Status`. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T15:49Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2021-04-20 14:45:33-07:00
Remove `OP_REQUIRES` call from helper function. Since `OP_REQUIRES` macro expands to a `return;` (among other), calling it in a helper function only ends the helper function's execution earlier, but the kernel will still run from start to end. Thus, all the expected validations are actually broken/useless as the code ploughs through the next crash anyway. PiperOrigin-RevId: 369524386 Change-Id: I54f6cf9328445675ccc392e661b04336b229c9da
e6a7c7cc18c3aaad1ae0872cb0a959f5c923d2bd
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::CSRSparseCholeskyCPUOp::ValidateInputs
tensorflow::CSRSparseCholeskyCPUOp::ValidateInputs( OpKernelContext * ctx , const CSRSparseMatrix & sparse_matrix , const Tensor & permutation_indices , int * batch_size , int64 * num_rows)
['ctx', 'sparse_matrix', 'permutation_indices', 'batch_size', 'num_rows']
void ValidateInputs(OpKernelContext* ctx, const CSRSparseMatrix& sparse_matrix, const Tensor& permutation_indices, int* batch_size, int64* num_rows) { OP_REQUIRES(ctx, sparse_matrix.dtype() == DataTypeToEnum<T>::value, errors::InvalidArgument( "Asked for a CSRSparseMatrix of type ", DataTypeString(DataTypeToEnum<T>::value), " but saw dtype: ", DataTypeString(sparse_matrix.dtype()))); const Tensor& dense_shape = sparse_matrix.dense_shape(); const int rank = dense_shape.dim_size(0); OP_REQUIRES(ctx, rank == 2 || rank == 3, errors::InvalidArgument("sparse matrix must have rank 2 or 3; ", "but dense_shape has size ", rank)); const int row_dim = (rank == 2) ? 0 : 1; auto dense_shape_vec = dense_shape.vec<int64>(); *num_rows = dense_shape_vec(row_dim); const int64 num_cols = dense_shape_vec(row_dim + 1); OP_REQUIRES(ctx, *num_rows == num_cols, errors::InvalidArgument("sparse matrix must be square; got: ", *num_rows, " != ", num_cols)); const TensorShape& perm_shape = permutation_indices.shape(); OP_REQUIRES( ctx, perm_shape.dims() + 1 == rank, errors::InvalidArgument( "sparse matrix must have the same rank as permutation; got: ", rank, " != ", perm_shape.dims(), " + 1.")); OP_REQUIRES( ctx, perm_shape.dim_size(rank - 2) == *num_rows, errors::InvalidArgument( "permutation must have the same number of elements in each batch " "as the number of rows in sparse matrix; got: ", perm_shape.dim_size(rank - 2), " != ", *num_rows)); *batch_size = sparse_matrix.batch_size(); if (*batch_size > 1) { OP_REQUIRES( ctx, perm_shape.dim_size(0) == *batch_size, errors::InvalidArgument("permutation must have the same batch size " "as sparse matrix; got: ", perm_shape.dim_size(0), " != ", *batch_size)); } }
324
True
1
CVE-2021-29533
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-393f-2jr3-cp69', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-393f-2jr3-cp69', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b432a38fe0e1b4b904a6c222cbce794c39703e87', 'name': 'https://github.com/tensorflow/tensorflow/commit/b432a38fe0e1b4b904a6c222cbce794c39703e87', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-754'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a denial of service via a `CHECK` failure by passing an empty image to `tf.raw_ops.DrawBoundingBoxes`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/ea34a18dc3f5c8d80a40ccca1404f343b5d55f91/tensorflow/core/kernels/image/draw_bounding_box_op.cc#L148-L165) uses `CHECK_*` assertions instead of `OP_REQUIRES` to validate user controlled inputs. Whereas `OP_REQUIRES` allows returning an error condition back to the user, the `CHECK_*` macros result in a crash if the condition is false, similar to `assert`. In this case, `height` is 0 from the `images` input. This results in `max_box_row_clamp` being negative and the assertion being falsified, followed by aborting program execution. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-27T17:30Z
2021-05-14T20:15Z
Improper Check for Unusual or Exceptional Conditions
The software does not check or incorrectly checks for unusual or exceptional conditions that are not expected to occur frequently during day to day operation of the software.
The programmer may assume that certain events or conditions will never occur or do not need to be worried about, such as low memory conditions, lack of access to resources due to restrictive permissions, or misbehaving clients or components. However, attackers may intentionally trigger these unusual conditions, thus violating the programmer's assumptions, possibly introducing instability, incorrect behavior, or a vulnerability. Note that this entry is not exclusively about the use of exceptions and exception handling, which are mechanisms for both checking and handling unusual or unexpected conditions.
https://cwe.mitre.org/data/definitions/754.html
0
Amit Patankar
2021-04-21 15:57:36-07:00
Fix overflow CHECK issue with `tf.raw_ops.DrawBoundingBoxes`. PiperOrigin-RevId: 369753591 Change-Id: I3b45fc98ee0d28a3c20b7e9c995aa647c976ec40
b432a38fe0e1b4b904a6c222cbce794c39703e87
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::DrawBoundingBoxesOp::Compute
tensorflow::DrawBoundingBoxesOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor& images = context->input(0); const Tensor& boxes = context->input(1); const int64 depth = images.dim_size(3); OP_REQUIRES(context, images.dims() == 4, errors::InvalidArgument("The rank of the images should be 4")); OP_REQUIRES( context, boxes.dims() == 3, errors::InvalidArgument("The rank of the boxes tensor should be 3")); OP_REQUIRES(context, images.dim_size(0) == boxes.dim_size(0), errors::InvalidArgument("The batch sizes should be the same")); OP_REQUIRES( context, depth == 4 || depth == 1 || depth == 3, errors::InvalidArgument("Channel depth should be either 1 (GRY), " "3 (RGB), or 4 (RGBA)")); const int64 batch_size = images.dim_size(0); const int64 height = images.dim_size(1); const int64 width = images.dim_size(2); std::vector<std::vector<float>> color_table; if (context->num_inputs() == 3) { const Tensor& colors_tensor = context->input(2); OP_REQUIRES(context, colors_tensor.shape().dims() == 2, errors::InvalidArgument("colors must be a 2-D matrix", colors_tensor.shape().DebugString())); OP_REQUIRES(context, colors_tensor.shape().dim_size(1) >= depth, errors::InvalidArgument("colors must have equal or more ", "channels than the image provided: ", colors_tensor.shape().DebugString())); if (colors_tensor.NumElements() != 0) { color_table.clear(); auto colors = colors_tensor.matrix<float>(); for (int64 i = 0; i < colors.dimension(0); i++) { std::vector<float> color_value(4); for (int64 j = 0; j < 4; j++) { color_value[j] = colors(i, j); } color_table.emplace_back(color_value); } } } if (color_table.empty()) { color_table = DefaultColorTable(depth); } Tensor* output; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({batch_size, height, width, depth}), &output)); output->tensor<T, 4>() = images.tensor<T, 4>(); auto canvas = output->tensor<T, 4>(); for (int64 b = 0; b < batch_size; ++b) { const int64 num_boxes = boxes.dim_size(1); const auto tboxes = boxes.tensor<T, 3>(); for (int64 
bb = 0; bb < num_boxes; ++bb) { int64 color_index = bb % color_table.size(); const int64 min_box_row = static_cast<float>(tboxes(b, bb, 0)) * (height - 1); const int64 min_box_row_clamp = std::max<int64>(min_box_row, int64{0}); const int64 max_box_row = static_cast<float>(tboxes(b, bb, 2)) * (height - 1); const int64 max_box_row_clamp = std::min<int64>(max_box_row, height - 1); const int64 min_box_col = static_cast<float>(tboxes(b, bb, 1)) * (width - 1); const int64 min_box_col_clamp = std::max<int64>(min_box_col, int64{0}); const int64 max_box_col = static_cast<float>(tboxes(b, bb, 3)) * (width - 1); const int64 max_box_col_clamp = std::min<int64>(max_box_col, width - 1); if (min_box_row > max_box_row || min_box_col > max_box_col) { LOG(WARNING) << "Bounding box (" << min_box_row << "," << min_box_col << "," << max_box_row << "," << max_box_col << ") is inverted and will not be drawn."; continue; } if (min_box_row >= height || max_box_row < 0 || min_box_col >= width || max_box_col < 0) { LOG(WARNING) << "Bounding box (" << min_box_row << "," << min_box_col << "," << max_box_row << "," << max_box_col << ") is completely outside the image" << " and will not be drawn."; continue; } // At this point, {min,max}_box_{row,col}_clamp are inside the // image. CHECK_GE(min_box_row_clamp, 0); CHECK_GE(max_box_row_clamp, 0); CHECK_LT(min_box_row_clamp, height); CHECK_LT(max_box_row_clamp, height); CHECK_GE(min_box_col_clamp, 0); CHECK_GE(max_box_col_clamp, 0); CHECK_LT(min_box_col_clamp, width); CHECK_LT(max_box_col_clamp, width); // At this point, the min_box_row and min_box_col are either // in the image or above/left of it, and max_box_row and // max_box_col are either in the image or below/right or it. CHECK_LT(min_box_row, height); CHECK_GE(max_box_row, 0); CHECK_LT(min_box_col, width); CHECK_GE(max_box_col, 0); // Draw top line. 
if (min_box_row >= 0) { for (int64 j = min_box_col_clamp; j <= max_box_col_clamp; ++j) for (int64 c = 0; c < depth; c++) { canvas(b, min_box_row, j, c) = static_cast<T>(color_table[color_index][c]); } } // Draw bottom line. if (max_box_row < height) { for (int64 j = min_box_col_clamp; j <= max_box_col_clamp; ++j) for (int64 c = 0; c < depth; c++) { canvas(b, max_box_row, j, c) = static_cast<T>(color_table[color_index][c]); } } // Draw left line. if (min_box_col >= 0) { for (int64 i = min_box_row_clamp; i <= max_box_row_clamp; ++i) for (int64 c = 0; c < depth; c++) { canvas(b, i, min_box_col, c) = static_cast<T>(color_table[color_index][c]); } } // Draw right line. if (max_box_col < width) { for (int64 i = min_box_row_clamp; i <= max_box_row_clamp; ++i) for (int64 c = 0; c < depth; c++) { canvas(b, i, max_box_col, c) = static_cast<T>(color_table[color_index][c]); } } } } }
1140
True
1
CVE-2021-29535
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-m3f9-w3p3-p669', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-m3f9-w3p3-p669', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/efea03b38fb8d3b81762237dc85e579cc5fc6e87', 'name': 'https://github.com/tensorflow/tensorflow/commit/efea03b38fb8d3b81762237dc85e579cc5fc6e87', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can cause a heap buffer overflow in `QuantizedMul` by passing in invalid thresholds for the quantization. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/87cf4d3ea9949051e50ca3f071fc909538a51cd0/tensorflow/core/kernels/quantized_mul_op.cc#L287-L290) assumes that the 4 arguments are always valid scalars and tries to access the numeric value directly. However, if any of these tensors is empty, then `.flat<T>()` is an empty buffer and accessing the element at position 0 results in overflow. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-26T16:02Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-04-21 16:15:46-07:00
Validate inputs to `QuantizedMul` PiperOrigin-RevId: 369756982 Change-Id: I00d960cc3b9316fd7a86bd37a44e341c96e17624
efea03b38fb8d3b81762237dc85e579cc5fc6e87
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::QuantizedMulOp::Compute
tensorflow::QuantizedMulOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor& x = context->input(0); const Tensor& y = context->input(1); const float min_x = context->input(2).flat<float>()(0); const float max_x = context->input(3).flat<float>()(0); const float min_y = context->input(4).flat<float>()(0); const float max_y = context->input(5).flat<float>()(0); BCast bcast(BCast::FromShape(x.shape()), BCast::FromShape(y.shape())); if (!bcast.IsValid()) { context->SetStatus(errors::InvalidArgument( "Incompatible shapes: ", x.shape().DebugString(), " vs. ", y.shape().DebugString())); return; } Tensor* z; OP_REQUIRES_OK(context, context->allocate_output( 0, BCast::ToShape(bcast.output_shape()), &z)); // Make sure that we have valid quantization ranges for the input buffers. // If the difference between the min and max is negative or zero, it makes // it hard to do meaningful intermediate operations on the values. OP_REQUIRES(context, (max_x > min_x), errors::InvalidArgument("max_x must be larger than min_a.")); OP_REQUIRES(context, (max_y > min_y), errors::InvalidArgument("max_x must be larger than min_b.")); const int32 offset_x = FloatToQuantizedUnclamped<T>(0.0f, min_x, max_x); const int32 offset_y = FloatToQuantizedUnclamped<T>(0.0f, min_y, max_y); const T* x_data = x.flat<T>().data(); const T* y_data = y.flat<T>().data(); Toutput* z_data = z->flat<Toutput>().data(); const int ndims = bcast.x_reshape().size(); if (ndims <= 1) { if (x.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, y_data, offset_y, y.NumElements(), x_data[0], offset_x, z_data); } else if (y.NumElements() == 1) { ScalarMultiply<T, Toutput>(context, x_data, offset_x, x.NumElements(), y_data[0], offset_y, z_data); } else { VectorMultiply<T, Toutput>(context, x_data, offset_x, y_data, offset_y, x.NumElements(), z_data); } } else if (ndims == 2) { const T* vector_data; int64 vector_num_elements; int32 vector_offset; const T* tensor_data; int64 tensor_num_elements; int32 tensor_offset; if (x.NumElements() < 
y.NumElements()) { vector_data = x_data; vector_num_elements = x.NumElements(); vector_offset = offset_x; tensor_data = y_data; tensor_num_elements = y.NumElements(); tensor_offset = offset_y; } else { vector_data = y_data; vector_num_elements = y.NumElements(); vector_offset = offset_y; tensor_data = x_data; tensor_num_elements = x.NumElements(); tensor_offset = offset_x; } if (vector_num_elements == 0) { context->SetStatus( errors::InvalidArgument("vector must have at least 1 element")); return; } VectorTensorMultiply<T, Toutput>( vector_data, vector_offset, vector_num_elements, tensor_data, tensor_offset, tensor_num_elements, z_data); } else { LOG(INFO) << "ndims=" << ndims; LOG(INFO) << "bcast.x_reshape()=" << TensorShape(bcast.x_reshape()).DebugString(); LOG(INFO) << "bcast.y_reshape()=" << TensorShape(bcast.y_reshape()).DebugString(); LOG(INFO) << "bcast.x_bcast()=" << TensorShape(bcast.x_bcast()).DebugString(); LOG(INFO) << "bcast.y_bcast()=" << TensorShape(bcast.y_bcast()).DebugString(); context->SetStatus(errors::Unimplemented( "Broadcast between ", context->input(0).shape().DebugString(), " and ", context->input(1).shape().DebugString(), " is not supported yet.")); return; } float min_z_value; float max_z_value; QuantizationRangeForMultiplication<T, T, Toutput>( min_x, max_x, min_y, max_y, &min_z_value, &max_z_value); Tensor* z_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &z_min)); z_min->flat<float>()(0) = min_z_value; Tensor* z_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &z_max)); z_max->flat<float>()(0) = max_z_value; }
893
True
1
CVE-2021-29537
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-8c89-2vwr-chcq', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-8c89-2vwr-chcq', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/f6c40f0c6cbf00d46c7717a26419f2062f2f8694', 'name': 'https://github.com/tensorflow/tensorflow/commit/f6c40f0c6cbf00d46c7717a26419f2062f2f8694', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can cause a heap buffer overflow in `QuantizedResizeBilinear` by passing in invalid thresholds for the quantization. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/50711818d2e61ccce012591eeb4fdf93a8496726/tensorflow/core/kernels/quantized_resize_bilinear_op.cc#L705-L706) assumes that the 2 arguments are always valid scalars and tries to access the numeric value directly. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-26T16:03Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-04-21 17:00:39-07:00
Validate min and max arguments to `QuantizedResizeBilinear`. PiperOrigin-RevId: 369765091 Change-Id: I33be8b78273ab7d08b97541692fe05cb7f94963a
f6c40f0c6cbf00d46c7717a26419f2062f2f8694
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::QuantizedResizeBilinearOp::Compute
tensorflow::QuantizedResizeBilinearOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const float in_min = context->input(2).flat<float>()(0); const float in_max = context->input(3).flat<float>()(0); ImageResizerState st(align_corners_, false); st.ValidateAndCreateOutput(context); if (!context->status().ok()) return; // Return if the output is empty. if (st.output->NumElements() == 0) return; typename TTypes<T, 4>::ConstTensor image_data( context->input(0).tensor<T, 4>()); typename TTypes<T, 4>::Tensor output_data(st.output->tensor<T, 4>()); ResizeBilinear<T>(image_data, st.height_scale, st.width_scale, in_min, in_max, half_pixel_centers_, &output_data); Tensor* out_min = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &out_min)); out_min->flat<float>()(0) = in_min; Tensor* out_max = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &out_max)); out_max->flat<float>()(0) = in_max; }
249
True
1
CVE-2021-29536
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-2gfx-95x2-5v3x', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-2gfx-95x2-5v3x', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/a324ac84e573fba362a5e53d4e74d5de6729933e', 'name': 'https://github.com/tensorflow/tensorflow/commit/a324ac84e573fba362a5e53d4e74d5de6729933e', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can cause a heap buffer overflow in `QuantizedReshape` by passing in invalid thresholds for the quantization. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/a324ac84e573fba362a5e53d4e74d5de6729933e/tensorflow/core/kernels/quantized_reshape_op.cc#L38-L55) assumes that the 2 arguments are always valid scalars and tries to access the numeric value directly. However, if any of these tensors is empty, then `.flat<T>()` is an empty buffer and accessing the element at position 0 results in overflow. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-26T16:02Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-04-21 18:11:15-07:00
Validate arguments to `QuantizedReshape`. Ensure that validations from `Reshape` also terminate `QuantizedReshape` on failure. PiperOrigin-RevId: 369775421 Change-Id: If8c5342267aceea65b7cb83a4b183304886f1ce8
a324ac84e573fba362a5e53d4e74d5de6729933e
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::QuantizedReshapeOp::Compute
tensorflow::QuantizedReshapeOp::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext* ctx) override { // This call processes inputs 1 and 2 to write output 0. ReshapeOp::Compute(ctx); const float input_min_float = ctx->input(2).flat<float>()(0); const float input_max_float = ctx->input(3).flat<float>()(0); Tensor* output_min = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(1, TensorShape({}), &output_min)); output_min->flat<float>()(0) = input_min_float; Tensor* output_max = nullptr; OP_REQUIRES_OK(ctx, ctx->allocate_output(2, TensorShape({}), &output_max)); output_max->flat<float>()(0) = input_max_float; }
140
True
1
CVE-2021-29541
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ba424dd8f16f7110eea526a8086f1a155f14f22b', 'name': 'https://github.com/tensorflow/tensorflow/commit/ba424dd8f16f7110eea526a8086f1a155f14f22b', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xqfj-35wv-m3cr', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xqfj-35wv-m3cr', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a dereference of a null pointer in `tf.raw_ops.StringNGrams`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/1cdd4da14282210cc759e468d9781741ac7d01bf/tensorflow/core/kernels/string_ngrams_op.cc#L67-L74) does not fully validate the `data_splits` argument. This would result in `ngrams_data`(https://github.com/tensorflow/tensorflow/blob/1cdd4da14282210cc759e468d9781741ac7d01bf/tensorflow/core/kernels/string_ngrams_op.cc#L106-L110) to be a null pointer when the output would be computed to have 0 or negative size. Later writes to the output tensor would then cause a null pointer dereference. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T16:21Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2021-04-22 13:29:54-07:00
Enhance validation of ngram op and handle case of 0 tokens. PiperOrigin-RevId: 369940178 Change-Id: Ia82f42c09d14efe76e7dc013505b832a42282f0b
ba424dd8f16f7110eea526a8086f1a155f14f22b
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::text::StringNGramsOp::Compute
tensorflow::text::StringNGramsOp::Compute( tensorflow :: OpKernelContext * context)
['context']
void Compute(tensorflow::OpKernelContext* context) override { const tensorflow::Tensor* data; OP_REQUIRES_OK(context, context->input("data", &data)); const auto& input_data = data->flat<tstring>().data(); const tensorflow::Tensor* splits; OP_REQUIRES_OK(context, context->input("data_splits", &splits)); const auto& splits_vec = splits->flat<SPLITS_TYPE>(); // Validate that the splits are valid indices into data const int input_data_size = data->flat<tstring>().size(); const int splits_vec_size = splits_vec.size(); for (int i = 0; i < splits_vec_size; ++i) { bool valid_splits = splits_vec(i) >= 0; valid_splits = valid_splits && (splits_vec(i) <= input_data_size); OP_REQUIRES( context, valid_splits, errors::InvalidArgument("Invalid split value ", splits_vec(i), ", must be in [0,", input_data_size, "]")); } int num_batch_items = splits_vec.size() - 1; tensorflow::Tensor* ngrams_splits; OP_REQUIRES_OK( context, context->allocate_output(1, splits->shape(), &ngrams_splits)); auto ngrams_splits_data = ngrams_splits->flat<SPLITS_TYPE>().data(); // If there is no data or size, return an empty RT. 
if (data->flat<tstring>().size() == 0 || splits_vec.size() == 0) { tensorflow::Tensor* empty; OP_REQUIRES_OK(context, context->allocate_output(0, data->shape(), &empty)); for (int i = 0; i <= num_batch_items; ++i) { ngrams_splits_data[i] = 0; } return; } ngrams_splits_data[0] = 0; for (int i = 1; i <= num_batch_items; ++i) { int length = splits_vec(i) - splits_vec(i - 1); int num_ngrams = 0; for (int ngram_width : ngram_widths_) num_ngrams += get_num_ngrams(length, ngram_width); if (preserve_short_ && length > 0 && num_ngrams == 0) { num_ngrams = 1; } ngrams_splits_data[i] = ngrams_splits_data[i - 1] + num_ngrams; } tensorflow::Tensor* ngrams; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({ngrams_splits_data[num_batch_items]}), &ngrams)); auto ngrams_data = ngrams->flat<tstring>().data(); for (int i = 0; i < num_batch_items; ++i) { auto data_start = &input_data[splits_vec(i)]; int output_start_idx = ngrams_splits_data[i]; for (int ngram_width : ngram_widths_) { auto output_start = &ngrams_data[output_start_idx]; int length = splits_vec(i + 1) - splits_vec(i); int num_ngrams = get_num_ngrams(length, ngram_width); CreateNgrams(data_start, output_start, num_ngrams, ngram_width); output_start_idx += num_ngrams; } // If we're preserving short sequences, check to see if no sequence was // generated by comparing the current output start idx to the original // one (ngram_splits_data). If no ngrams were generated, then they will // be equal (since we increment output_start_idx by num_ngrams every // time we create a set of ngrams.) if (preserve_short_ && output_start_idx == ngrams_splits_data[i]) { int data_length = splits_vec(i + 1) - splits_vec(i); // One legitimate reason to not have any ngrams when preserve_short_ // is true is if the sequence itself is empty. In that case, move on. 
if (data_length == 0) { continue; } // We don't have to worry about dynamic padding sizes here: if padding // was dynamic, every sequence would have had sufficient padding to // generate at least one ngram. int ngram_width = data_length + 2 * pad_width_; auto output_start = &ngrams_data[output_start_idx]; int num_ngrams = 1; CreateNgrams(data_start, output_start, num_ngrams, ngram_width); } } }
615
True
1
CVE-2021-29542
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4hrh-9vmp-2jgg', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4hrh-9vmp-2jgg', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ba424dd8f16f7110eea526a8086f1a155f14f22b', 'name': 'https://github.com/tensorflow/tensorflow/commit/ba424dd8f16f7110eea526a8086f1a155f14f22b', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can cause a heap buffer overflow by passing crafted inputs to `tf.raw_ops.StringNGrams`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/1cdd4da14282210cc759e468d9781741ac7d01bf/tensorflow/core/kernels/string_ngrams_op.cc#L171-L185) fails to consider corner cases where input would be split in such a way that the generated tokens should only contain padding elements. If input is such that `num_tokens` is 0, then, for `data_start_index=0` (when left padding is present), the marked line would result in reading `data[-1]`. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2022-04-25T20:03Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-04-22 13:29:54-07:00
Enhance validation of ngram op and handle case of 0 tokens. PiperOrigin-RevId: 369940178 Change-Id: Ia82f42c09d14efe76e7dc013505b832a42282f0b
ba424dd8f16f7110eea526a8086f1a155f14f22b
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::text::StringNGramsOp::Compute
tensorflow::text::StringNGramsOp::Compute( tensorflow :: OpKernelContext * context)
['context']
void Compute(tensorflow::OpKernelContext* context) override { const tensorflow::Tensor* data; OP_REQUIRES_OK(context, context->input("data", &data)); const auto& input_data = data->flat<tstring>().data(); const tensorflow::Tensor* splits; OP_REQUIRES_OK(context, context->input("data_splits", &splits)); const auto& splits_vec = splits->flat<SPLITS_TYPE>(); // Validate that the splits are valid indices into data const int input_data_size = data->flat<tstring>().size(); const int splits_vec_size = splits_vec.size(); for (int i = 0; i < splits_vec_size; ++i) { bool valid_splits = splits_vec(i) >= 0; valid_splits = valid_splits && (splits_vec(i) <= input_data_size); OP_REQUIRES( context, valid_splits, errors::InvalidArgument("Invalid split value ", splits_vec(i), ", must be in [0,", input_data_size, "]")); } int num_batch_items = splits_vec.size() - 1; tensorflow::Tensor* ngrams_splits; OP_REQUIRES_OK( context, context->allocate_output(1, splits->shape(), &ngrams_splits)); auto ngrams_splits_data = ngrams_splits->flat<SPLITS_TYPE>().data(); // If there is no data or size, return an empty RT. 
if (data->flat<tstring>().size() == 0 || splits_vec.size() == 0) { tensorflow::Tensor* empty; OP_REQUIRES_OK(context, context->allocate_output(0, data->shape(), &empty)); for (int i = 0; i <= num_batch_items; ++i) { ngrams_splits_data[i] = 0; } return; } ngrams_splits_data[0] = 0; for (int i = 1; i <= num_batch_items; ++i) { int length = splits_vec(i) - splits_vec(i - 1); int num_ngrams = 0; for (int ngram_width : ngram_widths_) num_ngrams += get_num_ngrams(length, ngram_width); if (preserve_short_ && length > 0 && num_ngrams == 0) { num_ngrams = 1; } ngrams_splits_data[i] = ngrams_splits_data[i - 1] + num_ngrams; } tensorflow::Tensor* ngrams; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({ngrams_splits_data[num_batch_items]}), &ngrams)); auto ngrams_data = ngrams->flat<tstring>().data(); for (int i = 0; i < num_batch_items; ++i) { auto data_start = &input_data[splits_vec(i)]; int output_start_idx = ngrams_splits_data[i]; for (int ngram_width : ngram_widths_) { auto output_start = &ngrams_data[output_start_idx]; int length = splits_vec(i + 1) - splits_vec(i); int num_ngrams = get_num_ngrams(length, ngram_width); CreateNgrams(data_start, output_start, num_ngrams, ngram_width); output_start_idx += num_ngrams; } // If we're preserving short sequences, check to see if no sequence was // generated by comparing the current output start idx to the original // one (ngram_splits_data). If no ngrams were generated, then they will // be equal (since we increment output_start_idx by num_ngrams every // time we create a set of ngrams.) if (preserve_short_ && output_start_idx == ngrams_splits_data[i]) { int data_length = splits_vec(i + 1) - splits_vec(i); // One legitimate reason to not have any ngrams when preserve_short_ // is true is if the sequence itself is empty. In that case, move on. 
if (data_length == 0) { continue; } // We don't have to worry about dynamic padding sizes here: if padding // was dynamic, every sequence would have had sufficient padding to // generate at least one ngram. int ngram_width = data_length + 2 * pad_width_; auto output_start = &ngrams_data[output_start_idx]; int num_ngrams = 1; CreateNgrams(data_start, output_start, num_ngrams, ngram_width); } } }
615
True
1
CVE-2021-29541
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ba424dd8f16f7110eea526a8086f1a155f14f22b', 'name': 'https://github.com/tensorflow/tensorflow/commit/ba424dd8f16f7110eea526a8086f1a155f14f22b', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xqfj-35wv-m3cr', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xqfj-35wv-m3cr', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a dereference of a null pointer in `tf.raw_ops.StringNGrams`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/1cdd4da14282210cc759e468d9781741ac7d01bf/tensorflow/core/kernels/string_ngrams_op.cc#L67-L74) does not fully validate the `data_splits` argument. This would result in `ngrams_data`(https://github.com/tensorflow/tensorflow/blob/1cdd4da14282210cc759e468d9781741ac7d01bf/tensorflow/core/kernels/string_ngrams_op.cc#L106-L110) to be a null pointer when the output would be computed to have 0 or negative size. Later writes to the output tensor would then cause a null pointer dereference. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T16:21Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2021-04-22 13:29:54-07:00
Enhance validation of ngram op and handle case of 0 tokens. PiperOrigin-RevId: 369940178 Change-Id: Ia82f42c09d14efe76e7dc013505b832a42282f0b
ba424dd8f16f7110eea526a8086f1a155f14f22b
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::text::StringNGramsOp::CreateNgrams
tensorflow::text::StringNGramsOp::CreateNgrams( const tstring * data , tstring * output , int num_ngrams , int ngram_width) const
['data', 'output', 'num_ngrams', 'ngram_width']
void CreateNgrams(const tstring* data, tstring* output, int num_ngrams, int ngram_width) const { for (int ngram_index = 0; ngram_index < num_ngrams; ++ngram_index) { int pad_width = get_pad_width(ngram_width); int left_padding = std::max(0, pad_width - ngram_index); int right_padding = std::max(0, pad_width - (num_ngrams - (ngram_index + 1))); int num_tokens = ngram_width - (left_padding + right_padding); int data_start_index = left_padding > 0 ? 0 : ngram_index - pad_width; // Calculate the total expected size of the ngram so we can reserve the // correct amount of space in the string. int ngram_size = 0; // Size of the left padding. ngram_size += left_padding * left_pad_.length(); // Size of the tokens. for (int n = 0; n < num_tokens; ++n) { ngram_size += data[data_start_index + n].length(); } // Size of the right padding. ngram_size += right_padding * right_pad_.length(); // Size of the separators. int num_separators = left_padding + right_padding + num_tokens - 1; ngram_size += num_separators * separator_.length(); // Build the ngram. tstring* ngram = &output[ngram_index]; ngram->reserve(ngram_size); for (int n = 0; n < left_padding; ++n) { ngram->append(left_pad_); ngram->append(separator_); } for (int n = 0; n < num_tokens - 1; ++n) { ngram->append(data[data_start_index + n]); ngram->append(separator_); } ngram->append(data[data_start_index + num_tokens - 1]); for (int n = 0; n < right_padding; ++n) { ngram->append(separator_); ngram->append(right_pad_); } // In debug mode only: validate that we've reserved enough space for the // ngram. DCHECK_EQ(ngram_size, ngram->size()); } }
318
True
1
CVE-2021-29542
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4hrh-9vmp-2jgg', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4hrh-9vmp-2jgg', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/ba424dd8f16f7110eea526a8086f1a155f14f22b', 'name': 'https://github.com/tensorflow/tensorflow/commit/ba424dd8f16f7110eea526a8086f1a155f14f22b', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can cause a heap buffer overflow by passing crafted inputs to `tf.raw_ops.StringNGrams`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/1cdd4da14282210cc759e468d9781741ac7d01bf/tensorflow/core/kernels/string_ngrams_op.cc#L171-L185) fails to consider corner cases where input would be split in such a way that the generated tokens should only contain padding elements. If input is such that `num_tokens` is 0, then, for `data_start_index=0` (when left padding is present), the marked line would result in reading `data[-1]`. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2022-04-25T20:03Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-04-22 13:29:54-07:00
Enhance validation of ngram op and handle case of 0 tokens. PiperOrigin-RevId: 369940178 Change-Id: Ia82f42c09d14efe76e7dc013505b832a42282f0b
ba424dd8f16f7110eea526a8086f1a155f14f22b
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::text::StringNGramsOp::CreateNgrams
tensorflow::text::StringNGramsOp::CreateNgrams( const tstring * data , tstring * output , int num_ngrams , int ngram_width) const
['data', 'output', 'num_ngrams', 'ngram_width']
void CreateNgrams(const tstring* data, tstring* output, int num_ngrams, int ngram_width) const { for (int ngram_index = 0; ngram_index < num_ngrams; ++ngram_index) { int pad_width = get_pad_width(ngram_width); int left_padding = std::max(0, pad_width - ngram_index); int right_padding = std::max(0, pad_width - (num_ngrams - (ngram_index + 1))); int num_tokens = ngram_width - (left_padding + right_padding); int data_start_index = left_padding > 0 ? 0 : ngram_index - pad_width; // Calculate the total expected size of the ngram so we can reserve the // correct amount of space in the string. int ngram_size = 0; // Size of the left padding. ngram_size += left_padding * left_pad_.length(); // Size of the tokens. for (int n = 0; n < num_tokens; ++n) { ngram_size += data[data_start_index + n].length(); } // Size of the right padding. ngram_size += right_padding * right_pad_.length(); // Size of the separators. int num_separators = left_padding + right_padding + num_tokens - 1; ngram_size += num_separators * separator_.length(); // Build the ngram. tstring* ngram = &output[ngram_index]; ngram->reserve(ngram_size); for (int n = 0; n < left_padding; ++n) { ngram->append(left_pad_); ngram->append(separator_); } for (int n = 0; n < num_tokens - 1; ++n) { ngram->append(data[data_start_index + n]); ngram->append(separator_); } ngram->append(data[data_start_index + num_tokens - 1]); for (int n = 0; n < right_padding; ++n) { ngram->append(separator_); ngram->append(right_pad_); } // In debug mode only: validate that we've reserved enough space for the // ngram. DCHECK_EQ(ngram_size, ngram->size()); } }
318
True
1
CVE-2021-29615
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/e07e1c3d26492c06f078c7e5bf2d138043e199c1', 'name': 'https://github.com/tensorflow/tensorflow/commit/e07e1c3d26492c06f078c7e5bf2d138043e199c1', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qw5h-7f53-xrp6', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-qw5h-7f53-xrp6', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-674'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The implementation of `ParseAttrValue`(https://github.com/tensorflow/tensorflow/blob/c22d88d6ff33031aa113e48aa3fc9aa74ed79595/tensorflow/core/framework/attr_value_util.cc#L397-L453) can be tricked into stack overflow due to recursion by giving in a specially crafted input. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-18T18:22Z
2021-05-14T20:15Z
Uncontrolled Recursion
The product does not properly control the amount of recursion which takes place, consuming excessive resources, such as allocated memory or the program stack.
https://cwe.mitre.org/data/definitions/674.html
0
Laura Pak
2021-04-23 10:33:00-07:00
Prevent memory overflow in ParseAttrValue from nested tensors. PiperOrigin-RevId: 370108442 Change-Id: I84d64a5e8895a6aeffbf4749841b4c54d51b5889
e07e1c3d26492c06f078c7e5bf2d138043e199c1
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::ParseAttrValue
tensorflow::ParseAttrValue( StringPiece type , StringPiece text , AttrValue * out)
['type', 'text', 'out']
bool ParseAttrValue(StringPiece type, StringPiece text, AttrValue* out) { // Parse type. string field_name; bool is_list = absl::ConsumePrefix(&type, "list("); if (absl::ConsumePrefix(&type, "string")) { field_name = "s"; } else if (absl::ConsumePrefix(&type, "int")) { field_name = "i"; } else if (absl::ConsumePrefix(&type, "float")) { field_name = "f"; } else if (absl::ConsumePrefix(&type, "bool")) { field_name = "b"; } else if (absl::ConsumePrefix(&type, "type")) { field_name = "type"; } else if (absl::ConsumePrefix(&type, "shape")) { field_name = "shape"; } else if (absl::ConsumePrefix(&type, "tensor")) { field_name = "tensor"; } else if (absl::ConsumePrefix(&type, "func")) { field_name = "func"; } else if (absl::ConsumePrefix(&type, "placeholder")) { field_name = "placeholder"; } else { return false; } if (is_list && !absl::ConsumePrefix(&type, ")")) { return false; } // Construct a valid text proto message to parse. string to_parse; if (is_list) { // TextFormat parser considers "i: 7" to be the same as "i: [7]", // but we only want to allow list values with []. StringPiece cleaned = text; str_util::RemoveLeadingWhitespace(&cleaned); str_util::RemoveTrailingWhitespace(&cleaned); if (cleaned.size() < 2 || cleaned[0] != '[' || cleaned[cleaned.size() - 1] != ']') { return false; } cleaned.remove_prefix(1); str_util::RemoveLeadingWhitespace(&cleaned); if (cleaned.size() == 1) { // User wrote "[]", so return empty list without invoking the TextFormat // parse which returns an error for "i: []". out->Clear(); out->mutable_list(); return true; } to_parse = strings::StrCat("list { ", field_name, ": ", text, " }"); } else { to_parse = strings::StrCat(field_name, ": ", text); } return ProtoParseFromString(to_parse, out); }
374
True
1
CVE-2021-29547
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4fg4-p75j-w5xj', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-4fg4-p75j-w5xj', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/d6ed5bcfe1dcab9e85a4d39931bd18d99018e75b', 'name': 'https://github.com/tensorflow/tensorflow/commit/d6ed5bcfe1dcab9e85a4d39931bd18d99018e75b', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can cause a segfault and denial of service via accessing data outside of bounds in `tf.raw_ops.QuantizedBatchNormWithGlobalNormalization`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/55a97caa9e99c7f37a0bbbeb414dc55553d3ae7f/tensorflow/core/kernels/quantized_batch_norm_op.cc#L176-L189) assumes the inputs are not empty. If any of these inputs is empty, `.flat<T>()` is an empty buffer, so accessing the element at index 0 is accessing data outside of bounds. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-27T17:25Z
2021-05-14T20:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2021-04-23 11:40:06-07:00
Add missing validation in `QuantizedBatchNormWithGlobalNormalization` PiperOrigin-RevId: 370123451 Change-Id: Id234d6dab1ec21230bb8e503dba30f899af87f33
d6ed5bcfe1dcab9e85a4d39931bd18d99018e75b
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::QuantizedBatchNormOp::Compute
tensorflow::QuantizedBatchNormOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const float input_min = context->input(1).flat<float>()(0); const float input_max = context->input(2).flat<float>()(0); const Tensor& mean = context->input(3); const float mean_min = context->input(4).flat<float>()(0); const float mean_max = context->input(5).flat<float>()(0); const Tensor& var = context->input(6); const float var_min = context->input(7).flat<float>()(0); const float var_max = context->input(8).flat<float>()(0); const Tensor& beta = context->input(9); const float beta_min = context->input(10).flat<float>()(0); const float beta_max = context->input(11).flat<float>()(0); const Tensor& gamma = context->input(12); const float gamma_min = context->input(13).flat<float>()(0); const float gamma_max = context->input(14).flat<float>()(0); OP_REQUIRES(context, input.dims() == 4, errors::InvalidArgument("input must be 4-dimensional", input.shape().DebugString())); OP_REQUIRES(context, mean.dims() == 1, errors::InvalidArgument("mean must be 1-dimensional", mean.shape().DebugString())); OP_REQUIRES(context, var.dims() == 1, errors::InvalidArgument("var must be 1-dimensional", var.shape().DebugString())); OP_REQUIRES(context, beta.dims() == 1, errors::InvalidArgument("beta must be 1-dimensional", beta.shape().DebugString())); OP_REQUIRES(context, gamma.dims() == 1, errors::InvalidArgument("gamma must be 1-dimensional", gamma.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); float output_min; float output_max; FixedPointBatchNorm<T1, T2>(input, input_min, input_max, mean, mean_min, mean_max, var, var_min, var_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, variance_epsilon_, scale_after_normalization_, output, &output_min, &output_max); Tensor* output_min_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min_tensor)); output_min_tensor->flat<float>()(0) 
= output_min; Tensor* output_max_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max_tensor)); output_max_tensor->flat<float>()(0) = output_max; }
588
True
1
CVE-2021-29548
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/d6ed5bcfe1dcab9e85a4d39931bd18d99018e75b', 'name': 'https://github.com/tensorflow/tensorflow/commit/d6ed5bcfe1dcab9e85a4d39931bd18d99018e75b', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-p45v-v4pw-77jr', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-p45v-v4pw-77jr', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-369'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. An attacker can cause a runtime division by zero error and denial of service in `tf.raw_ops.QuantizedBatchNormWithGlobalNormalization`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/55a97caa9e99c7f37a0bbbeb414dc55553d3ae7f/tensorflow/core/kernels/quantized_batch_norm_op.cc) does not validate all constraints specified in the op's contract(https://www.tensorflow.org/api_docs/python/tf/raw_ops/QuantizedBatchNormWithGlobalNormalization). The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-05-20T15:17Z
2021-05-14T20:15Z
Divide By Zero
The product divides a value by zero.
This weakness typically occurs when an unexpected value is provided to the product, or if an error occurs that is not properly detected. It frequently occurs in calculations involving physical dimensions such as size, length, width, and height.
https://cwe.mitre.org/data/definitions/369.html
0
Mihai Maruseac
2021-04-23 11:40:06-07:00
Add missing validation in `QuantizedBatchNormWithGlobalNormalization` PiperOrigin-RevId: 370123451 Change-Id: Id234d6dab1ec21230bb8e503dba30f899af87f33
d6ed5bcfe1dcab9e85a4d39931bd18d99018e75b
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::QuantizedBatchNormOp::Compute
tensorflow::QuantizedBatchNormOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor& input = context->input(0); const float input_min = context->input(1).flat<float>()(0); const float input_max = context->input(2).flat<float>()(0); const Tensor& mean = context->input(3); const float mean_min = context->input(4).flat<float>()(0); const float mean_max = context->input(5).flat<float>()(0); const Tensor& var = context->input(6); const float var_min = context->input(7).flat<float>()(0); const float var_max = context->input(8).flat<float>()(0); const Tensor& beta = context->input(9); const float beta_min = context->input(10).flat<float>()(0); const float beta_max = context->input(11).flat<float>()(0); const Tensor& gamma = context->input(12); const float gamma_min = context->input(13).flat<float>()(0); const float gamma_max = context->input(14).flat<float>()(0); OP_REQUIRES(context, input.dims() == 4, errors::InvalidArgument("input must be 4-dimensional", input.shape().DebugString())); OP_REQUIRES(context, mean.dims() == 1, errors::InvalidArgument("mean must be 1-dimensional", mean.shape().DebugString())); OP_REQUIRES(context, var.dims() == 1, errors::InvalidArgument("var must be 1-dimensional", var.shape().DebugString())); OP_REQUIRES(context, beta.dims() == 1, errors::InvalidArgument("beta must be 1-dimensional", beta.shape().DebugString())); OP_REQUIRES(context, gamma.dims() == 1, errors::InvalidArgument("gamma must be 1-dimensional", gamma.shape().DebugString())); Tensor* output = nullptr; OP_REQUIRES_OK(context, context->allocate_output(0, input.shape(), &output)); float output_min; float output_max; FixedPointBatchNorm<T1, T2>(input, input_min, input_max, mean, mean_min, mean_max, var, var_min, var_max, beta, beta_min, beta_max, gamma, gamma_min, gamma_max, variance_epsilon_, scale_after_normalization_, output, &output_min, &output_max); Tensor* output_min_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(1, {}, &output_min_tensor)); output_min_tensor->flat<float>()(0) 
= output_min; Tensor* output_max_tensor = nullptr; OP_REQUIRES_OK(context, context->allocate_output(2, {}, &output_max_tensor)); output_max_tensor->flat<float>()(0) = output_max; }
588
True
1
CVE-2021-29551
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/480641e3599775a8895254ffbc0fc45621334f68', 'name': 'https://github.com/tensorflow/tensorflow/commit/480641e3599775a8895254ffbc0fc45621334f68', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-vqw6-72r7-fgw7', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-vqw6-72r7-fgw7', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The implementation of `MatrixTriangularSolve`(https://github.com/tensorflow/tensorflow/blob/8cae746d8449c7dda5298327353d68613f16e798/tensorflow/core/kernels/linalg/matrix_triangular_solve_op_impl.h#L160-L240) fails to terminate kernel execution if one validation condition fails. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-27T17:18Z
2021-05-14T20:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2021-04-24 16:47:25-07:00
Validate (and ensure validation sticks) inputs for `MatrixTriangularSolve`. PiperOrigin-RevId: 370282444 Change-Id: Iaed61a0b0727cc42c830658b72eb69f785f48dc5
480641e3599775a8895254ffbc0fc45621334f68
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::MatrixTriangularSolveOp::ValidateInputTensors
tensorflow::MatrixTriangularSolveOp::ValidateInputTensors( OpKernelContext * ctx , const Tensor & in0 , const Tensor & in1)
['ctx', 'in0', 'in1']
void ValidateInputTensors(OpKernelContext* ctx, const Tensor& in0, const Tensor& in1) override { OP_REQUIRES( ctx, in0.dims() >= 2, errors::InvalidArgument("In[0] ndims must be >= 2: ", in0.dims())); OP_REQUIRES( ctx, in1.dims() >= 2, errors::InvalidArgument("In[0] ndims must be >= 2: ", in1.dims())); }
71
True
1
CVE-2021-29590
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:N/A:P
LOCAL
LOW
NONE
PARTIAL
NONE
PARTIAL
3.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
NONE
HIGH
7.1
HIGH
1.8
5.2
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/953f28dca13c92839ba389c055587cfe6c723578', 'name': 'https://github.com/tensorflow/tensorflow/commit/953f28dca13c92839ba389c055587cfe6c723578', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-24x6-8c7m-hv3f', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-24x6-8c7m-hv3f', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The implementations of the `Minimum` and `Maximum` TFLite operators can be used to read data outside of bounds of heap allocated objects, if any of the two input tensor arguments are empty. This is because the broadcasting implementation(https://github.com/tensorflow/tensorflow/blob/0d45ea1ca641b21b73bcf9c00e0179cda284e7e7/tensorflow/lite/kernels/internal/reference/maximum_minimum.h#L52-L56) indexes in both tensors with the same index but does not validate that the index is within bounds. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-19T15:04Z
2021-05-14T20:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2021-04-27 17:46:38-07:00
Prevent a null pointer exception in TFLite PiperOrigin-RevId: 370800206 Change-Id: Idd437ebce4ff224120d8eefc1c14c062173b71d6
953f28dca13c92839ba389c055587cfe6c723578
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::maximum_minimum::Eval
tflite::ops::builtin::maximum_minimum::Eval( TfLiteContext * context , TfLiteNode * node)
['context', 'node']
TfLiteStatus Eval(TfLiteContext* context, TfLiteNode* node) { OpContext op_context(context, node); switch (op_context.output->type) { case kTfLiteFloat32: TFLiteOperation<kernel_type, float, OpType>(context, node, op_context); break; case kTfLiteUInt8: TFLiteOperation<kernel_type, uint8_t, OpType>(context, node, op_context); break; case kTfLiteInt8: TFLiteOperation<kernel_type, int8_t, OpType>(context, node, op_context); break; case kTfLiteInt32: TFLiteOperation<kernel_type, int32_t, OpType>(context, node, op_context); break; case kTfLiteInt64: TFLiteOperation<kernel_type, int64_t, OpType>(context, node, op_context); break; case kTfLiteInt16: TFLiteOperation<kernel_type, int16_t, OpType>(context, node, op_context); break; default: context->ReportError(context, "Type %d is currently not supported by Maximum.", op_context.output->type); return kTfLiteError; } return kTfLiteOk; }
179
True
1
CVE-2021-29592
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/f8378920345f4f4604202d4ab15ef64b2aceaa16', 'name': 'https://github.com/tensorflow/tensorflow/commit/f8378920345f4f4604202d4ab15ef64b2aceaa16', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-jjr8-m8g8-p6wv', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-jjr8-m8g8-p6wv', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The fix for CVE-2020-15209(https://cve.mitre.org/cgi-bin/cvename.cgi?name=CVE-2020-15209) missed the case when the target shape of `Reshape` operator is given by the elements of a 1-D tensor. As such, the fix for the vulnerability(https://github.com/tensorflow/tensorflow/blob/9c1dc920d8ffb4893d6c9d27d1f039607b326743/tensorflow/lite/core/subgraph.cc#L1062-L1074) allowed passing a null-buffer-backed tensor with a 1D shape. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-19T16:28Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2021-04-27 17:47:59-07:00
Prevent a null pointer dereference in TFLite. PiperOrigin-RevId: 370800353 Change-Id: Ic9c9712ce5c6e384c954dcd640a5bd9ff05c9a05
f8378920345f4f4604202d4ab15ef64b2aceaa16
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::Subgraph::Invoke
tflite::Subgraph::Invoke()
[]
TfLiteStatus Subgraph::Invoke() { if (!consistent_) { ReportError("Invoke called on model that is not consistent."); return kTfLiteError; } TfLiteStatus status = kTfLiteOk; if (state_ == kStateUninvokable) { ReportError("Invoke called on model that is not ready."); return kTfLiteError; } else if (memory_planner_ && !memory_planner_->HasNonPersistentMemory()) { ReportError("Non-persistent memory is not available."); return kTfLiteError; } TFLITE_SCOPED_TAGGED_DEFAULT_PROFILE(profiler_.get(), "Invoke"); // Invocations are always done in node order. // Note that calling Invoke repeatedly will cause the original memory plan to // be reused, unless either ResizeInputTensor() or AllocateTensors() has been // called. for (int execution_plan_index = 0; execution_plan_index < execution_plan_.size(); execution_plan_index++) { if (execution_plan_index == next_execution_plan_index_to_prepare_) { TF_LITE_ENSURE_STATUS(PrepareOpsAndTensors()); TF_LITE_ENSURE(&context_, next_execution_plan_index_to_prepare_ >= execution_plan_index); } int node_index = execution_plan_[execution_plan_index]; TfLiteNode& node = nodes_and_registration_[node_index].first; const TfLiteRegistration& registration = nodes_and_registration_[node_index].second; const char* op_name = nullptr; if (profiler_) op_name = GetTFLiteOpName(registration); TFLITE_SCOPED_TAGGED_OPERATOR_PROFILE(profiler_.get(), op_name, node_index); for (int i = 0; i < node.inputs->size; ++i) { int tensor_index = node.inputs->data[i]; if (tensor_index == kTfLiteOptionalTensor) { continue; } TfLiteTensor* tensor = &tensors_[tensor_index]; if (tensor->delegate && tensor->delegate != node.delegate && tensor->data_is_stale) { TF_LITE_ENSURE_STATUS(EnsureTensorDataIsReadable(tensor_index)); } if (tensor->data.raw == nullptr && tensor->bytes > 0) { if (registration.builtin_code == kTfLiteBuiltinReshape && i == 1) { // In general, having a tensor here with no buffer will be an error. 
// However, for the reshape operator, the second input tensor is only // used for the shape, not for the data. Thus, null buffer is ok. continue; } else { // In all other cases, we need to return an error as otherwise we will // trigger a null pointer dereference (likely). ReportError("Input tensor %d lacks data", tensor_index); return kTfLiteError; } } } if (check_cancelled_func_ != nullptr && check_cancelled_func_(cancellation_data_)) { ReportError("Client requested cancel during Invoke()"); return kTfLiteError; } EnsureTensorsVectorCapacity(); tensor_resized_since_op_invoke_ = false; if (OpInvoke(registration, &node) != kTfLiteOk) { return ReportOpError(&context_, node, registration, node_index, "failed to invoke"); } // Force execution prep for downstream ops if the latest op triggered the // resize of a dynamic tensor. if (tensor_resized_since_op_invoke_ && HasDynamicTensor(context_, node.outputs)) { next_execution_plan_index_to_prepare_ = execution_plan_index + 1; // This happens when an intermediate dynamic tensor is resized. // We don't have to prepare all the ops, but we need to recompute // the allocation plan. if (next_execution_plan_index_to_plan_allocation_ > next_execution_plan_index_to_prepare_) { next_execution_plan_index_to_plan_allocation_ = next_execution_plan_index_to_prepare_; if (memory_planner_) { TF_LITE_ENSURE_STATUS(memory_planner_->ResetAllocationsAfter( next_execution_plan_index_to_plan_allocation_ - 1)); } } } } return status; }
422
True
1
CVE-2021-29594
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ff489d95a9006be080ad14feb378f2b4dac35552', 'name': 'https://github.com/tensorflow/tensorflow/commit/ff489d95a9006be080ad14feb378f2b4dac35552', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-3qgw-p4fm-x7gf', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-3qgw-p4fm-x7gf', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-369'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. TFLite's convolution code(https://github.com/tensorflow/tensorflow/blob/09c73bca7d648e961dd05898292d91a8322a9d45/tensorflow/lite/kernels/conv.cc) has multiple division where the divisor is controlled by the user and not checked to be non-zero. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-05-19T16:33Z
2021-05-14T20:15Z
Divide By Zero
The product divides a value by zero.
This weakness typically occurs when an unexpected value is provided to the product, or if an error occurs that is not properly detected. It frequently occurs in calculations involving physical dimensions such as size, length, width, and height.
https://cwe.mitre.org/data/definitions/369.html
0
Mihai Maruseac
2021-04-28 12:37:35-07:00
Prevent division by 0. PiperOrigin-RevId: 370962554 Change-Id: I0b9b62f4d8e1046dd88f9433f8dfeaf61a901680
ff489d95a9006be080ad14feb378f2b4dac35552
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::EvalHybrid
tflite::ops::builtin::conv::EvalHybrid( TfLiteContext * context , TfLiteNode * node , TfLiteConvParams * params , OpData * data , const TfLiteTensor * input , const TfLiteTensor * filter , const TfLiteTensor * bias , TfLiteTensor * im2col , TfLiteTensor * accum_scratch , TfLiteTensor * output)
['context', 'node', 'params', 'data', 'input', 'filter', 'bias', 'im2col', 'accum_scratch', 'output']
TfLiteStatus EvalHybrid(TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* im2col, TfLiteTensor* accum_scratch, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); const float* input_ptr = GetTensorData<float>(input); TfLiteTensor* quantized_input_tensor; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, data->input_quantized_index, &quantized_input_tensor)); int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>(quantized_input_tensor); TfLiteTensor* scaling_factors_tensor; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, data->scaling_factors_index, &scaling_factors_tensor)); float* scaling_factors_ptr = GetTensorData<float>(scaling_factors_tensor); // Per-batch input quantization for higher accuracy. { ruy::profiler::ScopeLabel label("ConvHybridQuantizeInputs"); for (int b = 0; b < batch_size; ++b) { float unused_min, unused_max; const int offset = b * input_size; tensor_utils::SymmetricQuantizeFloats( input_ptr + offset, input_size, quantized_input_ptr_batch + offset, &unused_min, &unused_max, &scaling_factors_ptr[b]); scaling_factors_ptr[b] *= filter->params.scale; } } switch (kernel_type) { case kReference: case kGenericOptimized: case kMultithreadOptimized: case kCblasOptimized: { // There is only one implementation for hybrid kernel. 
ConvParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.dilation_width_factor = params->dilation_width_factor; op_params.dilation_height_factor = params->dilation_height_factor; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; optimized_ops::HybridConv( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), GetTensorData<int8_t>(filter), GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(accum_scratch), GetTensorData<int32_t>(accum_scratch), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), GetTensorData<int8_t>(im2col), CpuBackendContext::GetFromContext(context)); break; } } return kTfLiteOk; }
447
True
1
CVE-2021-29594
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ff489d95a9006be080ad14feb378f2b4dac35552', 'name': 'https://github.com/tensorflow/tensorflow/commit/ff489d95a9006be080ad14feb378f2b4dac35552', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-3qgw-p4fm-x7gf', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-3qgw-p4fm-x7gf', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-369'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. TFLite's convolution code(https://github.com/tensorflow/tensorflow/blob/09c73bca7d648e961dd05898292d91a8322a9d45/tensorflow/lite/kernels/conv.cc) has multiple division where the divisor is controlled by the user and not checked to be non-zero. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-05-19T16:33Z
2021-05-14T20:15Z
Divide By Zero
The product divides a value by zero.
This weakness typically occurs when an unexpected value is provided to the product, or if an error occurs that is not properly detected. It frequently occurs in calculations involving physical dimensions such as size, length, width, and height.
https://cwe.mitre.org/data/definitions/369.html
0
Mihai Maruseac
2021-04-28 12:37:35-07:00
Prevent division by 0. PiperOrigin-RevId: 370962554 Change-Id: I0b9b62f4d8e1046dd88f9433f8dfeaf61a901680
ff489d95a9006be080ad14feb378f2b4dac35552
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::conv::EvalHybridPerChannel
tflite::ops::builtin::conv::EvalHybridPerChannel( TfLiteContext * context , TfLiteNode * node , TfLiteConvParams * params , OpData * data , const TfLiteTensor * input , const TfLiteTensor * filter , const TfLiteTensor * bias , TfLiteTensor * im2col , TfLiteTensor * output)
['context', 'node', 'params', 'data', 'input', 'filter', 'bias', 'im2col', 'output']
TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, TfLiteConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* im2col, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); TfLiteTensor* quantized_input_tensor; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, data->input_quantized_index, &quantized_input_tensor)); int8_t* quantized_input_ptr_batch = GetTensorData<int8_t>(quantized_input_tensor); TfLiteTensor* scaling_factors_tensor; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, data->scaling_factors_index, &scaling_factors_tensor)); float* scaling_factors_ptr = GetTensorData<float>(scaling_factors_tensor); TfLiteTensor* input_offset_tensor; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, data->input_offset_index, &input_offset_tensor)); int32_t* input_offset_ptr = GetTensorData<int32_t>(input_offset_tensor); for (int b = 0; b < batch_size; ++b) { const int offset = b * input_size; tensor_utils::AsymmetricQuantizeFloats( GetTensorData<float>(input) + offset, input_size, quantized_input_ptr_batch + offset, &scaling_factors_ptr[b], &input_offset_ptr[b]); } int8_t* im2col_ptr = nullptr; int8_t* filter_ptr = nullptr; if (im2col != nullptr) { im2col_ptr = im2col->data.int8; } filter_ptr = filter->data.int8; const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); KernelType effective_kernel_type = kernel_type; // We have to fallback to reference execution path when im2col is needed but // disabled because to-be-allocated temporary im2col tensor is too large. // See b/178743262 for the detailed motivation. 
if (data->im2col_oversized) { effective_kernel_type = kReference; } ConvParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.dilation_width_factor = params->dilation_width_factor; op_params.dilation_height_factor = params->dilation_height_factor; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = output_activation_max; switch (effective_kernel_type) { case kReference: reference_ops::HybridConvPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), filter_ptr, GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), im2col_ptr, affine_quantization->scale->data, input_offset_ptr); break; case kGenericOptimized: case kMultithreadOptimized: case kCblasOptimized: { TfLiteTensor* row_sums; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, data->row_sums_index, &row_sums)); TfLiteTensor* scratch; TF_LITE_ENSURE_OK( context, GetTemporarySafe(context, node, data->accum_scratch_index, &scratch)); optimized_ops::HybridConvPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), filter_ptr, GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), GetTensorShape(im2col), im2col_ptr, affine_quantization->scale->data, input_offset_ptr, GetTensorShape(scratch), GetTensorData<int32>(scratch), GetTensorData<int32_t>(row_sums), &data->compute_hybrid_row_sums, CpuBackendContext::GetFromContext(context)); data->compute_hybrid_row_sums = false; break; } } return kTfLiteOk; }
640
True
1
CVE-2021-29602
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/cbda3c6b2dbbd3fbdc482ff8c0170a78ec2e97d0', 'name': 'https://github.com/tensorflow/tensorflow/commit/cbda3c6b2dbbd3fbdc482ff8c0170a78ec2e97d0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rf3h-xgv5-2q39', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rf3h-xgv5-2q39', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-369'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. The implementation of the `DepthwiseConv` TFLite operator is vulnerable to a division by zero error(https://github.com/tensorflow/tensorflow/blob/1a8e885b864c818198a5b2c0cbbeca5a1e833bc8/tensorflow/lite/kernels/depthwise_conv.cc#L287-L288). An attacker can craft a model such that `input`'s fourth dimension would be 0. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-05-18T15:30Z
2021-05-14T20:15Z
Divide By Zero
The product divides a value by zero.
This weakness typically occurs when an unexpected value is provided to the product, or if an error occurs that is not properly detected. It frequently occurs in calculations involving physical dimensions such as size, length, width, and height.
https://cwe.mitre.org/data/definitions/369.html
0
Mihai Maruseac
2021-04-28 15:53:48-07:00
Prevent divisions by 0 PiperOrigin-RevId: 371003153 Change-Id: Idef56c95b9fcaeb97f87e18c7a674dbeb5173204
cbda3c6b2dbbd3fbdc482ff8c0170a78ec2e97d0
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::depthwise_conv::ComputeDepthMultiplier
tflite::ops::builtin::depthwise_conv::ComputeDepthMultiplier( TfLiteContext * context , const TfLiteTensor * input , const TfLiteTensor * filter , int16 * depth_multiplier)
['context', 'input', 'filter', 'depth_multiplier']
TfLiteStatus ComputeDepthMultiplier(TfLiteContext* context, const TfLiteTensor* input, const TfLiteTensor* filter, int16* depth_multiplier) { int num_filter_channels = SizeOfDimension(filter, 3); int num_input_channels = SizeOfDimension(input, 3); TF_LITE_ENSURE_EQ(context, num_filter_channels % num_input_channels, 0); *depth_multiplier = num_filter_channels / num_input_channels; return kTfLiteOk; }
63
True
1
CVE-2021-29602
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/cbda3c6b2dbbd3fbdc482ff8c0170a78ec2e97d0', 'name': 'https://github.com/tensorflow/tensorflow/commit/cbda3c6b2dbbd3fbdc482ff8c0170a78ec2e97d0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rf3h-xgv5-2q39', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rf3h-xgv5-2q39', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-369'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. The implementation of the `DepthwiseConv` TFLite operator is vulnerable to a division by zero error(https://github.com/tensorflow/tensorflow/blob/1a8e885b864c818198a5b2c0cbbeca5a1e833bc8/tensorflow/lite/kernels/depthwise_conv.cc#L287-L288). An attacker can craft a model such that `input`'s fourth dimension would be 0. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-05-18T15:30Z
2021-05-14T20:15Z
Divide By Zero
The product divides a value by zero.
This weakness typically occurs when an unexpected value is provided to the product, or if an error occurs that is not properly detected. It frequently occurs in calculations involving physical dimensions such as size, length, width, and height.
https://cwe.mitre.org/data/definitions/369.html
0
Mihai Maruseac
2021-04-28 15:53:48-07:00
Prevent divisions by 0 PiperOrigin-RevId: 371003153 Change-Id: Idef56c95b9fcaeb97f87e18c7a674dbeb5173204
cbda3c6b2dbbd3fbdc482ff8c0170a78ec2e97d0
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tflite::ops::builtin::depthwise_conv::EvalHybridPerChannel
tflite::ops::builtin::depthwise_conv::EvalHybridPerChannel( TfLiteContext * context , TfLiteNode * node , TfLiteDepthwiseConvParams * params , OpData * data , const TfLiteTensor * input , const TfLiteTensor * filter , const TfLiteTensor * bias , TfLiteTensor * output)
['context', 'node', 'params', 'data', 'input', 'filter', 'bias', 'output']
TfLiteStatus EvalHybridPerChannel(TfLiteContext* context, TfLiteNode* node, TfLiteDepthwiseConvParams* params, OpData* data, const TfLiteTensor* input, const TfLiteTensor* filter, const TfLiteTensor* bias, TfLiteTensor* output) { float output_activation_min, output_activation_max; CalculateActivationRange(params->activation, &output_activation_min, &output_activation_max); const int input_size = NumElements(input) / SizeOfDimension(input, 0); const int batch_size = SizeOfDimension(input, 0); TfLiteTensor* input_quantized; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, data->input_quantized_index, &input_quantized)); int8_t* quantized_input_ptr_batch = input_quantized->data.int8; TfLiteTensor* scaling_factors_tensor; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, data->scaling_factors_index, &scaling_factors_tensor)); float* scaling_factors_ptr = GetTensorData<float>(scaling_factors_tensor); TfLiteTensor* input_offset_tensor; TF_LITE_ENSURE_OK(context, GetTemporarySafe(context, node, data->input_offset_index, &input_offset_tensor)); int32_t* input_offset_ptr = GetTensorData<int32_t>(input_offset_tensor); for (int b = 0; b < batch_size; ++b) { const int offset = b * input_size; tensor_utils::AsymmetricQuantizeFloats( GetTensorData<float>(input) + offset, input_size, quantized_input_ptr_batch + offset, &scaling_factors_ptr[b], &input_offset_ptr[b]); } DepthwiseParams op_params; op_params.padding_type = PaddingType::kSame; op_params.padding_values.width = data->padding.width; op_params.padding_values.height = data->padding.height; op_params.stride_width = params->stride_width; op_params.stride_height = params->stride_height; op_params.dilation_width_factor = params->dilation_width_factor; op_params.dilation_height_factor = params->dilation_height_factor; op_params.depth_multiplier = params->depth_multiplier; op_params.weights_offset = 0; op_params.float_activation_min = output_activation_min; op_params.float_activation_max = 
output_activation_max; const auto* affine_quantization = reinterpret_cast<TfLiteAffineQuantization*>(filter->quantization.params); if (kernel_type == kReference) { reference_integer_ops::DepthwiseConvHybridPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), GetTensorData<int8>(filter), GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), affine_quantization->scale->data, input_offset_ptr); } else { optimized_integer_ops::DepthwiseConvHybridPerChannel( op_params, scaling_factors_ptr, GetTensorShape(input), quantized_input_ptr_batch, GetTensorShape(filter), GetTensorData<int8>(filter), GetTensorShape(bias), GetTensorData<float>(bias), GetTensorShape(output), GetTensorData<float>(output), affine_quantization->scale->data, input_offset_ptr, CpuBackendContext::GetFromContext(context)); } return kTfLiteOk; }
502
True
1
CVE-2021-29605
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-jf7h-7m85-w2v2', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-jf7h-7m85-w2v2', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/7c8cc4ec69cd348e44ad6a2699057ca88faad3e5', 'name': 'https://github.com/tensorflow/tensorflow/commit/7c8cc4ec69cd348e44ad6a2699057ca88faad3e5', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-190'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The TFLite code for allocating `TFLiteIntArray`s is vulnerable to an integer overflow issue(https://github.com/tensorflow/tensorflow/blob/4ceffae632721e52bf3501b736e4fe9d1221cdfa/tensorflow/lite/c/common.c#L24-L27). An attacker can craft a model such that the `size` multiplier is so large that the return value overflows the `int` datatype and becomes negative. In turn, this results in invalid value being given to `malloc`(https://github.com/tensorflow/tensorflow/blob/4ceffae632721e52bf3501b736e4fe9d1221cdfa/tensorflow/lite/c/common.c#L47-L52). In this case, `ret->size` would dereference an invalid pointer. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-18T15:12Z
2021-05-14T20:15Z
Integer Overflow or Wraparound
The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.
An integer overflow or wraparound occurs when an integer value is incremented to a value that is too large to store in the associated representation. When this occurs, the value may wrap to become a very small or negative number. While this may be intended behavior in circumstances that rely on wrapping, it can have security consequences if the wrap is unexpected. This is especially the case if the integer overflow can be triggered using user-supplied inputs. This becomes security-critical when the result is used to control looping, make a security decision, or determine the offset or size in behaviors such as memory allocation, copying, concatenation, etc.
https://cwe.mitre.org/data/definitions/190.html
0
Mihai Maruseac
2021-04-29 19:43:09-07:00
Fix a dangerous integer overflow and a malloc of negative size. PiperOrigin-RevId: 371254154 Change-Id: I250a98a3df26328770167025670235a963a72da0
7c8cc4ec69cd348e44ad6a2699057ca88faad3e5
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
TfLiteIntArrayCreate
TfLiteIntArrayCreate( int size)
['size']
TfLiteIntArray* TfLiteIntArrayCreate(int size) { TfLiteIntArray* ret = (TfLiteIntArray*)malloc(TfLiteIntArrayGetSizeInBytes(size)); ret->size = size; return ret; }
32
True
1
CVE-2021-29614
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/698e01511f62a3c185754db78ebce0eee1f0184d', 'name': 'https://github.com/tensorflow/tensorflow/commit/698e01511f62a3c185754db78ebce0eee1f0184d', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-8pmx-p244-g88h', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-8pmx-p244-g88h', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The implementation of `tf.io.decode_raw` produces incorrect results and crashes the Python interpreter when combining `fixed_length` and wider datatypes. The implementation of the padded version(https://github.com/tensorflow/tensorflow/blob/1d8903e5b167ed0432077a3db6e462daf781d1fe/tensorflow/core/kernels/decode_padded_raw_op.cc) is buggy due to a confusion about pointer arithmetic rules. First, the code computes(https://github.com/tensorflow/tensorflow/blob/1d8903e5b167ed0432077a3db6e462daf781d1fe/tensorflow/core/kernels/decode_padded_raw_op.cc#L61) the width of each output element by dividing the `fixed_length` value to the size of the type argument. The `fixed_length` argument is also used to determine the size needed for the output tensor(https://github.com/tensorflow/tensorflow/blob/1d8903e5b167ed0432077a3db6e462daf781d1fe/tensorflow/core/kernels/decode_padded_raw_op.cc#L63-L79). This is followed by reencoding code(https://github.com/tensorflow/tensorflow/blob/1d8903e5b167ed0432077a3db6e462daf781d1fe/tensorflow/core/kernels/decode_padded_raw_op.cc#L85-L94). The erroneous code is the last line above: it is moving the `out_data` pointer by `fixed_length * sizeof(T)` bytes whereas it only copied at most `fixed_length` bytes from the input. This results in parts of the input not being decoded into the output. Furthermore, because the pointer advance is far wider than desired, this quickly leads to writing to outside the bounds of the backing data. This OOB write leads to interpreter crash in the reproducer mentioned here, but more severe attacks can be mounted too, given that this gadget allows writing to periodically placed locations in memory. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2022-04-25T20:09Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-04-30 06:36:59-07:00
Fix `tf.io.decode_raw` bugs and update documentation. Fixes cases where specifying `fixed_length` resulted in data loss and even segfault and corruption of the Python interpreter. The fix is subtle but needed due to pointer arithmetic rules. Makes sure that `fixed_length` does not change the output when present but not needed. Eliminates needless copy and cast in the main codepath. PiperOrigin-RevId: 371322725 Change-Id: I514ef67a2961c86422f69d05122d31615e87896c
698e01511f62a3c185754db78ebce0eee1f0184d
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::DecodePaddedRawOp::Compute
tensorflow::DecodePaddedRawOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const auto& input = context->input(0); auto flat_in = input.flat<tstring>(); int fixed_length; const auto& length_input = context->input(1); OP_REQUIRES(context, TensorShapeUtils::IsScalar(length_input.shape()), errors::InvalidArgument("k must be scalar, got shape ", length_input.shape().DebugString())); fixed_length = length_input.scalar<int32>()(); OP_REQUIRES( context, fixed_length % sizeof(T) == 0, errors::InvalidArgument( "fixed_length (", fixed_length, ") must be a multiple of the size of out_type (", sizeof(T), ")")); OP_REQUIRES(context, fixed_length > 0, errors::InvalidArgument("fixed_length (", fixed_length, ") must be greater than zero.")); int width = fixed_length / sizeof(T); TensorShape out_shape = input.shape(); out_shape.AddDim(width); Tensor* output_tensor = nullptr; OP_REQUIRES_OK( context, context->allocate_output("output", out_shape, &output_tensor)); if (flat_in.size() == 0) { // Empty input return; } auto out = output_tensor->flat_inner_dims<T>(); T* out_data = out.data(); // Forcibly clear memory - we're going to copy variable length strings in, // and need to ensure that if we don't write to byte N when we copy, that // we're not getting random data. memset(out_data, 0, fixed_length * flat_in.size()); // If the data is already in the host's byte order, or if the width of the // output type is a single byte (meaning the ordering doesn't matter), we // can copy the memory directly. if (!convert_data_endianness_ || sizeof(T) == 1) { for (int64 i = 0; i < flat_in.size(); ++i) { const T* in_data = reinterpret_cast<const T*>(flat_in(i).data()); if (flat_in(i).size() > fixed_length) { memcpy(out_data, in_data, fixed_length); } else { memcpy(out_data, in_data, flat_in(i).size()); } out_data += fixed_length; } } else { // Otherwise, the data is not in the host's byte order, and rather than a // direct copy, we need to reverse the byte ordering of each element. 
for (int64 i = 0; i < flat_in.size(); ++i) { const char* in_data_bytes = reinterpret_cast<const char*>(flat_in(i).data()); char* out_data_bytes = reinterpret_cast<char*>(out_data); const char* p_in = in_data_bytes; char* p_out = out_data_bytes; for (; p_in < in_data_bytes + fixed_length; p_in += sizeof(T), p_out += sizeof(T)) { std::reverse_copy(p_in, p_in + sizeof(T), p_out); } out_data += fixed_length; } } }
465
True
1
CVE-2021-29565
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-r6pg-pjwc-j585', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-r6pg-pjwc-j585', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/faa76f39014ed3b5e2c158593b1335522e573c7f', 'name': 'https://github.com/tensorflow/tensorflow/commit/faa76f39014ed3b5e2c158593b1335522e573c7f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a null pointer dereference in the implementation of `tf.raw_ops.SparseFillEmptyRows`. This is because of missing validation(https://github.com/tensorflow/tensorflow/blob/fdc82089d206e281c628a93771336bf87863d5e8/tensorflow/core/kernels/sparse_fill_empty_rows_op.cc#L230-L231) that was covered under a `TODO`. If the `dense_shape` tensor is empty, then `dense_shape_t.vec<>()` would cause a null pointer dereference in the implementation of the op. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T16:53Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Amit Patankar
2021-05-04 15:35:39-07:00
Fix heap-buffer-overflow issue with `tf.raw_ops.SparseFillEmptyRows`. PiperOrigin-RevId: 372009178 Change-Id: Ia1a9e9691ecaa072f32fb39a0887b2aabd399210
faa76f39014ed3b5e2c158593b1335522e573c7f
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseFillEmptyRowsOpImpl
tensorflow::SparseFillEmptyRowsOpImpl( OpKernelContext * context , AsyncOpKernel :: DoneCallback done = nullptr)
['context', 'done']
void SparseFillEmptyRowsOpImpl(OpKernelContext* context, AsyncOpKernel::DoneCallback done = nullptr) { // Note that setting this empty lambda as the default parameter value directly // can cause strange compiler/linker errors, so we do it like this instead. if (!done) { done = [] {}; } const int kIndicesInput = 0; const int kValuesInput = 1; const int kDenseShapeInput = 2; const int kDefaultValueInput = 3; const Tensor& indices_t = context->input(kIndicesInput); const Tensor& values_t = context->input(kValuesInput); const Tensor& dense_shape_t = context->input(kDenseShapeInput); const Tensor& default_value_t = context->input(kDefaultValueInput); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsVector(dense_shape_t.shape()), errors::InvalidArgument("dense_shape must be a vector, saw: ", dense_shape_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsMatrix(indices_t.shape()), errors::InvalidArgument("indices must be a matrix, saw: ", indices_t.shape().DebugString()), done); OP_REQUIRES_ASYNC(context, TensorShapeUtils::IsVector(values_t.shape()), errors::InvalidArgument("values must be a vector, saw: ", values_t.shape().DebugString()), done); OP_REQUIRES_ASYNC( context, TensorShapeUtils::IsScalar(default_value_t.shape()), errors::InvalidArgument("default_value must be a scalar, saw: ", default_value_t.shape().DebugString()), done); // TODO(ebrevdo): add shape checks between values, indices, // dense_shape. Also add check that dense rank > 0. using FunctorType = functor::SparseFillEmptyRows<Device, T, Tindex>; OP_REQUIRES_OK_ASYNC(context, FunctorType()(context, default_value_t, indices_t, values_t, dense_shape_t, done), done); }
279
True
1
CVE-2021-29566
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/3f6fe4dfef6f57e768260b48166c27d148f3015f', 'name': 'https://github.com/tensorflow/tensorflow/commit/3f6fe4dfef6f57e768260b48166c27d148f3015f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-pvrc-hg3f-58r6', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-pvrc-hg3f-58r6', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can write outside the bounds of heap allocated arrays by passing invalid arguments to `tf.raw_ops.Dilation2DBackpropInput`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/afd954e65f15aea4d438d0a219136fc4a63a573d/tensorflow/core/kernels/dilation_ops.cc#L321-L322) does not validate before writing to the output array. The values for `h_out` and `w_out` are guaranteed to be in range for `out_backprop` (as they are loop indices bounded by the size of the array). However, there are no similar guarantees relating `h_in_max`/`w_in_max` and `in_backprop`. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-26T16:20Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-05-04 18:33:28-07:00
Add missing validations in dillation ops. PiperOrigin-RevId: 372037158 Change-Id: I4ee304c84a02550c030288a6534000b934fc1599
3f6fe4dfef6f57e768260b48166c27d148f3015f
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::DilationBackpropFilter<CPUDevice,T>::operator ( )
tensorflow::functor::DilationBackpropFilter<CPUDevice,T>::operator ( )( const CPUDevice & d , typename TTypes<T,4> :: ConstTensor input , typename TTypes<T,3> :: ConstTensor filter , typename TTypes<T,4> :: ConstTensor out_backprop , int stride_rows , int stride_cols , int rate_rows , int rate_cols , int pad_top , int pad_left , typename TTypes<T,3> :: Tensor filter_backprop)
['d', 'input', 'filter', 'out_backprop', 'stride_rows', 'stride_cols', 'rate_rows', 'rate_cols', 'pad_top', 'pad_left', 'filter_backprop']
void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input, typename TTypes<T, 3>::ConstTensor filter, typename TTypes<T, 4>::ConstTensor out_backprop, int stride_rows, int stride_cols, int rate_rows, int rate_cols, int pad_top, int pad_left, typename TTypes<T, 3>::Tensor filter_backprop) { const int batch = input.dimension(0); const int input_rows = input.dimension(1); const int input_cols = input.dimension(2); const int depth = input.dimension(3); const int filter_rows = filter.dimension(0); const int filter_cols = filter.dimension(1); const int output_rows = out_backprop.dimension(1); const int output_cols = out_backprop.dimension(2); // Initialize gradient with all zeros. filter_backprop.setZero(); // This is a reference implementation, likely to be slow. // TODO(gpapan): Write multi-threaded implementation. // In the case of multiple argmax branches, we only back-propagate along the // last branch, i.e., the one with largest value of `h * filter_cols + w`, // similarly to the max-pooling backward routines. for (int b = 0; b < batch; ++b) { for (int h_out = 0; h_out < output_rows; ++h_out) { int h_beg = h_out * stride_rows - pad_top; for (int w_out = 0; w_out < output_cols; ++w_out) { int w_beg = w_out * stride_cols - pad_left; for (int d = 0; d < depth; ++d) { T cur_val = Eigen::NumTraits<T>::lowest(); int h_max = 0; int w_max = 0; for (int h = 0; h < filter_rows; ++h) { const int h_in = h_beg + h * rate_rows; if (h_in >= 0 && h_in < input_rows) { for (int w = 0; w < filter_cols; ++w) { const int w_in = w_beg + w * rate_cols; if (w_in >= 0 && w_in < input_cols) { const T val = input(b, h_in, w_in, d) + filter(h, w, d); if (val > cur_val) { cur_val = val; h_max = h; w_max = w; } } } } } filter_backprop(h_max, w_max, d) += out_backprop(b, h_out, w_out, d); } } } } }
413
True
1
CVE-2021-29566
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/3f6fe4dfef6f57e768260b48166c27d148f3015f', 'name': 'https://github.com/tensorflow/tensorflow/commit/3f6fe4dfef6f57e768260b48166c27d148f3015f', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-pvrc-hg3f-58r6', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-pvrc-hg3f-58r6', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can write outside the bounds of heap allocated arrays by passing invalid arguments to `tf.raw_ops.Dilation2DBackpropInput`. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/afd954e65f15aea4d438d0a219136fc4a63a573d/tensorflow/core/kernels/dilation_ops.cc#L321-L322) does not validate before writing to the output array. The values for `h_out` and `w_out` are guaranteed to be in range for `out_backprop` (as they are loop indices bounded by the size of the array). However, there are no similar guarantees relating `h_in_max`/`w_in_max` and `in_backprop`. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-07-26T16:20Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-05-04 18:33:28-07:00
Add missing validations in dillation ops. PiperOrigin-RevId: 372037158 Change-Id: I4ee304c84a02550c030288a6534000b934fc1599
3f6fe4dfef6f57e768260b48166c27d148f3015f
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::functor::DilationBackpropInput<CPUDevice,T>::operator ( )
tensorflow::functor::DilationBackpropInput<CPUDevice,T>::operator ( )( const CPUDevice & d , typename TTypes<T,4> :: ConstTensor input , typename TTypes<T,3> :: ConstTensor filter , typename TTypes<T,4> :: ConstTensor out_backprop , int stride_rows , int stride_cols , int rate_rows , int rate_cols , int pad_top , int pad_left , typename TTypes<T,4> :: Tensor in_backprop)
['d', 'input', 'filter', 'out_backprop', 'stride_rows', 'stride_cols', 'rate_rows', 'rate_cols', 'pad_top', 'pad_left', 'in_backprop']
void operator()(const CPUDevice& d, typename TTypes<T, 4>::ConstTensor input, typename TTypes<T, 3>::ConstTensor filter, typename TTypes<T, 4>::ConstTensor out_backprop, int stride_rows, int stride_cols, int rate_rows, int rate_cols, int pad_top, int pad_left, typename TTypes<T, 4>::Tensor in_backprop) { const int batch = input.dimension(0); const int input_rows = input.dimension(1); const int input_cols = input.dimension(2); const int depth = input.dimension(3); const int filter_rows = filter.dimension(0); const int filter_cols = filter.dimension(1); const int output_rows = out_backprop.dimension(1); const int output_cols = out_backprop.dimension(2); // Initialize gradient with all zeros. in_backprop.setZero(); // This is a reference implementation, likely to be slow. // TODO(gpapan): Write multi-threaded implementation. // In the case of multiple argmax branches, we only back-propagate along the // last branch, i.e., the one with largest value of `h * filter_cols + w`, // similarly to the max-pooling backward routines. for (int b = 0; b < batch; ++b) { for (int h_out = 0; h_out < output_rows; ++h_out) { int h_beg = h_out * stride_rows - pad_top; for (int w_out = 0; w_out < output_cols; ++w_out) { int w_beg = w_out * stride_cols - pad_left; for (int d = 0; d < depth; ++d) { T cur_val = Eigen::NumTraits<T>::lowest(); int h_in_max = (h_beg < 0) ? 0 : h_beg; int w_in_max = (w_beg < 0) ? 0 : w_beg; for (int h = 0; h < filter_rows; ++h) { const int h_in = h_beg + h * rate_rows; if (h_in >= 0 && h_in < input_rows) { for (int w = 0; w < filter_cols; ++w) { const int w_in = w_beg + w * rate_cols; if (w_in >= 0 && w_in < input_cols) { const T val = input(b, h_in, w_in, d) + filter(h, w, d); if (val > cur_val) { cur_val = val; h_in_max = h_in; w_in_max = w_in; } } } } } in_backprop(b, h_in_max, w_in_max, d) += out_backprop(b, h_out, w_out, d); } } } } }
431
True
1
CVE-2021-29579
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-79fv-9865-4qcv', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-79fv-9865-4qcv', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/a74768f8e4efbda4def9f16ee7e13cf3922ac5f7', 'name': 'https://github.com/tensorflow/tensorflow/commit/a74768f8e4efbda4def9f16ee7e13cf3922ac5f7', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The implementation of `tf.raw_ops.MaxPoolGrad` is vulnerable to a heap buffer overflow. The implementation(https://github.com/tensorflow/tensorflow/blob/ab1e644b48c82cb71493f4362b4dd38f4577a1cf/tensorflow/core/kernels/maxpooling_op.cc#L194-L203) fails to validate that indices used to access elements of input/output arrays are valid. Whereas accesses to `input_backprop_flat` are guarded by `FastBoundsCheck`, the indexing in `out_backprop_flat` can result in OOB access. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T15:43Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-05-06 14:24:09-07:00
Prevent heap OOB error in `MaxPoolGrad` PiperOrigin-RevId: 372424854 Change-Id: Idac0f23867ad8b0601cafbaaa52d5e64269e63a7
a74768f8e4efbda4def9f16ee7e13cf3922ac5f7
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SpatialMaxPoolWithArgMaxHelper
tensorflow::SpatialMaxPoolWithArgMaxHelper( OpKernelContext * context , Tensor * output , Tensor * output_arg_max , Tensor * input_backprop , const Tensor & tensor_in , const Tensor & out_backprop , const PoolParameters & params , const bool include_batch_in_index)
['context', 'output', 'output_arg_max', 'input_backprop', 'tensor_in', 'out_backprop', 'params', 'include_batch_in_index']
static void SpatialMaxPoolWithArgMaxHelper( OpKernelContext* context, Tensor* output, Tensor* output_arg_max, Tensor* input_backprop, const Tensor& tensor_in, const Tensor& out_backprop, const PoolParameters& params, const bool include_batch_in_index) { if (input_backprop != nullptr) { OP_REQUIRES( context, include_batch_in_index, errors::Internal( "SpatialMaxPoolWithArgMaxHelper requires include_batch_in_index " "to be True when input_backprop != nullptr")); OP_REQUIRES( context, (std::is_same<Targmax, int64>::value), errors::Internal("SpatialMaxPoolWithArgMaxHelper requires Targmax " "to be int64 when input_backprop != nullptr")); } typedef Eigen::Map<const Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> ConstEigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<T, Eigen::Dynamic, Eigen::Dynamic>> EigenMatrixMap; typedef Eigen::Map<Eigen::Matrix<Targmax, Eigen::Dynamic, Eigen::Dynamic>> EigenIndexMatrixMap; ConstEigenMatrixMap in_mat( tensor_in.flat<T>().data(), params.depth, params.tensor_in_cols * params.tensor_in_rows * params.tensor_in_batch); EigenMatrixMap out_mat( output->flat<T>().data(), params.depth, params.out_width * params.out_height * params.tensor_in_batch); EigenIndexMatrixMap out_arg_max_mat( output_arg_max->flat<Targmax>().data(), params.depth, params.out_width * params.out_height * params.tensor_in_batch); const DeviceBase::CpuWorkerThreads& worker_threads = *(context->device()->tensorflow_cpu_worker_threads()); // The following code basically does the following: // 1. Flattens the input and output tensors into two dimensional arrays. // tensor_in_as_matrix: // depth by (tensor_in_cols * tensor_in_rows * tensor_in_batch) // output_as_matrix: // depth by (out_width * out_height * tensor_in_batch) // // 2. Walks through the set of columns in the flattened tensor_in_as_matrix, // and updates the corresponding column(s) in output_as_matrix with the // max value. 
auto shard = [&params, &in_mat, &out_mat, &out_arg_max_mat, &input_backprop, &output_arg_max, &out_backprop, include_batch_in_index](int64 start, int64 limit) { const int32 depth = params.depth; const int32 in_rows = params.tensor_in_rows; const int32 in_cols = params.tensor_in_cols; const int32 pad_top = params.pad_top; const int32 pad_left = params.pad_left; const int32 window_rows = params.window_rows; const int32 window_cols = params.window_cols; const int32 row_stride = params.row_stride; const int32 col_stride = params.col_stride; const int32 out_height = params.out_height; const int32 out_width = params.out_width; { // Initializes the output tensor with MIN<T>. const int32 output_image_size = out_height * out_width * depth; EigenMatrixMap out_shard(out_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); out_shard.setConstant(Eigen::NumTraits<T>::lowest()); EigenIndexMatrixMap out_arg_max_shard( out_arg_max_mat.data() + start * output_image_size, 1, (limit - start) * output_image_size); out_arg_max_shard.setConstant(kInvalidMaxPoolingIndex); } for (int64 b = start; b < limit; ++b) { for (int h = 0; h < in_rows; ++h) { for (int w = 0; w < in_cols; ++w) { // (h_start, h_end) * (w_start, w_end) is the range that the input // vector projects to. const int hpad = h + pad_top; const int wpad = w + pad_left; const int h_start = (hpad < window_rows) ? 0 : (hpad - window_rows) / row_stride + 1; const int h_end = std::min(hpad / row_stride + 1, out_height); const int w_start = (wpad < window_cols) ? 
0 : (wpad - window_cols) / col_stride + 1; const int w_end = std::min(wpad / col_stride + 1, out_width); // compute elementwise max const int64 in_index = (b * in_rows + h) * in_cols + w; for (int ph = h_start; ph < h_end; ++ph) { const int64 out_index_base = (b * out_height + ph) * out_width; for (int pw = w_start; pw < w_end; ++pw) { const int64 out_index = out_index_base + pw; /// NOTES(zhengxq): not using the eigen matrix operation for /// now. for (int d = 0; d < depth; ++d) { const T& input_ref = in_mat.coeffRef(d, in_index); T& output_ref = out_mat.coeffRef(d, out_index); Targmax& out_arg_max_ref = out_arg_max_mat.coeffRef(d, out_index); if (output_ref < input_ref || out_arg_max_ref == kInvalidMaxPoolingIndex) { output_ref = input_ref; if (include_batch_in_index) { out_arg_max_ref = in_index * depth + d; } else { out_arg_max_ref = (h * in_cols + w) * depth + d; } } } } } } } } if (input_backprop != nullptr) { auto input_backprop_flat = input_backprop->flat<T>(); auto out_arg_max_flat = output_arg_max->flat<int64>(); auto out_backprop_flat = out_backprop.flat<T>(); // Initialize output to 0. const int64 in_size = in_rows * in_cols * depth; const int64 in_start = start * in_size; const int64 in_end = limit * in_size; EigenMatrixMap in_shard(input_backprop_flat.data() + in_start, 1, in_end - in_start); in_shard.setConstant(T(0)); // Backpropagate. const int out_size = out_height * out_width * depth; const int out_start = start * out_size; const int out_end = limit * out_size; for (int index = out_start; index < out_end; ++index) { int input_backprop_index = out_arg_max_flat(index); // Although this check is in the inner loop, it is worth its value // so we don't end up with memory corruptions. 
Our benchmark shows that // the performance impact is quite small // CHECK(input_backprop_index >= in_start && input_backprop_index < // in_end) FastBoundsCheck(input_backprop_index - in_start, in_end - in_start); input_backprop_flat(input_backprop_index) += out_backprop_flat(index); } } }; const int64 shard_cost = params.tensor_in_rows * params.tensor_in_cols * params.depth * params.window_rows * params.window_cols; Shard(worker_threads.num_threads, worker_threads.workers, params.tensor_in_batch, shard_cost, shard); }
1009
True
1
CVE-2021-29584
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xvjm-fvxx-q3hv', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-xvjm-fvxx-q3hv', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/4c0ee937c0f61c4fc5f5d32d9bb4c67428012a60', 'name': 'https://github.com/tensorflow/tensorflow/commit/4c0ee937c0f61c4fc5f5d32d9bb4c67428012a60', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-190'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. An attacker can trigger a denial of service via a `CHECK`-fail in caused by an integer overflow in constructing a new tensor shape. This is because the implementation(https://github.com/tensorflow/tensorflow/blob/0908c2f2397c099338b901b067f6495a5b96760b/tensorflow/core/kernels/sparse_split_op.cc#L66-L70) builds a dense shape without checking that the dimensions would not result in overflow. The `TensorShape` constructor(https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/framework/tensor_shape.cc#L183-L188) uses a `CHECK` operation which triggers when `InitDims`(https://github.com/tensorflow/tensorflow/blob/6f9896890c4c703ae0a0845394086e2e1e523299/tensorflow/core/framework/tensor_shape.cc#L212-L296) returns a non-OK status. This is a legacy implementation of the constructor and operations should use `BuildTensorShapeBase` or `AddDimWithStatus` to prevent `CHECK`-failures in the presence of overflows. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T15:38Z
2021-05-14T20:15Z
Integer Overflow or Wraparound
The software performs a calculation that can produce an integer overflow or wraparound, when the logic assumes that the resulting value will always be larger than the original value. This can introduce other weaknesses when the calculation is used for resource management or execution control.
An integer overflow or wraparound occurs when an integer value is incremented to a value that is too large to store in the associated representation. When this occurs, the value may wrap to become a very small or negative number. While this may be intended behavior in circumstances that rely on wrapping, it can have security consequences if the wrap is unexpected. This is especially the case if the integer overflow can be triggered using user-supplied inputs. This becomes security-critical when the result is used to control looping, make a security decision, or determine the offset or size in behaviors such as memory allocation, copying, concatenation, etc.
https://cwe.mitre.org/data/definitions/190.html
0
Mihai Maruseac
2021-05-06 15:55:00-07:00
Prevent overflow in sparse op PiperOrigin-RevId: 372442006 Change-Id: I60fe31cd7e56fb3501e97c63500caf902ddeee96
4c0ee937c0f61c4fc5f5d32d9bb4c67428012a60
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseSplitOp::Compute
tensorflow::SparseSplitOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const int64 axis_input = context->input(0).scalar<int64>()(); const Tensor& input_indices = context->input(1); const Tensor& input_values = context->input(2); const Tensor& input_shape = context->input(3); OP_REQUIRES(context, TensorShapeUtils::IsMatrix(input_indices.shape()), errors::InvalidArgument( "Input indices should be a matrix but received shape ", input_indices.shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(input_values.shape()), errors::InvalidArgument( "Input values should be a vector but received shape ", input_indices.shape().DebugString())); OP_REQUIRES(context, TensorShapeUtils::IsVector(input_shape.shape()), errors::InvalidArgument( "Input shape should be a vector but received shape ", input_shape.shape().DebugString())); const int64 input_rank = input_shape.vec<int64>().size(); const int64 axis = (axis_input < 0) ? input_rank + axis_input : axis_input; OP_REQUIRES( context, axis >= 0 && axis < input_rank, errors::InvalidArgument("Input axis should be in range [", -input_rank, ", ", input_rank, "), got ", axis_input)); OP_REQUIRES(context, num_split_ >= 1 && num_split_ <= input_shape.vec<int64>()(axis), errors::InvalidArgument("Input num_split should be between 1 " "and the splitting dimension size (", input_shape.vec<int64>()(axis), "), got ", num_split_)); sparse::SparseTensor sparse_tensor; OP_REQUIRES_OK(context, sparse::SparseTensor::Create( input_indices, input_values, TensorShape(input_shape.vec<int64>()), &sparse_tensor)); std::vector<sparse::SparseTensor> outputs; OP_REQUIRES_OK(context, sparse::SparseTensor::Split<T>( sparse_tensor, axis, num_split_, &outputs)); for (int slice_index = 0; slice_index < num_split_; ++slice_index) { context->set_output(slice_index, outputs[slice_index].indices()); context->set_output(slice_index + num_split_, outputs[slice_index].values()); Tensor* shape = nullptr; OP_REQUIRES_OK(context, context->allocate_output( slice_index + 2 * 
num_split_, {outputs[slice_index].dims()}, &shape)); auto output_shape = outputs[slice_index].shape(); for (int dim = 0; dim < outputs[slice_index].dims(); ++dim) { shape->vec<int64>()(dim) = output_shape[dim]; } } }
483
True
1
CVE-2021-29583
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/6972f9dfe325636b3db4e0bc517ee22a159365c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/6972f9dfe325636b3db4e0bc517ee22a159365c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9xh4-23q4-v6wr', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9xh4-23q4-v6wr', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The implementation of `tf.raw_ops.FusedBatchNorm` is vulnerable to a heap buffer overflow. If the tensors are empty, the same implementation can trigger undefined behavior by dereferencing null pointers. The implementation(https://github.com/tensorflow/tensorflow/blob/57d86e0db5d1365f19adcce848dfc1bf89fdd4c7/tensorflow/core/kernels/fused_batch_norm_op.cc) fails to validate that `scale`, `offset`, `mean` and `variance` (the last two only when required) all have the same number of elements as the number of channels of `x`. This results in heap out of bounds reads when the buffers backing these tensors are indexed past their boundary. If the tensors are empty, the validation mentioned in the above paragraph would also trigger and prevent the undefined behavior. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2022-04-25T20:09Z
2021-05-14T20:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Mihai Maruseac
2021-05-06 17:45:51-07:00
Add missing valuidation to FusedBatchNorm. PiperOrigin-RevId: 372460336 Change-Id: Ic8c4e4de67c58a741bd87f2e182bed07247d1126
6972f9dfe325636b3db4e0bc517ee22a159365c0
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::FusedBatchNormOpBase::ComputeWithReservedSpace
tensorflow::FusedBatchNormOpBase::ComputeWithReservedSpace( OpKernelContext * context , bool use_reserved_space)
['context', 'use_reserved_space']
virtual void ComputeWithReservedSpace(OpKernelContext* context, bool use_reserved_space) { Tensor x = context->input(0); const Tensor& scale = context->input(1); const Tensor& offset = context->input(2); const Tensor& estimated_mean = context->input(3); const Tensor& estimated_variance = context->input(4); const Tensor* side_input = has_side_input_ ? &context->input(5) : nullptr; OP_REQUIRES(context, x.dims() == 4 || x.dims() == 5, errors::InvalidArgument("input must be 4 or 5-dimensional", x.shape().DebugString())); OP_REQUIRES(context, scale.dims() == 1, errors::InvalidArgument("scale must be 1-dimensional", scale.shape().DebugString())); OP_REQUIRES(context, offset.dims() == 1, errors::InvalidArgument("offset must be 1-dimensional", offset.shape().DebugString())); OP_REQUIRES(context, estimated_mean.dims() == 1, errors::InvalidArgument("estimated_mean must be 1-dimensional", estimated_mean.shape().DebugString())); OP_REQUIRES( context, estimated_variance.dims() == 1, errors::InvalidArgument("estimated_variance must be 1-dimensional", estimated_variance.shape().DebugString())); bool use_reshape = (x.dims() == 5); auto x_shape = x.shape(); TensorShape dest_shape; if (use_reshape) { const int64 in_batch = GetTensorDim(x, tensor_format_, 'N'); int64 in_planes = GetTensorDim(x, tensor_format_, '0'); int64 in_rows = GetTensorDim(x, tensor_format_, '1'); int64 in_cols = GetTensorDim(x, tensor_format_, '2'); const int64 in_depth = GetTensorDim(x, tensor_format_, 'C'); dest_shape = ShapeFromFormat(tensor_format_, in_batch, {{in_planes, in_rows * in_cols}}, in_depth); OP_REQUIRES(context, x.CopyFrom(x, dest_shape), errors::InvalidArgument("Error during tensor copy.")); } if (has_side_input_) { OP_REQUIRES(context, side_input->shape() == x.shape(), errors::InvalidArgument( "side_input shape must be equal to input shape: ", side_input->shape().DebugString(), " != ", x.shape().DebugString())); } if (activation_mode_ != FbnActivationMode::kIdentity) { // NOTE(ezhulenev): This 
requirement is coming from implementation // details of cudnnBatchNormalizationForwardTrainingEx. OP_REQUIRES( context, !is_training_ || x.dim_size(3) % 4 == 0, errors::InvalidArgument("FusedBatchNorm with activation requires " "channel dimension to be a multiple of 4.")); } Tensor* y = nullptr; auto alloc_shape = use_reshape ? dest_shape : x_shape; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, alloc_shape, &y)); Tensor* batch_mean = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {3}, 1, scale.shape(), &batch_mean)); Tensor* batch_var = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {4}, 2, scale.shape(), &batch_var)); Tensor* saved_mean = nullptr; OP_REQUIRES_OK(context, context->allocate_output(3, scale.shape(), &saved_mean)); Tensor* saved_maybe_inv_var = nullptr; OP_REQUIRES_OK(context, context->allocate_output(4, scale.shape(), &saved_maybe_inv_var)); if (is_training_) { functor::FusedBatchNorm<Device, T, U, true>()( context, x, scale, offset, estimated_mean, estimated_variance, side_input, epsilon_, exponential_avg_factor_, activation_mode_, y, batch_mean, batch_var, saved_mean, saved_maybe_inv_var, tensor_format_, use_reserved_space); } else { functor::FusedBatchNorm<Device, T, U, false>()( context, x, scale, offset, estimated_mean, estimated_variance, side_input, epsilon_, exponential_avg_factor_, activation_mode_, y, batch_mean, batch_var, saved_mean, saved_maybe_inv_var, tensor_format_, use_reserved_space); } if (use_reshape) { OP_REQUIRES(context, y->CopyFrom(*y, x_shape), errors::InvalidArgument("Error during tensor copy.")); } }
757
True
1
CVE-2021-29583
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/6972f9dfe325636b3db4e0bc517ee22a159365c0', 'name': 'https://github.com/tensorflow/tensorflow/commit/6972f9dfe325636b3db4e0bc517ee22a159365c0', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9xh4-23q4-v6wr', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-9xh4-23q4-v6wr', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}, {'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. The implementation of `tf.raw_ops.FusedBatchNorm` is vulnerable to a heap buffer overflow. If the tensors are empty, the same implementation can trigger undefined behavior by dereferencing null pointers. The implementation(https://github.com/tensorflow/tensorflow/blob/57d86e0db5d1365f19adcce848dfc1bf89fdd4c7/tensorflow/core/kernels/fused_batch_norm_op.cc) fails to validate that `scale`, `offset`, `mean` and `variance` (the last two only when required) all have the same number of elements as the number of channels of `x`. This results in heap out of bounds reads when the buffers backing these tensors are indexed past their boundary. If the tensors are empty, the validation mentioned in the above paragraph would also trigger and prevent the undefined behavior. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2022-04-25T20:09Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2021-05-06 17:45:51-07:00
Add missing valuidation to FusedBatchNorm. PiperOrigin-RevId: 372460336 Change-Id: Ic8c4e4de67c58a741bd87f2e182bed07247d1126
6972f9dfe325636b3db4e0bc517ee22a159365c0
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::FusedBatchNormOpBase::ComputeWithReservedSpace
tensorflow::FusedBatchNormOpBase::ComputeWithReservedSpace( OpKernelContext * context , bool use_reserved_space)
['context', 'use_reserved_space']
virtual void ComputeWithReservedSpace(OpKernelContext* context, bool use_reserved_space) { Tensor x = context->input(0); const Tensor& scale = context->input(1); const Tensor& offset = context->input(2); const Tensor& estimated_mean = context->input(3); const Tensor& estimated_variance = context->input(4); const Tensor* side_input = has_side_input_ ? &context->input(5) : nullptr; OP_REQUIRES(context, x.dims() == 4 || x.dims() == 5, errors::InvalidArgument("input must be 4 or 5-dimensional", x.shape().DebugString())); OP_REQUIRES(context, scale.dims() == 1, errors::InvalidArgument("scale must be 1-dimensional", scale.shape().DebugString())); OP_REQUIRES(context, offset.dims() == 1, errors::InvalidArgument("offset must be 1-dimensional", offset.shape().DebugString())); OP_REQUIRES(context, estimated_mean.dims() == 1, errors::InvalidArgument("estimated_mean must be 1-dimensional", estimated_mean.shape().DebugString())); OP_REQUIRES( context, estimated_variance.dims() == 1, errors::InvalidArgument("estimated_variance must be 1-dimensional", estimated_variance.shape().DebugString())); bool use_reshape = (x.dims() == 5); auto x_shape = x.shape(); TensorShape dest_shape; if (use_reshape) { const int64 in_batch = GetTensorDim(x, tensor_format_, 'N'); int64 in_planes = GetTensorDim(x, tensor_format_, '0'); int64 in_rows = GetTensorDim(x, tensor_format_, '1'); int64 in_cols = GetTensorDim(x, tensor_format_, '2'); const int64 in_depth = GetTensorDim(x, tensor_format_, 'C'); dest_shape = ShapeFromFormat(tensor_format_, in_batch, {{in_planes, in_rows * in_cols}}, in_depth); OP_REQUIRES(context, x.CopyFrom(x, dest_shape), errors::InvalidArgument("Error during tensor copy.")); } if (has_side_input_) { OP_REQUIRES(context, side_input->shape() == x.shape(), errors::InvalidArgument( "side_input shape must be equal to input shape: ", side_input->shape().DebugString(), " != ", x.shape().DebugString())); } if (activation_mode_ != FbnActivationMode::kIdentity) { // NOTE(ezhulenev): This 
requirement is coming from implementation // details of cudnnBatchNormalizationForwardTrainingEx. OP_REQUIRES( context, !is_training_ || x.dim_size(3) % 4 == 0, errors::InvalidArgument("FusedBatchNorm with activation requires " "channel dimension to be a multiple of 4.")); } Tensor* y = nullptr; auto alloc_shape = use_reshape ? dest_shape : x_shape; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {0}, 0, alloc_shape, &y)); Tensor* batch_mean = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {3}, 1, scale.shape(), &batch_mean)); Tensor* batch_var = nullptr; OP_REQUIRES_OK(context, context->forward_input_or_allocate_output( {4}, 2, scale.shape(), &batch_var)); Tensor* saved_mean = nullptr; OP_REQUIRES_OK(context, context->allocate_output(3, scale.shape(), &saved_mean)); Tensor* saved_maybe_inv_var = nullptr; OP_REQUIRES_OK(context, context->allocate_output(4, scale.shape(), &saved_maybe_inv_var)); if (is_training_) { functor::FusedBatchNorm<Device, T, U, true>()( context, x, scale, offset, estimated_mean, estimated_variance, side_input, epsilon_, exponential_avg_factor_, activation_mode_, y, batch_mean, batch_var, saved_mean, saved_maybe_inv_var, tensor_format_, use_reserved_space); } else { functor::FusedBatchNorm<Device, T, U, false>()( context, x, scale, offset, estimated_mean, estimated_variance, side_input, epsilon_, exponential_avg_factor_, activation_mode_, y, batch_mean, batch_var, saved_mean, saved_maybe_inv_var, tensor_format_, use_reserved_space); } if (use_reshape) { OP_REQUIRES(context, y->CopyFrom(*y, x_shape), errors::InvalidArgument("Error during tensor copy.")); } }
757
True
1
CVE-2021-29608
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'name': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'name': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'name': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-131'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. Due to lack of validation in `tf.raw_ops.RaggedTensorToTensor`, an attacker can exploit an undefined behavior if input arguments are empty. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc#L356-L360) only checks that one of the tensors is not empty, but does not check for the other ones. There are multiple `DCHECK` validations to prevent heap OOB, but these are no-op in release builds, hence they don't prevent anything. The fix will be included in TensorFlow 2.5.0. We will also cherrypick these commits on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-07-26T16:23Z
2021-05-14T20:15Z
Incorrect Calculation of Buffer Size
The software does not correctly calculate the size to be used when allocating a buffer, which could lead to a buffer overflow.
https://cwe.mitre.org/data/definitions/131.html
0
Mihai Maruseac
2021-05-11 15:22:49-07:00
Fix heap OOB / undefined behavior in `RaggedTensorToTensor` PiperOrigin-RevId: 373244623 Change-Id: I2d6cbbc8c67b238a8815bf58097f7586d87c54f2
c4d7afb6a5986b04505aca4466ae1951686c80f6
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndex
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndex( OpKernelContext * context , int dimension , const vector<INDEX_TYPE> & parent_output_index , INDEX_TYPE output_index_multiplier , INDEX_TYPE output_size , vector<INDEX_TYPE> * result)
['context', 'dimension', 'parent_output_index', 'output_index_multiplier', 'output_size', 'result']
Status CalculateOutputIndex(OpKernelContext* context, int dimension, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const RowPartitionTensor row_partition_tensor = GetRowPartitionTensor(context, dimension); auto partition_type = GetRowPartitionTypeByDimension(dimension); switch (partition_type) { case RowPartitionType::VALUE_ROWIDS: CalculateOutputIndexValueRowID( context, row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); return tensorflow::Status::OK(); case RowPartitionType::ROW_SPLITS: if (row_partition_tensor.size() - 1 > parent_output_index.size()) { return errors::InvalidArgument( "Row partition size is greater than output size: ", row_partition_tensor.size() - 1, " > ", parent_output_index.size()); } CalculateOutputIndexRowSplit( context, row_partition_tensor, parent_output_index, output_index_multiplier, output_size, result); return tensorflow::Status::OK(); default: return errors::InvalidArgument( "Unsupported partition type:", RowPartitionTypeToString(partition_type)); } }
172
True
1
CVE-2021-29608
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'name': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'name': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'name': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-131'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. Due to lack of validation in `tf.raw_ops.RaggedTensorToTensor`, an attacker can exploit an undefined behavior if input arguments are empty. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc#L356-L360) only checks that one of the tensors is not empty, but does not check for the other ones. There are multiple `DCHECK` validations to prevent heap OOB, but these are no-op in release builds, hence they don't prevent anything. The fix will be included in TensorFlow 2.5.0. We will also cherrypick these commits on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-07-26T16:23Z
2021-05-14T20:15Z
Incorrect Calculation of Buffer Size
The software does not correctly calculate the size to be used when allocating a buffer, which could lead to a buffer overflow.
https://cwe.mitre.org/data/definitions/131.html
0
Mihai Maruseac
2021-05-11 15:22:49-07:00
Fix heap OOB / undefined behavior in `RaggedTensorToTensor` PiperOrigin-RevId: 373244623 Change-Id: I2d6cbbc8c67b238a8815bf58097f7586d87c54f2
c4d7afb6a5986b04505aca4466ae1951686c80f6
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndexRowSplit
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndexRowSplit( OpKernelContext * context , const RowPartitionTensor & row_split , const vector<INDEX_TYPE> & parent_output_index , INDEX_TYPE output_index_multiplier , INDEX_TYPE output_size , vector<INDEX_TYPE> * result)
['context', 'row_split', 'parent_output_index', 'output_index_multiplier', 'output_size', 'result']
void CalculateOutputIndexRowSplit( OpKernelContext* context, const RowPartitionTensor& row_split, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { INDEX_TYPE row_split_size = row_split.size(); if (row_split_size > 0) { result->reserve(row_split(row_split_size - 1)); } for (INDEX_TYPE i = 0; i < row_split_size - 1; ++i) { INDEX_TYPE row_length = row_split(i + 1) - row_split(i); INDEX_TYPE real_length = std::min(output_size, row_length); INDEX_TYPE parent_output_index_current = parent_output_index[i]; if (parent_output_index_current == -1) { real_length = 0; } for (INDEX_TYPE j = 0; j < real_length; ++j) { result->push_back(parent_output_index_current); parent_output_index_current += output_index_multiplier; } for (INDEX_TYPE j = 0; j < row_length - real_length; ++j) { result->push_back(-1); } } if (row_split_size > 0) { OP_REQUIRES(context, result->size() == row_split(row_split_size - 1), errors::InvalidArgument("Invalid row split size.")); } }
215
True
1
CVE-2021-29608
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'name': 'https://github.com/tensorflow/tensorflow/commit/c4d7afb6a5986b04505aca4466ae1951686c80f6', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'name': 'https://github.com/tensorflow/tensorflow/commit/f94ef358bb3e91d517446454edff6535bcfe8e4a', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-rgvq-pcvf-hx75', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'name': 'https://github.com/tensorflow/tensorflow/commit/b761c9b652af2107cfbc33efd19be0ce41daa33e', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-131'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': "TensorFlow is an end-to-end open source platform for machine learning. Due to lack of validation in `tf.raw_ops.RaggedTensorToTensor`, an attacker can exploit an undefined behavior if input arguments are empty. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/ragged_tensor_to_tensor_op.cc#L356-L360) only checks that one of the tensors is not empty, but does not check for the other ones. There are multiple `DCHECK` validations to prevent heap OOB, but these are no-op in release builds, hence they don't prevent anything. The fix will be included in TensorFlow 2.5.0. We will also cherrypick these commits on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range."}]
2021-07-26T16:23Z
2021-05-14T20:15Z
Incorrect Calculation of Buffer Size
The software does not correctly calculate the size to be used when allocating a buffer, which could lead to a buffer overflow.
https://cwe.mitre.org/data/definitions/131.html
0
Mihai Maruseac
2021-05-11 15:22:49-07:00
Fix heap OOB / undefined behavior in `RaggedTensorToTensor` PiperOrigin-RevId: 373244623 Change-Id: I2d6cbbc8c67b238a8815bf58097f7586d87c54f2
c4d7afb6a5986b04505aca4466ae1951686c80f6
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndexValueRowID
tensorflow::RaggedTensorToTensorBaseOp::CalculateOutputIndexValueRowID( OpKernelContext * context , const RowPartitionTensor & value_rowids , const vector<INDEX_TYPE> & parent_output_index , INDEX_TYPE output_index_multiplier , INDEX_TYPE output_size , vector<INDEX_TYPE> * result)
['context', 'value_rowids', 'parent_output_index', 'output_index_multiplier', 'output_size', 'result']
void CalculateOutputIndexValueRowID( OpKernelContext* context, const RowPartitionTensor& value_rowids, const vector<INDEX_TYPE>& parent_output_index, INDEX_TYPE output_index_multiplier, INDEX_TYPE output_size, vector<INDEX_TYPE>* result) { const INDEX_TYPE index_size = value_rowids.size(); result->reserve(index_size); if (index_size == 0) { return; } INDEX_TYPE current_output_column = 0; INDEX_TYPE current_value_rowid = value_rowids(0); DCHECK_LT(current_value_rowid, parent_output_index.size()); INDEX_TYPE current_output_index = parent_output_index[current_value_rowid]; result->push_back(current_output_index); for (INDEX_TYPE i = 1; i < index_size; ++i) { INDEX_TYPE next_value_rowid = value_rowids(i); if (next_value_rowid == current_value_rowid) { if (current_output_index >= 0) { ++current_output_column; if (current_output_column < output_size) { current_output_index += output_index_multiplier; } else { current_output_index = -1; } } } else { current_output_column = 0; current_value_rowid = next_value_rowid; DCHECK_LT(next_value_rowid, parent_output_index.size()); current_output_index = parent_output_index[next_value_rowid]; } result->push_back(current_output_index); } OP_REQUIRES(context, result->size() == value_rowids.size(), errors::InvalidArgument("Invalid row ids.")); }
223
True
1
CVE-2021-29609
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/41727ff06111117bdf86b37db198217fd7a143cc', 'name': 'https://github.com/tensorflow/tensorflow/commit/41727ff06111117bdf86b37db198217fd7a143cc', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cjc7-49v2-jp64', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cjc7-49v2-jp64', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/6fd02f44810754ae7481838b6a67c5df7f909ca3', 'name': 'https://github.com/tensorflow/tensorflow/commit/6fd02f44810754ae7481838b6a67c5df7f909ca3', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}, {'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. Incomplete validation in `SparseAdd` results in allowing attackers to exploit undefined behavior (dereferencing null pointers) as well as write outside of bounds of heap allocated data. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/sparse_add_op.cc) has a large set of validation for the two sparse tensor inputs (6 tensors in total), but does not validate that the tensors are not empty or that the second dimension of `*_indices` matches the size of corresponding `*_shape`. This allows attackers to send tensor triples that represent invalid sparse tensors to abuse code assumptions that are not protected by validation. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T16:45Z
2021-05-14T20:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
Mihai Maruseac
2021-05-11 15:41:51-07:00
Validate that a and b are proper sparse tensors PiperOrigin-RevId: 373248068 Change-Id: I0a2041a0747901b3f00387a6a3bce9bca6b0b3b1
41727ff06111117bdf86b37db198217fd7a143cc
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseAddOp::Compute
tensorflow::SparseAddOp::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext *ctx) override { // (0) validations const Tensor *a_indices, *b_indices, *a_values_t, *b_values_t, *a_shape, *b_shape, *thresh_t; OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices)); OP_REQUIRES_OK(ctx, ctx->input("b_indices", &b_indices)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a_indices->shape()) && TensorShapeUtils::IsMatrix(b_indices->shape()), errors::InvalidArgument( "Input indices should be matrices but received shapes: ", a_indices->shape().DebugString(), " and ", b_indices->shape().DebugString())); const int64 a_nnz = a_indices->dim_size(0); const int64 b_nnz = b_indices->dim_size(0); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t)); OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_values_t->shape()) && TensorShapeUtils::IsVector(b_values_t->shape()), errors::InvalidArgument( "Input values should be vectors but received shapes: ", a_values_t->shape().DebugString(), " and ", b_values_t->shape().DebugString())); auto a_values = ctx->input(1).vec<T>(); auto b_values = ctx->input(4).vec<T>(); OP_REQUIRES( ctx, a_values.size() == a_nnz && b_values.size() == b_nnz, errors::InvalidArgument("Expected ", a_nnz, " and ", b_nnz, " non-empty input values, got ", a_values.size(), " and ", b_values.size())); OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape)); OP_REQUIRES_OK(ctx, ctx->input("b_shape", &b_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape->shape()) && TensorShapeUtils::IsVector(b_shape->shape()), errors::InvalidArgument( "Input shapes should be a vector but received shapes ", a_shape->shape().DebugString(), " and ", b_shape->shape().DebugString())); OP_REQUIRES( ctx, a_shape->IsSameSize(*b_shape), errors::InvalidArgument( "Operands do not have the same ranks; got shapes: ", a_shape->SummarizeValue(10), " and ", b_shape->SummarizeValue(10))); const auto a_shape_flat = a_shape->flat<int64>(); const auto b_shape_flat = b_shape->flat<int64>(); for 
(int i = 0; i < a_shape->NumElements(); ++i) { OP_REQUIRES(ctx, a_shape_flat(i) == b_shape_flat(i), errors::InvalidArgument( "Operands' shapes do not match: got ", a_shape_flat(i), " and ", b_shape_flat(i), " for dimension ", i)); } OP_REQUIRES_OK(ctx, ctx->input("thresh", &thresh_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(thresh_t->shape()), errors::InvalidArgument( "The magnitude threshold must be a scalar: got shape ", thresh_t->shape().DebugString())); // std::abs() so that it works for complex{64,128} values as well const Treal thresh = thresh_t->scalar<Treal>()(); // (1) do a pass over inputs, and append values and indices to vectors auto a_indices_mat = a_indices->matrix<int64>(); auto b_indices_mat = b_indices->matrix<int64>(); std::vector<std::pair<bool, int64>> entries_to_copy; // from_a?, idx entries_to_copy.reserve(a_nnz + b_nnz); std::vector<T> out_values; const int num_dims = a_shape->dim_size(0); OP_REQUIRES(ctx, num_dims > 0, errors::InvalidArgument("Invalid input_a shape. Received: ", a_shape->DebugString())); // The input and output sparse tensors are assumed to be ordered along // increasing dimension number. 
int64 i = 0, j = 0; T s; while (i < a_nnz && j < b_nnz) { switch (sparse::DimComparator::cmp(a_indices_mat, b_indices_mat, i, j, num_dims)) { case -1: entries_to_copy.emplace_back(true, i); out_values.push_back(a_values(i)); ++i; break; case 0: s = a_values(i) + b_values(j); if (thresh <= std::abs(s)) { entries_to_copy.emplace_back(true, i); out_values.push_back(s); } ++i; ++j; break; case 1: entries_to_copy.emplace_back(false, j); out_values.push_back(b_values(j)); ++j; break; } } #define HANDLE_LEFTOVERS(A_OR_B, IDX, IS_A) \ while (IDX < A_OR_B##_nnz) { \ entries_to_copy.emplace_back(IS_A, IDX); \ out_values.push_back(A_OR_B##_values(IDX)); \ ++IDX; \ } // at most one of these calls appends new values HANDLE_LEFTOVERS(a, i, true); HANDLE_LEFTOVERS(b, j, false); #undef HANDLE_LEFTOVERS // (2) allocate and fill output tensors const int64 sum_nnz = out_values.size(); Tensor *out_indices_t, *out_values_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({sum_nnz, num_dims}), &out_indices_t)); OP_REQUIRES_OK( ctx, ctx->allocate_output(1, TensorShape({sum_nnz}), &out_values_t)); auto out_indices_mat = out_indices_t->matrix<int64>(); auto out_values_flat = out_values_t->vec<T>(); for (i = 0; i < sum_nnz; ++i) { const bool from_a = entries_to_copy[i].first; const int64 idx = entries_to_copy[i].second; out_indices_mat.chip<0>(i) = from_a ? a_indices_mat.chip<0>(idx) : b_indices_mat.chip<0>(idx); } if (sum_nnz > 0) { std::copy_n(out_values.begin(), sum_nnz, &out_values_flat(0)); } ctx->set_output(2, *a_shape); }
1031
True
1
CVE-2021-29609
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/41727ff06111117bdf86b37db198217fd7a143cc', 'name': 'https://github.com/tensorflow/tensorflow/commit/41727ff06111117bdf86b37db198217fd7a143cc', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cjc7-49v2-jp64', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-cjc7-49v2-jp64', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/6fd02f44810754ae7481838b6a67c5df7f909ca3', 'name': 'https://github.com/tensorflow/tensorflow/commit/6fd02f44810754ae7481838b6a67c5df7f909ca3', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-787'}, {'lang': 'en', 'value': 'CWE-476'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. Incomplete validation in `SparseAdd` results in allowing attackers to exploit undefined behavior (dereferencing null pointers) as well as write outside of bounds of heap allocated data. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/sparse_add_op.cc) has a large set of validation for the two sparse tensor inputs (6 tensors in total), but does not validate that the tensors are not empty or that the second dimension of `*_indices` matches the size of corresponding `*_shape`. This allows attackers to send tensor triples that represent invalid sparse tensors to abuse code assumptions that are not protected by validation. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-20T16:45Z
2021-05-14T20:15Z
Out-of-bounds Write
The software writes data past the end, or before the beginning, of the intended buffer.
Typically, this can result in corruption of data, a crash, or code execution. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent write operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/787.html
0
Mihai Maruseac
2021-05-11 15:41:51-07:00
Validate that a and b are proper sparse tensors PiperOrigin-RevId: 373248068 Change-Id: I0a2041a0747901b3f00387a6a3bce9bca6b0b3b1
41727ff06111117bdf86b37db198217fd7a143cc
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseAddOp::Compute
tensorflow::SparseAddOp::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext *ctx) override { // (0) validations const Tensor *a_indices, *b_indices, *a_values_t, *b_values_t, *a_shape, *b_shape, *thresh_t; OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices)); OP_REQUIRES_OK(ctx, ctx->input("b_indices", &b_indices)); OP_REQUIRES(ctx, TensorShapeUtils::IsMatrix(a_indices->shape()) && TensorShapeUtils::IsMatrix(b_indices->shape()), errors::InvalidArgument( "Input indices should be matrices but received shapes: ", a_indices->shape().DebugString(), " and ", b_indices->shape().DebugString())); const int64 a_nnz = a_indices->dim_size(0); const int64 b_nnz = b_indices->dim_size(0); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t)); OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_values_t->shape()) && TensorShapeUtils::IsVector(b_values_t->shape()), errors::InvalidArgument( "Input values should be vectors but received shapes: ", a_values_t->shape().DebugString(), " and ", b_values_t->shape().DebugString())); auto a_values = ctx->input(1).vec<T>(); auto b_values = ctx->input(4).vec<T>(); OP_REQUIRES( ctx, a_values.size() == a_nnz && b_values.size() == b_nnz, errors::InvalidArgument("Expected ", a_nnz, " and ", b_nnz, " non-empty input values, got ", a_values.size(), " and ", b_values.size())); OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape)); OP_REQUIRES_OK(ctx, ctx->input("b_shape", &b_shape)); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape->shape()) && TensorShapeUtils::IsVector(b_shape->shape()), errors::InvalidArgument( "Input shapes should be a vector but received shapes ", a_shape->shape().DebugString(), " and ", b_shape->shape().DebugString())); OP_REQUIRES( ctx, a_shape->IsSameSize(*b_shape), errors::InvalidArgument( "Operands do not have the same ranks; got shapes: ", a_shape->SummarizeValue(10), " and ", b_shape->SummarizeValue(10))); const auto a_shape_flat = a_shape->flat<int64>(); const auto b_shape_flat = b_shape->flat<int64>(); for 
(int i = 0; i < a_shape->NumElements(); ++i) { OP_REQUIRES(ctx, a_shape_flat(i) == b_shape_flat(i), errors::InvalidArgument( "Operands' shapes do not match: got ", a_shape_flat(i), " and ", b_shape_flat(i), " for dimension ", i)); } OP_REQUIRES_OK(ctx, ctx->input("thresh", &thresh_t)); OP_REQUIRES(ctx, TensorShapeUtils::IsScalar(thresh_t->shape()), errors::InvalidArgument( "The magnitude threshold must be a scalar: got shape ", thresh_t->shape().DebugString())); // std::abs() so that it works for complex{64,128} values as well const Treal thresh = thresh_t->scalar<Treal>()(); // (1) do a pass over inputs, and append values and indices to vectors auto a_indices_mat = a_indices->matrix<int64>(); auto b_indices_mat = b_indices->matrix<int64>(); std::vector<std::pair<bool, int64>> entries_to_copy; // from_a?, idx entries_to_copy.reserve(a_nnz + b_nnz); std::vector<T> out_values; const int num_dims = a_shape->dim_size(0); OP_REQUIRES(ctx, num_dims > 0, errors::InvalidArgument("Invalid input_a shape. Received: ", a_shape->DebugString())); // The input and output sparse tensors are assumed to be ordered along // increasing dimension number. 
int64 i = 0, j = 0; T s; while (i < a_nnz && j < b_nnz) { switch (sparse::DimComparator::cmp(a_indices_mat, b_indices_mat, i, j, num_dims)) { case -1: entries_to_copy.emplace_back(true, i); out_values.push_back(a_values(i)); ++i; break; case 0: s = a_values(i) + b_values(j); if (thresh <= std::abs(s)) { entries_to_copy.emplace_back(true, i); out_values.push_back(s); } ++i; ++j; break; case 1: entries_to_copy.emplace_back(false, j); out_values.push_back(b_values(j)); ++j; break; } } #define HANDLE_LEFTOVERS(A_OR_B, IDX, IS_A) \ while (IDX < A_OR_B##_nnz) { \ entries_to_copy.emplace_back(IS_A, IDX); \ out_values.push_back(A_OR_B##_values(IDX)); \ ++IDX; \ } // at most one of these calls appends new values HANDLE_LEFTOVERS(a, i, true); HANDLE_LEFTOVERS(b, j, false); #undef HANDLE_LEFTOVERS // (2) allocate and fill output tensors const int64 sum_nnz = out_values.size(); Tensor *out_indices_t, *out_values_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({sum_nnz, num_dims}), &out_indices_t)); OP_REQUIRES_OK( ctx, ctx->allocate_output(1, TensorShape({sum_nnz}), &out_values_t)); auto out_indices_mat = out_indices_t->matrix<int64>(); auto out_values_flat = out_values_t->vec<T>(); for (i = 0; i < sum_nnz; ++i) { const bool from_a = entries_to_copy[i].first; const int64 idx = entries_to_copy[i].second; out_indices_mat.chip<0>(i) = from_a ? a_indices_mat.chip<0>(idx) : b_indices_mat.chip<0>(idx); } if (sum_nnz > 0) { std::copy_n(out_values.begin(), sum_nnz, &out_values_flat(0)); } ctx->set_output(2, *a_shape); }
1031
True
1
CVE-2021-29607
False
False
False
False
AV:L/AC:L/Au:N/C:P/I:P/A:P
LOCAL
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
4.6
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:H/I:H/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
HIGH
HIGH
HIGH
7.8
HIGH
1.8
5.9
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/ba6822bd7b7324ba201a28b2f278c29a98edbef2', 'name': 'https://github.com/tensorflow/tensorflow/commit/ba6822bd7b7324ba201a28b2f278c29a98edbef2', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/f6fde895ef9c77d848061c0517f19d0ec2682f3a', 'name': 'https://github.com/tensorflow/tensorflow/commit/f6fde895ef9c77d848061c0517f19d0ec2682f3a', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-gv26-jpj9-c8gq', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-gv26-jpj9-c8gq', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-754'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.2', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.2.0', 'versionEndExcluding': '2.2.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2.1.4', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. Incomplete validation in `SparseAdd` results in allowing attackers to exploit undefined behavior (dereferencing null pointers) as well as write outside of bounds of heap allocated data. The implementation(https://github.com/tensorflow/tensorflow/blob/656e7673b14acd7835dc778867f84916c6d1cac2/tensorflow/core/kernels/sparse_sparse_binary_op_shared.cc) has a large set of validation for the two sparse tensor inputs (6 tensors in total), but does not validate that the tensors are not empty or that the second dimension of `*_indices` matches the size of corresponding `*_shape`. This allows attackers to send tensor triples that represent invalid sparse tensors to abuse code assumptions that are not protected by validation. The fix will be included in TensorFlow 2.5.0. We will also cherrypick this commit on TensorFlow 2.4.2, TensorFlow 2.3.3, TensorFlow 2.2.3 and TensorFlow 2.1.4, as these are also affected and still in supported range.'}]
2021-05-18T15:08Z
2021-05-14T20:15Z
Improper Check for Unusual or Exceptional Conditions
The software does not check or incorrectly checks for unusual or exceptional conditions that are not expected to occur frequently during day to day operation of the software.
The programmer may assume that certain events or conditions will never occur or do not need to be worried about, such as low memory conditions, lack of access to resources due to restrictive permissions, or misbehaving clients or components. However, attackers may intentionally trigger these unusual conditions, thus violating the programmer's assumptions, possibly introducing instability, incorrect behavior, or a vulnerability. Note that this entry is not exclusively about the use of exceptions and exception handling, which are mechanisms for both checking and handling unusual or unexpected conditions.
https://cwe.mitre.org/data/definitions/754.html
0
Mihai Maruseac
2021-05-11 18:32:03-07:00
Validate that a and b are proper sparse tensors PiperOrigin-RevId: 373274848 Change-Id: I3a665ac3a29dee9fb69bdf408a939330cb93ea75
f6fde895ef9c77d848061c0517f19d0ec2682f3a
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::SparseSparseBinaryOpShared::Compute
tensorflow::SparseSparseBinaryOpShared::Compute( OpKernelContext * ctx)
['ctx']
void Compute(OpKernelContext *ctx) override { const Tensor *a_indices_t, *a_values_t, *a_shape_t, *b_indices_t, *b_values_t, *b_shape_t; OP_REQUIRES_OK(ctx, ctx->input("a_indices", &a_indices_t)); OP_REQUIRES_OK(ctx, ctx->input("a_values", &a_values_t)); OP_REQUIRES_OK(ctx, ctx->input("a_shape", &a_shape_t)); OP_REQUIRES_OK(ctx, ctx->input("b_indices", &b_indices_t)); OP_REQUIRES_OK(ctx, ctx->input("b_values", &b_values_t)); OP_REQUIRES_OK(ctx, ctx->input("b_shape", &b_shape_t)); // Validations. OP_REQUIRES( ctx, TensorShapeUtils::IsMatrix(a_indices_t->shape()) && TensorShapeUtils::IsMatrix(b_indices_t->shape()), errors::InvalidArgument("Inputs a_indices and b_indices should be " "matrices but received shapes: ", a_indices_t->shape().DebugString(), ", ", b_indices_t->shape().DebugString())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_values_t->shape()) && TensorShapeUtils::IsVector(b_values_t->shape()), errors::InvalidArgument( "Inputs a_values and b_values should be vectors " "but received shapes: ", a_values_t->shape().DebugString(), " and ", b_values_t->shape().DebugString())); const int64 a_nnz = a_indices_t->dim_size(0); const int64 b_nnz = b_indices_t->dim_size(0); const auto a_values = a_values_t->vec<T>(); const auto b_values = b_values_t->vec<T>(); OP_REQUIRES( ctx, a_values.size() == a_nnz && b_values.size() == b_nnz, errors::InvalidArgument("Expected ", a_nnz, " and ", b_nnz, " non-empty input values, got ", a_values.size(), " and ", b_values.size())); OP_REQUIRES(ctx, TensorShapeUtils::IsVector(a_shape_t->shape()) && TensorShapeUtils::IsVector(b_shape_t->shape()), errors::InvalidArgument( "Input shapes should be a vector but received shapes ", a_shape_t->shape().DebugString(), " and ", b_shape_t->shape().DebugString())); OP_REQUIRES(ctx, a_shape_t->IsSameSize(*b_shape_t), errors::InvalidArgument( "Operands do not have the same ranks; got shapes: ", a_shape_t->SummarizeValue(10), " and ", b_shape_t->SummarizeValue(10))); const auto a_shape = 
a_shape_t->flat<int64>(); const auto b_shape = b_shape_t->flat<int64>(); for (int i = 0; i < a_shape_t->NumElements(); ++i) { OP_REQUIRES(ctx, a_shape(i) == b_shape(i), errors::InvalidArgument("Operands' shapes do not match: got ", a_shape(i), " and ", b_shape(i), " for dimension ", i)); } OP_REQUIRES( ctx, a_indices_t->dim_size(1) == b_indices_t->dim_size(1), errors::InvalidArgument( "Indices' dimensions do not match: got ", a_indices_t->dim_size(1), " and ", b_indices_t->dim_size(1), " for the second dimension.")); const int num_dims = a_indices_t->dim_size(1); const auto a_indices_mat = a_indices_t->matrix<int64>(); const auto b_indices_mat = b_indices_t->matrix<int64>(); std::vector<T> a_augmented_values, b_augmented_values; std::vector<std::pair<bool, int64>> entries_to_copy; // from_a?, idx UnionSparseIndicesAndValues(a_indices_mat, a_values, a_nnz, b_indices_mat, b_values, b_nnz, num_dims, &a_augmented_values, &b_augmented_values, &entries_to_copy); // Allocates and fills output tensors. const int64 sum_nnz = a_augmented_values.size(); Tensor *output_indices_t, *output_values_t; OP_REQUIRES_OK(ctx, ctx->allocate_output(0, TensorShape({sum_nnz, num_dims}), &output_indices_t)); OP_REQUIRES_OK( ctx, ctx->allocate_output(1, TensorShape({sum_nnz}), &output_values_t)); auto output_indices_mat = output_indices_t->matrix<int64>(); for (int64 i = 0; i < sum_nnz; ++i) { const bool from_a = entries_to_copy[i].first; const int64 idx = entries_to_copy[i].second; output_indices_mat.chip<0>(i) = from_a ? a_indices_mat.chip<0>(idx) : b_indices_mat.chip<0>(idx); } // Performs the functor operation using Eigen. // // Note that the two stack-allocated std::vector's may not be aligned. Using // allocate_temp() would've given us aligned storage, but we do not know // their sizes in advance, so we couldn't use allocate_temp() anyway. // // TODO(zongheng): measure if it's worthwhile to somehow force alignment. 
using UnalignedTensorMap = Eigen::TensorMap<Eigen::Tensor<const T, 1, Eigen::RowMajor>, Eigen::Unaligned>; auto a_augmented_values_t = UnalignedTensorMap(a_augmented_values.data(), sum_nnz); auto b_augmented_values_t = UnalignedTensorMap(b_augmented_values.data(), sum_nnz); output_values_t->flat<T>().device(ctx->eigen_device<Device>()) = a_augmented_values_t.binaryExpr(b_augmented_values_t, typename Functor::func()); }
873
True
1
CVE-2021-37637
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/5dc7f6981fdaf74c8c5be41f393df705841fb7c5', 'name': 'https://github.com/tensorflow/tensorflow/commit/5dc7f6981fdaf74c8c5be41f393df705841fb7c5', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-c9qf-r67m-p7cg', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-c9qf-r67m-p7cg', 'refsource': 'CONFIRM', 'tags': ['Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.6.0:rc2:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.6.0:rc1:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.6.0:rc0:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.5.0:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. It is possible to trigger a null pointer dereference in TensorFlow by passing an invalid input to `tf.raw_ops.CompressElement`. The [implementation](https://github.com/tensorflow/tensorflow/blob/47a06f40411a69c99f381495f490536972152ac0/tensorflow/core/data/compression_utils.cc#L34) was accessing the size of a buffer obtained from the return of a separate function call before validating that said buffer is valid. We have patched the issue in GitHub commit 5dc7f6981fdaf74c8c5be41f393df705841fb7c5. The fix will be included in TensorFlow 2.6.0. We will also cherrypick this commit on TensorFlow 2.5.1, TensorFlow 2.4.3, and TensorFlow 2.3.4, as these are also affected and still in supported range.'}]
2021-08-18T17:20Z
2021-08-12T19:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
A. Unique TensorFlower
2021-05-14 22:07:07-07:00
Fix accessing possible nullptr in tensorflow::data::CompressElement and UncompressElement which are used in tf.data.service. PiperOrigin-RevId: 373920841 Change-Id: Ia88d78aee09fa19bb53a0f163fd19620d0c68743
5dc7f6981fdaf74c8c5be41f393df705841fb7c5
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::data::CompressElement
tensorflow::data::CompressElement( const std :: vector<Tensor> & element , CompressedElement * out)
['element', 'out']
Status CompressElement(const std::vector<Tensor>& element, CompressedElement* out) { // Step 1: Determine the total uncompressed size. This requires serializing // non-memcopyable tensors, which we save to use again later. std::vector<TensorProto> non_memcpy_components; int64 total_size = 0; for (auto& component : element) { if (DataTypeCanUseMemcpy(component.dtype())) { // Some datatypes can be memcopied, allowing us to save two copies // (AsProtoTensorContent and SerializeToArray). total_size += DMAHelper::buffer(&component)->size(); } else { non_memcpy_components.emplace_back(); component.AsProtoTensorContent(&non_memcpy_components.back()); total_size += non_memcpy_components.back().ByteSizeLong(); } } // Step 2: Write the tensor data to a buffer, and compress that buffer. // We use tstring for access to resize_uninitialized. tstring uncompressed; uncompressed.resize_uninitialized(total_size); // Position in `uncompressed` to write the next component. char* position = uncompressed.mdata(); int non_memcpy_component_index = 0; for (auto& component : element) { CompressedComponentMetadata* metadata = out->mutable_component_metadata()->Add(); metadata->set_dtype(component.dtype()); component.shape().AsProto(metadata->mutable_tensor_shape()); if (DataTypeCanUseMemcpy(component.dtype())) { const TensorBuffer* buffer = DMAHelper::buffer(&component); memcpy(position, buffer->data(), buffer->size()); metadata->set_tensor_size_bytes(buffer->size()); } else { TensorProto& proto = non_memcpy_components[non_memcpy_component_index++]; proto.SerializeToArray(position, proto.ByteSizeLong()); metadata->set_tensor_size_bytes(proto.ByteSizeLong()); } position += metadata->tensor_size_bytes(); } DCHECK_EQ(position, uncompressed.mdata() + total_size); if (!port::Snappy_Compress(uncompressed.mdata(), total_size, out->mutable_data())) { return errors::Internal("Failed to compress using snappy."); } VLOG(3) << "Compressed element from " << total_size << " bytes to " << 
out->data().size() << " bytes"; return Status::OK(); }
355
True
1
CVE-2021-37637
False
False
False
False
AV:L/AC:L/Au:N/C:N/I:N/A:P
LOCAL
LOW
NONE
NONE
NONE
PARTIAL
2.1
CVSS:3.1/AV:L/AC:L/PR:L/UI:N/S:U/C:N/I:N/A:H
LOCAL
LOW
LOW
NONE
UNCHANGED
NONE
NONE
HIGH
5.5
MEDIUM
1.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/5dc7f6981fdaf74c8c5be41f393df705841fb7c5', 'name': 'https://github.com/tensorflow/tensorflow/commit/5dc7f6981fdaf74c8c5be41f393df705841fb7c5', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-c9qf-r67m-p7cg', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-c9qf-r67m-p7cg', 'refsource': 'CONFIRM', 'tags': ['Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-476'}]}]
LOW
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.4.0', 'versionEndExcluding': '2.4.3', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.6.0:rc2:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.6.0:rc1:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.6.0:rc0:*:*:*:*:*:*', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.3.0', 'versionEndExcluding': '2.3.4', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:2.5.0:*:*:*:*:*:*:*', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow is an end-to-end open source platform for machine learning. It is possible to trigger a null pointer dereference in TensorFlow by passing an invalid input to `tf.raw_ops.CompressElement`. The [implementation](https://github.com/tensorflow/tensorflow/blob/47a06f40411a69c99f381495f490536972152ac0/tensorflow/core/data/compression_utils.cc#L34) was accessing the size of a buffer obtained from the return of a separate function call before validating that said buffer is valid. We have patched the issue in GitHub commit 5dc7f6981fdaf74c8c5be41f393df705841fb7c5. The fix will be included in TensorFlow 2.6.0. We will also cherrypick this commit on TensorFlow 2.5.1, TensorFlow 2.4.3, and TensorFlow 2.3.4, as these are also affected and still in supported range.'}]
2021-08-18T17:20Z
2021-08-12T19:15Z
NULL Pointer Dereference
A NULL pointer dereference occurs when the application dereferences a pointer that it expects to be valid, but is NULL, typically causing a crash or exit.
NULL pointer dereference issues can occur through a number of flaws, including race conditions, and simple programming omissions.
https://cwe.mitre.org/data/definitions/476.html
0
A. Unique TensorFlower
2021-05-14 22:07:07-07:00
Fix accessing possible nullptr in tensorflow::data::CompressElement and UncompressElement which are used in tf.data.service. PiperOrigin-RevId: 373920841 Change-Id: Ia88d78aee09fa19bb53a0f163fd19620d0c68743
5dc7f6981fdaf74c8c5be41f393df705841fb7c5
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::data::UncompressElement
tensorflow::data::UncompressElement( const CompressedElement & compressed , std :: vector<Tensor> * out)
['compressed', 'out']
Status UncompressElement(const CompressedElement& compressed, std::vector<Tensor>* out) { int num_components = compressed.component_metadata_size(); out->clear(); out->reserve(num_components); // Step 1: Prepare the memory that we will uncompress into. std::vector<struct iovec> iov(num_components); // We use tstring for access to resize_uninitialized. std::vector<tstring> tensor_proto_strs; // num_components is a conservative estimate. It is important to reserve // vector space so that the vector doesn't resize itself, which could // invalidate pointers to its strings' data. tensor_proto_strs.reserve(num_components); int64 total_size = 0; for (int i = 0; i < num_components; ++i) { const CompressedComponentMetadata& metadata = compressed.component_metadata(i); if (DataTypeCanUseMemcpy(metadata.dtype())) { out->emplace_back(metadata.dtype(), metadata.tensor_shape()); TensorBuffer* buffer = DMAHelper::buffer(&out->back()); iov[i].iov_base = buffer->data(); iov[i].iov_len = buffer->size(); } else { // Allocate an empty Tensor. We will fill it out later after // uncompressing into the tensor_proto_str. out->emplace_back(); tensor_proto_strs.emplace_back(); tstring& tensor_proto_str = tensor_proto_strs.back(); tensor_proto_str.resize_uninitialized(metadata.tensor_size_bytes()); iov[i].iov_base = tensor_proto_str.mdata(); iov[i].iov_len = tensor_proto_str.size(); } total_size += iov[i].iov_len; } // Step 2: Uncompress into the iovec. const std::string& compressed_data = compressed.data(); size_t uncompressed_size; if (!port::Snappy_GetUncompressedLength( compressed_data.data(), compressed_data.size(), &uncompressed_size)) { return errors::Internal( "Could not get snappy uncompressed length. Compressed data size: ", compressed_data.size()); } if (uncompressed_size != static_cast<size_t>(total_size)) { return errors::Internal( "Uncompressed size mismatch. 
Snappy expects ", uncompressed_size, " whereas the tensor metadata suggests ", total_size); } if (!port::Snappy_UncompressToIOVec(compressed_data.data(), compressed_data.size(), iov.data(), num_components)) { return errors::Internal("Failed to perform snappy decompression."); } // Step 3: Deserialize tensor proto strings to tensors. int tensor_proto_strs_index = 0; for (int i = 0; i < num_components; ++i) { if (DataTypeCanUseMemcpy(compressed.component_metadata(i).dtype())) { continue; } TensorProto tp; if (!tp.ParseFromString(tensor_proto_strs[tensor_proto_strs_index++])) { return errors::Internal("Could not parse TensorProto"); } if (!out->at(i).FromProto(tp)) { return errors::Internal("Could not parse Tensor"); } } return Status::OK(); }
464
True
1
CVE-2018-21233
False
False
False
True
AV:N/AC:M/Au:N/C:P/I:N/A:N
NETWORK
MEDIUM
NONE
PARTIAL
NONE
NONE
4.3
CVSS:3.1/AV:N/AC:L/PR:N/UI:R/S:U/C:H/I:N/A:N
NETWORK
LOW
NONE
REQUIRED
UNCHANGED
HIGH
NONE
NONE
6.5
MEDIUM
2.8
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/commit/49f73c55d56edffebde4bca4a407ad69c1cae433', 'name': 'https://github.com/tensorflow/tensorflow/commit/49f73c55d56edffebde4bca4a407ad69c1cae433', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}, {'url': 'https://github.com/tensorflow/tensorflow/blob/master/tensorflow/security/advisory/tfsa-2018-001.md', 'name': 'https://github.com/tensorflow/tensorflow/blob/master/tensorflow/security/advisory/tfsa-2018-001.md', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.7.0', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'TensorFlow before 1.7.0 has an integer overflow that causes an out-of-bounds read, possibly causing disclosure of the contents of process memory. This occurs in the DecodeBmp feature of the BMP decoder in core/kernels/decode_bmp_op.cc.'}]
2020-05-08T19:55Z
2020-05-04T15:15Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
David G. Andersen
2018-02-14 18:56:47-08:00
Fix integer overflow in BMP decoder by making the checks in DecodeBmp more stringent. Add fuzzer to improve the robustness of the decoder in the future. PiperOrigin-RevId: 185780111
49f73c55d56edffebde4bca4a407ad69c1cae433
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::DecodeBmpOp::Compute
tensorflow::DecodeBmpOp::Compute( OpKernelContext * context)
['context']
void Compute(OpKernelContext* context) override { const Tensor& contents = context->input(0); OP_REQUIRES(context, TensorShapeUtils::IsScalar(contents.shape()), errors::InvalidArgument("contents must be scalar, got shape ", contents.shape().DebugString())); // Start decoding image to get shape details const StringPiece input = contents.scalar<string>()(); OP_REQUIRES(context, (32 <= input.size()), errors::InvalidArgument("Incomplete bmp content, requires at " "least 32 bytes to find the header " "size, width, height, and bpp, got ", input.size(), " bytes")); const uint8* img_bytes = reinterpret_cast<const uint8*>(input.data()); int32 header_size_ = internal::SubtleMustCopy( *(reinterpret_cast<const int32*>(img_bytes + 10))); const int32 header_size = ByteSwapInt32ForBigEndian(header_size_); int32 width_ = internal::SubtleMustCopy( *(reinterpret_cast<const int32*>(img_bytes + 18))); const int32 width = ByteSwapInt32ForBigEndian(width_); int32 height_ = internal::SubtleMustCopy( *(reinterpret_cast<const int32*>(img_bytes + 22))); const int32 height = ByteSwapInt32ForBigEndian(height_); int32 bpp_ = internal::SubtleMustCopy( *(reinterpret_cast<const int32*>(img_bytes + 28))); const int32 bpp = ByteSwapInt32ForBigEndian(bpp_); if (channels_) { OP_REQUIRES(context, (channels_ == bpp / 8), errors::InvalidArgument( "channels attribute ", channels_, " does not match bits per pixel from file ", bpp / 8)); } else { channels_ = bpp / 8; } // Current implementation only supports 1, 3 or 4 channel // bitmaps. 
OP_REQUIRES(context, (channels_ == 1 || channels_ == 3 || channels_ == 4), errors::InvalidArgument( "Number of channels must be 1, 3 or 4, was ", channels_)); // there may be padding bytes when the width is not a multiple of 4 bytes // 8 * channels == bits per pixel const int row_size = (8 * channels_ * width + 31) / 32 * 4; const int last_pixel_offset = header_size + (abs(height) - 1) * row_size + (width - 1) * channels_; // [expected file size] = [last pixel offset] + [last pixel size=channels] const int expected_file_size = last_pixel_offset + channels_; OP_REQUIRES( context, (expected_file_size <= input.size()), errors::InvalidArgument("Incomplete bmp content, requires at least ", expected_file_size, " bytes, got ", input.size(), " bytes")); // if height is negative, data layout is top down // otherwise, it's bottom up bool top_down = (height < 0); // Decode image, allocating tensor once the image size is known Tensor* output = nullptr; OP_REQUIRES_OK( context, context->allocate_output( 0, TensorShape({abs(height), width, channels_}), &output)); const uint8* bmp_pixels = &img_bytes[header_size]; Decode(bmp_pixels, row_size, output->flat<uint8>().data(), width, abs(height), channels_, top_down); }
490
True
1
CVE-2020-5215
False
False
False
False
AV:N/AC:M/Au:N/C:N/I:N/A:P
NETWORK
MEDIUM
NONE
NONE
NONE
PARTIAL
4.3
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
HIGH
7.5
HIGH
3.9
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v1.15.2', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v1.15.2', 'refsource': 'MISC', 'tags': ['Release Notes']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/5ac1b9e24ff6afc465756edf845d2e9660bd34bf', 'name': 'https://github.com/tensorflow/tensorflow/commit/5ac1b9e24ff6afc465756edf845d2e9660bd34bf', 'refsource': 'MISC', 'tags': ['Patch']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.0.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.0.1', 'refsource': 'MISC', 'tags': ['Release Notes']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-977j-xj7q-2jr9', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-977j-xj7q-2jr9', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.2', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In TensorFlow before 1.15.2 and 2.0.1, converting a string (from Python) to a tf.float16 value results in a segmentation fault in eager mode as the format checks for this use case are only in the graph mode. This issue can lead to denial of service in inference/training where a malicious attacker can send a data point which contains a string instead of a tf.float16 value. Similar effects can be obtained by manipulating saved models and checkpoints whereby replacing a scalar tf.float16 value with a scalar string will trigger this issue due to automatic conversions. This can be easily reproduced by tf.constant("hello", tf.float16), if eager execution is enabled. This issue is patched in TensorFlow 1.15.1 and 2.0.1 with this vulnerability patched. TensorFlow 2.1.0 was released after we fixed the issue, thus it is not affected. Users are encouraged to switch to TensorFlow 1.15.1, 2.0.1 or 2.1.0.'}]
2020-02-05T21:02Z
2020-01-28T22:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2019-12-20 15:33:46-08:00
Fix segfault when attempting to convert string to float16. To make sure this gets fixed, add test for converting string to any numeric type. PiperOrigin-RevId: 286650886 Change-Id: I81f770ec2bbd33a863e8057ce198c679912fa8e0
5ac1b9e24ff6afc465756edf845d2e9660bd34bf
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::ConvertOneFloat
tensorflow::ConvertOneFloat( PyObject * v , T * out)
['v', 'out']
static const char* ConvertOneFloat(PyObject* v, T* out) { if (PyErr_Occurred()) { return nullptr; } if (TF_PREDICT_TRUE(PyFloat_Check(v))) { const double as_double = PyFloat_AS_DOUBLE(v); *out = static_cast<T>(as_double); // Check for overflow if (TF_PREDICT_FALSE(sizeof(T) < sizeof(double) && std::isinf(*out) && std::isfinite(as_double))) { return ErrorOutOfRangeDouble; } return nullptr; } #if PY_MAJOR_VERSION < 3 if (PyInt_Check(v)) { *out = PyInt_AS_LONG(v); return nullptr; } #endif if (PyLong_Check(v)) { *out = PyLong_AsDouble(v); if (PyErr_Occurred()) return ErrorOutOfRangeDouble; return nullptr; } if (PyIsInstance(v, &PyFloatingArrType_Type)) { // NumPy float types Safe_PyObjectPtr as_float = make_safe(PyNumber_Float(v)); if (PyErr_Occurred()) { return nullptr; } return ConvertOneFloat<T>(as_float.get(), out); } if (PyIsInstance(v, &PyIntegerArrType_Type)) { // NumPy integers #if PY_MAJOR_VERSION < 3 Safe_PyObjectPtr as_int = make_safe(PyNumber_Int(v)); #else Safe_PyObjectPtr as_int = make_safe(PyNumber_Long(v)); #endif if (PyErr_Occurred()) { return nullptr; } return ConvertOneFloat<T>(as_int.get(), out); } return ErrorMixedTypes; }
254
True
1
CVE-2020-5215
False
False
False
False
AV:N/AC:M/Au:N/C:N/I:N/A:P
NETWORK
MEDIUM
NONE
NONE
NONE
PARTIAL
4.3
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
HIGH
7.5
HIGH
3.9
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v1.15.2', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v1.15.2', 'refsource': 'MISC', 'tags': ['Release Notes']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/5ac1b9e24ff6afc465756edf845d2e9660bd34bf', 'name': 'https://github.com/tensorflow/tensorflow/commit/5ac1b9e24ff6afc465756edf845d2e9660bd34bf', 'refsource': 'MISC', 'tags': ['Patch']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.0.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.0.1', 'refsource': 'MISC', 'tags': ['Release Notes']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-977j-xj7q-2jr9', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-977j-xj7q-2jr9', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.2', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In TensorFlow before 1.15.2 and 2.0.1, converting a string (from Python) to a tf.float16 value results in a segmentation fault in eager mode as the format checks for this use case are only in the graph mode. This issue can lead to denial of service in inference/training where a malicious attacker can send a data point which contains a string instead of a tf.float16 value. Similar effects can be obtained by manipulating saved models and checkpoints whereby replacing a scalar tf.float16 value with a scalar string will trigger this issue due to automatic conversions. This can be easily reproduced by tf.constant("hello", tf.float16), if eager execution is enabled. This issue is patched in TensorFlow 1.15.1 and 2.0.1 with this vulnerability patched. TensorFlow 2.1.0 was released after we fixed the issue, thus it is not affected. Users are encouraged to switch to TensorFlow 1.15.1, 2.0.1 or 2.1.0.'}]
2020-02-05T21:02Z
2020-01-28T22:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2019-12-20 15:33:46-08:00
Fix segfault when attempting to convert string to float16. To make sure this gets fixed, add test for converting string to any numeric type. PiperOrigin-RevId: 286650886 Change-Id: I81f770ec2bbd33a863e8057ce198c679912fa8e0
5ac1b9e24ff6afc465756edf845d2e9660bd34bf
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::ConverterTraits<Eigen::half>::ConvertScalar
tensorflow::ConverterTraits<Eigen::half>::ConvertScalar( PyObject * v , Eigen :: half * out)
['v', 'out']
static const char* ConvertScalar(PyObject* v, Eigen::half* out) { // NOTE(nareshmodi): Is there a way to convert to C double without the // intermediate Python double? This will help with ConvertOneFloat as well. Safe_PyObjectPtr as_float = make_safe(PyNumber_Float(v)); double v_double = PyFloat_AS_DOUBLE(as_float.get()); *out = Eigen::half(v_double); return nullptr; }
50
True
1
CVE-2020-5215
False
False
False
False
AV:N/AC:M/Au:N/C:N/I:N/A:P
NETWORK
MEDIUM
NONE
NONE
NONE
PARTIAL
4.3
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:N/I:N/A:H
NETWORK
LOW
NONE
NONE
UNCHANGED
NONE
NONE
HIGH
7.5
HIGH
3.9
3.6
False
[{'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v1.15.2', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v1.15.2', 'refsource': 'MISC', 'tags': ['Release Notes']}, {'url': 'https://github.com/tensorflow/tensorflow/commit/5ac1b9e24ff6afc465756edf845d2e9660bd34bf', 'name': 'https://github.com/tensorflow/tensorflow/commit/5ac1b9e24ff6afc465756edf845d2e9660bd34bf', 'refsource': 'MISC', 'tags': ['Patch']}, {'url': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.0.1', 'name': 'https://github.com/tensorflow/tensorflow/releases/tag/v2.0.1', 'refsource': 'MISC', 'tags': ['Release Notes']}, {'url': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-977j-xj7q-2jr9', 'name': 'https://github.com/tensorflow/tensorflow/security/advisories/GHSA-977j-xj7q-2jr9', 'refsource': 'CONFIRM', 'tags': ['Exploit', 'Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-20'}]}]
MEDIUM
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionStartIncluding': '2.0.0', 'versionEndExcluding': '2.0.1', 'cpe_name': []}, {'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:google:tensorflow:*:*:*:*:*:*:*:*', 'versionEndExcluding': '1.15.2', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'In TensorFlow before 1.15.2 and 2.0.1, converting a string (from Python) to a tf.float16 value results in a segmentation fault in eager mode as the format checks for this use case are only in the graph mode. This issue can lead to denial of service in inference/training where a malicious attacker can send a data point which contains a string instead of a tf.float16 value. Similar effects can be obtained by manipulating saved models and checkpoints whereby replacing a scalar tf.float16 value with a scalar string will trigger this issue due to automatic conversions. This can be easily reproduced by tf.constant("hello", tf.float16), if eager execution is enabled. This issue is patched in TensorFlow 1.15.1 and 2.0.1 with this vulnerability patched. TensorFlow 2.1.0 was released after we fixed the issue, thus it is not affected. Users are encouraged to switch to TensorFlow 1.15.1, 2.0.1 or 2.1.0.'}]
2020-02-05T21:02Z
2020-01-28T22:15Z
Improper Input Validation
The product receives input or data, but it does not validate or incorrectly validates that the input has the properties that are required to process the data safely and correctly.
Input validation is a frequently-used technique for checking potentially dangerous inputs in order to ensure that the inputs are safe for processing within the code, or when communicating with other components. When software does not validate input properly, an attacker is able to craft the input in a form that is not expected by the rest of the application. This will lead to parts of the system receiving unintended input, which may result in altered control flow, arbitrary control of a resource, or arbitrary code execution. Input validation is not the only technique for processing input, however. Other techniques attempt to transform potentially-dangerous input into something safe, such as filtering (CWE-790) - which attempts to remove dangerous inputs - or encoding/escaping (CWE-116), which attempts to ensure that the input is not misinterpreted when it is included in output to another component. Other techniques exist as well (see CWE-138 for more examples.) Input validation can be applied to: raw data - strings, numbers, parameters, file contents, etc. metadata - information about the raw data, such as headers or size Data can be simple or structured. Structured data can be composed of many nested layers, composed of combinations of metadata and raw data, with other simple or structured data. Many properties of raw data or metadata may need to be validated upon entry into the code, such as: specified quantities such as size, length, frequency, price, rate, number of operations, time, etc. implied or derived quantities, such as the actual size of a file instead of a specified size indexes, offsets, or positions into more complex data structures symbolic keys or other elements into hash tables, associative arrays, etc. well-formedness, i.e. 
syntactic correctness - compliance with expected syntax lexical token correctness - compliance with rules for what is treated as a token specified or derived type - the actual type of the input (or what the input appears to be) consistency - between individual data elements, between raw data and metadata, between references, etc. conformance to domain-specific rules, e.g. business logic equivalence - ensuring that equivalent inputs are treated the same authenticity, ownership, or other attestations about the input, e.g. a cryptographic signature to prove the source of the data Implied or derived properties of data must often be calculated or inferred by the code itself. Errors in deriving properties may be considered a contributing factor to improper input validation. Note that "input validation" has very different meanings to different people, or within different classification schemes. Caution must be used when referencing this CWE entry or mapping to it. For example, some weaknesses might involve inadvertently giving control to an attacker over an input when they should not be able to provide an input at all, but sometimes this is referred to as input validation. Finally, it is important to emphasize that the distinctions between input validation and output escaping are often blurred, and developers must be careful to understand the difference, including how input validation is not always sufficient to prevent vulnerabilities, especially when less stringent data types must be supported, such as free-form text. Consider a SQL injection scenario in which a person's last name is inserted into a query. The name "O'Reilly" would likely pass the validation step since it is a common last name in the English language. However, this valid name cannot be directly inserted into the database because it contains the "'" apostrophe character, which would need to be escaped or otherwise transformed. 
In this case, removing the apostrophe might reduce the risk of SQL injection, but it would produce incorrect behavior because the wrong name would be recorded.
https://cwe.mitre.org/data/definitions/20.html
0
Mihai Maruseac
2019-12-20 15:33:46-08:00
Fix segfault when attempting to convert string to float16. To make sure this gets fixed, add test for converting string to any numeric type. PiperOrigin-RevId: 286650886 Change-Id: I81f770ec2bbd33a863e8057ce198c679912fa8e0
5ac1b9e24ff6afc465756edf845d2e9660bd34bf
False
tensorflow/tensorflow
An Open Source Machine Learning Framework for Everyone
2015-11-07 01:19:20
2022-08-27 17:32:40
https://tensorflow.org
tensorflow
167391.0
87115.0
tensorflow::PySeqToTensor
tensorflow::PySeqToTensor( PyObject * obj , DataType dtype , Tensor * ret)
['obj', 'dtype', 'ret']
Status PySeqToTensor(PyObject* obj, DataType dtype, Tensor* ret) { ConverterState state; TF_RETURN_IF_ERROR(InferShapeAndType(obj, &state)); DataType requested_dtype = DT_INVALID; if (dtype != DT_INVALID) { requested_dtype = dtype; } // NOTE(josh11b): If don't successfully convert to the requested type, // we just try instead to create a tensor of the inferred type and // let the caller convert it to the requested type using a cast // operation. switch (requested_dtype) { case DT_FLOAT: if (FloatConverter::Convert(obj, &state, ret) == nullptr) return Status::OK(); break; case DT_DOUBLE: if (DoubleConverter::Convert(obj, &state, ret) == nullptr) return Status::OK(); break; case DT_HALF: RETURN_STRING_AS_STATUS(NumpyHalfConverter::Convert(obj, &state, ret)); case DT_INT64: if (Int64Converter::Convert(obj, &state, ret) == nullptr) return Status::OK(); break; case DT_INT32: if (Int32Converter::Convert(obj, &state, ret) == nullptr) return Status::OK(); break; case DT_UINT64: if (UInt64Converter::Convert(obj, &state, ret) == nullptr) return Status::OK(); break; case DT_COMPLEX128: if (Complex128Converter::Convert(obj, &state, ret) == nullptr) return Status::OK(); break; case DT_STRING: if (StringConverter::Convert(obj, &state, ret) == nullptr) return Status::OK(); break; case DT_BOOL: if (BoolConverter::Convert(obj, &state, ret) == nullptr) return Status::OK(); break; default: break; } switch (state.inferred_dtype) { case DT_FLOAT: // TODO(josh11b): Handle mixed floats and complex numbers? if (requested_dtype == DT_INVALID) { // TensorFlow uses float32s to represent floating point numbers // by default (for space and speed over using doubles). RETURN_STRING_AS_STATUS(FloatConverter::Convert(obj, &state, ret)); } else { // We are going to do a cast to the user's requested dtype // after this. We use doubles for this intermediate result so // we don't lose precision that might be representable in the // final type. 
RETURN_STRING_AS_STATUS(DoubleConverter::Convert(obj, &state, ret)); } case DT_DOUBLE: RETURN_STRING_AS_STATUS(DoubleConverter::Convert(obj, &state, ret)); case DT_HALF: RETURN_STRING_AS_STATUS(NumpyHalfConverter::Convert(obj, &state, ret)); case DT_INT64: if (requested_dtype == DT_INVALID) { const char* error = Int32Converter::Convert(obj, &state, ret); if (error == ErrorFoundInt64) { error = Int64Converter::Convert(obj, &state, ret); } if (error == ErrorFoundFloat) { error = FloatConverter::Convert(obj, &state, ret); } // TODO(josh11b): May also want to fall back to using doubles if // error == ErrorOutOfRange? RETURN_STRING_AS_STATUS(error); } else { const char* error = Int64Converter::Convert(obj, &state, ret); if (error == ErrorFoundFloat) { error = DoubleConverter::Convert(obj, &state, ret); } RETURN_STRING_AS_STATUS(error); } case DT_STRING: RETURN_STRING_AS_STATUS(StringConverter::Convert(obj, &state, ret)); case DT_COMPLEX128: RETURN_STRING_AS_STATUS(Complex128Converter::Convert(obj, &state, ret)); case DT_BOOL: RETURN_STRING_AS_STATUS(BoolConverter::Convert(obj, &state, ret)); case DT_INVALID: // Only occurs for empty tensors. *ret = Tensor(requested_dtype == DT_INVALID ? DT_FLOAT : requested_dtype, state.inferred_shape); return Status::OK(); default: return errors::Unimplemented("Missing Python -> Tensor conversion for ", DataTypeString(state.inferred_dtype)); } return Status::OK(); }
616
True
1
CVE-2019-11934
False
False
False
False
AV:N/AC:L/Au:N/C:P/I:P/A:P
NETWORK
LOW
NONE
PARTIAL
PARTIAL
PARTIAL
7.5
CVSS:3.1/AV:N/AC:L/PR:N/UI:N/S:U/C:H/I:H/A:H
NETWORK
LOW
NONE
NONE
UNCHANGED
HIGH
HIGH
HIGH
9.8
CRITICAL
3.9
5.9
False
[{'url': 'https://www.facebook.com/security/advisories/cve-2019-11934', 'name': 'https://www.facebook.com/security/advisories/cve-2019-11934', 'refsource': 'CONFIRM', 'tags': ['Third Party Advisory']}, {'url': 'https://github.com/facebook/folly/commit/c321eb588909646c15aefde035fd3133ba32cdee', 'name': 'https://github.com/facebook/folly/commit/c321eb588909646c15aefde035fd3133ba32cdee', 'refsource': 'MISC', 'tags': ['Patch', 'Third Party Advisory']}]
[{'description': [{'lang': 'en', 'value': 'CWE-125'}]}]
HIGH
[{'operator': 'OR', 'children': [], 'cpe_match': [{'vulnerable': True, 'cpe23Uri': 'cpe:2.3:a:facebook:folly:*:*:*:*:*:*:*:*', 'versionEndExcluding': '2019.11.04.00', 'cpe_name': []}]}]
[{'lang': 'en', 'value': 'Improper handling of close_notify alerts can result in an out-of-bounds read in AsyncSSLSocket. This issue affects folly prior to v2019.11.04.00.'}]
2019-12-13T19:28Z
2019-12-04T17:16Z
Out-of-bounds Read
The software reads data past the end, or before the beginning, of the intended buffer.
Typically, this can allow attackers to read sensitive information from other memory locations or cause a crash. A crash can occur when the code reads a variable amount of data and assumes that a sentinel exists to stop the read operation, such as a NUL in a string. The expected sentinel might not be located in the out-of-bounds memory, causing excessive data to be read, leading to a segmentation fault or a buffer overflow. The software may modify an index or perform pointer arithmetic that references a memory location that is outside of the boundaries of the buffer. A subsequent read operation then produces undefined or unexpected results.
https://cwe.mitre.org/data/definitions/125.html
0
Kyle Nekritz
2019-10-28 15:47:35-07:00
Handle close_notify as standard writeErr in AsyncSSLSocket. Summary: Fixes CVE-2019-11934 Reviewed By: mingtaoy Differential Revision: D18020613 fbshipit-source-id: db82bb250e53f0d225f1280bd67bc74abd417836
c321eb588909646c15aefde035fd3133ba32cdee
False
facebook/folly
An open-source C++ library developed and used at Facebook.
2012-06-01 20:49:04
2022-08-27 13:38:56
https://groups.google.com/forum/?fromgroups#!forum/facebook-folly
facebook
23010.0
4804.0
folly::AsyncSSLSocket::interpretSSLError
folly::AsyncSSLSocket::interpretSSLError( int rc , int error)
['rc', 'error']
AsyncSocket::WriteResult AsyncSSLSocket::interpretSSLError(int rc, int error) { if (error == SSL_ERROR_WANT_READ) { // Even though we are attempting to write data, SSL_write() may // need to read data if renegotiation is being performed. We currently // don't support this and just fail the write. LOG(ERROR) << "AsyncSSLSocket(fd=" << fd_ << ", state=" << int(state_) << ", sslState=" << sslState_ << ", events=" << eventFlags_ << "): " << "unsupported SSL renegotiation during write"; return WriteResult( WRITE_ERROR, std::make_unique<SSLException>(SSLError::INVALID_RENEGOTIATION)); } else { if (zero_return(error, rc, errno)) { return WriteResult(0); } auto errError = ERR_get_error(); VLOG(3) << "ERROR: AsyncSSLSocket(fd=" << fd_ << ", state=" << int(state_) << ", sslState=" << sslState_ << ", events=" << eventFlags_ << "): " << "SSL error: " << error << ", errno: " << errno << ", func: " << ERR_func_error_string(errError) << ", reason: " << ERR_reason_error_string(errError); return WriteResult( WRITE_ERROR, std::make_unique<SSLException>(error, errError, rc, errno)); } }
192
True
1