| Column | Type | Summary |
|---|---|---|
| url | string | lengths 63 to 63 |
| repository_url | string | 1 value |
| labels_url | string | lengths 77 to 77 |
| comments_url | string | lengths 72 to 72 |
| events_url | string | lengths 70 to 70 |
| html_url | string | lengths 51 to 53 |
| id | int64 | 1.57B to 2.35B |
| node_id | string | lengths 18 to 19 |
| number | int64 | 59.5k to 69.6k |
| title | string | lengths 1 to 554 |
| user | dict | |
| labels | list | lengths 0 to 8 |
| state | string | 2 values |
| locked | bool | 2 classes |
| assignee | dict | |
| assignees | list | lengths 0 to 8 |
| milestone | null | |
| comments | list | lengths 0 to 30 |
| created_at | timestamp[s] | |
| updated_at | timestamp[s] | |
| closed_at | timestamp[s] | |
| author_association | string | 4 values |
| active_lock_reason | string | 3 values |
| draft | bool | 2 classes |
| pull_request | dict | |
| body | string | lengths 1 to 65.4k |
| reactions | dict | |
| timeline_url | string | lengths 72 to 72 |
| performed_via_github_app | null | |
| state_reason | string | 3 values |
| is_pull_request | bool | 2 classes |
https://api.github.com/repos/tensorflow/tensorflow/issues/62585
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62585/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62585/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62585/events
https://github.com/tensorflow/tensorflow/issues/62585
2,030,210,354
I_kwDOArmXAs55Ao0y
62,585
TFLite MirrorPad in reflect mode yields incorrect results.
{ "login": "Pop-korn", "id": 121626389, "node_id": "U_kgDOBz_fFQ", "avatar_url": "https://avatars.githubusercontent.com/u/121626389?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Pop-korn", "html_url": "https://github.com/Pop-korn", "followers_url": "https://api.github.com/users/Pop-korn/followers", "following_url": "https://api.github.com/users/Pop-korn/following{/other_user}", "gists_url": "https://api.github.com/users/Pop-korn/gists{/gist_id}", "starred_url": "https://api.github.com/users/Pop-korn/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Pop-korn/subscriptions", "organizations_url": "https://api.github.com/users/Pop-korn/orgs", "repos_url": "https://api.github.com/users/Pop-korn/repos", "events_url": "https://api.github.com/users/Pop-korn/events{/privacy}", "received_events_url": "https://api.github.com/users/Pop-korn/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 1661751498, "node_id": "MDU6TGFiZWwxNjYxNzUxNDk4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TFLiteConverter", "name": "TFLiteConverter", "color": "bfdadc", "default": false, "description": "For issues related to TFLite converter" }, { "id": 5206407904, "node_id": "LA_kwDOArmXAs8AAAABNlN64A", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.12", "name": "TF 2.12", "color": "c5def5", "default": false, "description": "For issues related to Tensorflow 2.12" } ]
closed
false
{ "login": "LakshmiKalaKadali", "id": 149650845, "node_id": "U_kgDOCOt9nQ", "avatar_url": "https://avatars.githubusercontent.com/u/149650845?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LakshmiKalaKadali", "html_url": "https://github.com/LakshmiKalaKadali", "followers_url": "https://api.github.com/users/LakshmiKalaKadali/followers", "following_url": "https://api.github.com/users/LakshmiKalaKadali/following{/other_user}", "gists_url": "https://api.github.com/users/LakshmiKalaKadali/gists{/gist_id}", "starred_url": "https://api.github.com/users/LakshmiKalaKadali/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LakshmiKalaKadali/subscriptions", "organizations_url": "https://api.github.com/users/LakshmiKalaKadali/orgs", "repos_url": "https://api.github.com/users/LakshmiKalaKadali/repos", "events_url": "https://api.github.com/users/LakshmiKalaKadali/events{/privacy}", "received_events_url": "https://api.github.com/users/LakshmiKalaKadali/received_events", "type": "User", "site_admin": false }
[ { "login": "LakshmiKalaKadali", "id": 149650845, "node_id": "U_kgDOCOt9nQ", "avatar_url": "https://avatars.githubusercontent.com/u/149650845?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LakshmiKalaKadali", "html_url": "https://github.com/LakshmiKalaKadali", "followers_url": "https://api.github.com/users/LakshmiKalaKadali/followers", "following_url": "https://api.github.com/users/LakshmiKalaKadali/following{/other_user}", "gists_url": "https://api.github.com/users/LakshmiKalaKadali/gists{/gist_id}", "starred_url": "https://api.github.com/users/LakshmiKalaKadali/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LakshmiKalaKadali/subscriptions", "organizations_url": "https://api.github.com/users/LakshmiKalaKadali/orgs", "repos_url": "https://api.github.com/users/LakshmiKalaKadali/repos", "events_url": "https://api.github.com/users/LakshmiKalaKadali/events{/privacy}", "received_events_url": "https://api.github.com/users/LakshmiKalaKadali/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @Pop-korn,\r\n\r\n Both paddings[D, 0] and [D, 1] must be no greater than input.dim_size(D) as per the [documentation](https://www.tensorflow.org/api_docs/cc/class/tensorflow/ops/mirror-pad). pad: [[2 2],[2 2]] gives invalid argument error. As well I tried the MirrorPad operation with Reflect mode for a few more examples. It is working as expected.Please find the [gist](https://colab.research.google.com/gist/LakshmiKalaKadali/926c177d041d9a7906d55320de778ce4/-62585.ipynb)\r\n\r\nThank You", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further." ]
2023-12-07T08:49:06
2024-01-02T01:48:43
2024-01-02T01:48:43
NONE
null
null
null
### System information

- Windows 11 Enterprise
- TensorFlow installed from source
- TensorFlow version 2.12.1
- Python version 3.10.11

### Describe the problem

The TFLite MirrorPad operator in reflect mode produces incorrect results in some cases. For example in this case:

input:
```
[[0. 1.]
 [2. 3.]]
```
pad: `[[2 2] [2 2]]`

mode: `'REFLECT'`

output:
```
[[0. 0. 0. 0. 0. 0.]
 [0. 3. 2. 3. 2. 2.]
 [2. 1. 0. 1. 0. 0.]
 [0. 3. 2. 3. 2. 2.]
 [2. 1. 0. 1. 0. 0.]
 [2. 1. 0. 1. 0. 0.]]
```
(The output value at [0][0] is also sometimes randomly -1.3739614e-30 instead of 0.0)

I believe the correct output should be:
```
[[0. 1. 0. 1. 0. 1.]
 [2. 3. 2. 3. 2. 3.]
 [0. 1. 0. 1. 0. 1.]
 [2. 3. 2. 3. 2. 3.]
 [0. 1. 0. 1. 0. 1.]
 [2. 3. 2. 3. 2. 3.]]
```
Or something similar.

### Source code / logs

A simple model which demonstrates this issue can be found at https://github.com/Pop-korn/Tmp/blob/main/mirror_pad_bug.tflite
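For context, a minimal sketch (not part of the original report) that runs the same 2x2 input through the public `tf.pad` API, assuming an eager TF 2.x install. A reflect pad of 1 per side stays within the documented limit for REFLECT mode, while the `[[2 2], [2 2]]` request from the report exceeds it and, per the maintainer comment earlier in this record, is expected to be rejected with an invalid-argument error rather than produce data.

```python
import tensorflow as tf

x = tf.constant([[0., 1.],
                 [2., 3.]])

# In-range reflect padding: each pad amount is smaller than the padded
# dimension, so the mirrored borders can be inspected directly.
print(tf.pad(x, paddings=[[1, 1], [1, 1]], mode="REFLECT"))

# The report's padding of 2 on a size-2 axis exceeds the documented limit
# for REFLECT mode and is expected to be rejected in eager mode.
try:
    print(tf.pad(x, paddings=[[2, 2], [2, 2]], mode="REFLECT"))
except tf.errors.InvalidArgumentError as err:
    print("rejected:", err)
```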
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62585/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62585/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62584
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62584/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62584/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62584/events
https://github.com/tensorflow/tensorflow/issues/62584
2,030,129,111
I_kwDOArmXAs55AU_X
62,584
A bug in tf.maximum
{ "login": "cheyennee", "id": 45327670, "node_id": "MDQ6VXNlcjQ1MzI3Njcw", "avatar_url": "https://avatars.githubusercontent.com/u/45327670?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cheyennee", "html_url": "https://github.com/cheyennee", "followers_url": "https://api.github.com/users/cheyennee/followers", "following_url": "https://api.github.com/users/cheyennee/following{/other_user}", "gists_url": "https://api.github.com/users/cheyennee/gists{/gist_id}", "starred_url": "https://api.github.com/users/cheyennee/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cheyennee/subscriptions", "organizations_url": "https://api.github.com/users/cheyennee/orgs", "repos_url": "https://api.github.com/users/cheyennee/repos", "events_url": "https://api.github.com/users/cheyennee/events{/privacy}", "received_events_url": "https://api.github.com/users/cheyennee/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs,\r\nI was able to reproduce the issue on tensorflow v2.14, v2.15 and [tf-nightly](https://colab.research.google.com/gist/tilakrayal/710cc9dd4fe87370b15de2bc7c038996/untitled1588.ipynb). Kindly find the gist of it [here](https://colab.research.google.com/gist/tilakrayal/8a70b90740d8d2382c90ac81f3b0f9dc/untitled1587.ipynb).", "Hi, \r\n\r\nFor the first instance, the casting is performed by default internally which casts both the inputs based on the first input `dtype`, which is `float`.\r\nIn second case, this can nlt be performed since `epsilon` value can not be converted to `int`. \r\nYou need to perform the explicit `tf.cast` to a desired `dtype` to continue with the operation. ", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62584\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62584\">No</a>\n" ]
2023-12-07T08:05:05
2024-01-11T01:49:35
2024-01-11T01:49:31
NONE
null
null
null
### Issue type
Bug

### Have you reproduced the bug with TensorFlow Nightly?
Yes

### Source
source

### TensorFlow version
tf 2.14.0

### Custom code
Yes

### OS platform and distribution
windows colab

### Mobile device
_No response_

### Python version
3.10

### Bazel version
_No response_

### GCC/compiler version
_No response_

### CUDA/cuDNN version
_No response_

### GPU model and memory
_No response_

### Current behavior?
According to the [tf.maximum doc](https://www.tensorflow.org/api_docs/python/tf/math/maximum), the two inputs of `tf.maximum` must have the same type. In the following code, when the first input is a float and the second input is an int, `tf.maximum` returns a tensor, but when the first input is an int and the second input is a float, `tf.maximum` throws an error. IMO, if `tf.maximum(epsilon, x)` works, then `tf.maximum(x, epsilon)` should also work and produce the same output. In fact, a model of mine crashes during training because of this small difference.

### Standalone code to reproduce the issue

```python
import tensorflow as tf

x = 5
epsilon = 1e-12
print(tf.maximum(epsilon, x))
print(tf.maximum(x, epsilon))
```

### Relevant log output

```
tf.Tensor(5.0, shape=(), dtype=float32)
---------------------------------------------------------------------------
TypeError                                 Traceback (most recent call last)
<ipython-input-4-095832aa68e6> in <cell line: 5>()
      3 epsilon = 1e-12
      4 print(tf.maximum(epsilon, x))
----> 5 print(tf.maximum(x, epsilon))
      6
      7 #tf.Tensor(5.0, shape=(), dtype=float32)

/usr/local/lib/python3.10/dist-packages/tensorflow/python/ops/gen_math_ops.py in maximum(x, y, name)
   6339   if tld.is_eager:
   6340     try:
-> 6341       _result = pywrap_tfe.TFE_Py_FastPathExecute(
   6342           _ctx, "Maximum", name, x, y)
   6343       return _result

TypeError: Cannot convert 1e-12 to EagerTensor of dtype int32
```
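As a follow-up to the triager's suggestion in this record's comments, here is a minimal sketch (not part of the original report) of the explicit-cast workaround; it reuses the same `x` and `epsilon` values as the reproduction above.

```python
import tensorflow as tf

x = 5            # Python int: becomes an int32 tensor if converted first
epsilon = 1e-12  # Python float: becomes a float32 tensor if converted first

# Conversion follows the first argument's dtype, so casting the integer
# operand up front makes both argument orders behave identically.
x_f = tf.cast(x, tf.float32)
print(tf.maximum(epsilon, x_f))  # tf.Tensor(5.0, shape=(), dtype=float32)
print(tf.maximum(x_f, epsilon))  # tf.Tensor(5.0, shape=(), dtype=float32)
```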
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62584/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62584/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62581
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62581/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62581/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62581/events
https://github.com/tensorflow/tensorflow/issues/62581
2,029,625,248
I_kwDOArmXAs54-Z-g
62,581
tflite_runtime 2.14 x86_64 wheels distributed on pypi are not pep600 compliant
{ "login": "mshawcroft", "id": 6357566, "node_id": "MDQ6VXNlcjYzNTc1NjY=", "avatar_url": "https://avatars.githubusercontent.com/u/6357566?v=4", "gravatar_id": "", "url": "https://api.github.com/users/mshawcroft", "html_url": "https://github.com/mshawcroft", "followers_url": "https://api.github.com/users/mshawcroft/followers", "following_url": "https://api.github.com/users/mshawcroft/following{/other_user}", "gists_url": "https://api.github.com/users/mshawcroft/gists{/gist_id}", "starred_url": "https://api.github.com/users/mshawcroft/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/mshawcroft/subscriptions", "organizations_url": "https://api.github.com/users/mshawcroft/orgs", "repos_url": "https://api.github.com/users/mshawcroft/repos", "events_url": "https://api.github.com/users/mshawcroft/events{/privacy}", "received_events_url": "https://api.github.com/users/mshawcroft/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
open
false
{ "login": "terryheo", "id": 2908505, "node_id": "MDQ6VXNlcjI5MDg1MDU=", "avatar_url": "https://avatars.githubusercontent.com/u/2908505?v=4", "gravatar_id": "", "url": "https://api.github.com/users/terryheo", "html_url": "https://github.com/terryheo", "followers_url": "https://api.github.com/users/terryheo/followers", "following_url": "https://api.github.com/users/terryheo/following{/other_user}", "gists_url": "https://api.github.com/users/terryheo/gists{/gist_id}", "starred_url": "https://api.github.com/users/terryheo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/terryheo/subscriptions", "organizations_url": "https://api.github.com/users/terryheo/orgs", "repos_url": "https://api.github.com/users/terryheo/repos", "events_url": "https://api.github.com/users/terryheo/events{/privacy}", "received_events_url": "https://api.github.com/users/terryheo/received_events", "type": "User", "site_admin": false }
[ { "login": "terryheo", "id": 2908505, "node_id": "MDQ6VXNlcjI5MDg1MDU=", "avatar_url": "https://avatars.githubusercontent.com/u/2908505?v=4", "gravatar_id": "", "url": "https://api.github.com/users/terryheo", "html_url": "https://github.com/terryheo", "followers_url": "https://api.github.com/users/terryheo/followers", "following_url": "https://api.github.com/users/terryheo/following{/other_user}", "gists_url": "https://api.github.com/users/terryheo/gists{/gist_id}", "starred_url": "https://api.github.com/users/terryheo/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/terryheo/subscriptions", "organizations_url": "https://api.github.com/users/terryheo/orgs", "repos_url": "https://api.github.com/users/terryheo/repos", "events_url": "https://api.github.com/users/terryheo/events{/privacy}", "received_events_url": "https://api.github.com/users/terryheo/received_events", "type": "User", "site_admin": false }, { "login": "pkgoogle", "id": 132095473, "node_id": "U_kgDOB9-d8Q", "avatar_url": "https://avatars.githubusercontent.com/u/132095473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pkgoogle", "html_url": "https://github.com/pkgoogle", "followers_url": "https://api.github.com/users/pkgoogle/followers", "following_url": "https://api.github.com/users/pkgoogle/following{/other_user}", "gists_url": "https://api.github.com/users/pkgoogle/gists{/gist_id}", "starred_url": "https://api.github.com/users/pkgoogle/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pkgoogle/subscriptions", "organizations_url": "https://api.github.com/users/pkgoogle/orgs", "repos_url": "https://api.github.com/users/pkgoogle/repos", "events_url": "https://api.github.com/users/pkgoogle/events{/privacy}", "received_events_url": "https://api.github.com/users/pkgoogle/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @terryheo, can you do the same as what you did with https://github.com/tensorflow/tensorflow/issues/60738 effectively? Thanks." ]
2023-12-07T00:27:34
2023-12-14T22:55:46
null
NONE
null
null
null
### Issue type
Bug

### Have you reproduced the bug with TensorFlow Nightly?
No

### Source
binary

### TensorFlow version
2.14

### Custom code
No

### OS platform and distribution
linux

### Mobile device
_No response_

### Python version
3.8, 3.9, 3.10, 3.11

### Bazel version
_No response_

### GCC/compiler version
_No response_

### CUDA/cuDNN version
_No response_

### GPU model and memory
_No response_

### Current behavior?
The pre-built x86_64 wheels for tflite_runtime 2.14 distributed on PyPI are not PEP 600 compliant. Here is the prefix of the output from auditwheel on one of the 2.14 x86_64 wheels:

```
$ auditwheel show tflite_runtime-2.14.0-cp311-cp311-manylinux2014_x86_64.whl

tflite_runtime-2.14.0-cp311-cp311-manylinux2014_x86_64.whl is consistent
with the following platform tag: "manylinux_2_31_x86_64".
```

In this case the filename uses the platform tag "manylinux2014_x86_64", and [PEP 600](https://peps.python.org/pep-0600) defines manylinux2014_x86_64 as an alias for manylinux_2_17_x86_64. The wheel being distributed depends on glibc 2.31 and hence is not compatible with the tag manylinux2014_x86_64.

The solution is either to rename the offending wheels with the correct platform tag as indicated by auditwheel, or to rebuild them in an environment compatible with manylinux2014_x86_64 (for example the docker environment provided by the pypa manylinux project).

This issue is related to the issue previously reported for the aarch64 prebuilt wheels: https://github.com/tensorflow/tensorflow/issues/60738

### Standalone code to reproduce the issue

```shell
$ auditwheel show tflite_runtime-2.14.0-cp311-cp311-manylinux2014_x86_64.whl

tflite_runtime-2.14.0-cp311-cp311-manylinux2014_x86_64.whl is consistent
with the following platform tag: "manylinux_2_31_x86_64".
```

### Relevant log output
_No response_
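To make the PEP 600 aliasing argument concrete, here is a small illustrative sketch (not from the report); the two glibc versions are taken from the report, and the helper name is made up for illustration.

```python
# manylinux_X_Y promises the wheel runs on any system with glibc >= X.Y.
# PEP 600 defines manylinux2014_x86_64 as an alias for manylinux_2_17_x86_64.
WHEEL_TAG_GLIBC = (2, 17)   # what the filename's tag promises
REQUIRED_GLIBC = (2, 31)    # what auditwheel says the binaries actually need

def tag_is_consistent(tag_glibc, required_glibc):
    """Hypothetical check: the tag is honest only if the binary's glibc
    requirement does not exceed the version the tag promises."""
    return required_glibc <= tag_glibc

print(tag_is_consistent(WHEEL_TAG_GLIBC, REQUIRED_GLIBC))  # False: mis-tagged wheel
```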
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62581/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62581/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62580
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62580/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62580/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62580/events
https://github.com/tensorflow/tensorflow/issues/62580
2,028,931,535
I_kwDOArmXAs547wnP
62,580
No module named 'tensorflow.compat' in TensorFlow 2.10
{ "login": "ziyuuuuuu", "id": 124778258, "node_id": "U_kgDOB2_3Eg", "avatar_url": "https://avatars.githubusercontent.com/u/124778258?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ziyuuuuuu", "html_url": "https://github.com/ziyuuuuuu", "followers_url": "https://api.github.com/users/ziyuuuuuu/followers", "following_url": "https://api.github.com/users/ziyuuuuuu/following{/other_user}", "gists_url": "https://api.github.com/users/ziyuuuuuu/gists{/gist_id}", "starred_url": "https://api.github.com/users/ziyuuuuuu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ziyuuuuuu/subscriptions", "organizations_url": "https://api.github.com/users/ziyuuuuuu/orgs", "repos_url": "https://api.github.com/users/ziyuuuuuu/repos", "events_url": "https://api.github.com/users/ziyuuuuuu/events{/privacy}", "received_events_url": "https://api.github.com/users/ziyuuuuuu/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097545817, "node_id": "MDU6TGFiZWwxMDk3NTQ1ODE3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:apis", "name": "comp:apis", "color": "0052cc", "default": false, "description": "Highlevel API related issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "@ziyuuuuuu I was able to run the code provided successfully using TF version 2.15 on colab, could you please have a look at this [gist](https://colab.research.google.com/gist/sushreebarsa/6d84a0c3e9fbce0f04266ff99bacfc38/62580.ipynb)?\r\nThank you!", "> @ziyuuuuuu I was able to run the code provided successfully using TF version 2.15 on colab, could you please have a look at this [gist](https://colab.research.google.com/gist/sushreebarsa/6d84a0c3e9fbce0f04266ff99bacfc38/62580.ipynb)? Thank you!\r\n\r\nHi, i have checked it and rerun the code at anaconda visual studio. The porblem is still there, it might be an enviroment issue, do you know how should i do here, have you tried with Anaconda?\r\n![image](https://github.com/tensorflow/tensorflow/assets/124778258/531d2f0e-5f55-4640-8ea3-8cb95ac640e6)\r\n", "@ziyuuuuuu Thank you for your reply here.\r\nCould you try to create a dedicated virtual environment for your project. This isolates the specific versions of libraries used and avoids conflicts with other installed libraries.\r\nPlease upgrade to the latest TF version and let us know the outcome?\r\n\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62580\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62580\">No</a>\n" ]
2023-12-06T16:23:50
2023-12-24T01:49:10
2023-12-24T01:49:07
NONE
null
null
null
### Issue type
Bug

### Have you reproduced the bug with TensorFlow Nightly?
No

### Source
source

### TensorFlow version
2.10

### Custom code
Yes

### OS platform and distribution
anaconda

### Mobile device
_No response_

### Python version
3.7

### Bazel version
_No response_

### GCC/compiler version
_No response_

### CUDA/cuDNN version
_No response_

### GPU model and memory
_No response_

### Current behavior?
Hi everyone, I planned to train an ANN model with the help of TensorFlow and scikit-learn. Since I had already done this before, everything went well at first, but because SciKeras is required I installed that library, and then TensorFlow started to raise this error:

```
---------------------------------------------------------------------------
ModuleNotFoundError                       Traceback (most recent call last)
~\AppData\Local\Temp\ipykernel_24108\3624083267.py in <module>
      1 np.random.seed(42)
----> 2 from keras.models import Sequential
      3 from keras.layers import Dense
      4 from tensorflow.keras.optimizers import Adam, RMSprop
      5 from scikeras.wrappers import KerasRegressor

c:\Users\liziyu\Anaconda3\envs\tf\lib\site-packages\keras\__init__.py in <module>
     19 """
     20 from keras import distribute
---> 21 from keras import models
     22 from keras.engine.input_layer import Input
     23 from keras.engine.sequential import Sequential

c:\Users\liziyu\Anaconda3\envs\tf\lib\site-packages\keras\models\__init__.py in <module>
     16
     17
---> 18 from keras.engine.functional import Functional
     19 from keras.engine.sequential import Sequential
     20 from keras.engine.training import Model

c:\Users\liziyu\Anaconda3\envs\tf\lib\site-packages\keras\engine\functional.py in <module>
     22 import warnings
...
---> 24 import tensorflow.compat.v2 as tf
     25
     26 from keras import backend

ModuleNotFoundError: No module named 'tensorflow.compat'
```

I have already tried to uninstall and reinstall TensorFlow several times, but the error is still there. Here is a screenshot from the conda prompt; I also tried the approach mentioned in another report, but it did not work for me. Does anyone have an idea what happened here and what I should do?

![image](https://github.com/tensorflow/tensorflow/assets/124778258/4776b11d-50d9-4153-b602-7897ca44ee8c)

### Standalone code to reproduce the issue

```python
np.random.seed(42)
from keras.models import Sequential
from keras.layers import Dense
from tensorflow.keras.optimizers import Adam, RMSprop
from scikeras.wrappers import KerasRegressor
from sklearn.model_selection import GridSearchCV
from sklearn.model_selection import KFold
```

### Relevant log output
_No response_
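Following the triager's advice in this record about using an isolated environment, here is an illustrative diagnostic sketch (not from the report) that prints which `tensorflow` and `keras` packages the environment actually resolves; the idea that a mismatched standalone `keras` or a partially removed TensorFlow may be involved is an assumption to check, not a confirmed cause.

```python
import importlib.util
import tensorflow as tf

# Which TensorFlow is actually being imported, and does it ship compat?
print("tensorflow:", tf.__version__, "->", tf.__file__)
print("tensorflow.compat found:",
      importlib.util.find_spec("tensorflow.compat") is not None)

# Is a standalone keras installed alongside it, and from where?
try:
    import keras
    print("keras:", keras.__version__, "->", keras.__file__)
except ImportError:
    print("standalone keras not installed")
```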
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62580/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62580/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62579
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62579/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62579/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62579/events
https://github.com/tensorflow/tensorflow/issues/62579
2,028,923,710
I_kwDOArmXAs547us-
62,579
Missing symbols in official Windows TensorFlow native builds
{ "login": "karllessard", "id": 10109534, "node_id": "MDQ6VXNlcjEwMTA5NTM0", "avatar_url": "https://avatars.githubusercontent.com/u/10109534?v=4", "gravatar_id": "", "url": "https://api.github.com/users/karllessard", "html_url": "https://github.com/karllessard", "followers_url": "https://api.github.com/users/karllessard/followers", "following_url": "https://api.github.com/users/karllessard/following{/other_user}", "gists_url": "https://api.github.com/users/karllessard/gists{/gist_id}", "starred_url": "https://api.github.com/users/karllessard/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/karllessard/subscriptions", "organizations_url": "https://api.github.com/users/karllessard/orgs", "repos_url": "https://api.github.com/users/karllessard/repos", "events_url": "https://api.github.com/users/karllessard/events{/privacy}", "received_events_url": "https://api.github.com/users/karllessard/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 1188421838, "node_id": "MDU6TGFiZWwxMTg4NDIxODM4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:windows", "name": "subtype:windows", "color": "b619ea", "default": false, "description": "Windows Build/Installation Issues" }, { "id": 1478826728, "node_id": "MDU6TGFiZWwxNDc4ODI2NzI4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:core", "name": "comp:core", "color": "024391", "default": false, "description": "issues related to core part of tensorflow" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "It seems like Intel has fixed something related that will land in 2.16: https://github.com/tensorflow/tensorflow/issues/61830\r\n\r\nDoes that help?", "Thanks @angerson , the ticket seems to talk about the missing include folders (which we've also noticed but found a workaround for), though it does not mention anything about the non-exported symbols in the binaries. Is there a nightly build of 2.16 of the C API for Windows I can quickly test on?", "Is there any Windows build available in the [libtensorflow-nightly](https://storage.googleapis.com/libtensorflow-nightly) bucket?", "> Is there any Windows build available in the [libtensorflow-nightly](https://storage.googleapis.com/libtensorflow-nightly) bucket?\r\n\r\nThe latest nightly libtensorflow archive for Windows can be found [here](https://storage.googleapis.com/libtensorflow-nightly/prod/tensorflow/release/windows/latest/cpu/windows_cpu_libtensorflow_binaries.tar.gz). ", "Thanks @nitins17 , we just checked and while the nightly builds do have some of the TSL headers, the `TSL_` symbols are still missing from the binaries. cc\\ @angerson ", "Any updates about this/any PR coming soon? Thanks!", "Hi @karllessard, PR is in progress, will have it merged to master within a couple of days.", "Adding a reference to this comment here as well, problem is still not fixed after merging #62874: https://github.com/tensorflow/tensorflow/pull/62874#issuecomment-1926007848", "Can you provide a short reproducible code snippet for this?", "I can work on a something yes, though I don't have myself a Windows machine and it may take me some time. Meanwhile, you can look at our CI/CD where our own build (TF Java) is failing: https://github.com/tensorflow/java/actions/runs/7777626622/job/21206146416#step:6:1655 (compiler command is [here](https://github.com/tensorflow/java/actions/runs/7777626622/job/21206146416#step:6:1593)).\r\n\r\nAll other platforms (linux, macos) succeed to link successfully to the C lib, including to TSL symbols.", "Just to highlight here that it would be great to have this fix ready for the 2.16 cut, so that TF Java can continue to support Windows properly in its forthcoming next release." ]
2023-12-06T16:19:21
2024-02-12T14:21:13
null
CONTRIBUTOR
null
null
null
Hi, we (SIG-JVM) are trying to leverage the Windows C library builds available under https://storage.googleapis.com/tensorflow/libtensorflow/ which, I think, are built by Google. Starting from TF 2.14.x, these builds are broken because they do not export the TSL symbols required by the TensorFlow C API. Here is the list of missing symbols that we (TF Java) depend on:

```
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_DeleteStatus
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_SetStatus
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_SetStatusFromIOError
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_Status
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_Message
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_ForEachPayload
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_NewStatus
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_GetCode
jnitensorflow.obj : error LNK2001: unresolved external symbol TSL_SetPayload
```

Can someone look at this please? (edited)
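For anyone triaging this, a small illustrative sketch (not from the report) that probes a Windows `tensorflow.dll` for the TSL status symbols via `ctypes`; the DLL path is an assumption and depends on where the libtensorflow archive was unpacked.

```python
import ctypes

# Hypothetical path: adjust to wherever the libtensorflow archive was extracted.
lib = ctypes.CDLL(r"C:\libtensorflow\lib\tensorflow.dll")

for name in ("TSL_NewStatus", "TSL_DeleteStatus", "TSL_GetCode",
             "TSL_Message", "TSL_SetStatus", "TSL_SetPayload"):
    try:
        getattr(lib, name)          # resolved via GetProcAddress on Windows
        print(name, "exported")
    except AttributeError:
        print(name, "MISSING")
```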
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62579/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62579/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62578
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62578/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62578/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62578/events
https://github.com/tensorflow/tensorflow/pull/62578
2,028,793,270
PR_kwDOArmXAs5hUU2b
62,578
A workaround for a C++11 string ABI issue on CentOS 7
{ "login": "ekuznetsov139", "id": 12205429, "node_id": "MDQ6VXNlcjEyMjA1NDI5", "avatar_url": "https://avatars.githubusercontent.com/u/12205429?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ekuznetsov139", "html_url": "https://github.com/ekuznetsov139", "followers_url": "https://api.github.com/users/ekuznetsov139/followers", "following_url": "https://api.github.com/users/ekuznetsov139/following{/other_user}", "gists_url": "https://api.github.com/users/ekuznetsov139/gists{/gist_id}", "starred_url": "https://api.github.com/users/ekuznetsov139/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ekuznetsov139/subscriptions", "organizations_url": "https://api.github.com/users/ekuznetsov139/orgs", "repos_url": "https://api.github.com/users/ekuznetsov139/repos", "events_url": "https://api.github.com/users/ekuznetsov139/events{/privacy}", "received_events_url": "https://api.github.com/users/ekuznetsov139/received_events", "type": "User", "site_admin": false }
[ { "id": 1169365494, "node_id": "MDU6TGFiZWwxMTY5MzY1NDk0", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:M", "name": "size:M", "color": "adafea", "default": false, "description": "CL Change Size: Medium" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Thank you for the explanation and for sending in this PR. Unfortunately, we cannot accept it because TensorFlow has official support only for Ubuntu and Clang. In addition, we are in process of updating our Protobuf dependency so that patch file might go away in the near future. We recommend including this fix in your copy of TensorFlow repo. " ]
2023-12-06T15:20:18
2023-12-09T01:18:52
2023-12-09T01:16:24
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62578", "html_url": "https://github.com/tensorflow/tensorflow/pull/62578", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62578.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62578.patch", "merged_at": null }
I've discovered that a large number of TF unit tests fail when built on CentOS 7. Investigation revealed the following.

* CentOS 7 (or rather the gcc it comes with) uses the "old" C++11 string ABI.
* The significant difference between ABIs is that, in the "old" ABI, a std::string with internal representation containing all zeros is illegal, but, in the "new" ABI, it is just an empty string: https://godbolt.org/z/dYWK1Mh8P
* Many TF actions involving protobuf use an internal protobuf object ("fixed_address_empty_string") which is supposed to represent a global empty string. This object (or, more precisely, the std::string it encapsulates) is never constructed and contains all zeros. (Protobuf makes an effort to prevent it from being automatically constructed at initialization time.) Protobuf's authors may have expected that an explicit constructor method would be executed by the client (in our case, TF, some time early during initialization - I'm not sure where that would go in TF source), but, since, with the new ABI, everything works fine as-is, this omission was never noticed.
* As soon as someone tries to access that object, there's a segfault.

This patch adds explicit construction of that object on first access, as a workaround.
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62578/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62578/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62577
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62577/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62577/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62577/events
https://github.com/tensorflow/tensorflow/issues/62577
2,028,747,246
I_kwDOArmXAs547Dnu
62,577
Seeded LecunNormal initializer produces the same random values across multiple calls
{ "login": "ItayXD", "id": 1885454, "node_id": "MDQ6VXNlcjE4ODU0NTQ=", "avatar_url": "https://avatars.githubusercontent.com/u/1885454?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ItayXD", "html_url": "https://github.com/ItayXD", "followers_url": "https://api.github.com/users/ItayXD/followers", "following_url": "https://api.github.com/users/ItayXD/following{/other_user}", "gists_url": "https://api.github.com/users/ItayXD/gists{/gist_id}", "starred_url": "https://api.github.com/users/ItayXD/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ItayXD/subscriptions", "organizations_url": "https://api.github.com/users/ItayXD/orgs", "repos_url": "https://api.github.com/users/ItayXD/repos", "events_url": "https://api.github.com/users/ItayXD/events{/privacy}", "received_events_url": "https://api.github.com/users/ItayXD/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097546578, "node_id": "MDU6TGFiZWwxMDk3NTQ2NTc4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:keras", "name": "comp:keras", "color": "0052cc", "default": false, "description": "Keras related issues" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
open
false
{ "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false }
[ { "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false } ]
null
[ "@ItayXD,\r\nI tried to execute the mentioned code with the alternative numbers on tensorflow v2.15 and the LecunNormal api providing the results as mentioned in the document. Kindly find the gist of it [here](https://colab.research.google.com/gist/tilakrayal/507b3e3e4a3f97bfcdb983315bf7d68a/untitled1581.ipynb). Thank you!", "Hi @tilakrayal,\r\nThanks for getting back to me!\r\nIt is my understanding that in the first case\r\n```\r\ninit0 = keras.initializers.LecunNormal(seed=0)\r\nw0 = init0((2, 2))\r\nw1 = init0((2, 2))\r\nw2 = init0((2, 2))\r\nprint(w0)\r\nprint(w1)\r\nprint(w2)\r\n```\r\nit should hold that `w0 != w1`, were in you example `w0 == w1`.\r\n\r\nNote that the described behavior for `VarianceScaling` for example is different: \"Note that a seeded initializer will produce the same random values across multiple calls.\" yet they give similar results, see here:\r\nhttps://colab.research.google.com/drive/1iyLVXC-auC3d-DceADzf-DCxS6W68Pee?usp=sharing" ]
2023-12-06T14:57:47
2024-01-09T12:27:05
null
NONE
null
null
null
### Issue type
Bug

### Have you reproduced the bug with TensorFlow Nightly?
Yes

### Source
source

### TensorFlow version
2.14

### Custom code
No

### OS platform and distribution
Ubuntu 22.04

### Mobile device
_No response_

### Python version
3.10

### Bazel version
_No response_

### GCC/compiler version
_No response_

### CUDA/cuDNN version
_No response_

### GPU model and memory
_No response_

### Current behavior?
Contrary to the description in the documentation of LecunNormal, seeded initializers produce the same random values across multiple calls. Quoting from the documentation: "Note that a seeded initializer will not produce the same random values across multiple calls."

EDIT: After further investigation, it seems that the standard across initializers is to produce the same values across multiple calls; some of them state this correctly in the documentation, but some say they will give different values. Note that one also receives warnings suggesting to add a seed for multiple uses (i.e. to get different values across multiple calls). I see the standard now is stateless random generators. How can one get different values across multiple calls?

### Standalone code to reproduce the issue

```python
init0 = tf.keras.initializers.LecunNormal(seed=0)
w0 = init0((2, 2))
w1 = init0((2, 2))
w2 = init0((2, 2))
print(w0)
print(w1)
print(w2)
```

### Relevant log output

```
tf.Tensor(
[[0.43390435 0.8674912 ]
 [0.4000091  1.1794121 ]], shape=(2, 2), dtype=float32)
tf.Tensor(
[[0.43390435 0.8674912 ]
 [0.4000091  1.1794121 ]], shape=(2, 2), dtype=float32)
tf.Tensor(
[[0.43390435 0.8674912 ]
 [0.4000091  1.1794121 ]], shape=(2, 2), dtype=float32)
```
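As a complement to the question at the end of the report, here is a minimal sketch (not from the report) of one way to get different values on each call while keeping reproducibility: since initializers built with a fixed seed are stateless, derive a distinct seed per call (passing `seed=None` also varies per call, but is not reproducible).

```python
import tensorflow as tf

base_seed = 0
for i in range(3):
    # A fresh initializer per call, each with its own derived seed,
    # yields different (but reproducible) tensors.
    init = tf.keras.initializers.LecunNormal(seed=base_seed + i)
    print(init((2, 2)))
```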
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62577/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62577/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62576
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62576/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62576/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62576/events
https://github.com/tensorflow/tensorflow/issues/62576
2,028,732,359
I_kwDOArmXAs546__H
62,576
Keras two-head controllable gradient flow
{ "login": "ymuv", "id": 1627149, "node_id": "MDQ6VXNlcjE2MjcxNDk=", "avatar_url": "https://avatars.githubusercontent.com/u/1627149?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ymuv", "html_url": "https://github.com/ymuv", "followers_url": "https://api.github.com/users/ymuv/followers", "following_url": "https://api.github.com/users/ymuv/following{/other_user}", "gists_url": "https://api.github.com/users/ymuv/gists{/gist_id}", "starred_url": "https://api.github.com/users/ymuv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ymuv/subscriptions", "organizations_url": "https://api.github.com/users/ymuv/orgs", "repos_url": "https://api.github.com/users/ymuv/repos", "events_url": "https://api.github.com/users/ymuv/events{/privacy}", "received_events_url": "https://api.github.com/users/ymuv/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473184161, "node_id": "MDU6TGFiZWw0NzMxODQxNjE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:support", "name": "type:support", "color": "159b2e", "default": false, "description": "Support issues" }, { "id": 1097546578, "node_id": "MDU6TGFiZWwxMDk3NTQ2NTc4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:keras", "name": "comp:keras", "color": "0052cc", "default": false, "description": "Keras related issues" }, { "id": 4829271983, "node_id": "LA_kwDOArmXAs8AAAABH9jXrw", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.11", "name": "TF 2.11", "color": "46B4D7", "default": false, "description": "Issues related to TF 2.11" } ]
open
false
{ "login": "sampathweb", "id": 1437573, "node_id": "MDQ6VXNlcjE0Mzc1NzM=", "avatar_url": "https://avatars.githubusercontent.com/u/1437573?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sampathweb", "html_url": "https://github.com/sampathweb", "followers_url": "https://api.github.com/users/sampathweb/followers", "following_url": "https://api.github.com/users/sampathweb/following{/other_user}", "gists_url": "https://api.github.com/users/sampathweb/gists{/gist_id}", "starred_url": "https://api.github.com/users/sampathweb/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sampathweb/subscriptions", "organizations_url": "https://api.github.com/users/sampathweb/orgs", "repos_url": "https://api.github.com/users/sampathweb/repos", "events_url": "https://api.github.com/users/sampathweb/events{/privacy}", "received_events_url": "https://api.github.com/users/sampathweb/received_events", "type": "User", "site_admin": false }
[ { "login": "sampathweb", "id": 1437573, "node_id": "MDQ6VXNlcjE0Mzc1NzM=", "avatar_url": "https://avatars.githubusercontent.com/u/1437573?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sampathweb", "html_url": "https://github.com/sampathweb", "followers_url": "https://api.github.com/users/sampathweb/followers", "following_url": "https://api.github.com/users/sampathweb/following{/other_user}", "gists_url": "https://api.github.com/users/sampathweb/gists{/gist_id}", "starred_url": "https://api.github.com/users/sampathweb/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sampathweb/subscriptions", "organizations_url": "https://api.github.com/users/sampathweb/orgs", "repos_url": "https://api.github.com/users/sampathweb/repos", "events_url": "https://api.github.com/users/sampathweb/events{/privacy}", "received_events_url": "https://api.github.com/users/sampathweb/received_events", "type": "User", "site_admin": false }, { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[]
2023-12-06T14:50:42
2023-12-27T21:43:05
null
NONE
null
null
null
### Issue type Support ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source binary ### TensorFlow version 2.11.0 ### Custom code No ### OS platform and distribution Windows 10 ### Mobile device _No response_ ### Python version 3.9.11 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? We use a two-head model. We want only one head to be involved in the training, the other - not. And after that - vice versa. So the gradient spreads to the body from one head, and not from the other, after which the heads change roles. The approach is similar to https://arxiv.org/abs/2210.05657 . We have tried some ways to achieve this: 1) Before every head we add dropout with rate = 1 or ~0 and in the generator class change drop rate ((for example, on method __getitem__() or on_epoch_end() get model layers and change rate: self.model.get_layer('dr1').rate=0 or 1). Please see example 1. 2) Use a custom layer before every head. This layer mul its input to some const. Also, we have tried to change this const var to 0/1 in data. Please see example 2. 3) We have tried to recompile the model after applying changes from paragraphs 1) and 2), but it cause error: tmp_logs = self.train_function(iterator) TypeError: 'NoneType' object is not callable Please see example 3. Unfortunately, all upper mentioned approaches didn't work in our case: we can see that the weights have changed in the model, but the output of the model doesn`t change. Prediction output for the model with const=0 or const=1 is the same I want to ask, is it a normal behavior or a bug? ### Standalone code to reproduce the issue ```shell #example.py import numpy as np import tensorflow.keras.backend as K import tensorflow as tf from tensorflow.keras import regularizers from tensorflow.keras.callbacks import TensorBoard from tensorflow.keras.layers import Dense, GRU, Input, Lambda, Dropout, Layer from tensorflow.keras.models import Model as KerasModel from tensorflow.keras.utils import Sequence maxValue = 1 - tf.keras.backend.epsilon() dataSize = 100 BS = 10 dataShape = (10, 2) outShape = 1 def getModel(): input_layer = Input(shape=dataShape, name="input") x = input_layer x = GRU(units=10, unroll=True, name="gru")(x) dropoutTraining = True x1 = Dropout(maxValue, name="head1")(x, training=dropoutTraining) x2 = Dropout(0, name="head2")(x, training=dropoutTraining) out1 = Dense(outShape, use_bias=False)(x1) out2 = Dense(outShape, use_bias=False)(x2) model = KerasModel(input_layer, [out1, out2]) model.summary() model.compile( loss="binary_crossentropy", optimizer='Adam', ) return model class DataGenerator(Sequence): def __init__(self, model): self.model = model self.id = 0 def __len__(self): return 10 def __getitem__(self, index): X = np.random.rand(BS, *dataShape) out_vec1 = np.random.rand(BS, outShape) out_vec2 = np.random.rand(BS, outShape) return X, [out_vec1, out_vec2] def on_epoch_end(self): self.id+=1 X = np.random.rand(1, *dataShape) y = self.model.predict(X) print("Before switch", y) head1 = self.model.get_layer('head1') head2 = self.model.get_layer('head2') if self.id % 2 == 0: head1.rate = maxValue head2.rate = 0 else: head1.rate = 0 head2.rate = maxValue y = self.model.predict(X) print("After switch", y) model = getModel() gen = DataGenerator(model) steps_per_epoch = dataSize // BS history = model.fit_generator( gen, steps_per_epoch = steps_per_epoch, epochs = 10, verbose = 1) #example2.py import numpy as np 
import tensorflow.keras.backend as K import tensorflow as tf from tensorflow.keras import regularizers from tensorflow.keras.callbacks import TensorBoard from tensorflow.keras.layers import Dense, GRU, Input, Lambda, Dropout, Layer from tensorflow.keras.models import Model as KerasModel from tensorflow.keras.utils import Sequence maxValue = 1 - tf.keras.backend.epsilon() dataSize = 100 BS = 10 dataShape = (10, 2) outShape = 1 class ConstMul(Layer): def __init__(self, const_val, *args, **kwargs): super().__init__(*args, **kwargs) self.const = const_val def call(self, inputs, **kwargs): return inputs * self.const def getModel(): input_layer = Input(shape=dataShape, name="input") x = input_layer x = GRU(units=10, unroll=True, name="gru")(x) dropoutTraining = True x1 = ConstMul(1, name="head1")(x, training=dropoutTraining) x2 = ConstMul(0, name="head2")(x, training=dropoutTraining) out1 = Dense(outShape, use_bias=False)(x1) out2 = Dense(outShape, use_bias=False)(x2) model = KerasModel(input_layer, [out1, out2]) model.summary() model.compile( loss="binary_crossentropy", optimizer='Adam') return model class DataGenerator(Sequence): def __init__(self, model): self.model = model self.id = 0 def __len__(self): return 10 def __getitem__(self, index): X = np.random.rand(BS, *dataShape) out_vec1 = np.random.rand(BS, outShape) out_vec2 = np.random.rand(BS, outShape) return X, [out_vec1, out_vec2] def on_epoch_end(self): self.id+=1 X = np.random.rand(1, *dataShape) y = self.model.predict(X) print("Before switch", y) head1 = self.model.get_layer('head1') head2 = self.model.get_layer('head2') if self.id % 2 == 0: head1.const = maxValue head2.const = 0 else: head1.const = 0 head2.const = maxValue y = self.model.predict(X) print("After switch", y) model = getModel() gen = DataGenerator(model) steps_per_epoch = dataSize // BS history = model.fit_generator( gen, steps_per_epoch = steps_per_epoch, epochs = 10, verbose = 1) #example3.py import numpy as np import tensorflow.keras.backend as K import tensorflow as tf from tensorflow.keras import regularizers from tensorflow.keras.callbacks import TensorBoard from tensorflow.keras.layers import Dense, GRU, Input, Lambda, Dropout, Layer from tensorflow.keras.models import Model as KerasModel from tensorflow.keras.utils import Sequence maxValue = 1 - tf.keras.backend.epsilon() dataSize = 100 BS = 10 dataShape = (10, 2) outShape = 1 class ConstMul(Layer): def __init__(self, const_val, *args, **kwargs): super().__init__(*args, **kwargs) self.const = const_val def call(self, inputs, **kwargs): return inputs * self.const def getModel(): input_layer = Input(shape=dataShape, name="input") x = input_layer x = GRU(units=10, unroll=True, name="gru")(x) dropoutTraining = True x1 = ConstMul(1, name="head1")(x, training=dropoutTraining) x2 = ConstMul(0, name="head2")(x, training=dropoutTraining) out1 = Dense(outShape, use_bias=False)(x1) out2 = Dense(outShape, use_bias=False)(x2) model = KerasModel(input_layer, [out1, out2]) model.summary() model.compile( loss="binary_crossentropy", optimizer='Adam') return model class DataGenerator(Sequence): def __init__(self, model): self.model = model self.id = 0 def __len__(self): return 10 def __getitem__(self, index): X = np.random.rand(BS, *dataShape) out_vec1 = np.random.rand(BS, outShape) out_vec2 = np.random.rand(BS, outShape) return X, [out_vec1, out_vec2] def on_epoch_end(self): self.id+=1 X = np.random.rand(1, *dataShape) y = self.model.predict(X) print("Before switch", y) head1 = self.model.get_layer('head1') head2 = 
self.model.get_layer('head2') if self.id % 2 == 0: head1.const = maxValue head2.const = 0 else: head1.const = 0 head2.const = maxValue self.model.compile( loss="binary_crossentropy", optimizer='Adam') y = self.model.predict(X) print("After switch", y) model = getModel() gen = DataGenerator(model) steps_per_epoch = dataSize // BS history = model.fit_generator( gen, steps_per_epoch = steps_per_epoch, epochs = 10, verbose = 1) ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62576/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62576/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62575
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62575/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62575/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62575/events
https://github.com/tensorflow/tensorflow/issues/62575
2,027,820,145
I_kwDOArmXAs543hRx
62,575
How to verify that the onednn optimizations are enabled in runtime?
{ "login": "lizhangzhan", "id": 2328803, "node_id": "MDQ6VXNlcjIzMjg4MDM=", "avatar_url": "https://avatars.githubusercontent.com/u/2328803?v=4", "gravatar_id": "", "url": "https://api.github.com/users/lizhangzhan", "html_url": "https://github.com/lizhangzhan", "followers_url": "https://api.github.com/users/lizhangzhan/followers", "following_url": "https://api.github.com/users/lizhangzhan/following{/other_user}", "gists_url": "https://api.github.com/users/lizhangzhan/gists{/gist_id}", "starred_url": "https://api.github.com/users/lizhangzhan/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/lizhangzhan/subscriptions", "organizations_url": "https://api.github.com/users/lizhangzhan/orgs", "repos_url": "https://api.github.com/users/lizhangzhan/repos", "events_url": "https://api.github.com/users/lizhangzhan/events{/privacy}", "received_events_url": "https://api.github.com/users/lizhangzhan/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1205615612, "node_id": "MDU6TGFiZWwxMjA1NjE1NjEy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:%20ubuntu/linux", "name": "subtype: ubuntu/linux", "color": "b619ea", "default": false, "description": "Ubuntu/Linux Build/Installation Issues" }, { "id": 4032183365, "node_id": "LA_kwDOArmXAs7wVjxF", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.9", "name": "TF 2.9", "color": "1CF842", "default": false, "description": "Issues found in the TF 2.9 release (or RCs)" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "https://github.com/tensorflow/tensorflow/blob/v2.9.0/tensorflow/core/util/util.cc#L135C8-L135C18\r\nhttps://github.com/tensorflow/tensorflow/blob/v2.9.0/tensorflow/core/util/util.cc#L169\r\nAccording to the implementation logics in tensorflow/core/util/util.cc, seems it enter the first branch after IsMKLEnabled() called. \r\n```python\r\nfrom tensorflow.python.util import _pywrap_util_port \r\nif (_pywrap_util_port.IsMklEnabled()) {\r\n print(\"Mkl is enabled\") # True\r\n}\r\n```", "@lizhangzhan Ensure you have installed the latest version of TensorFlow and OneDNN.\r\nMake sure you are using the correct backend when launching your TensorFlow program. For TPUs, this should be \"tpu\".\r\nPlease check this latest master branch https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/util/util.cc\r\nThank you!", "I have verified the log issue. In build phase, \"--config=mkl\" option influences the log printing. ", "@lizhangzhan Could you install the CUDA libraries If you haven't already, download and install the CUDA libraries for your specific operating system and GPU model. Please ensure the NVIDIA driver is installed and running. You can use the nvidia-smi command to check the status of the driver.\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62575\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62575\">No</a>\n" ]
2023-12-06T07:12:22
2023-12-23T01:47:30
2023-12-23T01:47:27
NONE
null
null
null
### Issue type Build/Install ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version tf2.9.0 ### Custom code No ### OS platform and distribution linux ubuntu20.04 ### Mobile device _No response_ ### Python version 3.8.10 ### Bazel version 5.0.0 ### GCC/compiler version 9.4.0 ### CUDA/cuDNN version 11.6.2-cudnn8 ### GPU model and memory _No response_ ### Current behavior? Expect to see a message beginning with "oneDNN custom operations are on" in my app log in startup phase ### Standalone code to reproduce the issue ```shell 1. Build tensorflow_cc library with a conanfile https://gist.github.com/lizhangzhan/8fdc974fc9f8958478196e607de57cb3 2. Set TF_ENABLE_ONEDNN_OPTS=1 to start my application to load a graph proto file and build a session to execute in a CPU machine. 3. Unable to see a message beginning with "oneDNN custom operations are on" in my app log in startup phase. ``` ### Relevant log output ```shell 2023-12-06 04:05:32.663256: W tensorflow/stream_executor/platform/default/dso_loader.cc:64] Could not load dynamic library 'libcuda.so.1'; dlerror: libcuda.so.1: cannot open shared object file: No such file or directory; LD_LIBRARY_PATH: /workspace/lib/tensorflow_2.9.0/:/usr/local/nvidia/lib:/usr/local/nvidia/lib64 2023-12-06 04:05:32.663273: W tensorflow/stream_executor/cuda/cuda_driver.cc:269] failed call to cuInit: UNKNOWN ERROR (303) 2023-12-06 04:05:32.663291: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:156] kernel driver does not appear to be running on this host (training-worker-0): /proc/driver/nvidia/version does not exist 2023-12-06 04:05:32.663387: I tensorflow/core/common_runtime/process_util.cc:146] Creating new thread pool with default inter op setting: 16. Tune using inter_op_parallelism_threads for best performance. 2023-12-06 04:05:32.704571: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:354] MLIR V1 optimization pass is not enabled ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62575/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62575/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62574
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62574/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62574/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62574/events
https://github.com/tensorflow/tensorflow/issues/62574
2,027,248,244
I_kwDOArmXAs541Vp0
62,574
data.ops.iterator_ops.OwnedIterator flattened in profile session
{ "login": "annaa-ka", "id": 83917358, "node_id": "MDQ6VXNlcjgzOTE3MzU4", "avatar_url": "https://avatars.githubusercontent.com/u/83917358?v=4", "gravatar_id": "", "url": "https://api.github.com/users/annaa-ka", "html_url": "https://github.com/annaa-ka", "followers_url": "https://api.github.com/users/annaa-ka/followers", "following_url": "https://api.github.com/users/annaa-ka/following{/other_user}", "gists_url": "https://api.github.com/users/annaa-ka/gists{/gist_id}", "starred_url": "https://api.github.com/users/annaa-ka/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/annaa-ka/subscriptions", "organizations_url": "https://api.github.com/users/annaa-ka/orgs", "repos_url": "https://api.github.com/users/annaa-ka/repos", "events_url": "https://api.github.com/users/annaa-ka/events{/privacy}", "received_events_url": "https://api.github.com/users/annaa-ka/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473184161, "node_id": "MDU6TGFiZWw0NzMxODQxNjE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:support", "name": "type:support", "color": "159b2e", "default": false, "description": "Support issues" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1114343535, "node_id": "MDU6TGFiZWwxMTE0MzQzNTM1", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:data", "name": "comp:data", "color": "0052cc", "default": false, "description": "tf.data related issues" }, { "id": 3255468475, "node_id": "MDU6TGFiZWwzMjU1NDY4NDc1", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/2.6.0", "name": "2.6.0", "color": "FA96B6", "default": false, "description": "" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @annaa-ka ,\r\n\r\nTf2.6v is quiet older and not supported right now as current version is TF2.15v. Could you please try with latest version as it possible many bugs resolved with each latest version.\r\n\r\nAlso without code snippet it's hard to analyse the issue.Could you submit the code in the form of a colab gist with latest versions?", "> Hi @annaa-ka ,\r\n> \r\n> Tf2.6v is quiet older and not supported right now as current version is TF2.15v. Could you please try with latest version as it possible many bugs resolved with each latest version.\r\n> \r\n> Also without code snippet it's hard to analyse the issue.Could you submit the code in the form of a colab gist with latest versions?\r\n\r\nUnfortunately, I cannot upgrade tf up to 2.15v, as we have a very big project and a lot of checks should be made before the upgrade.\r\n\r\nAs far as I understood, profiler does some extra checks. Maybe there are any methods that I can use in order to understand what this iterator is used for, or in which function uses it? And we have a custom code for multihost train, i do not know how to paste it into collab\r\n\r\n", "Please have a look into the source code and doc string of [OwnedIterator](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/python/data/ops/iterator_ops.py#L658) in case it is of any help.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62574\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62574\">No</a>\n" ]
2023-12-05T22:34:35
2023-12-27T01:48:04
2023-12-27T01:47:58
NONE
null
null
null
### Issue type Support ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version 2.6.2 ### Custom code Yes ### OS platform and distribution Ubuntu 20.04.6 LTS ### Mobile device _No response_ ### Python version Python 3.8.10 ### Bazel version bazel 5.2.0-1 ### GCC/compiler version gcc (Ubuntu 9.4.0-1ubuntu1~20.04.1) 9.4.0 ### CUDA/cuDNN version CUDA Version: 12.0 ### GPU model and memory _No response_ ### Current behavior? When I try to profile training using the TensorBoard callback, I get stuck on the problem described at https://pastebin.com/Npzjb1x7, and it results in an infinite loop because of this problem (https://github.com/OpenMined/PySyft/issues/7042) ### Standalone code to reproduce the issue ```shell I do not actually know what to paste as an example; I came to ask for help with how to debug whether this is normal behaviour ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62574/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62574/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62573
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62573/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62573/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62573/events
https://github.com/tensorflow/tensorflow/pull/62573
2,027,242,133
PR_kwDOArmXAs5hPBwY
62,573
Update RELEASE.md
{ "login": "ronaghy", "id": 147886872, "node_id": "U_kgDOCNCTGA", "avatar_url": "https://avatars.githubusercontent.com/u/147886872?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ronaghy", "html_url": "https://github.com/ronaghy", "followers_url": "https://api.github.com/users/ronaghy/followers", "following_url": "https://api.github.com/users/ronaghy/following{/other_user}", "gists_url": "https://api.github.com/users/ronaghy/gists{/gist_id}", "starred_url": "https://api.github.com/users/ronaghy/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ronaghy/subscriptions", "organizations_url": "https://api.github.com/users/ronaghy/orgs", "repos_url": "https://api.github.com/users/ronaghy/repos", "events_url": "https://api.github.com/users/ronaghy/events{/privacy}", "received_events_url": "https://api.github.com/users/ronaghy/received_events", "type": "User", "site_admin": false }
[ { "id": 987666414, "node_id": "MDU6TGFiZWw5ODc2NjY0MTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/ready%20to%20pull", "name": "ready to pull", "color": "2cd643", "default": false, "description": "PR ready for merge process" }, { "id": 1169364458, "node_id": "MDU6TGFiZWwxMTY5MzY0NDU4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:S", "name": "size:S", "color": "adafea", "default": false, "description": "CL Change Size: Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[]
2023-12-05T22:30:24
2023-12-06T02:32:17
2023-12-06T02:32:16
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62573", "html_url": "https://github.com/tensorflow/tensorflow/pull/62573", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62573.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62573.patch", "merged_at": "2023-12-06T02:32:16" }
Added release notes for 2.15.0.post1
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62573/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62573/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62572
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62572/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62572/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62572/events
https://github.com/tensorflow/tensorflow/issues/62572
2,025,809,179
I_kwDOArmXAs54v2Ub
62,572
tf.sqrt crashing
{ "login": "nhuet", "id": 23269019, "node_id": "MDQ6VXNlcjIzMjY5MDE5", "avatar_url": "https://avatars.githubusercontent.com/u/23269019?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nhuet", "html_url": "https://github.com/nhuet", "followers_url": "https://api.github.com/users/nhuet/followers", "following_url": "https://api.github.com/users/nhuet/following{/other_user}", "gists_url": "https://api.github.com/users/nhuet/gists{/gist_id}", "starred_url": "https://api.github.com/users/nhuet/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nhuet/subscriptions", "organizations_url": "https://api.github.com/users/nhuet/orgs", "repos_url": "https://api.github.com/users/nhuet/repos", "events_url": "https://api.github.com/users/nhuet/events{/privacy}", "received_events_url": "https://api.github.com/users/nhuet/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "Hey , can I work on this?\r\n", "@nhuet I tried to replicate the issue on colab but didn't face the error reported. Please find the gist [here](https://colab.research.google.com/gist/sushreebarsa/8e405910794bfd917950ef5b7a584bfd/62572.ipynb) and the output was as follows;\r\n```\r\n<tf.Tensor: shape=(2,), dtype=float32, numpy=array([1., 1.], dtype=float32)>\r\n```\r\nThank you!", "Indeed, since the last post-release https://pypi.org/project/tensorflow/2.15.0.post1/ on December, 5th, it seems to work now.\r\nBut maybe this post release was connected to this issue ;) ?", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62572\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62572\">No</a>\n" ]
2023-12-05T10:02:18
2023-12-12T17:33:02
2023-12-12T17:32:58
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version v2.15.0-rc1-8-g6887368d6d4 2.15.0 ### Custom code No ### OS platform and distribution Debian GNU/Linux 12 (bookworm) ### Mobile device _No response_ ### Python version 3.9.12 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? `tf.sqrt` is raising an error instead of computing the square root. ### Standalone code to reproduce the issue ```shell import tensorflow as tf tf.sqrt(tf.ones((2,))) ``` ### Relevant log output ```shell 2023-12-05 10:54:59.322693: W tensorflow/core/framework/op_kernel.cc:1827] UNKNOWN: JIT compilation failed. --------------------------------------------------------------------------- UnknownError Traceback (most recent call last) Cell In[3], line 2 1 import tensorflow as tf ----> 2 tf.sqrt(tf.ones((2,))) File ~/venv/lib/python3.9/site-packages/tensorflow/python/ops/weak_tensor_ops.py:88, in weak_tensor_unary_op_wrapper.<locals>.wrapper(*args, **kwargs) 86 def wrapper(*args, **kwargs): 87 if not ops.is_auto_dtype_conversion_enabled(): ---> 88 return op(*args, **kwargs) 89 bound_arguments = signature.bind(*args, **kwargs) 90 bound_arguments.apply_defaults() File ~/venv/lib/python3.9/site-packages/tensorflow/python/util/traceback_utils.py:153, in filter_traceback.<locals>.error_handler(*args, **kwargs) 151 except Exception as e: 152 filtered_tb = _process_traceback_frames(e.__traceback__) --> 153 raise e.with_traceback(filtered_tb) from None 154 finally: 155 del filtered_tb File ~/venv/lib/python3.9/site-packages/tensorflow/python/framework/ops.py:5883, in raise_from_not_ok_status(e, name) 5881 def raise_from_not_ok_status(e, name) -> NoReturn: 5882 e.message += (" name: " + str(name if name is not None else "")) -> 5883 raise core._status_to_exception(e) from None UnknownError: {{function_node __wrapped__Sqrt_device_/job:localhost/replica:0/task:0/device:GPU:0}} JIT compilation failed. [Op:Sqrt] name: ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62572/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62572/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62571
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62571/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62571/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62571/events
https://github.com/tensorflow/tensorflow/issues/62571
2,025,646,086
I_kwDOArmXAs54vOgG
62,571
tf.keras.layers.GlobalAveragePooling2D() freezes tf.distribute.MirroredStrategy()
{ "login": "matthewhelmi", "id": 58693864, "node_id": "MDQ6VXNlcjU4NjkzODY0", "avatar_url": "https://avatars.githubusercontent.com/u/58693864?v=4", "gravatar_id": "", "url": "https://api.github.com/users/matthewhelmi", "html_url": "https://github.com/matthewhelmi", "followers_url": "https://api.github.com/users/matthewhelmi/followers", "following_url": "https://api.github.com/users/matthewhelmi/following{/other_user}", "gists_url": "https://api.github.com/users/matthewhelmi/gists{/gist_id}", "starred_url": "https://api.github.com/users/matthewhelmi/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/matthewhelmi/subscriptions", "organizations_url": "https://api.github.com/users/matthewhelmi/orgs", "repos_url": "https://api.github.com/users/matthewhelmi/repos", "events_url": "https://api.github.com/users/matthewhelmi/events{/privacy}", "received_events_url": "https://api.github.com/users/matthewhelmi/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 996845227, "node_id": "MDU6TGFiZWw5OTY4NDUyMjc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:dist-strat", "name": "comp:dist-strat", "color": "0052cc", "default": false, "description": "Distribution Strategy related issues" }, { "id": 1097546578, "node_id": "MDU6TGFiZWwxMDk3NTQ2NTc4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:keras", "name": "comp:keras", "color": "0052cc", "default": false, "description": "Keras related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "It may related to those two issues : #62234 #61314", "Thanks for the reply @edwardyehuang!\r\n\r\nI had a look at the two issues.\r\n\r\nIn this case the hangs with the specific model architecture above, but runs smooth with the minor modification. And on a single gpu everything runs. ", "> Thanks for the reply @edwardyehuang!\r\n> \r\n> I had a look at the two issues.\r\n> \r\n> In this case the hangs with the specific model architecture above, but runs smooth with the minor modification. And on a single gpu everything runs.\r\n\r\nIn the above issues, TensorFlow got stuck when using XLA + multi-gpus.", "Hi **@matthewhelmi**,\r\nIt looks like this is a duplicate of issue #[18862](https://github.com/keras-team/keras/issues/18862). Can you please close this issue, since it is already being tracked there.\r\n\r\nThank you!", "Thanks @edwardyehuang! is there something I can try related to the XLA+multigpu issue?", "Hi @Venkat6871,\r\n\r\nI was told on the other issue to open it here as they believed it was TensorFlow related.\r\n\r\nWould you suggest I close the other one?", "> Hi @Venkat6871,\r\n> \r\n> I was told on the other issue to open it here as they believed it was TensorFlow related.\r\n> \r\n> Would you suggest I close the other one?\r\n\r\nYes, you can keep this open and close the other one. " ]
2023-12-05T08:52:11
2023-12-11T21:33:53
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version 2.13.1 ### Custom code No ### OS platform and distribution NAME="Amazon Linux" VERSION="2" ID="amzn" ID_LIKE="centos rhel fedora" ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory NVIDIA T4 (4 GPU) - VRAM 64gb ### Current behavior? I was preparing my own training script for multi-gpu support to achieve higher batch sizes using tf.distribute.MirroredStrategy(). Things were working initially but began freezing (seemingly out of the blue) on NVIDIA T4 (4 GPU) - VRAM 64gb, but still runs fine on NVIDIA A10G (4 GPU) - VRAM 96gb. I used the following example to debug on NVIDIA T4 (4 GPU) - VRAM 64gb: [Custom training with tf.distribute.Strategy](https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/distribute/custom_training.ipynb#scrollTo=F2VeZUWUj5S4) I found that if I substitute their model (See link) with ``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(10, kernel_size=3, activation="relu"), tf.keras.layers.GlobalAveragePooling2D(), tf.keras.layers.Dense(10, activation="relu") ]) ``` the training freezes from the start (just like my own code, which is using a similar model). My symptoms are similar to [Multi-GPU training not starting or freezing. GPUs at 100%](https://github.com/tensorflow/tensorflow/issues/54325) It stops freezing if I replace GlobalAveragePooling2D() with a Flatten(). ``` model = tf.keras.Sequential([ tf.keras.layers.Conv2D(10, kernel_size=3, activation="relu"), tf.keras.layers.Flatten(), tf.keras.layers.Dense(10, activation="relu") ]) ``` I have also noticed freezing when introducing stride > 1 in the conv layer. Everything runs fine on a single-gpu. Freezing only occurs when using >1 GPU. TF v2.13.1 ### Standalone code to reproduce the issue ```shell https://colab.research.google.com/github/tensorflow/docs/blob/master/site/en/tutorials/distribute/custom_training.ipynb#scrollTo=F2VeZUWUj5S4 ``` ### Relevant log output ```shell INFO:tensorflow:Collective all_reduce tensors: 4 all_reduces, num_devices = 4, group_size = 4, implementation = CommunicationImplementation.NCCL, num_packs = 1 INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). INFO:tensorflow:Collective all_reduce tensors: 4 all_reduces, num_devices = 4, group_size = 4, implementation = CommunicationImplementation.NCCL, num_packs = 1 INFO:tensorflow:Reduce to /job:localhost/replica:0/task:0/device:CPU:0 then broadcast to ('/job:localhost/replica:0/task:0/device:CPU:0',). ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62571/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62571/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62570
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62570/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62570/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62570/events
https://github.com/tensorflow/tensorflow/pull/62570
2,025,238,667
PR_kwDOArmXAs5hIGrT
62,570
Correct broken endpoint links for Release 1.10
{ "login": "akhilnev", "id": 106297042, "node_id": "U_kgDOBlX20g", "avatar_url": "https://avatars.githubusercontent.com/u/106297042?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akhilnev", "html_url": "https://github.com/akhilnev", "followers_url": "https://api.github.com/users/akhilnev/followers", "following_url": "https://api.github.com/users/akhilnev/following{/other_user}", "gists_url": "https://api.github.com/users/akhilnev/gists{/gist_id}", "starred_url": "https://api.github.com/users/akhilnev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akhilnev/subscriptions", "organizations_url": "https://api.github.com/users/akhilnev/orgs", "repos_url": "https://api.github.com/users/akhilnev/repos", "events_url": "https://api.github.com/users/akhilnev/events{/privacy}", "received_events_url": "https://api.github.com/users/akhilnev/received_events", "type": "User", "site_admin": false }
[ { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1169364458, "node_id": "MDU6TGFiZWwxMTY5MzY0NDU4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:S", "name": "size:S", "color": "adafea", "default": false, "description": "CL Change Size: Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Thank you for the reply! The Release 1.10 on the website points directly to GitHub source code for the specific end-points which looks like this for tf.string:\r\n[\r\n<img width=\"1002\" alt=\"Screen Shot 2023-12-07 at 2 59 12 AM\" src=\"https://github.com/tensorflow/tensorflow/assets/106297042/074c1ce8-218e-4059-99b5-078bdd37a393\">\r\n](url)\r\n\r\nPlease let me know if the endpoint links to the the GitHub repository similar to the one above is okay ? I will then replace all the links to use the TF 1.10 versions", "Hi @mihaimaruseac Can you please review this PR? Thank you!", "The URLs are still not based on https://www.tensorflow.org/versions/r2.10/api_docs/python/tf", "Hi @akhilnev Can you please check @mihaimaruseac's [comments](https://github.com/tensorflow/tensorflow/pull/62570#issuecomment-1872377159) and keep us posted ? Thank you!", "This PR is stale because it has been open for 14 days with no activity. It will be closed if no further activity occurs. Thank you.", "Hi @akhilnev Any update on this PR? Please. Thank you!", "This PR is stale because it has been open for 14 days with no activity. It will be closed if no further activity occurs. Thank you.", "This PR was closed because it has been inactive for 14 days since being marked as stale. Please reopen if you'd like to work on this further." ]
2023-12-05T04:07:33
2024-04-05T01:47:28
2024-04-05T01:47:23
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62570", "html_url": "https://github.com/tensorflow/tensorflow/pull/62570", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62570.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62570.patch", "merged_at": null }
This commit addresses broken endpoint links inside the `Readme.md` file in the context of Release 1.10. The broken links are replaced with functional links pointing to the corresponding documentation on the [TensorFlow website](https://www.tensorflow.org/api_docs/python/)
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62570/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62570/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62569
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62569/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62569/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62569/events
https://github.com/tensorflow/tensorflow/pull/62569
2,025,180,994
PR_kwDOArmXAs5hH6XM
62,569
Refine link in build_convert.md for including and using models in programs
{ "login": "akhilnev", "id": 106297042, "node_id": "U_kgDOBlX20g", "avatar_url": "https://avatars.githubusercontent.com/u/106297042?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akhilnev", "html_url": "https://github.com/akhilnev", "followers_url": "https://api.github.com/users/akhilnev/followers", "following_url": "https://api.github.com/users/akhilnev/following{/other_user}", "gists_url": "https://api.github.com/users/akhilnev/gists{/gist_id}", "starred_url": "https://api.github.com/users/akhilnev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akhilnev/subscriptions", "organizations_url": "https://api.github.com/users/akhilnev/orgs", "repos_url": "https://api.github.com/users/akhilnev/repos", "events_url": "https://api.github.com/users/akhilnev/events{/privacy}", "received_events_url": "https://api.github.com/users/akhilnev/received_events", "type": "User", "site_admin": false }
[ { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 1169364259, "node_id": "MDU6TGFiZWwxMTY5MzY0MjU5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:XS", "name": "size:XS", "color": "adafea", "default": false, "description": "CL Change Size: Extra Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Covered by https://github.com/tensorflow/tensorflow/pull/62242" ]
2023-12-05T03:01:25
2023-12-06T16:16:38
2023-12-06T16:16:33
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62569", "html_url": "https://github.com/tensorflow/tensorflow/pull/62569", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62569.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62569.patch", "merged_at": null }
This commit updates the file reference in build_convert.md, replacing the non-existent evaluate.cc link with hello_world_test.cc. The change is reflected in the commit history, as shown in the attached screenshot: <img width="893" alt="Screen Shot 2023-12-05 at 11 38 21 AM" src="https://github.com/tensorflow/tensorflow/assets/106297042/4ae031a2-0b64-4189-8368-597fb47a9d36">
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62569/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62569/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62568
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62568/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62568/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62568/events
https://github.com/tensorflow/tensorflow/pull/62568
2,025,146,961
PR_kwDOArmXAs5hHzOP
62,568
Ensure the global uniqueness of the tensor name.
{ "login": "kevint324", "id": 8800468, "node_id": "MDQ6VXNlcjg4MDA0Njg=", "avatar_url": "https://avatars.githubusercontent.com/u/8800468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kevint324", "html_url": "https://github.com/kevint324", "followers_url": "https://api.github.com/users/kevint324/followers", "following_url": "https://api.github.com/users/kevint324/following{/other_user}", "gists_url": "https://api.github.com/users/kevint324/gists{/gist_id}", "starred_url": "https://api.github.com/users/kevint324/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kevint324/subscriptions", "organizations_url": "https://api.github.com/users/kevint324/orgs", "repos_url": "https://api.github.com/users/kevint324/repos", "events_url": "https://api.github.com/users/kevint324/events{/privacy}", "received_events_url": "https://api.github.com/users/kevint324/received_events", "type": "User", "site_admin": false }
[ { "id": 987666414, "node_id": "MDU6TGFiZWw5ODc2NjY0MTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/ready%20to%20pull", "name": "ready to pull", "color": "2cd643", "default": false, "description": "PR ready for merge process" }, { "id": 1169364458, "node_id": "MDU6TGFiZWwxMTY5MzY0NDU4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:S", "name": "size:S", "color": "adafea", "default": false, "description": "CL Change Size: Small" }, { "id": 1478826728, "node_id": "MDU6TGFiZWwxNDc4ODI2NzI4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:core", "name": "comp:core", "color": "024391", "default": false, "description": "issues related to core part of tensorflow" } ]
open
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "`\r\ngcc: error: unrecognized command line option '-mavxvnni'; did you mean '-mavx512vnni'?\r\n`\r\nThe rocm build failure has nothing to do with my cl.", "Hi @rohan100jain Can you please review this PR ? Thank you!", "Hi @rohan100jain Can you please review this PR ? Thank you!", "Seems someone is having a really long xmas.", "Hi @gbaned \r\n@rohan100jain hasn't been active on github for over 6 months. \r\nI do believe this bug if fundamental to all TF users.\r\nCan you try someone else please?\r\n\r\nThanks", "Hi @kevint324 Can you please check @sagunb's [comments](https://github.com/tensorflow/tensorflow/pull/62568#pullrequestreview-1825593936) and keep us posted ? Thank you!", "hi @gbaned @sagunb \r\n\r\nSorry for the late reply. \r\nIt took me sometime to figure out how to write a proper test for this function.\r\n\r\nHope the unit test explains the changes.\r\n\r\nCheers\r\nKevin", "hi @gbaned @sagunb\r\nAny updates on the review?\r\n\r\nThanks", "hi @gbaned \r\n\r\nMay I know what is currently blocking this PR.\r\n\r\nThanks\r\n", "> hi @gbaned\r\n> \r\n> May I know what is currently blocking this PR.\r\n> \r\n> Thanks\r\n\r\nHi @kevint324 Sorry for the delay, this PR is waiting for the internal approval and we are working on it. Thank you for your contribution!\r\n\r\n" ]
2023-12-05T02:23:04
2024-06-05T08:19:53
null
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62568", "html_url": "https://github.com/tensorflow/tensorflow/pull/62568", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62568.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62568.patch", "merged_at": null }
Due to the tensor name generation mechanism, different OPs may generate the same rendezvous key. In the case of multi-thread race condition, it is possible for incorrect communication to occur resulting in incorrect data being received. This patch avoids key conflicts by ensuring the uniqueness of tensor names. See #62523.
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62568/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62568/timeline
null
null
true
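The body of PR #62568 above describes rendezvous-key collisions caused by non-unique generated tensor names. Below is a minimal, hedged sketch in Python of the general idea only — serializing name generation and appending a process-wide counter so two ops can never yield the same key. The helper names (`make_unique_tensor_name`, `make_rendezvous_key`) are hypothetical illustrations and this is not the actual C++ patch in tensorflow/core.

```python
import itertools
import threading

# Hypothetical illustration of the idea described in PR #62568: make generated
# tensor names globally unique so that rendezvous keys built from them cannot
# collide, even when many threads create ops concurrently.
_counter = itertools.count()   # process-wide monotonically increasing id
_lock = threading.Lock()       # guards the counter across threads

def make_unique_tensor_name(base_name: str) -> str:
    """Return base_name suffixed with a globally unique id (hypothetical helper)."""
    with _lock:
        unique_id = next(_counter)
    return f"{base_name}_{unique_id}"

def make_rendezvous_key(src_device: str, dst_device: str, tensor_name: str) -> str:
    """Build a send/recv-style key; unique tensor names keep keys distinct (sketch only)."""
    return f"{src_device};{dst_device};{tensor_name}"

# Two ops with the same base name no longer share a key:
k1 = make_rendezvous_key("/job:a", "/job:b", make_unique_tensor_name("edge_1"))
k2 = make_rendezvous_key("/job:a", "/job:b", make_unique_tensor_name("edge_1"))
assert k1 != k2
```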
https://api.github.com/repos/tensorflow/tensorflow/issues/62567
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62567/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62567/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62567/events
https://github.com/tensorflow/tensorflow/pull/62567
2,025,098,542
PR_kwDOArmXAs5hHpQ0
62,567
Update README.md
{ "login": "akhilnev", "id": 106297042, "node_id": "U_kgDOBlX20g", "avatar_url": "https://avatars.githubusercontent.com/u/106297042?v=4", "gravatar_id": "", "url": "https://api.github.com/users/akhilnev", "html_url": "https://github.com/akhilnev", "followers_url": "https://api.github.com/users/akhilnev/followers", "following_url": "https://api.github.com/users/akhilnev/following{/other_user}", "gists_url": "https://api.github.com/users/akhilnev/gists{/gist_id}", "starred_url": "https://api.github.com/users/akhilnev/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/akhilnev/subscriptions", "organizations_url": "https://api.github.com/users/akhilnev/orgs", "repos_url": "https://api.github.com/users/akhilnev/repos", "events_url": "https://api.github.com/users/akhilnev/events{/privacy}", "received_events_url": "https://api.github.com/users/akhilnev/received_events", "type": "User", "site_admin": false }
[ { "id": 1169364259, "node_id": "MDU6TGFiZWwxMTY5MzY0MjU5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:XS", "name": "size:XS", "color": "adafea", "default": false, "description": "CL Change Size: Extra Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for your pull request! It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nView this [failed invocation](https://github.com/tensorflow/tensorflow/pull/62567/checks?check_run_id=19310880067) of the CLA check for more information.\n\nFor the most up to date status, view the checks section at the bottom of the pull request.", "Hi @akhilnev This PR is duplicate of PR[#62039](https://github.com/tensorflow/tensorflow/pull/62039). Hence closing this PR. Thank you for your contribution. \r\n\r\n", "Please don't use \"add file\"/\"update file\"/\"fix file\"/etc. commit messages. These are hard to reason about when looking at the history of the file/repository. Instead, please write explanatory git commit messages.\r\n\r\nThe commit message is also the title of the PR if the PR has only one commit. It is thus twice important to have commit messages that are relevant, as PRs would be easier to understand and easier to analyze in search results.\r\n\r\nFor how to write good quality git commit messages, please consult https://cbea.ms/git-commit/ ", "Thank you so much for your comment, I will make sure to consult the attached link and improve my commit messages to make it easier to analyze what is going on from next time." ]
2023-12-05T01:23:06
2023-12-05T17:15:30
2023-12-05T06:16:35
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62567", "html_url": "https://github.com/tensorflow/tensorflow/pull/62567", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62567.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62567.patch", "merged_at": null }
Changed the 404 link addressed in Issue #62019 and added suggested link, please let me know if this is fine !
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62567/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62567/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62566
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62566/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62566/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62566/events
https://github.com/tensorflow/tensorflow/issues/62566
2,024,230,195
I_kwDOArmXAs54p00z
62,566
[MLIR] tf-mlir-translate not working for keras sequential model
{ "login": "monowaranjum", "id": 19803082, "node_id": "MDQ6VXNlcjE5ODAzMDgy", "avatar_url": "https://avatars.githubusercontent.com/u/19803082?v=4", "gravatar_id": "", "url": "https://api.github.com/users/monowaranjum", "html_url": "https://github.com/monowaranjum", "followers_url": "https://api.github.com/users/monowaranjum/followers", "following_url": "https://api.github.com/users/monowaranjum/following{/other_user}", "gists_url": "https://api.github.com/users/monowaranjum/gists{/gist_id}", "starred_url": "https://api.github.com/users/monowaranjum/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/monowaranjum/subscriptions", "organizations_url": "https://api.github.com/users/monowaranjum/orgs", "repos_url": "https://api.github.com/users/monowaranjum/repos", "events_url": "https://api.github.com/users/monowaranjum/events{/privacy}", "received_events_url": "https://api.github.com/users/monowaranjum/received_events", "type": "User", "site_admin": false }
[ { "id": 473173272, "node_id": "MDU6TGFiZWw0NzMxNzMyNzI=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:feature", "name": "type:feature", "color": "159b2e", "default": false, "description": "Feature requests" }, { "id": 2012480497, "node_id": "MDU6TGFiZWwyMDEyNDgwNDk3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:docs-feature", "name": "type:docs-feature", "color": "159b2e", "default": false, "description": "Doc issues for new feature, or clarifications about functionality" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false }
[ { "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false } ]
null
[ "If you can reproduce the issue, please comment if there are any command line mitigation for that. I would really not like to rewrite existing models using tf.function.", "@monowaranjum,\r\nYou need to create a wrapper [like this](https://github.com/tensorflow/tensorflow/blob/b41bee7adb423973821d1b93193b8becbddead56/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/keras.py#L36) and save it (with the wrapper, you specify an explicit @tf.function input_signature=, as the error was suggesting). \r\n\r\nThen, when you load it, you need to specify \"exported names\" for just the functions in your wrapper (in the case of the link I specified, it is just one function \"predict\").\r\n\r\nSo after saving the wrapper tf.Module you would load it with something like this:\r\n\r\n`tf-mlir-translate --savedmodel-objectgraph-to-mlir --tf-savedmodel-exported-names=predict /path/to/tf2_model -o out.mlir`\r\n\r\n(I notice that you are using a somewhat out of date version of tf-mlir-translate; you should be able to find the corresponding flags in your version; I think the -savedmodel-to-mlir -> -savedmodel-objectgraph-to-mlir change is the only non-obvious renaming)\r\n\r\nWe also have a Python API that does the same. You can find how it is invoked in [common.py](https://github.com/tensorflow/tensorflow/blob/b9fce9ec2edede6ccce183f0dca6e89162371388/tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/common.py#L87) which underlies the keras test I linked above.\r\n\r\nThank you!", "Thanks, it worked. " ]
2023-12-04T16:16:48
2023-12-07T17:30:20
2023-12-07T17:30:20
NONE
null
null
null
### Issue type Documentation Feature Request ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04 ### Mobile device _No response_ ### Python version 3.10 ### Bazel version 5.1.1 ### GCC/compiler version 11.4 ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I wrote a small model to test a few compiler features in tensorflow. The model is saved to a directory using ```SavedModel``` format. Then I tried using ```tf-mlir-translate``` to convert the model into tf MLIR code. However, that did not work. The conversion terminated abruptly while giving the error message that function with multiple signature detected. I inspected the model with ```saved-model-cli``` and found that indeed that ```__call__``` function has two concrete functions with two different set of parameters. I have no idea why the ```__call__``` function has two concrete instantiations. Especially when I never really have any control over how the internal functions get instantiated except for ```model.fit()``` call. I expected the MLIR file to be generated. But instead, it is continuously showing error about multiple concrete functions. ### Standalone code to reproduce the issue ```shell Here is the model code: import tensorflow as tf import numpy as np def get_linear_classifier(): model = tf.keras.Sequential() model.add(tf.keras.layers.Input(shape = (784))) model.add(tf.keras.layers.Dense(32, activation = 'relu')) model.add(tf.keras.layers.Dense(10)) model.summary() return model def dense_train(model, batch_num, train_set): train_examples = train_set['data']/255.0 train_labels = train_set['label'] num_examples = train_examples.shape[0] image_width = train_examples.shape[1] image_height = train_examples.shape[2] train_examples = np.reshape(train_examples, (num_examples, image_width * image_height)) temp = np.zeros((60000, 10)) for i in range(len(train_labels)): temp[i][train_labels[i]] = 1.0 print("Shapes: {} {}".format(train_examples.shape, temp.shape)) history = model.fit(train_examples, temp, batch_size = 100, epochs = 40) return history tf.random.set_seed(42) tf.keras.backend.clear_session() model = get_linear_classifier() (train_x, train_y), (test_x, test_y) = tf.keras.datasets.mnist.load_data() model.compile( optimizer= tf.keras.optimizers.Adam(learning_rate=0.01), loss=tf.keras.losses.CategoricalCrossentropy(from_logits=True), metrics=[tf.keras.metrics.CategoricalAccuracy()]) train_data = {'data': train_x, 'label': train_y} training_history = dense_train(model, 100, train_data) model.save('/path/to/working/directory/graph-artifacts/toy_model1') ``` Then I ran the following command: ``` tf-mlir-translate /path/to/working/directory/graph-artifacts/toy_model1 --savedmodel-objectgraph-to-mlir ``` I got the following error message: ``` tensorflow/compiler/mlir/tensorflow/translate/tf_mlir_translate.cc:172] SavedModel import failed: INVALID_ARGUMENT: Exported function with exported name(s) '__call__', 'keras_api.__call__' with multiple concrete functions. Add @tf.function(input_signature=[...]) on this function, or use a narrower list of exported names that excludes this function. 
``` ``` ### Relevant log output ```shell saved-model-cli shows the following output after running the command: saved_model_cli show --dir=/home/rashik/Documents/tensorfow_mlir_test/TF-ESRGAN/graph-artifacts/toy_model1 --all Output: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT MetaGraphDef with tag-set: 'serve' contains the following SignatureDefs: signature_def['__saved_model_init_op']: The given SavedModel SignatureDef contains the following input(s): The given SavedModel SignatureDef contains the following output(s): outputs['__saved_model_init_op'] tensor_info: dtype: DT_INVALID shape: unknown_rank name: NoOp Method name is: signature_def['serving_default']: The given SavedModel SignatureDef contains the following input(s): inputs['input_1'] tensor_info: dtype: DT_FLOAT shape: (-1, 784) name: serving_default_input_1:0 The given SavedModel SignatureDef contains the following output(s): outputs['dense_1'] tensor_info: dtype: DT_FLOAT shape: (-1, 10) name: StatefulPartitionedCall:0 Method name is: tensorflow/serving/predict The MetaGraph with tag set ['serve'] contains the following ops: {'ShardedFilename', 'ReadVariableOp', 'BiasAdd', 'StringJoin', 'SaveV2', 'VarHandleOp', 'NoOp', 'Select', 'AssignVariableOp', 'RestoreV2', 'Const', 'MergeV2Checkpoints', 'StatefulPartitionedCall', 'StaticRegexFullMatch', 'Identity', 'DisableCopyOnRead', 'Relu', 'MatMul', 'Placeholder', 'Pack'} Concrete Functions: Function Name: '__call__' Option #1 Callable with: Argument #1 input_1: TensorSpec(shape=(None, 784), dtype=tf.float32, name='input_1') Argument #2 DType: bool Value: True Argument #3 DType: NoneType Value: None Option #2 Callable with: Argument #1 input_1: TensorSpec(shape=(None, 784), dtype=tf.float32, name='input_1') Argument #2 DType: bool Value: False Argument #3 DType: NoneType Value: None Function Name: '_default_save_signature' Option #1 Callable with: Argument #1 input_1: TensorSpec(shape=(None, 784), dtype=tf.float32, name='input_1') Function Name: 'call_and_return_all_conditional_losses' Option #1 Callable with: Argument #1 input_1: TensorSpec(shape=(None, 784), dtype=tf.float32, name='input_1') Argument #2 DType: bool Value: True Argument #3 DType: NoneType Value: None Option #2 Callable with: Argument #1 input_1: TensorSpec(shape=(None, 784), dtype=tf.float32, name='input_1') Argument #2 DType: bool Value: False Argument #3 DType: NoneType Value: None ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62566/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62566/timeline
null
completed
false
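The resolution suggested in the comments of issue #62566 above is to wrap the Keras model in a `tf.Module` whose exported function carries an explicit `input_signature`, so only one concrete function is exported. A minimal sketch of such a wrapper, assuming the same (1, 784) input as the issue's model, is below; the class name, save path, and `predict` method name are illustrative.

```python
import tensorflow as tf

class PredictModule(tf.Module):
    """Hypothetical wrapper exposing a single concrete 'predict' function."""

    def __init__(self, model):
        super().__init__()
        self._model = model

    @tf.function(input_signature=[tf.TensorSpec(shape=[None, 784], dtype=tf.float32, name="input_1")])
    def predict(self, x):
        # training/mask arguments are fixed here, so only one concrete function is traced.
        return self._model(x, training=False)

# Stand-in for the Sequential model built in the issue's reproduction code.
keras_model = tf.keras.Sequential([
    tf.keras.layers.Input(shape=(784,)),
    tf.keras.layers.Dense(32, activation="relu"),
    tf.keras.layers.Dense(10),
])

tf.saved_model.save(PredictModule(keras_model), "/tmp/toy_model1_wrapped")
```

After saving, the thread's suggested invocation is along the lines of `tf-mlir-translate --savedmodel-objectgraph-to-mlir --tf-savedmodel-exported-names=predict /tmp/toy_model1_wrapped -o out.mlir`; as noted in the comments, the exact flag names vary between TF versions.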
https://api.github.com/repos/tensorflow/tensorflow/issues/62565
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62565/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62565/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62565/events
https://github.com/tensorflow/tensorflow/issues/62565
2,024,104,392
I_kwDOArmXAs54pWHI
62,565
"ERROR: tensorflow/lite/util.cc BytesRequired number of elements overflowed." when benchmarking TFLite model with dynamic input size
{ "login": "JakubGorski", "id": 23618864, "node_id": "MDQ6VXNlcjIzNjE4ODY0", "avatar_url": "https://avatars.githubusercontent.com/u/23618864?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JakubGorski", "html_url": "https://github.com/JakubGorski", "followers_url": "https://api.github.com/users/JakubGorski/followers", "following_url": "https://api.github.com/users/JakubGorski/following{/other_user}", "gists_url": "https://api.github.com/users/JakubGorski/gists{/gist_id}", "starred_url": "https://api.github.com/users/JakubGorski/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JakubGorski/subscriptions", "organizations_url": "https://api.github.com/users/JakubGorski/orgs", "repos_url": "https://api.github.com/users/JakubGorski/repos", "events_url": "https://api.github.com/users/JakubGorski/events{/privacy}", "received_events_url": "https://api.github.com/users/JakubGorski/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473184161, "node_id": "MDU6TGFiZWw0NzMxODQxNjE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:support", "name": "type:support", "color": "159b2e", "default": false, "description": "Support issues" }, { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 2671339633, "node_id": "MDU6TGFiZWwyNjcxMzM5NjMz", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TFLiteGpuDelegate", "name": "TFLiteGpuDelegate", "color": "F71F04", "default": false, "description": "TFLite Gpu delegate issue" }, { "id": 4511033337, "node_id": "LA_kwDOArmXAs8AAAABDODn-Q", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.10", "name": "TF 2.10", "color": "C15088", "default": false, "description": "" } ]
closed
false
{ "login": "pkgoogle", "id": 132095473, "node_id": "U_kgDOB9-d8Q", "avatar_url": "https://avatars.githubusercontent.com/u/132095473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pkgoogle", "html_url": "https://github.com/pkgoogle", "followers_url": "https://api.github.com/users/pkgoogle/followers", "following_url": "https://api.github.com/users/pkgoogle/following{/other_user}", "gists_url": "https://api.github.com/users/pkgoogle/gists{/gist_id}", "starred_url": "https://api.github.com/users/pkgoogle/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pkgoogle/subscriptions", "organizations_url": "https://api.github.com/users/pkgoogle/orgs", "repos_url": "https://api.github.com/users/pkgoogle/repos", "events_url": "https://api.github.com/users/pkgoogle/events{/privacy}", "received_events_url": "https://api.github.com/users/pkgoogle/received_events", "type": "User", "site_admin": false }
[ { "login": "pkgoogle", "id": 132095473, "node_id": "U_kgDOB9-d8Q", "avatar_url": "https://avatars.githubusercontent.com/u/132095473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pkgoogle", "html_url": "https://github.com/pkgoogle", "followers_url": "https://api.github.com/users/pkgoogle/followers", "following_url": "https://api.github.com/users/pkgoogle/following{/other_user}", "gists_url": "https://api.github.com/users/pkgoogle/gists{/gist_id}", "starred_url": "https://api.github.com/users/pkgoogle/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pkgoogle/subscriptions", "organizations_url": "https://api.github.com/users/pkgoogle/orgs", "repos_url": "https://api.github.com/users/pkgoogle/repos", "events_url": "https://api.github.com/users/pkgoogle/events{/privacy}", "received_events_url": "https://api.github.com/users/pkgoogle/received_events", "type": "User", "site_admin": false } ]
null
[ "@LakshmiKalaKadali did You have a look into this? Are You able to help?", "Hi @pkgoogle , Please look into the issue\r\n\r\nThank You", "Hi @JakubGorski, did you get the binary from somewhere? or did you build android_aarch64_benchmark_model_plus_flex from source? If you built it from source can you let us know the command you used? Also if so, what OS/platform did you use to build the binary? Did you run the configure script prior to building it? If so did you build with Cuda or RocM support? Thanks for all the additional information which will help.", "Hi @pkgoogle. Thanks for Your help. \r\n\r\nFirst I used the binary from https://storage.googleapis.com/tensorflow-nightly-public/prod/tensorflow/release/lite/tools/nightly/latest/android_aarch64_benchmark_model_plus_flex.\r\n\r\nThen I was trying to get accustomed with the benchmark model code and fix the error so I was building it from source.\r\nI used the command:\r\n`sudo bazel build -c opt --cxxopt=--std=c++11 --config=android_arm64 --define=xnn_enable_arm_i8mm=false tensorflow/lite/tools/benchmark:benchmark_model` on **Linux Ubuntu 20.04.6**.\r\nI ran the configure script before - I build it with Cuda support (without RocM).\r\n**Both binaries gave me the same error.**\r\n\r\n### I will provide some additional info, maybe will be helpful:\r\nWhen I use the TFLite benchmark model tool with `--use_gpu=false --use_xnnpack=true --input_layer=input_1 --input_layer_shape=1, 512, 512, 3` , the model created with dynamic input shapes also fails (with the same error).\r\nIt only executes correctly with the default delegate. What is more weird, the default delegate is actually also the XNNPACK.\r\n\r\nThe difference lies in the order of the execution in the TFLite benchmark tool:\r\n\r\n1. When default delegate is used, the `interpreter_->ResizeInputTensor(i, input.shape);` call is first, then `interpreter_->AllocateTensors()` is called.\r\n2. When delegate is specified explicitly (gpu, xnnpack etc.), first the `interpreter_->ModifyGraphWithDelegate(delegate)` call is executed. This call invokes the `Subgraph::EnsureMemoryAllocations()` where there is `AllocateTensors()` call. During this call the error arises.\r\nIf I call the `interpreter_->ResizeInputTensor(i, input.shape)` function first (before `interpreter_->ModifyGraphWithDelegate(delegate)`) evertything seems to be working fine.\r\n\r\nBut maybe the best solution is to export my tflite model with dummy input size? But I read here:\r\nhttps://github.com/tensorflow/tensorflow/issues/41807#issuecomment-673161339\r\nthat it's not the right way.\r\n\r\n\r\n", "Hi @JakubGorski, a dummy input size/making things static before it enters our ecosystem will generally always work but is a little bit clunky and not the best developer experience, I believe what you are doing should still work but if you need to continue your work ASAP feel free to go ahead with that solution, while we figure this out -- but as always we appreciate your help and the additional information you provided definitely helps. I'll look into this further.", "Hi @JakubGorski, this is now updated @https://github.com/tensorflow/tensorflow/commit/d6e68d61084f98d6a09151cdc91b59e36e6701b2, please retest your workflow and let us know if it resolves your issue.", "Hi @pkgoogle, I retested my workflow. The problem is fixed! 
Thank You for Your help :)", "Np, @JakubGorski, if you have no other open items regarding this, please feel free to close.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62565\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62565\">No</a>\n" ]
2023-12-04T15:17:44
2024-01-16T11:08:44
2024-01-16T11:08:42
NONE
null
null
null
### Issue type Support ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version 2.10 ### Custom code Yes ### OS platform and distribution Linux Ubuntu 20.04.6 ### Mobile device Samsung S23 ### Python version 3.7.12 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I am exporting my tensorflow model to TFLite. I want to be able to run the TFLite model on mobile device GPU with different input shapes. I looked in many places, but I am missing a reliable info in documentation stating how such models should be exported. For that, I was using the following code: ``` # create model def get_model(input_shape=(None, None, 3)): inputs = tf.keras.Input(shape=input_shape) x = tf.keras.layers.Conv2D( 16, kernel_size=3, padding='same')(inputs) x = tf.keras.layers.Conv2D( 16 * 2, kernel_size=3, padding='same', strides=2)(x) x = tf.keras.layers.Conv2D( 16 * 4, kernel_size=3, use_bias=False)(x) x = tf.keras.layers.Conv2D( 3, kernel_size=3, padding='same')(x) model = tf.keras.Model(inputs, x) model.compile(optimizer=tf.keras.optimizers.Adam(), loss="mean_squared_error") return model # Util functions def save_tflite_model(output_model_path, tflite_model): with open(output_model_path, 'wb') as f: f.write(tflite_model) def convert_model_from_concrete(model_path, output_model_path, input_shape=(1, None, None, 3)): model = tf.saved_model.load(model_path) concrete_func = model.signatures[ tf.saved_model.DEFAULT_SERVING_SIGNATURE_DEF_KEY] concrete_func.inputs[0].set_shape(input_shape) converter = tf.lite.TFLiteConverter.from_concrete_functions([concrete_func]) converter.experimental_new_converter = True converter.target_spec.supported_ops = [ tf.lite.OpsSet.TFLITE_BUILTINS, tf.lite.OpsSet.SELECT_TF_OPS ] tflite_model = converter.convert() print(tf.lite.experimental.Analyzer.analyze(model_content=tflite_model, gpu_compatibility=True)) save_tflite_model(output_model_path, tflite_model) #Code for exporting my model to TFLite model = get_model() model.save("my_model_dynamic") convert_model_from_concrete("my_model_dynamic","my_model_dynamic.tflite") ``` Everything works fine, and the model is exported. From the `tf.lite.experimental.Analyzer` I get the following log: `Your model looks compatibile with GPU delegate with TFLite runtime version 2.10.0.` I can then locally load the model and run it with any valid shape: ``` interpreter = tf.lite.Interpreter("my_model_dynamic.tflite") custom_shape = [1, 512, 512, 3] interpreter.resize_tensor_input(interpreter.get_input_details()[0]['index'], custom_shape) interpreter.allocate_tensors() input = numpy.random.rand(*custom_shape).astype(np.float32) input_details = interpreter.get_input_details() interpreter.set_tensor(input_details[0]['index'], input) interpreter.invoke() ``` However when I use the TFLite benchmark model tool, with the GPU delegate and custom input shape - adb command: `./android_aarch64_benchmark_model_plus_flex --graph=./my_model_dynamic.tflite --use_gpu=true --input_layer=input_1 --input_layer_shape=1, 512, 512, 3` the benchmark fails with the following error: ``` INFO: STARTING! INFO: Log parameter values verbosely: [0] INFO: Graph: [./my_model_dynamic.tflite] INFO: Input layers: [input_1] INFO: Input shapes: [1,512,512,3] INFO: Use gpu: [1] INFO: Loaded model ./my_model_dynamic.tflite INFO: Initialized TensorFlow Lite runtime. INFO: Created TensorFlow Lite delegate for GPU. 
INFO: GPU delegate created. ERROR: tensorflow/lite/util.cc BytesRequired number of elements overflowed. ERROR: Node number 2 (CONV_2D) failed to prepare. ERROR: Failed to apply GPU delegate. ``` When I do not set the GPU delegate (execute without the `--use_gpu=true` flag), everything works fine. Also, when I export my model with a pre-defined dummy shape: ``` model = get_model(input_shape=(16, 16, 3) model.save("my_model_16") convert_model_from_concrete("my_model_16","my_model_16.tflite", input_shape=(1, 16, 16, 3)) ``` The same adb command I pasted above works well. Could You clarify why the first approach fails? Is it a bug in the benchmark model app? Or should I just use the second one (with dummy input shapes)? ### Standalone code to reproduce the issue ```shell The code above is enough to reproduce the problem. Also, I used this native binary of benchmark model tool: https://storage.googleapis.com/tensorflow-nightly-public/prod/tensorflow/release/lite/tools/nightly/latest/android_aarch64_benchmark_model_plus_flex ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62565/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62565/timeline
null
completed
false
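The comments of issue #62565 above attribute the failure to ordering: the input must be resized to a concrete shape before tensors are allocated (and, in the benchmark tool's C++ path, before `ModifyGraphWithDelegate` is called); the actual fix landed upstream per the referenced commit. The sketch below only restates the resize-then-allocate order at the Python level, mirroring the reporter's own snippet, and assumes the `my_model_dynamic.tflite` file from the issue exists.

```python
import numpy as np
import tensorflow as tf

# Sketch of the resize-before-allocate ordering discussed in the thread.
interpreter = tf.lite.Interpreter(model_path="my_model_dynamic.tflite")

input_index = interpreter.get_input_details()[0]["index"]
concrete_shape = [1, 512, 512, 3]

# 1. Pin the dynamic (None) dimensions to a concrete shape first...
interpreter.resize_tensor_input(input_index, concrete_shape)
# 2. ...then allocate tensors; allocating (or applying a delegate) before the
#    resize is what triggered the BytesRequired overflow in the benchmark tool.
interpreter.allocate_tensors()

interpreter.set_tensor(input_index, np.random.rand(*concrete_shape).astype(np.float32))
interpreter.invoke()
```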
https://api.github.com/repos/tensorflow/tensorflow/issues/62564
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62564/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62564/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62564/events
https://github.com/tensorflow/tensorflow/issues/62564
2,023,342,054
I_kwDOArmXAs54mb_m
62,564
Unable to load saved model with TPU using mixed precision bfloat16
{ "login": "rcalonso", "id": 19788974, "node_id": "MDQ6VXNlcjE5Nzg4OTc0", "avatar_url": "https://avatars.githubusercontent.com/u/19788974?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rcalonso", "html_url": "https://github.com/rcalonso", "followers_url": "https://api.github.com/users/rcalonso/followers", "following_url": "https://api.github.com/users/rcalonso/following{/other_user}", "gists_url": "https://api.github.com/users/rcalonso/gists{/gist_id}", "starred_url": "https://api.github.com/users/rcalonso/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rcalonso/subscriptions", "organizations_url": "https://api.github.com/users/rcalonso/orgs", "repos_url": "https://api.github.com/users/rcalonso/repos", "events_url": "https://api.github.com/users/rcalonso/events{/privacy}", "received_events_url": "https://api.github.com/users/rcalonso/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 996845227, "node_id": "MDU6TGFiZWw5OTY4NDUyMjc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:dist-strat", "name": "comp:dist-strat", "color": "0052cc", "default": false, "description": "Distribution Strategy related issues" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hello, @rcalonso!\r\nCould you try to use the tf.keras.experimental.load_from_saved_model function with the policy=\"infer_float32_vars\" argument. This will load the model with the appropriate mixed precision policy for inference. Also could you try with latest TF v2.15 or nightly and let us know?\r\nThank you!", "Hi @sushreebarsa, thanks for the response.\r\n\r\nThe function `tf.keras.experimental.load_from_saved_model` is not available in TF 2.14.1 and 2.15. According to https://github.com/tensorflow/tensorflow/blob/v2.15.0/tensorflow/python/keras/saving/saved_model_experimental.py#L407-L409 the function `tf.keras.experimental.load_from_saved_model` is deprecated.\r\n\r\nAlso, using the policy `infer_float32_vars` is not possible because is deprecated.\r\n```bash\r\nValueError: Policies ending in '_float32_vars' have been removed from TensorFlow. Please use the 'mixed_float16' or 'mixed_bfloat16' policy instead. Got policy name: 'infer_float32_vars'\r\n```\r\n\r\nI tried the code I provided in the description with TF 2.15 in cloud TPU and the same error was raised.", "@rcalonso Thanks for replying here.\r\nIt seems that there is incompatibility between the saved model format and the current execution environment. This usually happens when a model trained with mixed precision and TPUs is attempted to be loaded on a CPU or without setting the appropriate mixed-precision settings. Install or update the tensorflow-model-optimization library if needed\r\n\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "@sushreebarsa I'm saving the model in `.keras` format because when saving with the saved model format (`tf`) the custom train step was not saved. I created an issue for this https://github.com/tensorflow/tensorflow/issues/62450\r\n\r\nWhat would be the appropriate mixed-precision settings? I've tried loading the model with float32 policy but the same error was raised. The tensorflow-model-optimization version used is the latest available.", "HI @rcalonso ,\r\n\r\nApologies for the delay. I have modified the code to Keras3 compatible and its executing fine.Please refer the attached [gist](https://colab.research.google.com/gist/SuryanarayanaY/a331152fd4c11a6681a62b9d6702c397/62564_tpu_keras3.ipynb). Could you please check and confirm ?\r\n\r\nThanks!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62564\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62564\">No</a>\n" ]
2023-12-04T08:56:55
2024-02-07T01:46:39
2024-02-07T01:46:33
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.14.1 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version 3.10.12 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I'm trying to train a custom model with TPU using mixed precision bfloat16. I'm able to train the model and save checkpoints in `.keras` format during training. However, when trying to load a saved model with `tf.keras.models.load_model` I'm getting an error which I'm not able to figure out. The custom model I'm trying to train uses Efficientnet as backbone and the error is raised when trying to load the mean and variance from the normalization layer of the Efficientnet model. I've reproduced the issue using only the Efficientnet model. Using full precision, the error doesn't happen. Also, when training with mixed precision float16 using a single gpu the error doesn't happen either. ### Standalone code to reproduce the issue ```shell import tensorflow as tf tf.keras.mixed_precision.set_global_policy("mixed_bfloat16") ds = ( tf.data.Dataset.from_tensor_slices( ( tf.ones([1, 224, 224, 3], dtype=tf.bfloat16), tf.ones([1, 1000], dtype=tf.float32), ) ) .repeat() .batch(batch_size=1) ) resolver = tf.distribute.cluster_resolver.TPUClusterResolver( tpu="local", ) tf.config.experimental_connect_to_cluster(resolver) # This is the TPU initialization code that has to be at the beginning. tf.tpu.experimental.initialize_tpu_system(resolver) strategy = tf.distribute.TPUStrategy(resolver) print(f"All TPU devices: {tf.config.list_logical_devices('TPU')}") with strategy.scope(): model = tf.keras.applications.EfficientNetB0() model.compile(loss="binary_crossentropy") model.fit( ds, steps_per_epoch=1, epochs=1, callbacks=[ tf.keras.callbacks.ModelCheckpoint( filepath="epoch_{epoch:03d}.keras", monitor="loss", verbose=1, save_weights_only=False, save_freq="epoch", options=None, ) ] ) print("Loading model") with strategy.scope(): loaded_model = tf.keras.models.load_model("epoch_001.keras") print("Continue training...") model.fit( ds, steps_per_epoch=1, epochs=1, callbacks=[ tf.keras.callbacks.ModelCheckpoint( filepath="epoch_{epoch:03d}.keras", monitor="loss", verbose=1, save_weights_only=False, save_freq="epoch", options=None, ) ] ) ``` ### Relevant log output ```shell D1204 08:52:31.331000367 42266 lb_policy_registry.cc:47] registering LB policy factory for "cds_experimental" D1204 08:52:31.331008502 42266 lb_policy_registry.cc:47] registering LB policy factory for "xds_cluster_resolver_experimental" D1204 08:52:31.331016540 42266 lb_policy_registry.cc:47] registering LB policy factory for "xds_override_host_experimental" D1204 08:52:31.331024721 42266 lb_policy_registry.cc:47] registering LB policy factory for "xds_wrr_locality_experimental" D1204 08:52:31.331033024 42266 lb_policy_registry.cc:47] registering LB policy factory for "ring_hash_experimental" D1204 08:52:31.331041050 42266 certificate_provider_registry.cc:33] registering certificate provider factory for "file_watcher" I1204 08:52:31.331790894 42266 ev_epoll1_linux.cc:360] grpc epoll fd: 12 I1204 08:52:31.355651140 42266 socket_utils_common_posix.cc:366] TCP_USER_TIMEOUT is available. 
TCP_USER_TIMEOUT will be used thereafter 2023-12-04 08:52:31.444721: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: AVX2 AVX512F FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-12-04 08:52:37.879040: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x55c5b9887040 initialized for platform TPU (this does not guarantee that XLA will be used). Devices: 2023-12-04 08:52:37.879099: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): TPU, 2a886c8 2023-12-04 08:52:37.879114: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (1): TPU, 2a886c8 2023-12-04 08:52:37.879127: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (2): TPU, 2a886c8 2023-12-04 08:52:37.879140: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (3): TPU, 2a886c8 2023-12-04 08:52:37.879153: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (4): TPU, 2a886c8 2023-12-04 08:52:37.879166: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (5): TPU, 2a886c8 2023-12-04 08:52:37.879179: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (6): TPU, 2a886c8 2023-12-04 08:52:37.879192: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (7): TPU, 2a886c8 2023-12-04 08:52:37.879339: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.879424: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.879493: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.879562: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.879636: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.879830: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.879898: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.879957: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880013: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880085: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880270: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880349: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880407: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880468: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 
2023-12-04 08:52:37.880533: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880711: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880778: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880845: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880917: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.880996: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881181: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881243: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881307: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881374: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881447: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881635: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881702: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881758: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881822: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.881887: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882085: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882163: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882223: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882287: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882347: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882567: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882641: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882716: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 
2023-12-04 08:52:37.882800: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:37.882864: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:42.574430: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable. 2023-12-04 08:52:42.615217: I ./tensorflow/compiler/jit/device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. 2023-12-04 08:52:42.641545: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:52:42.940869: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:53:00.898837: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:53:00.899128: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:53:00.899419: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:53:00.899711: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:53:00.899955: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:53:00.900451: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:53:00.900836: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:53:43.886267: I tensorflow/core/tpu/graph_rewrite/encapsulate_tpu_computations_pass.cc:241] Subgraph fingerprint:16085197774278075878 2023-12-04 08:53:45.781615: E tensorflow/core/grappler/optimizers/meta_optimizer.cc:961] model_pruner failed: INVALID_ARGUMENT: Graph does not contain terminal node AssignAddVariableOp. 2023-12-04 08:53:48.820860: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 
2023-12-04 08:53:48.822228: I tensorflow/core/tpu/kernels/tpu_compilation_cache_interface.cc:441] TPU host compilation cache miss: cache_key(590d039eb1480a5c:0:0), session_name() 2023-12-04 08:54:54.362634: I tensorflow/core/tpu/kernels/tpu_compile_op_common.cc:226] Compilation of 590d039eb1480a5c:0:0 with session name took 1m5.54031078s and succeeded 2023-12-04 08:54:54.567413: I tensorflow/core/tpu/kernels/tpu_compilation_cache_interface.cc:475] TPU host compilation cache: compilation complete for cache_key(590d039eb1480a5c:0:0), session_name(), subgraph_key(std::string(property.function_name) = "cluster_train_function_16085197774278075878", property.function_library_fingerprint = 6042109918039831297, property.mlir_module_fingerprint = 0, property.num_replicas = 8, topology.chip_bounds().x = 2, topology.chip_bounds().y = 2, topology.chip_bounds().z = 1, topology.wrap().x = false, topology.wrap().y = false, topology.wrap().z = false, std::string(property.shapes_prefix) = "2,224,224,3,;2,1000,;", property.guaranteed_constants_size = 0, embedding_partitions_fingerprint = "1688352644216761960") 2023-12-04 08:54:54.567501: I tensorflow/core/tpu/kernels/tpu_compilation_cache_interface.cc:541] After adding entry for key 590d039eb1480a5c:0:0 with session_name cache is 1 entries (112491064 bytes), marked for eviction 0 entries (0 bytes). 2023-12-04 08:54:54.580690: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:54:54.580856: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:54:54.580995: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:54:54.581069: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:54:54.581148: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:54:54.581236: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:54:54.581315: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 2023-12-04 08:54:54.581378: E ./tensorflow/compiler/xla/stream_executor/stream_executor_internal.h:124] SetPriority unimplemented for this stream. 
All TPU devices: [LogicalDevice(name='/device:TPU:0', device_type='TPU'), LogicalDevice(name='/device:TPU:1', device_type='TPU'), LogicalDevice(name='/device:TPU:2', device_type='TPU'), LogicalDevice(name='/device:TPU:3', device_type='TPU'), LogicalDevice(name='/device:TPU:4', device_type='TPU'), LogicalDevice(name='/device:TPU:5', device_type='TPU'), LogicalDevice(name='/device:TPU:6', device_type='TPU'), LogicalDevice(name='/device:TPU:7', device_type='TPU')] 1/1 [==============================] - ETA: 0s - loss: 0.7109 Epoch 1: saving model to epoch_001.keras 1/1 [==============================] - 101s 101s/step - loss: 0.7109 Loading model Traceback (most recent call last): File "/home/ricardo_alonso/tpu_mixed_precision.py", line 48, in <module> loaded_model = tf.keras.models.load_model("epoch_001.keras") File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/saving/saving_api.py", line 254, in load_model return saving_lib.load_model( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/saving/saving_lib.py", line 281, in load_model raise e File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/saving/saving_lib.py", line 269, in load_model _load_state( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/saving/saving_lib.py", line 466, in _load_state _load_container_state( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/saving/saving_lib.py", line 534, in _load_container_state _load_state( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/saving/saving_lib.py", line 435, in _load_state trackable.load_own_variables(weights_store.get(inner_path)) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/layers/preprocessing/normalization.py", line 392, in load_own_variables super().load_own_variables(store) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/engine/base_layer.py", line 3539, in load_own_variables v.assign(store[f"{i}"]) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/mixed_precision/autocast_variable.py", line 297, in assign return self._apply_assign_update( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/keras/src/mixed_precision/autocast_variable.py", line 269, in _apply_assign_update assign_op = update_fn(value, use_locking, name, False) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/distribute/tpu_values.py", line 167, in assign return self._policy.assign( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/distribute/tpu_values.py", line 488, in assign return assign( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/distribute/tpu_values.py", line 427, in assign return var._update( # pylint: disable=protected-access File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/distribute/values.py", line 1024, in _update return self._update_cross_replica(update_fn, value, **kwargs) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/distribute/values.py", line 973, in _update_cross_replica return self.distribute_strategy.extended.update( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/distribute/distribute_lib.py", line 3011, in update return self._update(var, fn, args, kwargs, group) File 
"/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/distribute/tpu_strategy.py", line 1655, in _update fn(value, *distribute_utils.select_replica(i, args), File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/autograph/impl/api.py", line 596, in wrapper return func(*args, **kwargs) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/distribute/tpu_util.py", line 101, in assign_fn handle, ops.convert_to_tensor(value, dtype=var.dtype), name=name) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/profiler/trace.py", line 183, in wrapped return func(*args, **kwargs) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/framework/ops.py", line 698, in convert_to_tensor return tensor_conversion_registry.convert( File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/framework/tensor_conversion_registry.py", line 234, in convert ret = conversion_func(value, dtype=dtype, name=name, as_ref=as_ref) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/framework/constant_op.py", line 328, in _constant_tensor_conversion_function return constant(v, dtype=dtype, name=name) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/framework/constant_op.py", line 267, in constant return _constant_impl(value, dtype, shape, name, verify_shape=False, File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/framework/constant_op.py", line 279, in _constant_impl return _constant_eager_impl(ctx, value, dtype, shape, verify_shape) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/framework/constant_op.py", line 289, in _constant_eager_impl t = convert_to_eager_tensor(value, ctx, dtype) File "/home/ricardo_alonso/venv310/lib/python3.10/site-packages/tensorflow/python/framework/constant_op.py", line 102, in convert_to_eager_tensor return ops.EagerTensor(value, ctx.device_name, dtype) ValueError: No cast function available. ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62564/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62564/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62563
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62563/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62563/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62563/events
https://github.com/tensorflow/tensorflow/issues/62563
2,023,003,752
I_kwDOArmXAs54lJZo
62,563
inconsistency when invoking tf.math.top_k and tf.negative
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false }
[ { "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false } ]
null
[ "The tf.math.top_k operation returns both the values and their indices, making it incompatible with the unary - operator. You'll need to specify whether you want to apply the negative operation to the values or the indices.\r\n\r\nSolution 1 : \r\nvalues, indices = tf.math.top_k(A)\r\nneg_values = -values\r\n\r\nSolution 2: create a wrapper or a helper function\r\ndef custom_negate_top_values(tensor):\r\n top_values, top_indices = tf.math.top_k(tensor)\r\n neg_top_values = -top_values\r\n return neg_top_values, top_indices\r\n\r\n\r\nOther Solution : can be to make changes to source code so it automatically support \" - \" unary operator.", "can anyone help me find the source code for math.top_k .\r\n", "Hi @beanduan22 ,\r\nSorry for the delay. I Was able to reproduce the issue on colab using TF v2.14 , 2.15, and TF-nightly, Please find the [gist](https://colab.research.google.com/gist/Venkat6871/3bc44fc05599118792714bcd635ceaa1/62563_2-14_2-15-nightly.ipynb) here for reference. We need to dig for this issue. I will give an update in a few days.\r\n\r\nThank you!", "Hi **@beanduan22**,\r\n\r\nWhen you apply the negative operation directly to the result of tf.math.top_k, you are trying to negate the entire TopKV2 object, which leads to a TypeError.\r\nYou need to separately apply the negative operation to the values part of the TopKV2 result and keep the indices unchanged. Here I am adding a [gist ](https://colab.research.google.com/gist/Venkat6871/852e781277ede158d660b722fbb447f9/62563_2-15-v.ipynb)for your reference please go through it once.\r\n\r\nThank you", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62563\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62563\">No</a>\n" ]
2023-12-04T04:32:16
2024-01-19T01:49:25
2024-01-19T01:49:23
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version tf 2.13 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? ```python import tensorflow as tf A = tf.constant([0, -2, 1, -4, 3]) print(-A) print(tf.negative(A)) y = tf.negative(tf.math.top_k(tf.negative(A))) x = -tf.math.top_k(-A) print(y) print(x) ####################################### import tensorflow as tf A = tf.constant([0, -2, 1, -4, 3]) print(-A) print(tf.negative(A)) print(tf.math.top_k(tf.negative(A))) y = tf.negative(tf.math.top_k(tf.negative(A))) print(y) ### Standalone code to reproduce the issue ```shell when invoking tf.math.top_k, the tf.negative have different result with - ``` ### Relevant log output ```shell tf.Tensor([ 0 2 -1 4 -3], shape=(5,), dtype=int32) tf.Tensor([ 0 2 -1 4 -3], shape=(5,), dtype=int32) Traceback (most recent call last): File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 9, in <module> x = -tf.math.top_k(-A) TypeError: bad operand type for unary -: 'TopKV2' ################################## tf.Tensor([ 0 2 -1 4 -3], shape=(5,), dtype=int32) tf.Tensor([ 0 2 -1 4 -3], shape=(5,), dtype=int32) TopKV2(values=<tf.Tensor: shape=(1,), dtype=int32, numpy=array([4])>, indices=<tf.Tensor: shape=(1,), dtype=int32, numpy=array([3])>) tf.Tensor( [[-4] [-3]], shape=(2, 1), dtype=int32) ```
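For reference, a minimal sketch of the workaround hinted at in the comments on this issue: tf.math.top_k returns a TopKV2 named tuple of (values, indices), so unary - is undefined on it, and tf.negative silently converts the whole tuple into a stacked tensor (which is the surprising output shown in the log above). Negating only the values field gives the intended result. The input tensor is the one from the report; variable names are illustrative.

```python
import tensorflow as tf

A = tf.constant([0, -2, 1, -4, 3])

# tf.math.top_k returns a named tuple (values, indices); negate the values
# explicitly instead of applying "-" or tf.negative to the whole result.
top = tf.math.top_k(tf.negative(A))      # largest element of -A
minimum_of_A = tf.negative(top.values)   # -> [-4], the minimum of A
index_of_minimum = top.indices           # -> [3]

print(minimum_of_A.numpy(), index_of_minimum.numpy())
```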
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62563/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62563/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62562
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62562/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62562/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62562/events
https://github.com/tensorflow/tensorflow/issues/62562
2,022,981,668
I_kwDOArmXAs54lEAk
62,562
Crash when invoking tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient; the crash is nondeterministic
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false }
[ { "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false } ]
null
[ "@beanduan22,\r\nI request you please take a look at this issue https://github.com/tensorflow/tensorflow/issues/46910 where a similar issue has been raised and it is still open. Could you please follow that issue, since it is already being tracked there? Thank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62562\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62562\">No</a>\n" ]
2023-12-04T04:12:05
2023-12-21T01:48:34
2023-12-21T01:48:31
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version tf 2.13.0 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? ```python import tensorflow as tf import numpy as np tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient(inputs=1, gradients=1, max=[], min=np.ones(0)) ### Standalone code to reproduce the issue ```shell when you run this, the crash output is uncertain some times it shown min has incorrect size, expected 26950 was 0 some times it shown min has incorrect size, expected 97 was 0 some times it no crash. ``` ### Relevant log output ```shell 2023-12-04 14:06:28.318553: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: SSE SSE2 SSE3 SSE4.1 SSE4.2 AVX AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. Traceback (most recent call last): File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 6, in <module> tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient(inputs=1, gradients=1, max=[], min=np.ones(0)) File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 3978, in fake_quant_with_min_max_vars_per_channel_gradient return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback( File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 4056, in fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback _result = _execute.execute(b"FakeQuantWithMinMaxVarsPerChannelGradient", 3, File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\eager\execute.py", line 53, in quick_execute tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name, tensorflow.python.framework.errors_impl.InvalidArgumentError: {{function_node __wrapped__FakeQuantWithMinMaxVarsPerChannelGradient_device_/job:localhost/replica:0/task:0/device:CPU:0}} min has incorrect size, expected 97 was 0 [Op:FakeQuantWithMinMaxVarsPerChannelGradient] or Traceback (most recent call last): File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 6, in <module> tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient(inputs=1, gradients=1, max=[], min=np.ones(0)) File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 3978, in fake_quant_with_min_max_vars_per_channel_gradient return fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback( File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\gen_array_ops.py", line 4056, in fake_quant_with_min_max_vars_per_channel_gradient_eager_fallback _result = _execute.execute(b"FakeQuantWithMinMaxVarsPerChannelGradient", 3, File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\eager\execute.py", line 53, in quick_execute tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name, tensorflow.python.framework.errors_impl.InvalidArgumentError: {{function_node 
__wrapped__FakeQuantWithMinMaxVarsPerChannelGradient_device_/job:localhost/replica:0/task:0/device:CPU:0}} min has incorrect size, expected 26950 was 0 [Op:FakeQuantWithMinMaxVarsPerChannelGradient] or no crash ```
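For reference, a hedged sketch of what a shape-consistent call looks like: the per-channel variant expects min and max to be 1-D tensors whose length equals the last dimension of inputs, and gradients to have the same shape as inputs, which is the constraint the error message above is reporting. The shapes and values below are made up for illustration; this does not address why the size reported in the error varies between runs.

```python
import tensorflow as tf

# Per-channel fake-quant gradients: `min`/`max` are 1-D with length equal to the
# last dimension of `inputs`, and `gradients` has the same shape as `inputs`.
inputs = tf.constant([[0.1, 0.6, 0.9],
                      [0.4, 0.2, 0.8]], dtype=tf.float32)   # shape (2, 3)
gradients = tf.ones_like(inputs)                            # shape (2, 3)
min_per_channel = tf.constant([0.0, 0.0, 0.0])              # shape (3,)
max_per_channel = tf.constant([1.0, 1.0, 1.0])              # shape (3,)

d_input, d_min, d_max = tf.quantization.fake_quant_with_min_max_vars_per_channel_gradient(
    gradients=gradients,
    inputs=inputs,
    min=min_per_channel,
    max=max_per_channel,
)
print(d_input)
```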
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62562/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62562/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62561
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62561/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62561/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62561/events
https://github.com/tensorflow/tensorflow/issues/62561
2,022,930,402
I_kwDOArmXAs54k3fi
62,561
Add PR Template
{ "login": "ghost", "id": 10137, "node_id": "MDQ6VXNlcjEwMTM3", "avatar_url": "https://avatars.githubusercontent.com/u/10137?v=4", "gravatar_id": "", "url": "https://api.github.com/users/ghost", "html_url": "https://github.com/ghost", "followers_url": "https://api.github.com/users/ghost/followers", "following_url": "https://api.github.com/users/ghost/following{/other_user}", "gists_url": "https://api.github.com/users/ghost/gists{/gist_id}", "starred_url": "https://api.github.com/users/ghost/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/ghost/subscriptions", "organizations_url": "https://api.github.com/users/ghost/orgs", "repos_url": "https://api.github.com/users/ghost/repos", "events_url": "https://api.github.com/users/ghost/events{/privacy}", "received_events_url": "https://api.github.com/users/ghost/received_events", "type": "User", "site_admin": false }
[ { "id": 2012480497, "node_id": "MDU6TGFiZWwyMDEyNDgwNDk3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:docs-feature", "name": "type:docs-feature", "color": "159b2e", "default": false, "description": "Doc issues for new feature, or clarifications about functionality" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @Yuheshpandian ,\r\n\r\nWe have [CONTRIBUTING.md](https://github.com/tensorflow/tensorflow/blob/master/CONTRIBUTING.md) for users to guide about how to contribute to the source code and the process flow.\r\n\r\nI think adding PR template is not required here.Could you need to add more info or example templates to review?", "ok that's good to hear , I thought having a PR template would actually help new skilled ones in contributing to your project. But anyway thank you for responding and informing me", "I really thought adding PR_TEMPLATE is very essential and that is why it is there in the community standards section in insights but I understood it is not so required.\r\n\r\nThank you", "Hi @Yuheshpandian ,\r\n\r\nYes, PR template is not much required ask, since most of the details as mentioned in issue template are irrelevant to PR and while raising PR user is asked for Title and the description which is enough. In some cases if test cases are required Dev team will ask for it anyways on case to case basis.We do add suitable labels for the PR once it is created.\r\n\r\nPlease feel free to close the issue if satisfied with the responses.Thanks!\r\n\r\n", "Ok thank you for the response " ]
2023-12-04T03:10:53
2023-12-05T13:49:45
2023-12-05T13:49:45
NONE
null
null
null
Hi @tensorflow, I'm @Yuheshpandian. I personally love your project. I love to use it in Python, but I'm still learning a lot of intermediate Python. I have seen many contributions to your repository, so it would be helpful if you added a PR_Template (Pull Request Template) to your project. Thank you
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62561/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62561/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62560
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62560/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62560/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62560/events
https://github.com/tensorflow/tensorflow/issues/62560
2,022,917,556
I_kwDOArmXAs54k0W0
62,560
Crash: 'tuple' object has no attribute 'shape' when calling an API combination
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097545817, "node_id": "MDU6TGFiZWwxMDk3NTQ1ODE3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:apis", "name": "comp:apis", "color": "0052cc", "default": false, "description": "Highlevel API related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "@beanduan22 Could you modify the sq() function to return a single tensor instead of a tuple. One approach is to concatenate the individual tensors into a single tensor using the tf.stack() function. Thank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62560\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62560\">No</a>\n" ]
2023-12-04T02:56:37
2023-12-20T01:42:47
2023-12-20T01:42:38
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version tf 2.13.0 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? ```python import tensorflow as tf with tf.device("/cpu:0"): def sq(a): return a ** 0, a ** 1, a ** 2, a ** 3 def test(a) : with tf.GradientTape() as tape: tape.watch(a) y = sq(a) jaco = tape.jacobian(y, a) return jaco a = tf.constant([0, 0.1, 0.2, 0.3], dtype=tf.float32) cpu_out = test(a) with tf.device("/gpu:0"): def sq(a): return a ** 0, a ** 1, a ** 2, a ** 3 def test(a): with tf.GradientTape() as tape: tape.watch(a) y = sq(a) jaco = tape.jacobian(y, a) return jaco a = tf.constant([0, 1, 2, 3], dtype=tf.float32) gpu_out = test(a) print(cpu_out) print(gpu_out) ### Standalone code to reproduce the issue ```shell AttributeError: 'tuple' object has no attribute 'shape' ``` ### Relevant log output ```shell 2023-12-04 12:53:33.670377: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: SSE SSE2 SSE3 SSE4.1 SSE4.2 AVX AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. Traceback (most recent call last): File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 15, in <module> cpu_out = test(a) File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 12, in test jaco = tape.jacobian(y, a) File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\eager\backprop.py", line 1147, in jacobian target_static_shape = target.shape AttributeError: 'tuple' object has no attribute 'shape' ```
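A minimal sketch of the workaround suggested in the comments above: tape.jacobian expects a Tensor target, so stacking the tuple returned by sq() into a single tensor avoids the AttributeError. The input values below are illustrative.

```python
import tensorflow as tf

def sq(a):
    return a ** 0, a ** 1, a ** 2, a ** 3

def test(a):
    with tf.GradientTape() as tape:
        tape.watch(a)
        # Stack the tuple of outputs into one (4, n) tensor so that
        # tape.jacobian receives a Tensor rather than a tuple.
        y = tf.stack(sq(a))
    return tape.jacobian(y, a)

a = tf.constant([1.0, 2.0, 3.0, 4.0], dtype=tf.float32)
print(test(a))   # Jacobian with shape (4, 4, 4)
```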
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62560/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62560/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62559
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62559/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62559/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62559/events
https://github.com/tensorflow/tensorflow/issues/62559
2,022,904,531
I_kwDOArmXAs54kxLT
62,559
Crash when calling the API combination of tf.py_function(), tf.math.log(), and tf.GradientTape().jacobian on both GPU and CPU
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097545817, "node_id": "MDU6TGFiZWwxMDk3NTQ1ODE3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:apis", "name": "comp:apis", "color": "0052cc", "default": false, "description": "Highlevel API related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false }
[ { "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi **@beanduan22** ,\r\nSorry for the delay. I Was able to reproduce the issue on colab using TF v2.14 , 2.15, and TF-nightly, Please find the [gist](https://colab.research.google.com/gist/Venkat6871/012ae46e4f2181daa763828e7affe2e2/62559_2-14_2-15-nightly-v.ipynb) here for reference. We need to dig for this issue. I will give an update in a few days.\r\n\r\nThank you!\r\n", "Hi **@beanduan22**,\r\n\r\nThere is an issue with the combination of tf.py_function() and GPU usage. The tf.py_function may not be compatible with GPU execution in some cases. And it may work well on the CPU.\r\nWhen using tf.py_function on the GPU, it is recommended to use tf.autodiff.ForwardAccumulator instead of tf.GradientTape. Here I added a [gist](https://colab.research.google.com/gist/Venkat6871/1ac82907b2599f6fa924d80284c812a8/62559_2-15-v.ipynb) for your reference. Once you go through this gist.\r\n\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62559\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62559\">No</a>\n" ]
2023-12-04T02:42:16
2024-01-19T01:49:29
2024-01-19T01:49:24
NONE
null
null
null
### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.13
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
```python
import tensorflow as tf

with tf.device("/cpu:0"):
    def log(a):
        return tf.py_function(lambda a: tf.math.log(a), [a], a.dtype)

    def test(a):
        with tf.GradientTape() as tape:
            tape.watch(a)
            y = log(a)
        jaco = tape.jacobian(y, a)
        return jaco

    a = tf.constant([0, 2, 3], dtype=tf.float32)
    cpu_out = test(a)

with tf.device("/gpu:0"):
    def log(a):
        return tf.py_function(lambda a: tf.math.log(a), [a], a.dtype)

    def test(a):
        with tf.GradientTape() as tape:
            tape.watch(a)
            y = log(a)
        jaco = tape.jacobian(y, a)
        return jaco

    a = tf.constant([0, 2, 3], dtype=tf.float32)
    gpu_out = test(a)

print(cpu_out)
print(gpu_out)
```
### Standalone code to reproduce the issue
```shell
Crash when calling the API combination of tf.py_function(), tf.math.log() and tf.GradientTape().jacobian(), on both GPU and CPU.
```
### Relevant log output
```shell
2023-12-04 12:40:51.556810: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: SSE SSE2 SSE3 SSE4.1 SSE4.2 AVX AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-12-04 12:40:51.779640: W tensorflow/core/framework/op_kernel.cc:1816] UNKNOWN: KeyError: b'pyfunc_0'
Traceback (most recent call last):
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\script_ops.py", line 266, in __call__
    return func(device, token, args)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\script_ops.py", line 144, in __call__
    outputs = self._call(device, args)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\script_ops.py", line 151, in _call
    ret = self._func(*args)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 643, in wrapper
    return func(*args, **kwargs)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\script_ops.py", line 399, in eagerly_executed_grad
    tape, eager_inputs, eager_outputs = tape_cache.pop(compat.as_bytes(token))
KeyError: b'pyfunc_0'

Traceback (most recent call last):
  File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 16, in <module>
    cpu_out = test(a)
  File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 13, in test
    jaco = tape.jacobian(y, a)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\eager\backprop.py", line 1167, in jacobian
    output = pfor_ops.pfor(loop_fn, target_size,
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\parallel_for\control_flow_ops.py", line 227, in pfor
    outputs = f()
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\util\traceback_utils.py", line 153, in error_handler
    raise e.with_traceback(filtered_tb) from None
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\eager\execute.py", line 53, in quick_execute
    tensors = pywrap_tfe.TFE_Py_Execute(ctx._handle, device_name, op_name,
tensorflow.python.framework.errors_impl.UnknownError: Graph execution error:

Detected at node 'gradient_tape/EagerPyFunc/pfor/while/EagerPyFunc' defined at (most recent call last):
  File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 16, in <module>
    cpu_out = test(a)
  File "C:\Users\uqbduan\Desktop\pythonProject\test.py", line 13, in test
    jaco = tape.jacobian(y, a)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\parallel_for\control_flow_ops.py", line 208, in f
    iters,
Node: 'gradient_tape/EagerPyFunc/pfor/while/EagerPyFunc'
KeyError: b'pyfunc_0'
Traceback (most recent call last):
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\script_ops.py", line 266, in __call__
    return func(device, token, args)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\script_ops.py", line 144, in __call__
    outputs = self._call(device, args)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\script_ops.py", line 151, in _call
    ret = self._func(*args)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\autograph\impl\api.py", line 643, in wrapper
    return func(*args, **kwargs)
  File "C:\Users\uqbduan\Anaconda3\envs\tensorflow2\lib\site-packages\tensorflow\python\ops\script_ops.py", line 399, in eagerly_executed_grad
    tape, eager_inputs, eager_outputs = tape_cache.pop(compat.as_bytes(token))
KeyError: b'pyfunc_0'

  [[{{node gradient_tape/EagerPyFunc/pfor/while/EagerPyFunc}}]] [Op:__inference_f_147]
```
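A minimal workaround sketch, not taken from the report above: the failing node is the pfor-vectorized EagerPyFunc, and `tf.GradientTape.jacobian` accepts `experimental_use_pfor=False`, which falls back to a `while_loop`-based Jacobian. Whether this actually sidesteps the `b'pyfunc_0'` KeyError here is an assumption, not something verified in the report.

```python
import tensorflow as tf

def log(a):
    # Same py_function wrapper around tf.math.log as in the report.
    return tf.py_function(lambda t: tf.math.log(t), [a], a.dtype)

a = tf.constant([0, 2, 3], dtype=tf.float32)
with tf.GradientTape() as tape:
    tape.watch(a)
    y = log(a)

# experimental_use_pfor=False computes the Jacobian with a while_loop
# instead of vectorizing the EagerPyFunc op with pfor.
jaco = tape.jacobian(y, a, experimental_use_pfor=False)
print(jaco)
```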
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62559/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62559/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62558
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62558/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62558/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62558/events
https://github.com/tensorflow/tensorflow/issues/62558
2,022,858,204
I_kwDOArmXAs54kl3c
62,558
Unexpected NaN when invoking the API combination of tf.GradientTape().jacobian, tf.multiply and tf.math.reciprocal
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 6671388216, "node_id": "LA_kwDOArmXAs8AAAABjaVOOA", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.16", "name": "TF 2.16", "color": "FB9F9D", "default": false, "description": "" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs,\r\nI was able to reproduce the issue on tensorflow v2.14, v2.15 and tf-nightly. Kindly find the gist of it [here](https://colab.research.google.com/gist/tilakrayal/3a14ff730af805b46e67d991f49604c7/untitled1610.ipynb)." ]
2023-12-04T01:46:32
2024-04-10T01:30:52
null
NONE
null
null
null
### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.13
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
```python
import tensorflow as tf
import numpy as np

with tf.device("/cpu:0"):
    def test(a, b):
        with tf.GradientTape() as tape_1:
            tape_1.watch(a)
            tape_1.watch(b)
            w = a * tf.math.reciprocal(b)
            print(w)
        grad_1 = tape_1.jacobian(w, a)
        return grad_1

    a = tf.constant([3], dtype=tf.float32)
    b = tf.constant([0, 2, 3], dtype=tf.float32)
    cpu_output_1 = test(a, b)

with tf.device("/gpu:0"):
    def test(a, b):
        with tf.GradientTape() as tape_1:
            tape_1.watch(a)
            tape_1.watch(b)
            w = a * tf.math.reciprocal(b)
        grad_1 = tape_1.jacobian(w, a)
        return grad_1

    a = tf.constant([3], dtype=tf.float32)
    b = tf.constant([0, 2, 3], dtype=tf.float32)
    gpu_output_1 = test(a, b)

print(cpu_output_1)
print(gpu_output_1)
```
### Standalone code to reproduce the issue
```shell
While fuzzing TensorFlow, I found an unexpected NaN when invoking the API combination of tf.GradientTape().jacobian, tf.multiply and tf.math.reciprocal.
```
### Relevant log output
```shell
2023-12-04 11:42:40.628266: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: SSE SSE2 SSE3 SSE4.1 SSE4.2 AVX AVX2 AVX512F AVX512_VNNI FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
tf.Tensor([inf 1.5 1. ], shape=(3,), dtype=float32)
tf.Tensor(
[[inf]
 [nan]
 [nan]], shape=(3, 1), dtype=float32)
tf.Tensor(
[[inf]
 [nan]
 [nan]], shape=(3, 1), dtype=float32)
```
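Since w_i = a * (1 / b_i) with a scalar a, the analytic Jacobian entry dw_i/da is simply 1 / b_i, so only the row corresponding to b = 0 should be inf and the remaining rows should be finite. A small sketch, not part of the report, that computes this reference directly:

```python
import tensorflow as tf

a = tf.constant([3], dtype=tf.float32)
b = tf.constant([0, 2, 3], dtype=tf.float32)

# dw_i/da = 1 / b_i for w_i = a / b_i
expected_jacobian = tf.reshape(tf.math.reciprocal(b), (-1, 1))
print(expected_jacobian)  # [[inf], [0.5], [0.33333334]]
```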
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62558/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62558/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62557
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62557/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62557/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62557/events
https://github.com/tensorflow/tensorflow/issues/62557
2,022,856,749
I_kwDOArmXAs54klgt
62,557
Unexpected NaN when invoking the API combination of tf.GradientTape().jacobian, tf.multiply and tf.math.reciprocal
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62557\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62557\">No</a>\n", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62557\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62557\">No</a>\n" ]
2023-12-04T01:44:54
2023-12-04T01:45:36
2023-12-04T01:45:33
NONE
null
null
null
### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.13
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
```python
import tensorflow as tf
import numpy as np

with tf.device("/cpu:0"):
    def test(a, b):
        with tf.GradientTape() as tape_1:
            tape_1.watch(a)
            tape_1.watch(b)
            w = a * tf.math.reciprocal(b)
            print(w)
        grad_1 = tape_1.jacobian(w, a)
        return grad_1

    a = tf.constant([3], dtype=tf.float32)
    b = tf.constant([0, 2, 3], dtype=tf.float32)
    cpu_output_1 = test(a, b)

with tf.device("/gpu:0"):
    def test(a, b):
        with tf.GradientTape() as tape_1:
            tape_1.watch(a)
            tape_1.watch(b)
            w = a * tf.math.reciprocal(b)
        grad_1 = tape_1.jacobian(w, a)
        return grad_1

    a = tf.constant([3], dtype=tf.float32)
    b = tf.constant([0, 2, 3], dtype=tf.float32)
    gpu_output_1 = test(a, b)

print(cpu_output_1)
print(gpu_output_1)
```
### Standalone code to reproduce the issue
```shell
Unexpected NaN when invoking the API combination of tf.GradientTape().jacobian, tf.multiply and tf.math.reciprocal.
```
### Relevant log output
```shell
tf.Tensor([inf 1.5 1. ], shape=(3,), dtype=float32)
tf.Tensor(
[[inf]
 [nan]
 [nan]], shape=(3, 1), dtype=float32)
tf.Tensor(
[[inf]
 [nan]
 [nan]], shape=(3, 1), dtype=float32)
```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62557/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62557/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62556
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62556/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62556/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62556/events
https://github.com/tensorflow/tensorflow/issues/62556
2,022,805,635
I_kwDOArmXAs54kZCD
62,556
Unexpected 0 when invoking tf.GradientTape() and tf.math.reduce_prod() both on CPU and GPU
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62556\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62556\">No</a>\n" ]
2023-12-04T00:42:59
2023-12-04T01:21:49
2023-12-04T01:21:46
NONE
null
null
null
### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.13
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
```python
import tensorflow as tf
import numpy as np

with tf.device("/cpu:0"):
    def test(x):
        with tf.GradientTape() as tape_1:
            tape_1.watch(x)
            w = tf.math.reduce_prod(x)
        grad_1 = tape_1.gradient(w, x)
        return grad_1

    x = tf.constant([[0, 0.1, 0.2], [0, 0.1, 0.2]], dtype=tf.float32)
    cpu_output_1 = test(x)

with tf.device("/gpu:0"):
    def test(x):
        with tf.GradientTape() as tape_1:
            tape_1.watch(x)
            w = tf.math.reduce_prod(x)
        grad_1 = tape_1.gradient(w, x)
        return grad_1

    x = tf.constant([[0, 0.1, 0.2], [0, 0.1, 0.2]], dtype=tf.float32)
    gpu_output_1 = test(x)

expect_out_1 = tf.constant([[0, 0.2, 0.4], [0, 2, 4.]], dtype=tf.float32)
print(expect_out_1)
print(cpu_output_1)
print(gpu_output_1)
```
### Standalone code to reproduce the issue
```shell
Unexpected 0 when invoking tf.GradientTape() and tf.math.reduce_prod() both on CPU and GPU
```
### Relevant log output
```shell
tf.Tensor(
[[0.  0.2 0.4]
 [0.  2.  4. ]], shape=(2, 3), dtype=float32)
tf.Tensor(
[[0. 0. 0.]
 [0. 0. 0.]], shape=(2, 3), dtype=float32)
tf.Tensor(
[[0. 0. 0.]
 [0. 0. 0.]], shape=(2, 3), dtype=float32)
```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62556/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62556/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62555
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62555/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62555/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62555/events
https://github.com/tensorflow/tensorflow/issues/62555
2,022,801,401
I_kwDOArmXAs54kX_5
62,555
Unexpected 0 from the API combination of tf.GradientTape() and tf.math.reduce_prod(x) on GPU and CPU
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097545817, "node_id": "MDU6TGFiZWwxMDk3NTQ1ODE3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:apis", "name": "comp:apis", "color": "0052cc", "default": false, "description": "Highlevel API related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false }
[ { "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi **@beanduan22** ,\r\nSorry for the delay. The tf.math.reduce_prod function multiplies the elements along the specified axis, and if there are zeros in the tensor, the result will be zero.If you want to compute the gradient of the product of elements in x, you might want to handle the zeros differently, for example, by adding a small constant to avoid issues with zero gradients. I added that small constant in the code and provided a [gist](https://colab.research.google.com/gist/Venkat6871/29cd011588bce1fc07f1dde6b1d0efba/62555_2-14_2-15_tf-nightly.ipynb) for your reference. Could you go through it once and let me know the update.\r\n\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62555\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62555\">No</a>\n" ]
2023-12-04T00:36:04
2023-12-23T01:47:33
2023-12-23T01:47:29
NONE
null
null
null
### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.13
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
```python
import tensorflow as tf
import numpy as np

with tf.device("/cpu:0"):
    def test(x):
        with tf.GradientTape() as tape_1:
            tape_1.watch(x)
            w = tf.math.reduce_prod(x)
        grad_1 = tape_1.gradient(w, x)
        return grad_1

    x = tf.constant([[0, 0.1, 0.2], [0, 1, 2.]], dtype=tf.float32)
    cpu_output_1 = test(x)

with tf.device("/gpu:0"):
    def test(x):
        with tf.GradientTape() as tape_1:
            tape_1.watch(x)
            w = tf.math.reduce_prod(x)
        grad_1 = tape_1.gradient(w, x)
        return grad_1

    x = tf.constant([[0, 0.1, 0.2], [0, 1, 2.]], dtype=tf.float32)
    gpu_output_1 = test(x)

print(cpu_output_1)
print(gpu_output_1)
```
### Standalone code to reproduce the issue
```shell
Unexpected 0 from the API combination of tf.GradientTape() and tf.math.reduce_prod(x) on both GPU and CPU.
```
### Relevant log output
```shell
tf.Tensor(
[[0. 0. 0.]
 [0. 0. 0.]], shape=(2, 3), dtype=float32)
tf.Tensor(
[[0. 0. 0.]
 [0. 0. 0.]], shape=(2, 3), dtype=float32)
```
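A minimal sketch of the small-constant workaround suggested in the comments for this issue; the 1e-7 offset is an arbitrary choice, not taken from the linked gist. Shifting the zero entries slightly keeps every partial product of tf.math.reduce_prod non-zero, so the gradient no longer collapses to all zeros.

```python
import tensorflow as tf

x = tf.constant([[0, 0.1, 0.2], [0, 1, 2.]], dtype=tf.float32)
eps = 1e-7  # arbitrary small constant, an assumption rather than a fixed recommendation

with tf.GradientTape() as tape:
    tape.watch(x)
    # Each partial derivative of reduce_prod is the product of the other
    # entries; with the offset, none of those products is exactly zero.
    w = tf.math.reduce_prod(x + eps)

print(tape.gradient(w, x))
```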
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62555/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62555/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62554
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62554/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62554/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62554/events
https://github.com/tensorflow/tensorflow/issues/62554
2,022,768,841
I_kwDOArmXAs54kQDJ
62,554
Unexpected NaN value on GPU and CPU when invoking tf.GradientTape() and tf.divide()
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false }
[ { "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false } ]
null
[ "@beanduan22,\r\nI tried to execute the mentioned code with the alternative approach and the values and it was executed and provided the output as intended. Kindly find the gist of it [here](https://colab.research.google.com/gist/tilakrayal/732777e8a0a36f0e0570f0b022ec44e2/untitled1554.ipynb).\r\n\r\nAlso the Jacobian of **z = x / y** with respect to x. As you can see, the Jacobian is a matrix that contains the partial derivatives of `z` with respect to each element of `x`. The first element of the Jacobian is the partial derivative of z with respect to x[0], the second element of the Jacobian is the partial derivative of z with respect to x[1], and so on. Thank you!\r\n\r\n", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62554\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62554\">No</a>\n" ]
2023-12-03T23:34:55
2023-12-21T01:48:36
2023-12-21T01:48:33
NONE
null
null
null
### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.13
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
```python
import tensorflow as tf
import numpy as np

with tf.device("/cpu:0"):
    def test(x, y):
        with tf.GradientTape() as tape:
            tape.watch(x)
            tape.watch(y)
            z = tf.divide(x, y)
        jaco = tape.jacobian(z, x)
        return jaco

    x = tf.constant([1], dtype=tf.float32)
    y = tf.constant([0, 1, 2], dtype=tf.float32)
    cpu_output = test(x, y)

with tf.device("/gpu:0"):
    def test(x, y):
        with tf.GradientTape() as tape:
            tape.watch(x)
            tape.watch(y)
            z = tf.divide(x, y)
        jaco = tape.jacobian(z, x)
        return jaco

    x = tf.constant([1], dtype=tf.float32)
    y = tf.constant([0, 1, 2], dtype=tf.float32)
    gpu_output = test(x, y)

expected_output = tf.constant([[np.inf], [1], [0.5]])
print(cpu_output)
print(gpu_output)
print(expected_output)
```
### Standalone code to reproduce the issue
```shell
While fuzzing TensorFlow, I found an unexpected NaN on both GPU and CPU.
```
### Relevant log output
```shell
tf.Tensor(
[[inf]
 [nan]
 [nan]], shape=(3, 1), dtype=float32)
tf.Tensor(
[[inf]
 [nan]
 [nan]], shape=(3, 1), dtype=float32)
tf.Tensor(
[[inf]
 [1. ]
 [0.5]], shape=(3, 1), dtype=float32)
```
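Because z_i = x_0 / y_i, the expected Jacobian column dz_i/dx_0 equals 1 / y_i, which is what expected_output in the report encodes. A short sketch, not part of the report, that computes that reference directly and also shows tf.math.divide_no_nan for the case where a zero in y should not propagate inf/NaN at all:

```python
import tensorflow as tf

x = tf.constant([1], dtype=tf.float32)
y = tf.constant([0, 1, 2], dtype=tf.float32)

# Analytic reference: dz_i/dx_0 = 1 / y_i
reference = tf.reshape(tf.math.reciprocal(y), (-1, 1))
print(reference)  # [[inf], [1.], [0.5]]

# If the zero in y is expected, divide_no_nan yields 0 instead of inf/NaN.
print(tf.math.divide_no_nan(x, y))
```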
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62554/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62554/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62553
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62553/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62553/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62553/events
https://github.com/tensorflow/tensorflow/issues/62553
2,022,764,815
I_kwDOArmXAs54kPEP
62,553
Unexpected NaN result from the API combination of tf.GradientTape() and tf.divide() on both GPU and CPU
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62553\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62553\">No</a>\n" ]
2023-12-03T23:24:11
2023-12-03T23:25:00
2023-12-03T23:24:56
NONE
null
null
null
### Issue type
Bug
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
tf 2.13
### Custom code
Yes
### OS platform and distribution
_No response_
### Mobile device
_No response_
### Python version
_No response_
### Bazel version
_No response_
### GCC/compiler version
_No response_
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
```python
import tensorflow as tf
import numpy as np

with tf.device("/cpu:0"):
    def test(x, y):
        with tf.GradientTape() as tape:
            tape.watch(x)
            tape.watch(y)
            z = tf.divide(x, y)
        jaco = tape.jacobian(z, x)
        return jaco

    x = tf.constant([1], dtype=tf.float32)
    y = tf.constant([0, 1, 2], dtype=tf.float32)
    cpu_output = test(x, y)

with tf.device("/gpu:0"):
    def test(x, y):
        with tf.GradientTape() as tape:
            tape.watch(x)
            tape.watch(y)
            z = tf.divide(x, y)
        jaco = tape.jacobian(z, x)
        return jaco

    x = tf.constant([1], dtype=tf.float32)
    y = tf.constant([0, 1, 2], dtype=tf.float32)
    gpu_output = test(x, y)

expected_output = tf.constant([[np.inf], [1 / 1], [1 / 2]])
print(cpu_output)
print(gpu_output)
print(expected_output)
```
### Standalone code to reproduce the issue
```shell
Unexpected NaN in the result on both GPU and CPU.
```
### Relevant log output
```shell
tf.Tensor(
[[inf]
 [nan]
 [nan]], shape=(3, 1), dtype=float32)
tf.Tensor(
[[inf]
 [nan]
 [nan]], shape=(3, 1), dtype=float32)
tf.Tensor(
[[inf]
 [1. ]
 [0.5]], shape=(3, 1), dtype=float32)
```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62553/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62553/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62552
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62552/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62552/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62552/events
https://github.com/tensorflow/tensorflow/issues/62552
2,022,623,606
I_kwDOArmXAs54jsl2
62,552
Cannot build tensorflow-cpu with CUDA installed
{ "login": "xuesu", "id": 7857599, "node_id": "MDQ6VXNlcjc4NTc1OTk=", "avatar_url": "https://avatars.githubusercontent.com/u/7857599?v=4", "gravatar_id": "", "url": "https://api.github.com/users/xuesu", "html_url": "https://github.com/xuesu", "followers_url": "https://api.github.com/users/xuesu/followers", "following_url": "https://api.github.com/users/xuesu/following{/other_user}", "gists_url": "https://api.github.com/users/xuesu/gists{/gist_id}", "starred_url": "https://api.github.com/users/xuesu/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/xuesu/subscriptions", "organizations_url": "https://api.github.com/users/xuesu/orgs", "repos_url": "https://api.github.com/users/xuesu/repos", "events_url": "https://api.github.com/users/xuesu/events{/privacy}", "received_events_url": "https://api.github.com/users/xuesu/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 1205615612, "node_id": "MDU6TGFiZWwxMjA1NjE1NjEy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:%20ubuntu/linux", "name": "subtype: ubuntu/linux", "color": "b619ea", "default": false, "description": "Ubuntu/Linux Build/Installation Issues" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@xuesu If you don't explicitly require CUDA for other applications, consider uninstalling it before building TensorFlow-CPU. This will ensure that TensorFlow-CPU doesn't attempt to utilize CUDA libraries during the build process.\r\nIf you need both TensorFlow-CPU and CUDA for different projects, consider using separate virtual environments for each. This will isolate the CUDA environment from the TensorFlow-CPU environment, preventing conflicts between the two.\r\n\r\nThank you!", "I cannot build with cuda installed with `bazel build --config=opt --config=cuda //tensorflow/tools/pip_package:build_pip_package` either...same error...which means I figured out how to build cpu version while unable to build gpu version....I tried Dockerfile in both tensorflow/build and ci ", "Dear @sushreebarsa , can I get the dockerfile of tensorflow/tensorflow:devel? I tried multiple resources and only this image can both find cuda and c++ standard library....however, cuda 11.2 is too low to compile...." ]
2023-12-03T16:57:33
2023-12-26T23:15:41
null
NONE
null
null
null
### Issue type
Build/Install
### Have you reproduced the bug with TensorFlow Nightly?
Yes
### Source
source
### TensorFlow version
2.14.0
### Custom code
No
### OS platform and distribution
Linux Ubuntu 22.04
### Mobile device
_No response_
### Python version
3.11
### Bazel version
6.1.0
### GCC/compiler version
clang 16.0.6
### CUDA/cuDNN version
_No response_
### GPU model and memory
_No response_
### Current behavior?
If CUDA is installed, I am unable to build CPU-only TensorFlow without CUDA. I installed clang with:
```
RUN wget -qO- https://apt.llvm.org/llvm-snapshot.gpg.key | tee /etc/apt/trusted.gpg.d/apt.llvm.org.asc
RUN echo "deb http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" >> /etc/apt/sources.list
RUN echo "deb-src http://apt.llvm.org/jammy/ llvm-toolchain-jammy-16 main" >> /etc/apt/sources.list
RUN apt-get update -y && apt-get install -y libllvm-16-ocaml-dev libllvm16 llvm-16 llvm-16-dev llvm-16-doc llvm-16-examples llvm-16-runtime clang-16 clang-tools-16 clang-16-doc libclang-common-16-dev libclang-16-dev libclang1-16 clang-format-16 python3-clang-16 clangd-16 clang-tidy-16 libc++-16-dev libc++abi-16-dev
```
### Standalone code to reproduce the issue
```shell
wget https://developer.download.nvidia.com/compute/cuda/repos/ubuntu2204/x86_64/cuda-ubuntu2204.pin \
  && mv cuda-ubuntu2204.pin /etc/apt/preferences.d/cuda-repository-pin-600 \
  && wget https://developer.download.nvidia.com/compute/cuda/11.8.0/local_installers/cuda-repo-ubuntu2204-11-8-local_11.8.0-520.61.05-1_amd64.deb \
  && dpkg -i cuda-repo-ubuntu2204-11-8-local_11.8.0-520.61.05-1_amd64.deb \
  && cp /var/cuda-repo-ubuntu2204-11-8-local/cuda-*-keyring.gpg /usr/share/keyrings/ \
  && apt-get update \
  && apt-get -y install cuda
```
then `bazel clean --expunge && bazel build -c opt --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=1 //tensorflow/tools/pip_package:build_pip_package`
### Relevant log output
```shell
I am unable to build TensorFlow from source since it keeps warning that it cannot find the C++ standard library.

ERROR: /root/.cache/bazel/_bazel_root/f3e6cea3d5621cddee645150a95a9495/external/llvm-project/llvm/BUILD.bazel:191:11: Compiling llvm/lib/Demangle/Demangle.cpp [for tool] failed: (Exit 1): clang failed: error executing command (from target @llvm-project//llvm:Demangle) /usr/lib/llvm-16/bin/clang -U_FORTIFY_SOURCE -fstack-protector -Wall -Wthread-safety -Wself-assign -Wunused-but-set-parameter -Wno-free-nonheap-object -fcolor-diagnostics -fno-omit-frame-pointer -g0 ... (remaining 70 arguments skipped)
In file included from external/llvm-project/llvm/lib/Demangle/Demangle.cpp:13:
external/llvm-project/llvm/include/llvm/Demangle/Demangle.h:12:10: fatal error: 'cstddef' file not found
#include <cstddef>
         ^~~~~~~~~
1 error generated.
Target //tensorflow/tools/pip_package:build_pip_package failed to build
Use --verbose_failures to see the command lines of failed build steps.
INFO: Elapsed time: 0.324s, Critical Path: 0.12s
INFO: 39 processes: 36 internal, 3 local.
FAILED: Build did NOT complete successfully
```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62552/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62552/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62551
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62551/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62551/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62551/events
https://github.com/tensorflow/tensorflow/issues/62551
2,022,617,209
I_kwDOArmXAs54jrB5
62,551
Python 3.11 TF memory leaks
{ "login": "0x0L", "id": 3621629, "node_id": "MDQ6VXNlcjM2MjE2Mjk=", "avatar_url": "https://avatars.githubusercontent.com/u/3621629?v=4", "gravatar_id": "", "url": "https://api.github.com/users/0x0L", "html_url": "https://github.com/0x0L", "followers_url": "https://api.github.com/users/0x0L/followers", "following_url": "https://api.github.com/users/0x0L/following{/other_user}", "gists_url": "https://api.github.com/users/0x0L/gists{/gist_id}", "starred_url": "https://api.github.com/users/0x0L/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/0x0L/subscriptions", "organizations_url": "https://api.github.com/users/0x0L/orgs", "repos_url": "https://api.github.com/users/0x0L/repos", "events_url": "https://api.github.com/users/0x0L/events{/privacy}", "received_events_url": "https://api.github.com/users/0x0L/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 1097543484, "node_id": "MDU6TGFiZWwxMDk3NTQzNDg0", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:runtime", "name": "comp:runtime", "color": "0052cc", "default": false, "description": "c++ runtime, performance issues (cpu)" }, { "id": 1205615612, "node_id": "MDU6TGFiZWwxMjA1NjE1NjEy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:%20ubuntu/linux", "name": "subtype: ubuntu/linux", "color": "b619ea", "default": false, "description": "Ubuntu/Linux Build/Installation Issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs @SuryanarayanaY\nAny news on this issue? ", "I'm having same issue. @0x0L have you fixed this?", "@qiangpei I tried using tcmalloc but that didn't change a thing... \n\nI am switching to torch for my next projects " ]
2023-12-03T16:39:19
2024-02-21T07:50:28
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15 ### Custom code Yes ### OS platform and distribution Debian GNU/Linux 11 (bullseye) ### Mobile device _No response_ ### Python version 3.11 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? Basic tensorflow calls leak memory in python 3.11 Running in basic docker envs: ```Dockerfile # FROM python:3.10.11 FROM python:3.11.2 RUN pip install psutil tensorflow # RUN pip install psutil tf-nightly ``` The following code (which only runs `tf.zeros(shape=(1,))` in a loop !!!) ```python import sys import time print("PYTHON VERSION", sys.version) import psutil process = psutil.Process() import tensorflow as tf print("TF VERSION", tf.__version__) def bench(fn): t = time.monotonic() r = [] for i in range(100): for j in range(5000): fn() mem = process.memory_info().rss / 1024 ** 2 r.append(mem) return r[-1] - r[0], time.monotonic() - t def fn(): return tf.zeros(shape=(1,)) tf_fn = tf.function(fn) if __name__ == "__main__": fn() # warmup tf_fn() mem_usage, elapsed = bench(fn) print(f"EAGER memory growth {mem_usage:.1f} MB in {elapsed:.1f} s") mem_usage, elapsed = bench(tf_fn) print(f"TF FUNC memory growth {mem_usage:.1f} MB in {elapsed:.1f} s") ``` produces the following matrix of results: ``` PYTHON VERSION 3.10.11 (main, May 23 2023, 13:58:30) [GCC 10.2.1 20210110] TF VERSION 2.15.0 EAGER memory growth 0.0 MB in 47.9 s TF FUNC memory growth 0.0 MB in 149.4 s ``` ``` PYTHON VERSION 3.10.11 (main, May 23 2023, 13:58:30) [GCC 10.2.1 20210110] TF VERSION 2.16.0-dev20231203 EAGER memory growth 0.0 MB in 47.1 s TF FUNC memory growth 0.0 MB in 154.1 s ``` ``` PYTHON VERSION 3.11.2 (main, Mar 23 2023, 17:12:29) [GCC 10.2.1 20210110] TF VERSION 2.15.0 EAGER memory growth 45.6 MB in 46.2 s TF FUNC memory growth 15.2 MB in 154.4 s ``` ``` PYTHON VERSION 3.11.2 (main, Mar 23 2023, 17:12:29) [GCC 10.2.1 20210110] TF VERSION 2.16.0-dev20231203 EAGER memory growth 45.6 MB in 46.8 s TF FUNC memory growth 15.2 MB in 154.9 s ``` The issue has been there since the first available 3.11 version (tf 2.12rc0 if I remember correctly) ### Standalone code to reproduce the issue ```shell - ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62551/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62551/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62550
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62550/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62550/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62550/events
https://github.com/tensorflow/tensorflow/issues/62550
2,022,597,005
I_kwDOArmXAs54jmGN
62,550
Inconsistency in XLA Compiled Model with `tf.minimum` and Additional Outputs on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@GwiHwan-Go,\r\nThe code you have mentioned has random number generation, the outputs cannot be guaranteed for each run.\r\nI have tried by setting seed and enabling op determinism and it still produces random results.\r\n\r\n```\r\ntf.random.set_seed(42)\r\ntf.config.experimental.enable_op_determinism() \r\n```", "Hi @tilakrayal,\r\n\r\nI don't believe that the problem you mentioned is influencing this issue. This is because we generated random inputs before feeding them to the models and ensured that the same inputs were given to both models. Additionally, the fact that eager mode does not trigger this behavior seems to support this perspective.", "@Gwihwan-Go,\r\nInside the call function you are generating the value for tensor, which in each calls generates different random values because XLA currently ignores TF seeds to random operations which makes the output different for obvious reason. Please refer [known](https://www.tensorflow.org/xla/known_issues#random_number_generation_ignores_tf_seed) issues from XLA section. Also Providing additional outputs does not result in matching outputs. This behavior is expected. Thank you!", "@tilakrayal, could you explain which APIs in the code above are used to generate a tensor? They include tf.transpose, tf.split(trans, 2, axis=0), tf.cast(trans, dtype=tf.int64), slice, and tf.minimum.", "Hi,\r\n\r\nProviding additional outputs does not result in matching outputs. This behavior is expected.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62550\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62550\">No</a>\n" ]
2023-12-03T15:46:33
2024-02-28T01:47:13
2024-02-28T01:47:10
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? An inconsistency arises in a TensorFlow model under XLA compilation when using `tf.minimum` along with an additional output nodes. This inconsistency does not occur when either the `tf.minimum` or the extra output node is removed. ### Testing and Observations - Removing the `tf.transpose` operation or one instance of `tf.minimum` prevents the inconsistency, suggesting their involvement in the issue. - Intriguingly, removing either the second `tf.minimum` (referred to as `min2`) or the outputs of `tf.split` also resolves the inconsistency. This observation indicates that the specific combination and arrangement of these operations contribute to the problem. This error is seen on specific input tensor. Please refer this [colab notebook](https://colab.research.google.com/drive/1P58ohVZEf_fxr_kndlD9l5sM6bKEAf1e?usp=sharing). Please reproduce the bug with the V100 GPU. ### Standalone code to reproduce the issue ```shell class Model1(tf.keras.Model): def __init__(self): super().__init__() # Tensor objects (with comments for shapes) # Layers or other Keras model objects @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [17, 10, 42] : complex128 trans = tf.transpose(inp, perm=[2, 1, 0]) cast = tf.cast(trans, dtype=tf.int64) sliced = cast[(slice(None, None, None), slice(-1, 9223372036854775807, 1), slice(None, None, None))] min1 = tf.minimum(cast, sliced) min2 = tf.minimum(min1, min1) return min1, min2, class Model2(tf.keras.Model): def __init__(self): super().__init__() # Tensor objects (with comments for shapes) # Layers or other Keras model objects @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [17, 10, 42] : complex128 trans = tf.transpose(inp, perm=[2, 1, 0]) v6_0, v6_1 = tf.split(trans, 2, axis=0) cast = tf.cast(trans, dtype=tf.int64) sliced = cast[(slice(None, None, None), slice(-1, 9223372036854775807, 1), slice(None, None, None))] min1 = tf.minimum(cast, sliced) min2 = tf.minimum(min1, min1) return min1, min2, v6_0, v6_1 ``` ### Relevant log output ```shell =========RUNNING WITH PICKLE FILES=========== =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 3261 / 7140 (45.7%) Max absolute difference: 197 Max relative difference: 93. x: array([[[-25, 35, -17, ..., -87, 44, 12], [-75, -40, 90, ..., 57, -40, 13], [-31, 16, -95, ..., -82, -65, 14],... y: array([[[-25, -94, -94, ..., -87, -49, -2], [-75, -94, -94, ..., 13, -49, -2], [-31, -94, -95, ..., -82, -65, -2],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62550/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62550/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62549
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62549/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62549/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62549/events
https://github.com/tensorflow/tensorflow/issues/62549
2,022,580,306
I_kwDOArmXAs54jiBS
62,549
Inconsistency in XLA Compilation with Operand Order Swap in `tf.add` with Specific Operators on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547538, "node_id": "MDU6TGFiZWwxMDk3NTQ3NTM4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:gpu", "name": "comp:gpu", "color": "0052cc", "default": false, "description": "GPU related issues" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 1315098405, "node_id": "MDU6TGFiZWwxMzE1MDk4NDA1", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/regression%20issue", "name": "regression issue", "color": "50bcc4", "default": false, "description": "To spot regression issues in latest version" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nI have tested the code with Tf2.14v and this behaviour was not observed. TF2.15v CUDA package has an issue woth tensorrt and unable to install on colab. But with CPU and Tf2.15v also no such behaviour. Seems the problem is with TF2.15v with GPU.Attaching [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/01410bdcf0e31c17b91c0c4385e4599c/62549.ipynb) for reference.\r\n\r\nCould you please confirm whether this is not an issue with TF2.14v, so that whether this can be marked as regression issue?\r\n\r\nSInce I can't install 2.15V GPU package, I am unable to replicate the reported behaviour. It seems you have installed the CUDA packages manually right?", "Hello @SuryanarayanaY,\r\n\r\nWe tested the code using TensorFlow version 2.14 with a V100 GPU, as per your indication, and did not encounter the error. This test included a manual installation of CUDA packages before executing the TensorFlow program. However, upon re-running the same program with TensorFlow version 2.15, the bug reappeared. Given the significant mismatch rate (up to 100%) and the considerable inconsistency value, we believe this error is not trivial.", "Hi @GwiHwan-Go ,\r\n\r\nThanks for clarification. Seems like regression issue.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62549\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62549\">No</a>\n" ]
2023-12-03T15:19:37
2023-12-19T12:18:58
2023-12-19T12:18:55
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? We've identified a bug in TensorFlow where swapping the order of operands in `tf.add`, when combined with specific operators like `tf.transpose`, `tf.reverse`, and `tf.math.subtract`, results in inconsistent outputs under XLA compilation. This behavior is only seen on **GPU.** ### Observations and Troubleshooting: - Removing operations such as `tf.transpose`, `tf.reverse`, or `tf.math.subtract` from the model prevents the inconsistency, indicating that these operations are integral to the error manifestation. - Interestingly, swapping the order of operands in `tf.add` also resolves the inconsistency, highlighting the significance of operand order in `tf.add` under XLA compilation. ### Standalone code to reproduce the issue ```shell from typing import Dict import tensorflow as tf import pickle import os import numpy as np gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: # Currently, memory growth needs to be the same across GPUs for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: # Memory growth must be set before GPUs have been initialized print(e) params = [ ] class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): trans = tf.transpose(inp, perm=[1, 0]) rev = tf.reverse(trans, axis=[0, 1]) subtract = tf.math.subtract(trans, trans) add = tf.add(rev, subtract) return add, class Model2(tf.keras.Model): def __call__(self, inp): trans = tf.transpose(inp, perm=[1, 0]) rev = tf.reverse(trans, axis=[0, 1]) substract = tf.math.subtract(trans, trans) add = tf.add(substract, rev) return add, inputs = [ tf.random.uniform(shape=[16, 16], dtype=tf.float64), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 1550 / 1550 (100%) Max absolute difference: 196.66151428 Max relative difference: 2487.16949153 x: array([[-44.869186, -55.12228 , -88.184502, ..., -36.286663, 86.54509 , -47.566055], [ 56.423187, 62.918427, -16.908859, ..., -9.663177, -0.339844,... y: array([[ -6.97467 , 26.228836, 82.672806, ..., -90.296532, -74.700211, -35.530495], [-48.684883, -46.956802, 31.971481, ..., 77.690094, -84.941338,... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62549/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62549/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62548
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62548/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62548/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62548/events
https://github.com/tensorflow/tensorflow/issues/62548
2,022,573,378
I_kwDOArmXAs54jgVC
62,548
Output Inconsistency in XLA Compiled Model with `tf.floor, tf.reverse` and Additional Concat Nodes on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "@GwiHwan-Go I wasn't able to replicate this error reported, please have a look at this gists of [cpu](https://colab.research.google.com/gist/sushreebarsa/7c1bae96f5faebbe9d805474a9a9d16e/untitled915.ipynb#scrollTo=Shbx4IHeqP3Y) and [gpu](https://colab.research.google.com/gist/sushreebarsa/61ec1deddc1c2322753598e2bc468e63/62548.ipynb).\r\nThank you!", "Hi @sushreebarsa , have you tried this with V100 GPU? I can reproduce this issue with V100 GPU.", "@GwiHwan-Go Inside the call function you are generating the value for tensor, which in each calls generates different random values because XLA currently ignores TF seeds to random operations which makes the output different for obvious reason. Please refer [known](https://www.tensorflow.org/xla/known_issues#random_number_generation_ignores_tf_seed) issues from XLA section.\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62548\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62548\">No</a>\n" ]
2023-12-03T15:10:06
2024-01-06T01:48:43
2024-01-06T01:48:37
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? A significant output inconsistency occurs under XLA compilation in a TensorFlow model that uses `tf.transpose`, `tf.floor`, `tf.reverse`, and `tf.add`, followed by returning additional concatenation nodes. This behavior is only seen on **GPU.** ### Troubleshooting and Findings - Removing any one of the operations (`tf.floor`, `tf.reverse`, or `tf.add`) eliminates the inconsistency. This suggests that the XLA's optimization strategies for these operations might be contributing to the issue. - Additionally, swapping the order of operands in `tf.add` also prevents the inconsistency, indicating that operand order affects the XLA compilation outcome. ### Standalone code to reproduce the issue ```shell from typing import Dict import tensorflow as tf import pickle import os import numpy as np gpus = tf.config.experimental.list_physical_devices('GPU') if gpus: try: # Currently, memory growth needs to be the same across GPUs for gpu in gpus: tf.config.experimental.set_memory_growth(gpu, True) logical_gpus = tf.config.experimental.list_logical_devices('GPU') print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs") except RuntimeError as e: # Memory growth must be set before GPUs have been initialized print(e) params = [ ] class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): trans = tf.transpose(inp, perm=[1, 0]) flo = tf.floor(trans) rev = tf.reverse(trans, axis=[0]) add = tf.add(rev, flo) return add, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): concat = tf.concat([inp, inp], axis=0) trans = tf.transpose(inp, perm=[1, 0]) flo = tf.floor(trans) rev = tf.reverse(trans, axis=[0]) add = tf.add(flo, rev) return add, concat, #v5_0, inputs = [ tf.random.uniform(shape=[20, 20], dtype=tf.float64), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 398 / 400 (99.5%) Max absolute difference: 0.95656053 Max relative difference: 297.73353716 x: array([[0.385214, 0.821024, 0.628158, 0.563975, 0.91364 , 0.01333 , 0.314812, 0.788177, 0.219594, 0.532986, 0.839771, 0.163734, 0.785233, 0.788404, 0.97439 , 0.469803, 0.412193, 0.702105,... y: array([[0.850826, 0.587126, 0.269039, 0.623998, 0.800557, 0.788701, 0.483439, 0.970967, 0.663002, 0.772746, 0.634461, 0.091355, 0.399323, 0.13452 , 0.229845, 0.657769, 0.668356, 0.510788,... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62548/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62548/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62547
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62547/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62547/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62547/events
https://github.com/tensorflow/tensorflow/issues/62547
2,022,556,578
I_kwDOArmXAs54jcOi
62,547
Inconsistency in XLA Compilation with `tf.split` and Additional Outputs on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nI have replicated the reported behaviour with colab using TF v2.14, 2.15 and nightly. Please find the [gist](https://colab.research.google.com/gist/Venkat6871/d4f3c7dfcd17d92e7584c14def2777df/62547_2-14_2-15_nightly-v.ipynb) here for reference.\r\n\r\nThank you!", "Hi,\r\n\r\nProviding additional outputs does not result in matching outputs. This behavior is expected.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62547\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62547\">No</a>\n" ]
2023-12-03T14:48:27
2024-01-11T01:49:39
2024-01-11T01:49:36
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? An inconsistency has been observed in TensorFlow's XLA compilation when using `tf.split` in conjunction with additional output nodes. This behavior is only seen on **GPU**. #### Testing and Observations: - Removing any one of the operations (`tf.reverse`, `tf.add`, or `tf.transpose`) from the model's dataflow does not lead to the inconsistency, indicating that the combined effect of these operations might be a factor in triggering the error. - Altering the order of operands in the `tf.add` operation prevents the inconsistency, suggesting that operand order plays a role in the observed behavior. - Notably, returning only one output from `tf.split` in Model2 also avoids the error, highlighting a potential issue with how additional outputs are handled under XLA compilation. ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): trans = tf.transpose(inp, perm=[4, 1, 2, 3, 0]) rev = tf.reverse(trans, axis=[0, 2, 3, 4]) add = tf.add(rev, trans) split1, split2 = tf.split(add, 2, axis=1) return split1, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): trans = tf.transpose(inp, perm=[4, 1, 2, 3, 0]) trans_output = tf.transpose(tf.concat([trans, trans], axis=0), perm=[1, 0, 2, 3, 4]) rev = tf.reverse(trans, axis=[0, 2, 3, 4]) add = tf.add(trans, rev) split1, split2 = tf.split(add, 2, axis=1) return split1, split2, trans_output, inputs = [ tf.random.uniform([1, 40, 1, 31, 49], dtype=tf.float64) ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 30332 / 30380 (99.8%) Max absolute difference: 198.11893463 Max relative difference: 2174.86236609 x: array([[[[[ 67.104973+0.j], [-166.796616+0.j], [ 99.377243+0.j],... y: array([[[[[ 114.813599+0.j], [-178.685852+0.j], [ 184.664337+0.j],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62547/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62547/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62546
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62546/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62546/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62546/events
https://github.com/tensorflow/tensorflow/issues/62546
2,022,545,005
I_kwDOArmXAs54jZZt
62,546
Output Inconsistency in XLA Compilation with Order Swapping of `tf.multiply` on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "The code you have mentioned has random number generation, the outputs cannot be guaranteed for each run.\r\nI have tried by setting seed and enabling op determinism and it still produces random results.\r\n\r\n```\r\ntf.random.set_seed(42)\r\ntf.config.experimental.enable_op_determinism() \r\n```", "Hi @tilakrayal,\r\n\r\nI don't believe that the problem you mentioned is influencing this issue. This is because we generated random inputs before feeding them to the models and ensured that the same inputs were given to both models. Additionally, the fact that eager mode does not trigger this behavior seems to support this perspective.", "Change in order of operation will take the different code path and the results are expected.\r\nThis is not considered as a bug.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62546\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62546\">No</a>\n" ]
2023-12-03T14:28:05
2024-01-19T01:49:31
2024-01-19T01:49:26
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? We have identified a significant inconsistency in TensorFlow's XLA compilation when the order of operands in `tf.multiply` is swapped. This behavior is only seen on **gpu.** ### Additional Testing Conducted To further investigate the issue, we conducted the following tests: 1. **Altering `tf.transpose` and `tf.multiply`**: Removing either `tf.transpose` or the first instance of `tf.multiply` prevents the inconsistency, suggesting their involvement in the issue. 2. **Replacing `tf.squeeze`**: Substituting `tf.squeeze` with `tf.math.reduce_min` also triggers the error. This finding implies that the root cause of the inconsistency may not lie within the `tf.squeeze` API. We hope this detailed report will assist in identifying and resolving the root cause of this inconsistency in TensorFlow's XLA compilation process. ### Standalone code to reproduce the issue ```shell from typing import Dict import tensorflow as tf import pickle import os import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2): trans = tf.transpose(inp1, perm=[1, 0]) gather = tf.gather(trans, tf.clip_by_value(inp2, 0, 63), axis=0) squeeze = tf.squeeze(gather, axis=1) # replace with tf.math.reduce_min also trigger the error mul1 = tf.multiply(squeeze, squeeze) mul2 = tf.multiply(trans, mul1) return mul2, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2): # Forward pass logic using TensorFlow operations # inp1: [64, 64] : complex128 # inp2: [1, 1, 64] : int32 trans = tf.transpose(inp1, perm=[1, 0]) gather = tf.gather(trans, tf.clip_by_value(inp2, 0, 63), axis=0) squeeze = tf.squeeze(gather, axis=1) # replace with tf.math.reduce_min also trigger the error mul1 = tf.multiply(squeeze, squeeze) mul2 = tf.multiply(mul1, trans) return mul2, inputs = [ tf.random.uniform([64, 64], dtype=tf.float64), tf.random.uniform(shape=[1, 1, 64], minval=-100, maxval=100, dtype=tf.int32), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1701613506.470278 2323572 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 3916 / 4096 (95.6%) Max absolute difference: 0.99773916 Max relative difference: 8.1213542e+09 x: array([[[9.954084e-04, 1.219345e-02, 2.660004e-02, ..., 1.034067e-01, 2.044861e-01, 2.751211e-01], [8.196792e-01, 1.095101e-04, 3.054382e-01, ..., 3.612774e-02,... y: array([[[0.002754, 0.195666, 0.044557, ..., 0.013043, 0.305706, 0.210448], [0.386147, 0.014669, 0.420922, ..., 0.0958 , 0.017486,... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62546/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62546/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62545
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62545/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62545/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62545/events
https://github.com/tensorflow/tensorflow/pull/62545
2,022,464,501
PR_kwDOArmXAs5g-xnD
62,545
Symbolic link on build_devtoolset.sh
{ "login": "dante-tech", "id": 148709693, "node_id": "U_kgDOCN0hPQ", "avatar_url": "https://avatars.githubusercontent.com/u/148709693?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dante-tech", "html_url": "https://github.com/dante-tech", "followers_url": "https://api.github.com/users/dante-tech/followers", "following_url": "https://api.github.com/users/dante-tech/following{/other_user}", "gists_url": "https://api.github.com/users/dante-tech/gists{/gist_id}", "starred_url": "https://api.github.com/users/dante-tech/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dante-tech/subscriptions", "organizations_url": "https://api.github.com/users/dante-tech/orgs", "repos_url": "https://api.github.com/users/dante-tech/repos", "events_url": "https://api.github.com/users/dante-tech/events{/privacy}", "received_events_url": "https://api.github.com/users/dante-tech/received_events", "type": "User", "site_admin": false }
[ { "id": 1169364458, "node_id": "MDU6TGFiZWwxMTY5MzY0NDU4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:S", "name": "size:S", "color": "adafea", "default": false, "description": "CL Change Size: Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "I am unsure of what problem this is really solving. This only executes in a docker container where these things are not changing. This seemingly complicates the script for little benefit. Do you have an issue describing the problem this is causing? ", "Sorry, we're not looking for \"generally better\" changes for this script right now. If there's a very specific bug that you have encountered, please explicitly state and fix it.", "This commit right here was made with the intent of bettering the structure and build of the code. Here's are some key differences between the original version and my proposed one:\r\n\r\n- The first code does not check for the existence of the Python interpreter or the symbolic link, while the second code does.\r\n- The first code does not execute any Python code or handle any errors, while the second code does.\r\n- The first code creates the directory unconditionally, while the second code creates it only if the Python interpreter is found.\r\n- The first code uses a hard-coded array of Python versions, while the second code uses a variable that can be changed easily.\r\n\r\nAs you can see, my main objective wasn't trying to fix a bug, rather to increase the robustness of the code." ]
2023-12-03T10:48:46
2023-12-04T22:53:29
2023-12-04T20:49:04
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62545", "html_url": "https://github.com/tensorflow/tensorflow/pull/62545", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62545.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62545.patch", "merged_at": null }
Commit that aims at bettering the overall symbolic link connection for python versions. I also included some error handling and it prints `"No suitable Python version found"` in case it doesn't find the correct Python version.
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62545/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62545/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62544
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62544/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62544/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62544/events
https://github.com/tensorflow/tensorflow/issues/62544
2,022,394,096
I_kwDOArmXAs54i0jw
62,544
ValueError: batch_dims != 0 is not supported for ragged gather yet.
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @beanduan22 ,\r\n\r\nAs the exception indicates ragged tensor gather won't yet support with `batch_dims != 0`. It is intended behaviour.\r\n\r\nAlso the innermost dimension of indices should not be ragged.The indices were changed to make it workable. Please refer attached [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/8cd19a1e2b75506962626142a4e5eda2/62544.ipynb).\r\n\r\nThanks!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62544\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62544\">No</a>\n" ]
2023-12-03T06:43:15
2023-12-20T01:42:50
2023-12-20T01:42:40
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version tf 2.13 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? ```python import tensorflow as tf a = tf.ragged.constant([[1, 1]]) b = tf.ragged.constant([[[0], [0]]], ragged_rank=1) output = tf.gather_nd(a, b, batch_dims=1) print(output) ``` ### Standalone code to reproduce the issue ```shell raise ValueError('batch_dims != 0 is not supported for ragged gather yet.') ValueError: batch_dims != 0 is not supported for ragged gather yet. ``` ### Relevant log output _No response_
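For reference, a minimal sketch of a pattern that ragged gather does accept today (indices kept dense and `batch_dims` left at its default of 0, per the maintainer's comment above); the tensors here are illustrative and not taken from the linked gist:

```python
import tensorflow as tf

params = tf.ragged.constant([[1, 2], [3, 4, 5]])
indices = tf.constant([1, 0])

# Ragged gather currently supports only batch_dims=0 (the default), and the
# innermost dimension of the indices must not be ragged.
print(tf.gather(params, indices))  # -> <tf.RaggedTensor [[3, 4, 5], [1, 2]]>
```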
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62544/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62544/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62543
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62543/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62543/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62543/events
https://github.com/tensorflow/tensorflow/issues/62543
2,022,361,350
I_kwDOArmXAs54iskG
62,543
got NaN when invoking tf.experimental.numpy.arcsin() even though the input is not out of range
{ "login": "beanduan22", "id": 149338034, "node_id": "U_kgDOCOa3sg", "avatar_url": "https://avatars.githubusercontent.com/u/149338034?v=4", "gravatar_id": "", "url": "https://api.github.com/users/beanduan22", "html_url": "https://github.com/beanduan22", "followers_url": "https://api.github.com/users/beanduan22/followers", "following_url": "https://api.github.com/users/beanduan22/following{/other_user}", "gists_url": "https://api.github.com/users/beanduan22/gists{/gist_id}", "starred_url": "https://api.github.com/users/beanduan22/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/beanduan22/subscriptions", "organizations_url": "https://api.github.com/users/beanduan22/orgs", "repos_url": "https://api.github.com/users/beanduan22/repos", "events_url": "https://api.github.com/users/beanduan22/events{/privacy}", "received_events_url": "https://api.github.com/users/beanduan22/received_events", "type": "User", "site_admin": false }
[ { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62543\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62543\">No</a>\n" ]
2023-12-03T04:20:30
2023-12-03T04:21:29
2023-12-03T04:21:22
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version tf 2.13.0 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? got NaN when invoking tf.experimental.numpy.arcsin() even though the input is not out of range ### Standalone code to reproduce the issue ```python
import tensorflow as tf

with tf.device('cpu'):
    input_data = tf.constant([[0., 0.99455555, 0.5230071, 0., 0.72101855,
                               0.5321333, 0.7079337, 0., 0., 0.02282775],
                              [0., 0.49104163, 0.6195026, 0., 0.8540474,
                               0.9993153, 2.0230498, 0., 0., 0.0270395],
                              [0., 0.3878699, 0.48934025, 0., 0.6746053,
                               0.3691354, 0.597991, 0., 0., 0.02135829],
                              [0., 0.44674954, 0.5636233, 0., 0.7770121,
                               0.7287762, 0.8405701, 0., 0., 0.02460053]])

    def nu_arcsin(input_data):
        return tf.experimental.numpy.arcsin(input_data)

    output = nu_arcsin(input_data)
    print(output)

with tf.device('gpu'):
    input_data = tf.constant([[0., 0.99455555, 0.5230071, 0., 0.72101855,
                               0.5321333, 0.7079337, 0., 0., 0.02282775],
                              [0., 0.49104163, 0.6195026, 0., 0.8540474,
                               0.9993153, 2.0230498, 0., 0., 0.0270395],
                              [0., 0.3878699, 0.48934025, 0., 0.6746053,
                               0.3691354, 0.597991, 0., 0., 0.02135829],
                              [0., 0.44674954, 0.5636233, 0., 0.7770121,
                               0.7287762, 0.8405701, 0., 0., 0.02460053]])

    def nu_arcsin(input_data):
        return tf.experimental.numpy.arcsin(input_data)

    output = nu_arcsin(input_data)
    print(output)
``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62543/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62543/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62542
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62542/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62542/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62542/events
https://github.com/tensorflow/tensorflow/pull/62542
2,022,200,594
PR_kwDOArmXAs5g97Fu
62,542
Properly handle the Python version when looking for Python binary
{ "login": "dante-tech", "id": 148709693, "node_id": "U_kgDOCN0hPQ", "avatar_url": "https://avatars.githubusercontent.com/u/148709693?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dante-tech", "html_url": "https://github.com/dante-tech", "followers_url": "https://api.github.com/users/dante-tech/followers", "following_url": "https://api.github.com/users/dante-tech/following{/other_user}", "gists_url": "https://api.github.com/users/dante-tech/gists{/gist_id}", "starred_url": "https://api.github.com/users/dante-tech/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dante-tech/subscriptions", "organizations_url": "https://api.github.com/users/dante-tech/orgs", "repos_url": "https://api.github.com/users/dante-tech/repos", "events_url": "https://api.github.com/users/dante-tech/events{/privacy}", "received_events_url": "https://api.github.com/users/dante-tech/received_events", "type": "User", "site_admin": false }
[ { "id": 1169364458, "node_id": "MDU6TGFiZWwxMTY5MzY0NDU4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:S", "name": "size:S", "color": "adafea", "default": false, "description": "CL Change Size: Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Please don't use \"add file\"/\"update file\"/\"fix file\"/etc. commit messages. These are hard to reason about when looking at the history of the file/repository. Instead, please write explanatory git commit messages.\r\n\r\nThe commit message is also the title of the PR if the PR has only one commit. It is thus twice important to have commit messages that are relevant, as PRs would be easier to understand and easier to analyze in search results.\r\n\r\nFor how to write good quality git commit messages, please consult https://cbea.ms/git-commit/ ", "Sure, I have edited the commit message that should dive deeper into why I have made the changes and what the code does. I'm sorry for my previous commit message, I'm new to open source. \r\n\r\nHope you'll like it!", "Sure, I can separate the two conditions by using two `if` statements. Such that: \r\n```\r\nif not (os.path.isfile(python_bin_path)): \r\n if os.access(python_bin_path, os.X_OK)):\r\n raise FileNotFoundError('The python path {} is not valid.'.format(python_bin_path))\r\n\r\n``` I will edit the commit accordingly", "I added a few changes via suggested edits to make this pass internal tests and land.\r\n\r\nThank you very much for the PR!", "Perfect, I'm glad to help! Is there anything else I should edit or review?", "I think for now it should be good. There are a few CI jobs that take longer and then there needs to be another approver in the internal system (2 pair of eyes required on any change to prevent insider risk). After that, this will merge automatically.\r\n\r\nThank you!", "Very good! I'll look forward to solve another issue. \r\n\r\nThank you for your time, have a nice day!", "Hi, thanks for your work as a first-time OSS contributor. I can see that you're trying to make the code more resilient and readable -- but these changes are not useful, e.g.:\r\n\r\n- They make this specific function behave much differently than everything else in this configure.py script, which is confusing for anyone trying to understand the whole script\r\n- There is no reason to remove the utility function\r\n- Removing the loop means that users have only one chance to get the path right, or else the script quits (this behavior should instead be changed explicitly and across the whole script)\r\n\r\nI don't see what specific problem this change will fix, but instead, it makes the code, overall, less consistent. I'd like you to reduce the change to **only fix a bug that you found and nothing else.** I see that you originally indicated that this change would fix a bug where PYTHON_BIN_PATH would be ignored. What do you mean by that?", "I see your perspective, let me clarify your points further.\r\n\r\n- **First**: the commit made is done purposefully using standard functions that handle edge cases and errors, also, by respecting the existing environment variables.\r\n\r\n- **Second**: built-in functions are more efficient and optimized than custom functions. They are implemented in C or other low-level languages and can run faster and use less memory than custom functions. Custom functions may have redundant code that slows down the execution or wastes resources. Therefore, even for just one instance, I preferred to use built-in functions over the custom one.\r\n\r\n- **Third**: it manages to solve a bug. The bug I'm trying to solve and addressing, is related to the `PYTHON_BIN_PATH` environment variable. The original code snippet ignores the `PYTHON_BIN_PATH` environment variable if it is already set. 
This means that it does not respect the user’s preference and may use a different Python binary than the one the user intended. This can cause problems if the user wants to use a specific version of Python or a virtual environment. This is also raised by one of the latest issues. My changes respects the `PYTHON_BIN_PATH` environment variable and uses it as the default value. This means that it will use the Python binary that the user specified, unless it is not valid or not found. This way, the user can control which Python binary to use and avoid potential conflicts or errors.\r\n\r\nI hope this clarifies it all, have a good day!", "> * built-in functions are more efficient and optimized than custom functions. They are implemented in C or other low-level languages and can run faster and use less memory than custom functions\r\n\r\nThis seems weird. There is no C involved here, if I understand correctly", "@mihaimaruseac I was referring myself to the built-in functions, not the code", "Thanks for clarifying. This change is not necessary.", "Thank you for your time, however there's something I'm missing @angerson; why the change is not necessary?" ]
2023-12-02T20:54:48
2023-12-05T00:19:50
2023-12-04T23:45:26
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62542", "html_url": "https://github.com/tensorflow/tensorflow/pull/62542", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62542.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62542.patch", "merged_at": null }
The second code snippet is a simplified version of the first one. It does not use a custom function, but instead uses the built-in `os.environ.get` function to get the value of the `PYTHON_BIN_PATH` environment variable, or the default value if it is not set. It also uses the built-in `shutil.which` function to find the path of the Python binary in the system path. This function returns `None` if the path is not found, so the code checks for that and raises a `FileNotFoundError` exception. It also checks the Python version by using another custom function called `get_python_major_version` and raises a `ValueError` exception if the version is not 3. If any exception is raised, the code exits with an error code. Otherwise, it sets the `PYTHON_BIN_PATH` environment variable to the valid path and converts it to Windows style if needed. The main differences between the original snippet and the edited one are: - The original code snippet uses a custom function to get the Python binary path, while the second code snippet uses built-in functions. - The original code snippet asks the user to input the Python binary path if it is not found or invalid, while the second code snippet raises an exception and exits. - The original code snippet does not check the Python version, while the second code snippet does and requires it to be 3. - The original code snippet ignores the `PYTHON_BIN_PATH` environment variable if it is already set, while the second code snippet respects it. Why I have considered committing these changes: - It is more concise and readable, using fewer lines of code and avoiding unnecessary custom functions. - It is more robust and consistent, using standard functions that handle edge cases and errors, and respecting the existing environment variables. - It is more secure and compatible, checking the Python version and ensuring it is 3, which is the recommended version for most Python projects.
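As a rough illustration of the flow this description outlines (environment variable first, then `shutil.which`, then existence and version checks), here is a minimal, hypothetical sketch; it is not the actual configure.py diff, and the helper name and messages are made up for this example:

```python
import os
import shutil
import subprocess
import sys

def resolve_python_bin_path(default_binary="python3"):
    """Hypothetical helper mirroring the approach described above."""
    # Respect PYTHON_BIN_PATH if the user already set it; otherwise fall back
    # to whatever `which` finds on the PATH.
    path = os.environ.get("PYTHON_BIN_PATH") or shutil.which(default_binary)
    if path is None or not os.access(path, os.X_OK):
        raise FileNotFoundError(f"No valid Python binary found ({path!r}).")
    # Require Python 3, as the description suggests.
    major = subprocess.check_output(
        [path, "-c", "import sys; print(sys.version_info[0])"]).strip()
    if major != b"3":
        raise ValueError(f"{path} is not a Python 3 interpreter.")
    os.environ["PYTHON_BIN_PATH"] = path
    return path

if __name__ == "__main__":
    try:
        print(resolve_python_bin_path())
    except (FileNotFoundError, ValueError) as err:
        sys.exit(str(err))
```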
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62542/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62542/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62541
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62541/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62541/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62541/events
https://github.com/tensorflow/tensorflow/issues/62541
2,022,151,019
I_kwDOArmXAs54h5Nr
62,541
pip Install Error: I couldn't find a version of TensorFlow that meets the requirements.
{ "login": "MSR-07", "id": 102695412, "node_id": "U_kgDOBh8B9A", "avatar_url": "https://avatars.githubusercontent.com/u/102695412?v=4", "gravatar_id": "", "url": "https://api.github.com/users/MSR-07", "html_url": "https://github.com/MSR-07", "followers_url": "https://api.github.com/users/MSR-07/followers", "following_url": "https://api.github.com/users/MSR-07/following{/other_user}", "gists_url": "https://api.github.com/users/MSR-07/gists{/gist_id}", "starred_url": "https://api.github.com/users/MSR-07/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/MSR-07/subscriptions", "organizations_url": "https://api.github.com/users/MSR-07/orgs", "repos_url": "https://api.github.com/users/MSR-07/repos", "events_url": "https://api.github.com/users/MSR-07/events{/privacy}", "received_events_url": "https://api.github.com/users/MSR-07/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097545817, "node_id": "MDU6TGFiZWwxMDk3NTQ1ODE3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:apis", "name": "comp:apis", "color": "0052cc", "default": false, "description": "Highlevel API related issues" }, { "id": 1188421838, "node_id": "MDU6TGFiZWwxMTg4NDIxODM4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:windows", "name": "subtype:windows", "color": "b619ea", "default": false, "description": "Windows Build/Installation Issues" }, { "id": 3531398540, "node_id": "LA_kwDOArmXAs7SfN2M", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.7", "name": "TF 2.7", "color": "77237D", "default": false, "description": "Issues related to TF 2.7.0" } ]
closed
false
{ "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false }
[ { "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi **@MSR-07** ,\r\nCould you please go through these documentations [pip](https://www.tensorflow.org/install/pip), [windows](https://www.tensorflow.org/install/source_windows) once. And please try to provide a text then it will help us to reproduce. if you have any doubts please feel free to ask.\r\n\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62541\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62541\">No</a>\n" ]
2023-12-02T18:12:17
2023-12-13T05:50:47
2023-12-13T05:50:44
NONE
null
null
null
### Issue type Build/Install ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.7.0 ### Custom code Yes ### OS platform and distribution Win 10 pro ### Mobile device _No response_ ### Python version 3.12.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? ![Screenshot 2023-12-02 231146](https://github.com/tensorflow/tensorflow/assets/102695412/eb9c763e-be89-4286-9e95-6bd162552f25) ### Standalone code to reproduce the issue ```shell Jupyter Notebook ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62541/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62541/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62540
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62540/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62540/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62540/events
https://github.com/tensorflow/tensorflow/pull/62540
2,022,123,230
PR_kwDOArmXAs5g9r6y
62,540
Update configure.py
{ "login": "dante-tech", "id": 148709693, "node_id": "U_kgDOCN0hPQ", "avatar_url": "https://avatars.githubusercontent.com/u/148709693?v=4", "gravatar_id": "", "url": "https://api.github.com/users/dante-tech", "html_url": "https://github.com/dante-tech", "followers_url": "https://api.github.com/users/dante-tech/followers", "following_url": "https://api.github.com/users/dante-tech/following{/other_user}", "gists_url": "https://api.github.com/users/dante-tech/gists{/gist_id}", "starred_url": "https://api.github.com/users/dante-tech/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/dante-tech/subscriptions", "organizations_url": "https://api.github.com/users/dante-tech/orgs", "repos_url": "https://api.github.com/users/dante-tech/repos", "events_url": "https://api.github.com/users/dante-tech/events{/privacy}", "received_events_url": "https://api.github.com/users/dante-tech/received_events", "type": "User", "site_admin": false }
[ { "id": 1169364458, "node_id": "MDU6TGFiZWwxMTY5MzY0NDU4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:S", "name": "size:S", "color": "adafea", "default": false, "description": "CL Change Size: Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for your pull request! It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nView this [failed invocation](https://github.com/tensorflow/tensorflow/pull/62540/checks?check_run_id=19249188653) of the CLA check for more information.\n\nFor the most up to date status, view the checks section at the bottom of the pull request." ]
2023-12-02T16:56:54
2023-12-02T17:26:39
2023-12-02T17:26:30
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62540", "html_url": "https://github.com/tensorflow/tensorflow/pull/62540", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62540.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62540.patch", "merged_at": null }
This edit is made to address the bug in the configuration where the build system ignores the `PYTHON_BIN_PATH` environment variable, which was one of the issues raised in the last few days. I followed the style and code of conduct proposed for making changes in the Tensorflow library. Also, I have added comments that explain what the code is doing. I hope you will take these edits in consideration, have a great day!
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62540/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62540/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62539
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62539/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62539/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62539/events
https://github.com/tensorflow/tensorflow/issues/62539
2,022,111,470
I_kwDOArmXAs54hvju
62,539
Inconsistency in XLA Compiled Model Involving `tf.nn.softmax, tf.cast, tf.math.reduce_sum` and Additional Concat Output
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@tilakrayal Hi, have you reproduced this behavior?", "@sachinprasadhs,\r\nI was able to reproduce the issue on tensorflow v2.14, v2.15 and tf-nightly. Kindly find the gist of it [here](https://colab.research.google.com/gist/tilakrayal/83b3500341f22fd9e35b5efe23936a21/untitled1609.ipynb).", "It is not expected to get the desired matching results when you **Altering Output Structure**, the behavior which you are observing is intended and it is not a bug.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62539\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62539\">No</a>\n" ]
2023-12-02T16:24:30
2024-01-11T01:49:45
2024-01-11T01:49:40
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I have identified a inconsistency issue in TensorFlow when running a model under XLA compilation. The model uses a combination of `tf.nn.softmax`, `tf.cast`, and `tf.math.reduce_sum` operations, and the inconsistency arises when an additional `tf.concat` output node is included. ### Troubleshooting Conducted To isolate the cause of this issue, the following tests were conducted: 1. **Removing Individual Operations**: We methodically removed each operation (`tf.nn.softmax`, `tf.transpose`, `tf.math.reduce_sum`, and `tf.cast`) from the model. In each case, this alteration prevented the inconsistency error, suggesting that the combination of these operations contributes to the issue. 2. **Altering Output Structure**: Removing the `tf.concat` nodes from Model2's return values also resolved the inconsistency, indicating that the manner in which outputs are structured and concatenated is a factor. 3. Run the model on **gpu** device. I hope these findings will assist in identifying and resolving the root cause of this inconsistency issue in TensorFlow's XLA compilation. ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [28, 60, 1] : float64 softmax = tf.nn.softmax(inp, axis=0) trans = tf.transpose(softmax, perm=[0, 2, 1]) reduce_sum = tf.math.reduce_sum(trans, axis=0) cast = tf.cast(reduce_sum, dtype=tf.int32) return reduce_sum, cast, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [28, 60, 1] : float64 softmax = tf.nn.softmax(inp, axis=0) trans = tf.transpose(softmax, perm=[0, 2, 1]) concat = tf.concat([trans, trans], axis=1) reduce_sum = tf.math.reduce_sum(trans, axis=0) cast = tf.cast(reduce_sum, dtype=tf.int32) return reduce_sum, cast, concat, inputs = [ tf.random.uniform(shape=[20, 30, 1], dtype=tf.float32), ] model1 = Model1() model2 = Model2() device = "cpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 
=========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th Mismatched elements: 9 / 30 (30%) Max absolute difference: 1 Max relative difference: 1. x: array([[1, 1, 1, 1, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 1, 1, 1, 1, 1, 1, 0, 1, 0]], dtype=int32) y: array([[0, 1, 0, 1, 0, 0, 1, 0, 1, 1, 1, 1, 1, 1, 1, 1, 0, 1, 1, 0, 1, 0, 0, 1, 1, 0, 1, 1, 1, 0]], dtype=int32) ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62539/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62539/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62538
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62538/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62538/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62538/events
https://github.com/tensorflow/tensorflow/issues/62538
2,022,066,799
I_kwDOArmXAs54hkpv
62,538
TensorFlow Lite Flex Op Request: modulo operator
{ "login": "kjabon", "id": 2325839, "node_id": "MDQ6VXNlcjIzMjU4Mzk=", "avatar_url": "https://avatars.githubusercontent.com/u/2325839?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kjabon", "html_url": "https://github.com/kjabon", "followers_url": "https://api.github.com/users/kjabon/followers", "following_url": "https://api.github.com/users/kjabon/following{/other_user}", "gists_url": "https://api.github.com/users/kjabon/gists{/gist_id}", "starred_url": "https://api.github.com/users/kjabon/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kjabon/subscriptions", "organizations_url": "https://api.github.com/users/kjabon/orgs", "repos_url": "https://api.github.com/users/kjabon/repos", "events_url": "https://api.github.com/users/kjabon/events{/privacy}", "received_events_url": "https://api.github.com/users/kjabon/received_events", "type": "User", "site_admin": false }
[ { "id": 473184161, "node_id": "MDU6TGFiZWw0NzMxODQxNjE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:support", "name": "type:support", "color": "159b2e", "default": false, "description": "Support issues" }, { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 2915920098, "node_id": "MDU6TGFiZWwyOTE1OTIwMDk4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite-flex", "name": "comp:lite-flex", "color": "2E0DFE", "default": false, "description": "" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "LakshmiKalaKadali", "id": 149650845, "node_id": "U_kgDOCOt9nQ", "avatar_url": "https://avatars.githubusercontent.com/u/149650845?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LakshmiKalaKadali", "html_url": "https://github.com/LakshmiKalaKadali", "followers_url": "https://api.github.com/users/LakshmiKalaKadali/followers", "following_url": "https://api.github.com/users/LakshmiKalaKadali/following{/other_user}", "gists_url": "https://api.github.com/users/LakshmiKalaKadali/gists{/gist_id}", "starred_url": "https://api.github.com/users/LakshmiKalaKadali/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LakshmiKalaKadali/subscriptions", "organizations_url": "https://api.github.com/users/LakshmiKalaKadali/orgs", "repos_url": "https://api.github.com/users/LakshmiKalaKadali/repos", "events_url": "https://api.github.com/users/LakshmiKalaKadali/events{/privacy}", "received_events_url": "https://api.github.com/users/LakshmiKalaKadali/received_events", "type": "User", "site_admin": false }
[ { "login": "LakshmiKalaKadali", "id": 149650845, "node_id": "U_kgDOCOt9nQ", "avatar_url": "https://avatars.githubusercontent.com/u/149650845?v=4", "gravatar_id": "", "url": "https://api.github.com/users/LakshmiKalaKadali", "html_url": "https://github.com/LakshmiKalaKadali", "followers_url": "https://api.github.com/users/LakshmiKalaKadali/followers", "following_url": "https://api.github.com/users/LakshmiKalaKadali/following{/other_user}", "gists_url": "https://api.github.com/users/LakshmiKalaKadali/gists{/gist_id}", "starred_url": "https://api.github.com/users/LakshmiKalaKadali/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/LakshmiKalaKadali/subscriptions", "organizations_url": "https://api.github.com/users/LakshmiKalaKadali/orgs", "repos_url": "https://api.github.com/users/LakshmiKalaKadali/repos", "events_url": "https://api.github.com/users/LakshmiKalaKadali/events{/privacy}", "received_events_url": "https://api.github.com/users/LakshmiKalaKadali/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @kjabon, I have reproduced the code. As per the [documentation](https://www.tensorflow.org/lite/guide/ops_custom), it is recommended to set converter.allow_custom_ops = True. As you mentioned, custom op doesnot solve your issue completely for your usecase, could you please provide some more details for better understanding the problem.\r\n\r\n\r\nThank You", "I've worked around this issue entirely by using tfcompile - much fewer headaches since jax2tf wraps stablehlo which is converted directly to xla.\r\n\r\nBut to answer your question, the source is in jax - so it it's not clear to me how one would use both tf (custom ops) and jax/lax functions and then pass that to jax2tf. ", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62538\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62538\">No</a>\n" ]
2023-12-02T14:44:23
2023-12-07T00:12:42
2023-12-07T00:12:39
NONE
null
null
null
**System information** - OS Platform and Distribution (e.g., Linux Ubuntu 16.04): Ubuntu 22.04, but will extend to windows. - TensorFlow installed from (source or binary): binary/pip - TensorFlow version (or github SHA if from source): 2.15.0 **Provide the text output from tflite_convert** ``` error: failed while converting: 'jit(policy_interp)/jit(main)/while/body/while[cond_nconsts=0 body_nconsts=21]_body': Some ops in the model are custom ops, See instructions to implement custom ops: https://www.tensorflow.org/lite/guide/ops_custom Custom ops: Mod Details: tf.Mod(tensor<i32>, tensor<i32>) -> (tensor<i32>) ``` **Standalone code to reproduce the issue** [colab here](https://colab.research.google.com/drive/1nN4_EqViGlgaDmQnU6tqB1a-bdXSs0lx?usp=sharing) Coming from [here:](https://www.tensorflow.org/lite/guide/op_select_allowlist#add_tensorflow_core_operators_to_the_allowed_list) I would like to be able to add tf.Mod to the allowed list. As noted in the Colab, I'm not sure what "best practice" is here. Is it better to set allow_custom_ops to True and move on? I would like to deploy on mobile, and am concerned about file size and performance. Edit: making a custom op doesn't cleanly solve my issue, since the code I'm trying to convert originates from JAX. I'm ok with submitting a PR myself too, but might like some help. Further edit: it turns out I can work around this issue by refactoring my model. This won't be too bad, but since this would be a nice feature to have anyway for me and potentially others, not closing the issue yet.
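For context, a rough sketch of the `allow_custom_ops` workaround suggested in the comments, using a stand-in function that lowers to the `Mod` op; the function and file names are illustrative, and a matching custom kernel would still have to be registered in the runtime (adding `Mod` to the Select TF ops allowlist, as requested here, would remove that burden):

```python
import tensorflow as tf

@tf.function(input_signature=[tf.TensorSpec([], tf.int32),
                              tf.TensorSpec([], tf.int32)])
def mod_fn(x, y):
    # tf.raw_ops.Mod is the op the converter reports as unsupported.
    return tf.raw_ops.Mod(x=x, y=y)

converter = tf.lite.TFLiteConverter.from_concrete_functions(
    [mod_fn.get_concrete_function()])
# Let the converter emit Mod as a custom op instead of erroring out; the
# application must then supply a matching custom kernel at runtime.
converter.allow_custom_ops = True
tflite_model = converter.convert()
with open("mod.tflite", "wb") as f:
    f.write(tflite_model)
```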
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62538/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62538/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62537
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62537/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62537/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62537/events
https://github.com/tensorflow/tensorflow/issues/62537
2,022,054,882
I_kwDOArmXAs54hhvi
62,537
Inconsistency in XLA Compiled Models with `tf.matmul, tf.concat` with Extra Transpose Output
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs I was able to replicate this error in [gpu](https://colab.research.google.com/gist/sushreebarsa/85c1c4c72b0b4a292476ba6ad3ad2783/62537-gpu.ipynb) sand [cpu](https://colab.research.google.com/gist/sushreebarsa/6c3a404872b5525c8a6a40c1f6dac369/62537.ipynb). Please find the attached gists.\r\nThank you!", "When the two outputs differ with the extra output, the mismatch is expected.\r\nWhen you make the returns equal then there will not be any error. This is not a bug.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62537\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62537\">No</a>\n" ]
2023-12-02T14:11:06
2024-01-11T01:49:46
2024-01-11T01:49:42
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I've encountered a bug exhibiting inconsistency under XLA compilation when using `tf.matmu` with extra transpose output. In an effort to isolate the issue, we experimented with reducing the complexity of the models by altering the concatenation operation. We found that either removing one of the tensors involved in the `tf.concat` or `changing the shapes` of the tensors being concatenated prevents the error from occurring. This behavior suggests that the specific combination of tensor shapes and the concatenation operation could be the root cause for the inconsistency. I hope this information aids in pinpointing the root cause of the error. ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2, inp3, inp4, inp5): concat = tf.concat([inp4, inp3, inp2, inp1], axis=0) matmul = tf.matmul(inp5, concat) out = tf.transpose(matmul, perm=[1, 0]) extra_trans = tf.transpose(matmul, perm=[1, 0]) return out, extra_trans, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2, inp3, inp4, inp5): concat = tf.concat([inp4, inp3, inp2, inp1], axis=0) matmul = tf.matmul(inp5, concat) out = tf.transpose(matmul, perm=[1, 0]) return out, inputs = [ tf.random.uniform(shape=[1, 8], dtype=tf.float64), tf.random.uniform(shape=[1, 8], dtype=tf.float64), tf.random.uniform(shape=[1, 8], dtype=tf.float64), tf.random.uniform(shape=[1, 8], dtype=tf.float64), tf.random.uniform(shape=[61, 4], dtype=tf.float64), ] model1 = Model1() model2 = Model2() device = "cpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 485 / 488 (99.4%) Max absolute difference: 1.85405643 Max relative difference: 14.07911993 x: array([[0.760483, 1.281166, 1.877058, 1.311199, 2.140326, 1.761177, 0.658837, 0.750503, 0.631657, 1.055799, 0.810465, 1.464139, 0.645256, 1.319309, 1.004365, 0.408758, 0.189622, 0.616012,... 
y: array([[0.760483, 1.403855, 1.624952, 0.935791, 1.262206, 1.063867, 0.501015, 0.444913, 1.281166, 1.38788 , 0.780569, 1.458368, 1.646151, 1.374998, 1.941431, 1.119391, 1.877058, 0.899551,... ```
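One way to narrow down this kind of XLA-only mismatch is to compare the HLO that the two variants compile to. Below is a sketch using `experimental_get_compiler_ir` on `tf.function` (available in recent TF releases); it assumes `model1`, `model2`, and `inputs` from the reproduction above are in scope:

```python
import tensorflow as tf

# Wrap the calls so we have plain tf.function objects exposing the compiler IR.
@tf.function(jit_compile=True)
def run1(*args):
    return model1(*args)

@tf.function(jit_compile=True)
def run2(*args):
    return model2(*args)

# Render the post-optimization HLO for both variants; diffing the two texts
# shows how the extra transpose output changes fusion and layout decisions.
print(run1.experimental_get_compiler_ir(*inputs)(stage="optimized_hlo"))
print(run2.experimental_get_compiler_ir(*inputs)(stage="optimized_hlo"))
```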
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62537/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62537/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62536
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62536/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62536/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62536/events
https://github.com/tensorflow/tensorflow/pull/62536
2,022,043,826
PR_kwDOArmXAs5g9cIE
62,536
Correct a mistake in matvec's docstring in math_ops.py
{ "login": "WenjieZ", "id": 6860682, "node_id": "MDQ6VXNlcjY4NjA2ODI=", "avatar_url": "https://avatars.githubusercontent.com/u/6860682?v=4", "gravatar_id": "", "url": "https://api.github.com/users/WenjieZ", "html_url": "https://github.com/WenjieZ", "followers_url": "https://api.github.com/users/WenjieZ/followers", "following_url": "https://api.github.com/users/WenjieZ/following{/other_user}", "gists_url": "https://api.github.com/users/WenjieZ/gists{/gist_id}", "starred_url": "https://api.github.com/users/WenjieZ/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/WenjieZ/subscriptions", "organizations_url": "https://api.github.com/users/WenjieZ/orgs", "repos_url": "https://api.github.com/users/WenjieZ/repos", "events_url": "https://api.github.com/users/WenjieZ/events{/privacy}", "received_events_url": "https://api.github.com/users/WenjieZ/received_events", "type": "User", "site_admin": false }
[ { "id": 390482148, "node_id": "MDU6TGFiZWwzOTA0ODIxNDg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/awaiting%20review", "name": "awaiting review", "color": "bc3869", "default": false, "description": "Pull request awaiting review" }, { "id": 987666414, "node_id": "MDU6TGFiZWw5ODc2NjY0MTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/ready%20to%20pull", "name": "ready to pull", "color": "2cd643", "default": false, "description": "PR ready for merge process" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 1169364259, "node_id": "MDU6TGFiZWwxMTY5MzY0MjU5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:XS", "name": "size:XS", "color": "adafea", "default": false, "description": "CL Change Size: Extra Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for your pull request! It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nView this [failed invocation](https://github.com/tensorflow/tensorflow/pull/62536/checks?check_run_id=19247355982) of the CLA check for more information.\n\nFor the most up to date status, view the checks section at the bottom of the pull request." ]
2023-12-02T13:37:54
2023-12-05T09:20:48
2023-12-05T09:20:47
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62536", "html_url": "https://github.com/tensorflow/tensorflow/pull/62536", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62536.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62536.patch", "merged_at": "2023-12-05T09:20:47" }
The example in `matvec`'s docstring was computed incorrectly: the value shown as 64 should be 139.
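For context, a quick check of the corrected number, assuming the docstring example is the usual 2x3 matrix times length-3 vector (the concrete operands here are an assumption, inferred from the stated 64 -> 139 change):

```python
import tensorflow as tf

a = tf.constant([[1, 2, 3],
                 [4, 5, 6]])
b = tf.constant([7, 9, 11])

# Row-wise dot products: 1*7 + 2*9 + 3*11 = 58 and 4*7 + 5*9 + 6*11 = 139,
# so the second entry of the example result should read 139.
print(tf.linalg.matvec(a, b).numpy())  # [ 58 139]
```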
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62536/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62536/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62535
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62535/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62535/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62535/events
https://github.com/tensorflow/tensorflow/issues/62535
2,021,985,672
I_kwDOArmXAs54hQ2I
62,535
Inconsistencies in XLA Compiled Models Using `raw_ops.LRN` and `tf.math.reduce_min` with Extra Output
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go,\r\n\r\nI have replicated the reported behaviour with colab using TF v2.14, 2.15 and nightly. Please find the [gist](https://colab.research.google.com/gist/Venkat6871/f1eb68d213b7fbfe9e01f22b768c6ac6/62531_2-14_2-15-nightly-v.ipynb) here for reference.\r\n\r\nThank you!", "Hi,\r\n\r\nProviding an extra output will not result in matching outputs when comparing.\r\nThis behavior is expected.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62535\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62535\">No</a>\n" ]
2023-12-02T10:33:16
2024-01-11T01:49:50
2024-01-11T01:49:44
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? I've discovered an inconsistency in TensorFlow's XLA compilation for models that utilize `raw_ops.LRN` combined with `tf.math.reduce_min`. The issue results in inconsistent outputs when XLA compilation is enabled. Through rigorous testing, the following observations were made: 1. **Removing tf.math.reduce_min** from the model (and replacing it with an operation like tf.squeeze(axis=3)) prevents the error from occurring. 2. **Removing the transpose output** from the model's return outputs also avoids triggering the inconsistency. 3. **Directly returning** the result of `tf.math.reduce_min` prevents the error too. Interestingly, while random input values can trigger this inconsistency, we've identified that specific input tensors exacerbate the discrepancy in outputs. To assist in resolving this issue, we've prepared a [Google Colab notebook](https://colab.research.google.com/drive/18RR5wfk_DGrtkgRsaKZGkebH-IS20e4H?usp=sharing) that robustly reproduces the problem. This Colab notebook includes a specific set of inputs that consistently trigger the observed inconsistency, providing a reliable test case for debugging purposes. However, since I cannot access the V100 GPU on colab, I was unable to reproduce the error on colab. Please let me know if you cannot reproduce the error on V100 GPU. ### Standalone code to reproduce the issue ```python from typing import Dict import tensorflow as tf import pickle import os import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): cos = tf.cos(inp) transpose = tf.transpose(cos, perm=[4, 1, 2, 3, 0]) reduce_min = tf.math.reduce_min(transpose, axis=2) lrn = tf.raw_ops.LRN(input=reduce_min, depth_radius=1, bias=62.98211185437273, alpha=22.83989611654185, beta=0.9124946866870809) return lrn, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): cos = tf.cos(inp) transpose = tf.transpose(cos, perm=[4, 1, 2, 3, 0]) reduce_min = tf.math.reduce_min(transpose, axis=2) # tf.squeeze(transpose, axis=3) lrn = tf.raw_ops.LRN(input=reduce_min, depth_radius=1, bias=62.98211185437273, alpha=22.83989611654185, beta=0.9124946866870809) return lrn, transpose, inputs = [ tf.random.uniform(shape=[21, 27, 10, 1, 1], dtype=tf.float32), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') 
print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 21 / 567 (3.7%) Max absolute difference: 0.00186182 Max relative difference: 0.14022177 x: array([[[[0.01146 , 0.010013, 0.010936, 0.009743, 0.01017 , 0.010607, 0.01057 , 0.010013, 0.010744, 0.00963 , 0.010538, 0.010292, 0.009591, 0.010573, 0.01096 , 0.009638, 0.010813, 0.010375,... y: array([[[[0.011634, 0.009475, 0.011923, 0.009137, 0.010123, 0.01081 , 0.010774, 0.009508, 0.011514, 0.008942, 0.010976, 0.010411, 0.009055, 0.010694, 0.011764, 0.008721, 0.011516, 0.01012 ,... =========RUNNING WITH PICKLE FILES=========== =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 101 / 567 (17.8%) Max absolute difference: 0.00307472 Max relative difference: 0.5682127 x: array([[[[-0.002965, -0.01124 , -0.011004, -0.011653, -0.011652, -0.011463, -0.008849, -0.011621, -0.011612, -0.011653, -0.011243, -0.011639, -0.010928, -0.011649, -0.011555,... y: array([[[[-0.002476, -0.013035, -0.009902, -0.012564, -0.012114, -0.012089, -0.006702, -0.01318 , -0.01141 , -0.012575, -0.009897, -0.013295, -0.009197, -0.01293 , -0.010948,... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62535/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62535/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62534
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62534/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62534/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62534/events
https://github.com/tensorflow/tensorflow/issues/62534
2,021,966,101
I_kwDOArmXAs54hMEV
62,534
Inconsistency in XLA Compiled Models with `tf.experimental.numpy.triu` and `tf.math.reduce_min` with Extra Concat Output
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false }
[ { "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false } ]
null
[ "@Gwihwan-Go,\r\nHi,\r\n\r\nProviding additional outputs does not result in matching outputs. This behavior is expected. Inside the call function you are generating the value for tensor, which in each calls generates different random values because XLA currently ignores TF seeds to random operations which makes the output different for obvious reason. Please refer [known](https://www.tensorflow.org/xla/known_issues#random_number_generation_ignores_tf_seed) issues from XLA section. Also Providing additional outputs does not result in matching outputs. This behavior is expected. Thank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62534\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62534\">No</a>\n" ]
2023-12-02T09:30:54
2024-05-25T01:49:01
2024-05-25T01:48:52
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? I've encountered inconsistent behavior in XLA compiled models when using `tf.experimental.numpy.triu` combined with `tf.math.reduce_min`, and an additional concatenation node is returned from the model. This issue seems to be similar to #62533, particularly regarding the conditions that trigger the error—specifically, the data type(only int8 input tensor will trigger this error) involved and the dataflow of the models. This behavior is only seen on **gpu.** ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): triu = tf.experimental.numpy.triu(inp, k=0) reduce_min = tf.math.reduce_min(triu, axis=0) return reduce_min, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [57, 22] : int8 triu = tf.experimental.numpy.triu(inp, k=0) trans = tf.transpose(triu, perm=[1, 0]) concat = tf.concat([trans, trans], axis=0) reduce_min = tf.math.reduce_min(triu, axis=0) return reduce_min, concat, inputs = [ tf.cast(tf.random.uniform(shape=[57, 22], minval=-128, maxval=128, dtype=tf.int32), tf.int8), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 21 / 22 (95.5%) Max absolute difference: 127 Max relative difference: 1. x: array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0], dtype=int8) y: array([ -92, 0, -124, -38, -61, -30, -112, -118, -75, -120, -127, -112, -86, -96, -126, -127, -120, -125, -75, -122, -109, -125], dtype=int8) ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62534/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62534/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62533
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62533/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62533/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62533/events
https://github.com/tensorflow/tensorflow/issues/62533
2,021,959,767
I_kwDOArmXAs54hKhX
62,533
Inconsistent Behavior in XLA Compiled Model with `tf.greater` and `tf.round` Followed by Multiple Extra Outputs
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547538, "node_id": "MDU6TGFiZWwxMDk3NTQ3NTM4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:gpu", "name": "comp:gpu", "color": "0052cc", "default": false, "description": "GPU related issues" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nThe issue was replicated in TF2.14v and with GPU runtime. On CPU it works fine with both Tf2.14 and 2.15 versions.\r\n\r\nAttached [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/2775daae9eeb131e0d2be8514fbde315/62528.ipynb) for reference.\r\n\r\n", "Hi @GwiHwan-Go ,\r\n\r\nIt seems the difference is only precision related as the assertion became success if I change rtol to 0.01 from 0.001 . Attached [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/a6ad45993f6b3bd9395e31fbe6c138fc/62533_r1.ipynb) for reference.\r\n\r\nAs mentioned [here](https://github.com/tensorflow/tensorflow/issues/62287#issuecomment-1809045878) the XLA will undergo intermediate fusion and conversions to `float32` it is expected some precisonal differences with XLA.", "I understand there exist numerical precision problem in Deep-Learning Compiler.\r\nAs it is expected behavior as you mentioned, I'm closing the issue.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62533\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62533\">No</a>\n" ]
2023-12-02T09:14:18
2023-12-06T12:47:42
2023-12-06T12:47:39
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? I've encountered a bug in TensorFlow where the `tf.greate`r operation, when combined with tf.round and followed by the addition of two extra nodes, triggers inconsistent results. This behavior is only seen on **gpu**. Furthermore, our experiments indicate that: 1. Removing one of the additional output nodes resolves the inconsistency. 2. Using `tf.less` in place of `tf.greater` does not cause the same issue. 3. Omitting the `tf.round` operation from the models prevents the error. We hope that the details of our experiments will assist you in pinpointing the root cause of this erratic behavior. ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): trans = tf.transpose(inp, perm=[0, 1, 3, 2]) round = tf.round(trans) greater = tf.greater(tf.reverse(round, axis=[0, 2]), round) logical_and = tf.logical_and(greater, greater) return greater, logical_and, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): trans = tf.transpose(inp, perm=[0, 1, 3, 2]) round = tf.round(trans) greater = tf.greater(tf.reverse(round, axis=[0, 2]), round) logical_and = tf.logical_and(greater, greater) return greater, logical_and, trans, inputs = [ tf.random.uniform(shape=[15, 1, 50, 35], dtype=tf.float64), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 6550 / 26250 (25%) x: array([[[[False, False, False, ..., False, False, False], [False, False, False, ..., False, False, False], [False, False, False, ..., False, False, False],... y: array([[[[False, False, False, ..., False, False, False], [False, True, True, ..., False, False, False], [False, False, True, ..., True, False, False],... ```
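Beyond the tolerance explanation given in the comments, one way to localize such a boolean mismatch is to compare the numeric intermediates instead of the thresholded outputs, since `tf.greater` flips on any tiny difference when its operands are nearly equal. A debugging sketch, assuming `inputs` from the reproduction above is in scope:

```python
import numpy as np
import tensorflow as tf

@tf.function(jit_compile=True)
def intermediates(inp):
    trans = tf.transpose(inp, perm=[0, 1, 3, 2])
    rounded = tf.round(trans)
    return rounded, tf.reverse(rounded, axis=[0, 2])

jit_rounded, jit_reversed = intermediates(*inputs)

tf.config.run_functions_eagerly(True)
eager_rounded, eager_reversed = intermediates(*inputs)
tf.config.run_functions_eagerly(False)

# If these pass while the boolean outputs still differ, the discrepancy sits in
# the comparison/fusion stage rather than in the rounded values themselves.
np.testing.assert_allclose(jit_rounded.numpy(), eager_rounded.numpy())
np.testing.assert_allclose(jit_reversed.numpy(), eager_reversed.numpy())
print("intermediates match")
```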
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62533/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62533/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62532
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62532/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62532/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62532/events
https://github.com/tensorflow/tensorflow/issues/62532
2,021,947,542
I_kwDOArmXAs54hHiW
62,532
Inconsistency in XLA Compiled Model with Additional `tf.math.ceil` and `tf.transpose` Outputs
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "@GwiHwan-Go I was not able to replicate the issue on both [cpu](https://colab.research.google.com/gist/sushreebarsa/05ee5336d2a118927a9e8c8f42dbb6d1/62532.ipynb) and [gpu](https://colab.research.google.com/gist/sushreebarsa/585f1b475a6e6f6bd72ba2e7ad44713a/62532.ipynb) using colab. Please have a look at these gist and let us know if I am missing something here?\r\nThank you!", "Hi @sushreebarsa , have you tried this with V100 GPU?", "@GwiHwan-Go Inside the call function you are generating the value for tensor, which in each calls generates different random values because XLA currently ignores TF seeds to random operations which makes the output different for obvious reason. Please refer [known](https://www.tensorflow.org/xla/known_issues#random_number_generation_ignores_tf_seed) issues from XLA section.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62532\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62532\">No</a>\n" ]
2023-12-02T08:42:39
2024-01-06T01:48:46
2024-01-06T01:48:39
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? I've encountereda bug in TensorFlow where a model with additional `tf.math.ceil` and `tf.transpose` outputs yields inconsistent results under XLA compilation. This error is only seen on **gpu**. After a series of experiments to locate the root cause of this behavior, we found that (1) altering the outputs of Model2 (either by adding more output tensors or removing one of the current output tensors), or (2) removing the casting operation (along with changing the input tensor data type), or (3) eliminating one of the operators within the model2, does not trigger this error. These findings suggest that the inconsistency may be linked to the specific combination of operations and outputs in the XLA-compiled execution path. We hope these insights will assist you in pinpointing the root cause of the bug. ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np densenet = tf.keras.layers.Dense(units=1, dtype=tf.float32, autocast=False) class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): cast = tf.cast(inp, dtype=tf.float32) dense = densenet(cast) add = tf.add(dense, cast) reduce_min = tf.math.reduce_min(add, axis=1) return reduce_min, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): cast = tf.cast(inp, dtype=tf.float32) ceil = tf.math.ceil(cast) dense = densenet(cast) add = tf.add(cast, dense) trans1 = tf.transpose(add, perm=[1, 0]) reduce_min = tf.math.reduce_min(add, axis=1) return reduce_min, ceil, trans1, shape = [45, 29] inputs = [ tf.complex(tf.random.uniform(shape, dtype=tf.float64), tf.random.uniform(shape, dtype=tf.float64)), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 45 / 45 (100%) Max absolute difference: 1.0970391 Max relative difference: 2398.6377 x: array([ 0.178823, 0.136228, 0.895783, 1.074548, 0.868067, 0.623781, 1.129867, 0.734073, 
0.032901, -0.044131, 0.242464, 0.834015, 0.091566, 0.332864, -0.192198, 0.392632, -0.049305, 0.347389,... y: array([7.386290e-02, 1.035314e-02, 7.214887e-02, 2.470637e-02, 2.454510e-03, 2.196119e-02, 1.195706e-01, 3.828454e-04, 4.757595e-02, 1.569487e-02, 1.624023e-03, 1.525528e-01,... ```
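A minimal debugging sketch for this kind of eager-vs-XLA divergence (not part of the original report; `reduce_path` and its ops are illustrative stand-ins for the shared portion of Model1/Model2, and an XLA-capable build is assumed) is to dump the HLO that XLA compiles for each model and diff the two dumps:

```python
# Hedged sketch: dump the HLO that XLA compiles for a jit-compiled function.
# `reduce_path` is a hypothetical stand-in, not the report's own models.
import tensorflow as tf

@tf.function(jit_compile=True)
def reduce_path(x):
    cast = tf.cast(x, tf.float32)          # same complex -> float32 cast as the report
    return tf.math.reduce_min(cast, axis=1)

x = tf.complex(tf.random.uniform([45, 29], dtype=tf.float64),
               tf.random.uniform([45, 29], dtype=tf.float64))

# Comparing this dump for the Model1-shaped and Model2-shaped graphs shows which
# fused computation the extra ceil/transpose outputs end up in.
print(reduce_path.experimental_get_compiler_ir(x)(stage="hlo"))
```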
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62532/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62532/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62531
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62531/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62531/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62531/events
https://github.com/tensorflow/tensorflow/issues/62531
2,021,783,395
I_kwDOArmXAs54gfdj
62,531
AttributeError: module 'tensorflow' has no attribute 'contrib'
{ "login": "JT-Studios", "id": 106934268, "node_id": "U_kgDOBl-v_A", "avatar_url": "https://avatars.githubusercontent.com/u/106934268?v=4", "gravatar_id": "", "url": "https://api.github.com/users/JT-Studios", "html_url": "https://github.com/JT-Studios", "followers_url": "https://api.github.com/users/JT-Studios/followers", "following_url": "https://api.github.com/users/JT-Studios/following{/other_user}", "gists_url": "https://api.github.com/users/JT-Studios/gists{/gist_id}", "starred_url": "https://api.github.com/users/JT-Studios/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/JT-Studios/subscriptions", "organizations_url": "https://api.github.com/users/JT-Studios/orgs", "repos_url": "https://api.github.com/users/JT-Studios/repos", "events_url": "https://api.github.com/users/JT-Studios/events{/privacy}", "received_events_url": "https://api.github.com/users/JT-Studios/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097545817, "node_id": "MDU6TGFiZWwxMDk3NTQ1ODE3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:apis", "name": "comp:apis", "color": "0052cc", "default": false, "description": "Highlevel API related issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false }
[ { "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi **@JT-Studios** ,\r\nCould you please provide a code snippet to reproduce the issue reported here. And the error you're encountering, specifically the AttributeError stating that the module 'tensorflow' has no attribute 'contrib,' is likely due to changes in TensorFlow versions. The 'contrib' module was present in earlier versions of TensorFlow but has been deprecated in recent releases.\r\n\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62531\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62531\">No</a>\n" ]
2023-12-02T01:05:04
2024-01-10T01:49:24
2024-01-10T01:49:21
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 I think ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version 3.10.9 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I know this error happens because I am using a newer version of tensorflow but I can't downgrade my python version due to the hardware I am using. Is it possible to get the right tensorflow version for this (I think 2.4 should work) or is there some other way I can solve this error? Thanks! ### Standalone code to reproduce the issue ```shell I am trying to run this command: `python Tensorflow/models/research/object_detection/model_main_tf2.py --model_dir=Tensorflow/workspace/models/my_ssd_mobnet --pipeline_config_path=Tensorflow/workspace/models/my_ssd_mobnet/pipeline.config --num_train_steps=2000` ``` ### Relevant log output ```shell AttributeError: module 'tensorflow' has no attribute 'contrib' ```
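For context on why the error appears (a hedged sketch, not the Object Detection API's own fix): `tf.contrib` only exists in TensorFlow 1.x, so on a 2.x install the choices are running the old code on TF 1.15 or pointing it at the libraries the contrib symbols moved to. The `tf_slim` import below is the usual replacement for `tf.contrib.slim`; whether it covers this particular script is an assumption.

```python
# Hedged sketch: tf.contrib is absent in TF 2.x, so guard and redirect old imports.
import tensorflow as tf

print(tf.__version__)  # on 2.x this is why tf.contrib raises AttributeError

try:
    slim = tf.contrib.slim      # only works on TF 1.x (last release with contrib: 1.15)
except AttributeError:
    import tf_slim as slim      # assumed replacement; requires `pip install tf-slim`
```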
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62531/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62531/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62530
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62530/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62530/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62530/events
https://github.com/tensorflow/tensorflow/issues/62530
2,021,461,779
I_kwDOArmXAs54fQ8T
62,530
Internal quantize ops don't match external quantization
{ "login": "EClemMarq", "id": 79734663, "node_id": "MDQ6VXNlcjc5NzM0NjYz", "avatar_url": "https://avatars.githubusercontent.com/u/79734663?v=4", "gravatar_id": "", "url": "https://api.github.com/users/EClemMarq", "html_url": "https://github.com/EClemMarq", "followers_url": "https://api.github.com/users/EClemMarq/followers", "following_url": "https://api.github.com/users/EClemMarq/following{/other_user}", "gists_url": "https://api.github.com/users/EClemMarq/gists{/gist_id}", "starred_url": "https://api.github.com/users/EClemMarq/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/EClemMarq/subscriptions", "organizations_url": "https://api.github.com/users/EClemMarq/orgs", "repos_url": "https://api.github.com/users/EClemMarq/repos", "events_url": "https://api.github.com/users/EClemMarq/events{/privacy}", "received_events_url": "https://api.github.com/users/EClemMarq/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 1463677878, "node_id": "MDU6TGFiZWwxNDYzNjc3ODc4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:performance", "name": "type:performance", "color": "159b2e", "default": false, "description": "Performance Issue" }, { "id": 1661751498, "node_id": "MDU6TGFiZWwxNjYxNzUxNDk4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TFLiteConverter", "name": "TFLiteConverter", "color": "bfdadc", "default": false, "description": "For issues related to TFLite converter" }, { "id": 2671351731, "node_id": "MDU6TGFiZWwyNjcxMzUxNzMx", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/ModelOptimizationToolkit", "name": "ModelOptimizationToolkit", "color": "BFD629", "default": false, "description": "TF Model Optimization Toolkit" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
open
false
{ "login": "abattery", "id": 3203059, "node_id": "MDQ6VXNlcjMyMDMwNTk=", "avatar_url": "https://avatars.githubusercontent.com/u/3203059?v=4", "gravatar_id": "", "url": "https://api.github.com/users/abattery", "html_url": "https://github.com/abattery", "followers_url": "https://api.github.com/users/abattery/followers", "following_url": "https://api.github.com/users/abattery/following{/other_user}", "gists_url": "https://api.github.com/users/abattery/gists{/gist_id}", "starred_url": "https://api.github.com/users/abattery/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abattery/subscriptions", "organizations_url": "https://api.github.com/users/abattery/orgs", "repos_url": "https://api.github.com/users/abattery/repos", "events_url": "https://api.github.com/users/abattery/events{/privacy}", "received_events_url": "https://api.github.com/users/abattery/received_events", "type": "User", "site_admin": false }
[ { "login": "abattery", "id": 3203059, "node_id": "MDQ6VXNlcjMyMDMwNTk=", "avatar_url": "https://avatars.githubusercontent.com/u/3203059?v=4", "gravatar_id": "", "url": "https://api.github.com/users/abattery", "html_url": "https://github.com/abattery", "followers_url": "https://api.github.com/users/abattery/followers", "following_url": "https://api.github.com/users/abattery/following{/other_user}", "gists_url": "https://api.github.com/users/abattery/gists{/gist_id}", "starred_url": "https://api.github.com/users/abattery/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abattery/subscriptions", "organizations_url": "https://api.github.com/users/abattery/orgs", "repos_url": "https://api.github.com/users/abattery/repos", "events_url": "https://api.github.com/users/abattery/events{/privacy}", "received_events_url": "https://api.github.com/users/abattery/received_events", "type": "User", "site_admin": false }, { "login": "pkgoogle", "id": 132095473, "node_id": "U_kgDOB9-d8Q", "avatar_url": "https://avatars.githubusercontent.com/u/132095473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pkgoogle", "html_url": "https://github.com/pkgoogle", "followers_url": "https://api.github.com/users/pkgoogle/followers", "following_url": "https://api.github.com/users/pkgoogle/following{/other_user}", "gists_url": "https://api.github.com/users/pkgoogle/gists{/gist_id}", "starred_url": "https://api.github.com/users/pkgoogle/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pkgoogle/subscriptions", "organizations_url": "https://api.github.com/users/pkgoogle/orgs", "repos_url": "https://api.github.com/users/pkgoogle/repos", "events_url": "https://api.github.com/users/pkgoogle/events{/privacy}", "received_events_url": "https://api.github.com/users/pkgoogle/received_events", "type": "User", "site_admin": false }, { "login": "sawantkumar", "id": 166358452, "node_id": "U_kgDOCepttA", "avatar_url": "https://avatars.githubusercontent.com/u/166358452?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sawantkumar", "html_url": "https://github.com/sawantkumar", "followers_url": "https://api.github.com/users/sawantkumar/followers", "following_url": "https://api.github.com/users/sawantkumar/following{/other_user}", "gists_url": "https://api.github.com/users/sawantkumar/gists{/gist_id}", "starred_url": "https://api.github.com/users/sawantkumar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sawantkumar/subscriptions", "organizations_url": "https://api.github.com/users/sawantkumar/orgs", "repos_url": "https://api.github.com/users/sawantkumar/repos", "events_url": "https://api.github.com/users/sawantkumar/events{/privacy}", "received_events_url": "https://api.github.com/users/sawantkumar/received_events", "type": "User", "site_admin": false } ]
null
[ " Hi @pkgoogle,\r\n\r\nI have verified that the weights and biases are the same for both the model but zero point and scale are not the same for these two models. Please find the [gist.](https://colab.research.google.com/gist/LakshmiKalaKadali/4b031619787edc6b8952d653cb3f3009/external_internal_quantize-tf2-15.ipynb) Could you please look into the issue. \r\n\r\n\r\nThank You", "I tried looking at the MLIR representations:\r\n\r\nInternal model:\r\n```sh\r\n$ ./flatbuffer_translate --tflite-flatbuffer-to-mlir MNIST_PTQ_internal.tflite\r\n...\r\n%0 = \"tfl.quantize\"(%arg0) {qtype = tensor<?x28x28x!quant.uniform<i8:f32, 0.0039215688593685627:-128>>} : (tensor<?x28x28xf32>) -> tensor<?x28x28x!quant.uniform<i8:f32, 0.0039215688593685627:-128>>\r\n...\r\n%10 = \"tfl.dequantize\"(%9) : (tensor<?x10x!quant.uniform<i8:f32, 3.906250e-03:-128>>) -> tensor<?x10xf32>\r\n```\r\n\r\nExternal model:\r\n```sh\r\n$ ./flatbuffer_translate --tflite-flatbuffer-to-mlir MNIST_PTQ_external.tflite\r\n...\r\nfunc.func @main(%arg0: tensor<?x28x28x!quant.uniform<i8:f32, 0.0039215688593685627:-128>> {tf_saved_model.index_path = [\"flatten_input\"]}) -> (tensor<?x10x!quant.uniform<i8:f32, 3.906250e-03:-128>> {tf_saved_model.index_path = [\"softmax\"]}) attributes {tf.entry_function = {inputs = \"serving_default_flatten_input:0\", outputs = \"StatefulPartitionedCall:0\"}, tf_saved_model.exported_names = [\"serving_default\"]}\r\n...\r\nreturn %8 : tensor<?x10x!quant.uniform<i8:f32, 3.906250e-03:-128>>\r\n```\r\n\r\nAs you said the scale factors are equivalent. I tried explicitly casting the external computations to np.float32 (just in case we got the rare python floats are actually 64bit issues) but that didn't change anything. I also cloned the colab and did each model in their separate notebooks and still got this discrepancy (just in case there's some randomization from quantizing/evaluating in a different order or one after another).\r\n\r\n@abattery, can you please take a look? Thanks.\r\n\r\n", "Hi @EClemMarq , if you are able to access a linux system you may be able to resolve your issue by using [AI-Edge-Torch](https://github.com/google-ai-edge/ai-edge-torch), you can find more information here: [googleblog](https://developers.googleblog.com/en/ai-edge-torch-high-performance-inference-of-pytorch-models-on-mobile-devices/).\r\n\r\nI have actually created a simple script for converting your model here , you can quantize your model after this step:\r\n\r\n```\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport ai_edge_torch\r\n\r\nclass SimpleNN(nn.Module):\r\n def __init__(self):\r\n super(SimpleNN, self).__init__()\r\n self.flatten = nn.Flatten()\r\n self.fc1 = nn.Linear(28 * 28, 128)\r\n self.dropout = nn.Dropout(0.2)\r\n self.fc2 = nn.Linear(128, 10)\r\n self.softmax = nn.Softmax(dim=1)\r\n\r\n def forward(self, x):\r\n x = self.flatten(x)\r\n x = F.relu(self.fc1(x))\r\n x = self.dropout(x)\r\n x = self.fc2(x)\r\n x = self.softmax(x)\r\n return x\r\n\r\nmodel = SimpleNN()\r\n\r\nexample_input = torch.randn(1, 1, 28, 28)\r\n\r\nedge_model = ai_edge_torch.convert(model.eval(), (example_input,))\r\n\r\nedge_model.export('simple_nn.tflite')\r\n```\r\n\r\n\r\nIf you want to, you can actually try visualizing the result in [model-explorer](https://github.com/google-ai-edge/model-explorer) as well.\r\n\r\nPlease try them out and let us know if this resolves your issue. If you still need further help, feel free to open a new issue at the respective repo.\r\n\r\n" ]
2023-12-01T19:27:53
2024-06-12T11:23:36
null
NONE
null
null
null
### 1. System information - Occurs in Google Colab w/ TF 2.14 - Have also verified w. TF 2.7 (Anaconda) on Windows 10 ### 2. Code [Colab to reproduce issue](https://colab.research.google.com/drive/17aDzDSG-1DcE3sJn16B4Q4zd9QewTXkT?usp=sharing) The Colab trains a basic MNIST model (deterministically, so your model should be the same as mine) and performs Post-Training Quantization. One quantized model (left side of image) uses INT8 input/output, requiring the provided external quant/de-quant code. The other quantized model (right side of image) takes FP32 input, embedding the quantize/dequantize layers into the model graph. ![image](https://github.com/tensorflow/tensorflow/assets/79734663/a91ab1cc-912e-455b-a711-1b433b736088) ### 3. Failure after conversion Conversion executes successfully, but model outputs differ. Why? Shouldn't the two methods be interchangeable? I have verified that the weights, biases, quantization zero points and quantization scales match between the two models. I have also run into this issue using a CPU-only Colab runtime, meaning it doesn't happen because one approach quantizes on the CPU while the other quantizes on the GPU/TPU. Obviously, the difference between 0.5938 and 0.5468 is not significant in this MNIST model. However, in applications where softmax outputs are averaged to produce a final output, such small discrepancies can significantly alter model accuracy. Thank you!
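A minimal sketch of the "external" quantize/dequantize path being compared here, using the scale and zero point the TFLite interpreter reports (the model filename and the random input are placeholders, not taken from the Colab):

```python
# Hedged sketch of external INT8 quantization; "mnist_int8.tflite" is a placeholder name.
import numpy as np
import tensorflow as tf

interpreter = tf.lite.Interpreter(model_path="mnist_int8.tflite")
interpreter.allocate_tensors()
inp = interpreter.get_input_details()[0]
out = interpreter.get_output_details()[0]

in_scale, in_zp = inp["quantization"]
x = np.random.rand(1, 28, 28).astype(np.float32)                  # stand-in image
xq = np.clip(np.round(x / in_scale) + in_zp, -128, 127).astype(np.int8)

interpreter.set_tensor(inp["index"], xq)
interpreter.invoke()
yq = interpreter.get_tensor(out["index"])

out_scale, out_zp = out["quantization"]
y = (yq.astype(np.float32) - out_zp) * out_scale                  # external dequantize
```

In exact arithmetic this should agree with the embedded Quantize/Dequantize ops; one hedged guess at the source of small gaps like 0.5938 vs 0.5468 is a rounding-mode difference between `np.round` (half-to-even) and the kernel's input quantizer, which can shift a borderline value by one quantization step.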
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62530/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62530/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62529
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62529/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62529/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62529/events
https://github.com/tensorflow/tensorflow/pull/62529
2,021,393,243
PR_kwDOArmXAs5g7Rvd
62,529
[oneDNN] Adding a unit test for Conv + BiasAdd + Add + <activation> fusion
{ "login": "othakkar", "id": 87341429, "node_id": "MDQ6VXNlcjg3MzQxNDI5", "avatar_url": "https://avatars.githubusercontent.com/u/87341429?v=4", "gravatar_id": "", "url": "https://api.github.com/users/othakkar", "html_url": "https://github.com/othakkar", "followers_url": "https://api.github.com/users/othakkar/followers", "following_url": "https://api.github.com/users/othakkar/following{/other_user}", "gists_url": "https://api.github.com/users/othakkar/gists{/gist_id}", "starred_url": "https://api.github.com/users/othakkar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/othakkar/subscriptions", "organizations_url": "https://api.github.com/users/othakkar/orgs", "repos_url": "https://api.github.com/users/othakkar/repos", "events_url": "https://api.github.com/users/othakkar/events{/privacy}", "received_events_url": "https://api.github.com/users/othakkar/received_events", "type": "User", "site_admin": false }
[ { "id": 390482148, "node_id": "MDU6TGFiZWwzOTA0ODIxNDg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/awaiting%20review", "name": "awaiting review", "color": "bc3869", "default": false, "description": "Pull request awaiting review" }, { "id": 987666414, "node_id": "MDU6TGFiZWw5ODc2NjY0MTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/ready%20to%20pull", "name": "ready to pull", "color": "2cd643", "default": false, "description": "PR ready for merge process" }, { "id": 1097545273, "node_id": "MDU6TGFiZWwxMDk3NTQ1Mjcz", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:grappler", "name": "comp:grappler", "color": "0052cc", "default": false, "description": "Grappler related issues" }, { "id": 1169365682, "node_id": "MDU6TGFiZWwxMTY5MzY1Njgy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:L", "name": "size:L", "color": "adafea", "default": false, "description": "CL Change Size: Large" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[]
2023-12-01T18:34:35
2023-12-04T04:25:28
2023-12-04T04:25:28
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62529", "html_url": "https://github.com/tensorflow/tensorflow/pull/62529", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62529.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62529.patch", "merged_at": "2023-12-04T04:25:28" }
This PR adds a generic unit test for Conv2D/3D + BiasAdd + Add + (various) <activation> fusion.
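For orientation, a sketch of the op pattern such a fusion test exercises (illustrative only, not the unit test added by this PR; shapes are arbitrary):

```python
# Hedged sketch of the Conv2D -> BiasAdd -> Add -> activation pattern that the
# grappler remapper can rewrite into a single fused op when oneDNN is enabled.
import tensorflow as tf

@tf.function
def conv_bias_add_relu(x, w, b, residual):
    conv = tf.nn.conv2d(x, w, strides=[1, 1, 1, 1], padding="SAME")
    bias = tf.nn.bias_add(conv, b)
    summed = tf.add(bias, residual)
    return tf.nn.relu(summed)

x = tf.random.normal([1, 8, 8, 3])
w = tf.random.normal([3, 3, 3, 4])
b = tf.random.normal([4])
residual = tf.random.normal([1, 8, 8, 4])
print(conv_bias_add_relu(x, w, b, residual).shape)  # (1, 8, 8, 4)
```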
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62529/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62529/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62528
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62528/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62528/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62528/events
https://github.com/tensorflow/tensorflow/issues/62528
2,021,192,943
I_kwDOArmXAs54ePTv
62,528
Inconsistency in XLA Compiled Model with `tf.add, tf.experimental.numpy.triu, tf.math.reduce_max` and Additional Transpose Output on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 1315098405, "node_id": "MDU6TGFiZWwxMzE1MDk4NDA1", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/regression%20issue", "name": "regression issue", "color": "50bcc4", "default": false, "description": "To spot regression issues in latest version" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nThe issue does not replicable in TF2.14v and TF2.15 GPU package not installable on colab now. Could you please confirm the behaviour with TF2.14v also.With Tf2.15 and CPU the code works fine. Attached [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/b2f560f2a44359856157f0cbcb9efd06/62528.ipynb) for reference.\r\n\r\nThanks!", "Hi @SuryanarayanaY ,\r\n\r\nWe executed above code using Tf2.14v with same environment and encountered no errors. However, when we ran the refered behavior on Tf2.15v.", "@GwiHwan-Go ,\r\n\r\nThanks for clarification. Then it seems a regression issue." ]
2023-12-01T16:18:38
2023-12-06T07:10:30
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? A combination of `tf.add`, `tf.experimental.numpy.triu`, and `tf.math.reduce_max` with an additional transpose output results in inconsistent outputs under XLA compilation. Interestingly, this inconsistency is not triggered when altering the composition of these operations or changing the data type away from `int8`. This behavior is only seen on **GPU** (Tesla V100). ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2): # Forward pass logic using TensorFlow operations # inp1: [13, 1] : int8 # inp2: [13, 60] : int8 add = tf.add(inp2, inp1) triu = tf.experimental.numpy.triu(add, k=0) reduce_max = tf.math.reduce_max(triu, axis=1) return triu, reduce_max, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2): # Forward pass logic using TensorFlow operations # inp1: [13, 1] : int8 # inp2: [13, 60] : int8 add = tf.add(inp2, inp1) transpose = tf.transpose(add, perm=[1, 0]) triu = tf.experimental.numpy.triu(add, k=0) reduce_max = tf.math.reduce_max(triu, axis=1) return triu, reduce_max, transpose inputs = [ tf.cast(tf.random.uniform(shape=[13, 1], minval=-128, maxval=128, dtype=tf.int32), tf.int8), tf.cast(tf.random.uniform(shape=[13, 60], minval=-128, maxval=128, dtype=tf.int32), tf.int8), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 2023-12-01 16:14:42.359231: I external/local_xla/xla/service/service.cc:168] XLA service 0x5621a8b5fae0 initialized for platform CUDA (this does not guarantee that XLA will be used). =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th Mismatched elements: 12 / 13 (92.3%) Max absolute difference: 126 Max relative difference: 18. x: array([119, 126, 126, 123, 127, 127, 120, 127, 127, 107, 127, 126, 109], dtype=int8) y: array([ -7, -10, 114, 114, 86, 125, 117, 120, 127, 86, 56, 114, 108], dtype=int8) ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62528/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62528/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62527
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62527/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62527/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62527/events
https://github.com/tensorflow/tensorflow/issues/62527
2,021,125,182
I_kwDOArmXAs54d-w-
62,527
Inconsistency in XLA compiled Models with Sliced Matrix Multiplication and Additional Transpose Output on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "@GwiHwan-Go I didn't observe the error reported in both [gpu](https://colab.research.google.com/gist/sushreebarsa/d91675515a38b3c2ddd7b333b6549812/62527-cpu.ipynb#scrollTo=YlqadxaeNgnH) and [cpu](https://colab.research.google.com/gist/sushreebarsa/c8d3a720a012fd4c2091c9c195d97a54/62527-cpu.ipynb). Please find the attached gists and confirm the same.\r\nThank you!", "Hi, @sushreebarsa have you tried this with V100 GPU?", "@GwiHwan-Go Inside the call function you are generating the value for tensor, which in each calls generates different random values because XLA currently ignores TF seeds to random operations which makes the output different for obvious reason. Please refer [known](https://www.tensorflow.org/xla/known_issues#random_number_generation_ignores_tf_seed) issues from XLA section.\r\n\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62527\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62527\">No</a>\n" ]
2023-12-01T15:42:25
2024-01-06T01:48:48
2024-01-06T01:48:41
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? In TensorFlow 2.15.0, I've encountered an inconsistency under XLA compilation involving sliced matrix multiplication combined with an extra transposed output. This discrepancy is not observed **on CPU** or when the additional **transpose output is removed.** ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): def __init__(self): super().__init__() # Tensor objects (with comments for shapes) @tf.function(jit_compile=True) def __call__(self, inp, inp2): concat = tf.concat([inp, inp2], axis=1) sliced = concat[:, -17:17:4] matmul = tf.matmul(sliced, sliced) return matmul, class Model2(tf.keras.Model): def __init__(self): super().__init__() @tf.function(jit_compile=True) def __call__(self, inp, inp2): concat = tf.concat([inp, inp2], axis=1) transposed = tf.transpose(concat, perm=[1, 0]) sliced = concat[:, -17:17:4] matmul = tf.matmul(sliced, sliced) return matmul, transposed, inputs = [ tf.random.uniform(shape=[5, 1], dtype=tf.float16), tf.random.uniform(shape=[5, 16], dtype=tf.float16), ] model1 = Model1() model2 = Model2() device = "gpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 2023-12-01 15:39:59.507327: I external/local_xla/xla/service/service.cc:168] XLA service 0x5629c95d1e50 initialized for platform CUDA (this does not guarantee that XLA w =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 25 / 25 (100%) Max absolute difference: 1.549 Max relative difference: 0.904 x: array([[2.295 , 2.365 , 1.842 , 2.246 , 2.33 ], [1.873 , 1.657 , 1.174 , 1.865 , 1.745 ], [1.298 , 1.384 , 1.203 , 1.486 , 1.062 ],... y: array([[2.408 , 2.37 , 2.574 , 2.504 , 2.104 ], [0.984 , 1.614 , 1.528 , 1.3955, 1.48 ], [1.441 , 1.555 , 1.679 , 1.263 , 0.9844],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62527/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62527/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62526
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62526/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62526/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62526/events
https://github.com/tensorflow/tensorflow/issues/62526
2,020,978,366
I_kwDOArmXAs54da6-
62,526
1D/2D Tensor strided sliding window
{ "login": "niemiaszek", "id": 46727980, "node_id": "MDQ6VXNlcjQ2NzI3OTgw", "avatar_url": "https://avatars.githubusercontent.com/u/46727980?v=4", "gravatar_id": "", "url": "https://api.github.com/users/niemiaszek", "html_url": "https://github.com/niemiaszek", "followers_url": "https://api.github.com/users/niemiaszek/followers", "following_url": "https://api.github.com/users/niemiaszek/following{/other_user}", "gists_url": "https://api.github.com/users/niemiaszek/gists{/gist_id}", "starred_url": "https://api.github.com/users/niemiaszek/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/niemiaszek/subscriptions", "organizations_url": "https://api.github.com/users/niemiaszek/orgs", "repos_url": "https://api.github.com/users/niemiaszek/repos", "events_url": "https://api.github.com/users/niemiaszek/events{/privacy}", "received_events_url": "https://api.github.com/users/niemiaszek/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473184161, "node_id": "MDU6TGFiZWw0NzMxODQxNjE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:support", "name": "type:support", "color": "159b2e", "default": false, "description": "Support issues" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false }
[ { "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @niemiaszek ,\r\n\r\nI have replicated the reported behaviour with colab using TF v2.14, 2.15 and tf-nightly. Please find the [gist ](https://colab.research.google.com/gist/Venkat6871/534a54b6bfe499c8bd7b41fb148a02c6/62526_2-14_2-15-nightly.ipynb)here for reference.\r\n\r\nThank you!", "> Hi @moberweger ,\r\n> \r\n> I have replicated the reported behaviour with colab using TF v2.14, 2.15 and tf-nightly. Please find the [gist ](https://colab.research.google.com/gist/Venkat6871/534a54b6bfe499c8bd7b41fb148a02c6/62526_2-14_2-15-nightly.ipynb)here for reference.\r\n> \r\n> Thank you!\r\n\r\n@Venkat6871 I guess you pinged the wrong person under this issue, as this person created different issue.\r\nAnyway, I've checked your gist and it's accurate, replicating my behaviour. Thanks.", "Hello @Venkat6871 , @SuryanarayanaY . Is it possible to get some tensorflower assigned to help with this issue?\r\n\r\nI've managed to resolve this issue by using `tf.image.extract_patches` as slicing mechanism for 1D or 2D arrays, but honestly this is not an universal solution and I can imagine getting into trouble in future where there is no such native high-level abstract op.\r\n\r\nI know that it's probably my skill issue in writing tf.function code for usage in tf.data pipelines, but I couldn't really find any general solution", "Hi **@niemiaszek** ,\r\n\r\nSorry for the delay, I will share another approach with you now is to use tf.signal.frame, which is designed for creating frames of a signal (which can be your audio data or melspectrogram data) with specified frame length and step. This function will be suitable for creating a strided sliding window.\r\n\r\nHere you are encountering error is due to trying to use python's native iteration and slicing mechanisms directly on tensorflow tensors within a graph-executed context such as within @tf.function or when using tf.data.Dataset.map.\r\n\r\nTensorFlow's execution model doesn't allow for Python-level iteration over tensors directly because the shapes and operations need to be graph-compile-time determinable for performance and distributability.\r\n\r\nHere's how you can adjust your function to use tf.signal.frame for creating a sliding window over a 1D tensor (like your audio data). If your tensor has more dimensions (like your MelSpectrogram [time, freq]), you may need to adjust the approach slightly, but the principle remains the same:\r\n\r\nHere I am providing [gist](https://colab.sandbox.google.com/gist/Venkat6871/6e9d8b31f110c495c7c5ffa1abd3955a/62526_2-15-nightly-v.ipynb) for your reference.\r\n\r\nThis code uses tf.signal.frame to create the sliding window over the tensor. Note that tf.signal.frame expects at least a 2D tensor, so if you're working with a 1D tensor (like an audio waveform), you might need to expand its dimensions first. After processing, you can optionally squeeze the tensor to remove any unwanted dimensions.\r\n\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. 
Thank you.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62526\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62526\">No</a>\n" ]
2023-12-01T14:20:27
2024-03-10T21:10:47
2024-03-10T21:10:44
NONE
null
null
null
### Issue type Support ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version 2.13 ### Custom code Yes ### OS platform and distribution ubuntu 22.04 ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I tried to implement a strided sliding window over audio `[None,]` or a MelSpectrogram `[time, freq]`. It seems like a very straightforward thing to do over a regular Python array. I had an implementation in a notebook which worked in eager mode and with the @tf.function decorator: ```python @tf.function def slice_tensor(tensor): s_size = 3 s_stride = 2 slices = tf.stack([input[i : i + s_size , ...] for i in range(0, tensor.shape[0]-s_size+1, s_stride)]) return slices slice_tensor(tf.range(9)) ``` ``` <tf.Tensor: shape=(4, 3), dtype=int32, numpy= array([[0, 1, 2], [2, 3, 4], [4, 5, 6], [6, 7, 8]], dtype=int32)> ``` However, using it in a tf.data pipeline first resulted in: ```python tf.data.Dataset.range(8,12).map(lambda x: tf.range(x)).map(slice_tensor) ``` `TypeError: unsupported operand type(s) for -: 'NoneType' and 'int'`. I switched from `tensor.shape[0]` to `tf.shape(tensor)[0]`, which results in: ```OperatorNotAllowedInGraphError: Iterating over a symbolic `tf.Tensor` is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature.``` I was struggling to come up with a solution to this and ended up using `tf.image.extract_patches()`, but it makes my data pipeline more convoluted and seems like overkill for my task. I tried looking up the `extract_patches()` implementation for inspiration, but it's not a Python op. Does anyone know how to deal with such a problem? I can imagine it requires using some TensorFlow-native mapping or looping mechanism like `tf.map_fn()`. ### Standalone code to reproduce the issue ```shell @tf.function def slice_tensor(tensor): s_size = 3 s_stride = 2 slices = tf.stack([input[i : i + s_size , ...] for i in range(0, tf.shape(tensor)[0]-s_size+1, s_stride)]) return slices tf.data.Dataset.range(8,12).map(lambda x: tf.range(x)).map(slice_tensor) ``` ### Relevant log output ```shell OperatorNotAllowedInGraphError: in user code: File "/tmp/ipykernel_14140/728857128.py", line 5, in slice_tensor * slices = tf.stack([input[i : i + s_size , ...] for i in range(0, tf.shape(tensor)[0]-s_size+1, s_stride)]) OperatorNotAllowedInGraphError: Iterating over a symbolic `tf.Tensor` is not allowed: AutoGraph did convert this function. This might indicate you are trying to use an unsupported feature. ```
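A minimal sketch of the `tf.signal.frame` approach later suggested in the thread, reusing the question's `s_size`/`s_stride` values (framing along the leading axis is an assumption, so `[time, freq]` spectrograms window over time):

```python
# Hedged sketch: tf.signal.frame produces the same strided windows and traces
# cleanly inside tf.data, because it never iterates over a symbolic shape.
import tensorflow as tf

def slice_tensor(tensor, s_size=3, s_stride=2):
    # axis=0 frames the leading (time) axis, so [time, freq] inputs also work.
    return tf.signal.frame(tensor, frame_length=s_size, frame_step=s_stride, axis=0)

ds = tf.data.Dataset.range(8, 12).map(lambda x: tf.range(x)).map(slice_tensor)
for windows in ds:
    print(windows.numpy())   # for length 9: [[0 1 2] [2 3 4] [4 5 6] [6 7 8]]
```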
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62526/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62526/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62525
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62525/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62525/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62525/events
https://github.com/tensorflow/tensorflow/issues/62525
2,020,570,614
I_kwDOArmXAs54b3X2
62,525
tf.compat.v1.conv2d_backprop_input does not work well with bfloat16 input.
{ "login": "Unireverse", "id": 25277574, "node_id": "MDQ6VXNlcjI1Mjc3NTc0", "avatar_url": "https://avatars.githubusercontent.com/u/25277574?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Unireverse", "html_url": "https://github.com/Unireverse", "followers_url": "https://api.github.com/users/Unireverse/followers", "following_url": "https://api.github.com/users/Unireverse/following{/other_user}", "gists_url": "https://api.github.com/users/Unireverse/gists{/gist_id}", "starred_url": "https://api.github.com/users/Unireverse/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Unireverse/subscriptions", "organizations_url": "https://api.github.com/users/Unireverse/orgs", "repos_url": "https://api.github.com/users/Unireverse/repos", "events_url": "https://api.github.com/users/Unireverse/events{/privacy}", "received_events_url": "https://api.github.com/users/Unireverse/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 2498949452, "node_id": "MDU6TGFiZWwyNDk4OTQ5NDUy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.5", "name": "TF 2.5", "color": "5319e7", "default": false, "description": "Issues related to TF 2.5" } ]
closed
false
{ "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false }
[ { "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false } ]
null
[ "@Unireverse,\r\nI tried to execute the mentioned code on latest tensorflow v2.14 with the both [CPU](https://colab.research.google.com/gist/tilakrayal/a83cfc4c363a62e2754fe3c7144446ae/untitled1544.ipynb) and GPU enviroment, and it was executed without any issue/error & also observed that the output is also intended. Kindly find the gist of it [here](https://colab.research.google.com/gist/tilakrayal/854dc4975edbdab70999baf2424ad394/untitled1545_gpu.ipynb). \r\n\r\nAlso the tensorflow v2.5 which is pretty older, recommended to use the latest tensorflow versions. Thank you!\r\n\r\n", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62525\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62525\">No</a>\n" ]
2023-12-01T10:23:45
2023-12-20T01:42:53
2023-12-20T01:42:42
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version tf2.5.3 ### Custom code Yes ### OS platform and distribution Linux Ubuntu 20.04 ### Mobile device _No response_ ### Python version 3.8.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version 11.7/cudnn8 ### GPU model and memory A100-40G ### Current behavior? When using bfloat16 as the dtype of `filter` and `out_backprop` in the tf.compat.v1.nn.conv2d_backprop_input API, the output is empty. I wonder if this is a bug related to bfloat16. ### Standalone code to reproduce the issue ```shell import tensorflow as tf a = tf.ones([2,3,3,6], dtype=tf.bfloat16) b = tf.ones([3,3,1,6], dtype=tf.bfloat16) c = tf.compat.v1.nn.conv2d_backprop_input( input_sizes=[2,7,7,1], filter=b, out_backprop=a, strides=[1,2,2,1], padding="VALID", data_format="NHWC", dilations=[1,1,1,1] ) print(c) ``` ### Relevant log output ```shell The output is : tf.Tensor([], shape=(0,), dtype=float32) ```
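Editor's note (a hedged sketch, not part of the original report): the maintainer comment on this issue reports that the snippet runs cleanly on TF 2.14, so a simple cross-check is to compare the bfloat16 result against a float32 reference; the `dilations` keyword spelling is assumed here.

```python
import tensorflow as tf

def backprop_input(dtype):
    out_backprop = tf.ones([2, 3, 3, 6], dtype=dtype)
    filt = tf.ones([3, 3, 1, 6], dtype=dtype)
    # Gradient of conv2d w.r.t. its input, for the shapes used in the report.
    return tf.compat.v1.nn.conv2d_backprop_input(
        input_sizes=[2, 7, 7, 1], filter=filt, out_backprop=out_backprop,
        strides=[1, 2, 2, 1], padding="VALID", data_format="NHWC",
        dilations=[1, 1, 1, 1])

ref = backprop_input(tf.float32)
bf16 = backprop_input(tf.bfloat16)
print(ref.shape, bf16.shape)  # expected (2, 7, 7, 1) for both on recent releases
```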
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62525/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62525/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62524
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62524/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62524/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62524/events
https://github.com/tensorflow/tensorflow/issues/62524
2,020,482,769
I_kwDOArmXAs54bh7R
62,524
keras
{ "login": "aoki9999", "id": 87139777, "node_id": "MDQ6VXNlcjg3MTM5Nzc3", "avatar_url": "https://avatars.githubusercontent.com/u/87139777?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aoki9999", "html_url": "https://github.com/aoki9999", "followers_url": "https://api.github.com/users/aoki9999/followers", "following_url": "https://api.github.com/users/aoki9999/following{/other_user}", "gists_url": "https://api.github.com/users/aoki9999/gists{/gist_id}", "starred_url": "https://api.github.com/users/aoki9999/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aoki9999/subscriptions", "organizations_url": "https://api.github.com/users/aoki9999/orgs", "repos_url": "https://api.github.com/users/aoki9999/repos", "events_url": "https://api.github.com/users/aoki9999/events{/privacy}", "received_events_url": "https://api.github.com/users/aoki9999/received_events", "type": "User", "site_admin": false }
[ { "id": 1593512946, "node_id": "MDU6TGFiZWwxNTkzNTEyOTQ2", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/invalid", "name": "invalid", "color": "db6f57", "default": true, "description": "Hacktoberfest spam PR" } ]
closed
true
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @aoki9999 ,\r\nWould you like to report please fill up the template completely. Thanks." ]
2023-12-01T09:36:21
2023-12-02T01:57:24
2023-12-02T01:57:18
NONE
spam
null
null
null
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62524/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62524/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62523
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62523/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62523/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62523/events
https://github.com/tensorflow/tensorflow/issues/62523
2,020,458,893
I_kwDOArmXAs54bcGN
62,523
[BUG] race condition in local rendezvous
{ "login": "kevint324", "id": 8800468, "node_id": "MDQ6VXNlcjg4MDA0Njg=", "avatar_url": "https://avatars.githubusercontent.com/u/8800468?v=4", "gravatar_id": "", "url": "https://api.github.com/users/kevint324", "html_url": "https://github.com/kevint324", "followers_url": "https://api.github.com/users/kevint324/followers", "following_url": "https://api.github.com/users/kevint324/following{/other_user}", "gists_url": "https://api.github.com/users/kevint324/gists{/gist_id}", "starred_url": "https://api.github.com/users/kevint324/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/kevint324/subscriptions", "organizations_url": "https://api.github.com/users/kevint324/orgs", "repos_url": "https://api.github.com/users/kevint324/repos", "events_url": "https://api.github.com/users/kevint324/events{/privacy}", "received_events_url": "https://api.github.com/users/kevint324/received_events", "type": "User", "site_admin": false }
[ { "id": 390482148, "node_id": "MDU6TGFiZWwzOTA0ODIxNDg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/awaiting%20review", "name": "awaiting review", "color": "bc3869", "default": false, "description": "Pull request awaiting review" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1478826728, "node_id": "MDU6TGFiZWwxNDc4ODI2NzI4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:core", "name": "comp:core", "color": "024391", "default": false, "description": "issues related to core part of tensorflow" }, { "id": 5206407904, "node_id": "LA_kwDOArmXAs8AAAABNlN64A", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.12", "name": "TF 2.12", "color": "c5def5", "default": false, "description": "For issues related to Tensorflow 2.12" } ]
open
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "@kevint324 This race condition can be avoided by using a mutex or semaphore to protect the shared Rendezvous key. Proper synchronization technique should be used. Thank you!", "@sushreebarsa \r\nIt's not easy to do it with mutex during runtime. You'll have to create-then-grab a mutex for each parsed key at SendOp::Compute and release corresponding mutex when RecvOp::ComputeAsync is done.\r\n\r\nAlso it doens't smell right that SEND/RECV should clean the mess that eager runtime created.\r\n\r\nEager context should create tensors with unique `tensor_name` so that SEND/RECV should not worry about parsed key confliction but it will affect lots of things.\r\n\r\n~~Another \"fix\" is to create unique parsed key at the ctor of SendOp and RecvOp so that no send/recv pairs has the same parsed key globally.~~\r\n\r\n```\r\n--- a/tensorflow/core/kernels/sendrecv_ops.cc\r\n+++ b/tensorflow/core/kernels/sendrecv_ops.cc\r\n@@ -57,6 +57,26 @@ static FrameAndIter GetFrameAndIter(OpKernelContext* ctx,\r\n }\r\n }\r\n \r\n+static absl::flat_hash_map<std::string, int> key_prefix_map;\r\n+static absl::Mutex key_prefix_map_mutex;\r\n+\r\n+static std::string UniqueKeyPrefix(const std::string& key_prefix) {\r\n+ absl::MutexLock lock(&key_prefix_map_mutex);\r\n+\r\n+ auto it = key_prefix_map.find(key_prefix);\r\n+ if (it != key_prefix_map.end()) {\r\n+ it->second++;\r\n+ // For each SEND/RECV pair we need to use same key prefix.\r\n+ if (it->second == 1) {\r\n+ return key_prefix;\r\n+ }\r\n+ return absl::StrCat(key_prefix, \"_\", it->second / 2);\r\n+ } else {\r\n+ key_prefix_map[key_prefix] = 0;\r\n+ return key_prefix;\r\n+ }\r\n+}\r\n+\r\n SendOp::SendOp(OpKernelConstruction* ctx) : OpKernel(ctx) {\r\n string send_device;\r\n OP_REQUIRES_OK(ctx, ctx->GetAttr(\"send_device\", &send_device));\r\n@@ -70,6 +90,7 @@ SendOp::SendOp(OpKernelConstruction* ctx) : OpKernel(ctx) {\r\n OP_REQUIRES_OK(ctx, ctx->GetAttr(\"tensor_name\", &tensor_name));\r\n key_prefix_ = GetRendezvousKeyPrefix(send_device, recv_device,\r\n send_device_incarnation, tensor_name);\r\n+ key_prefix_ = UniqueKeyPrefix(key_prefix_);\r\n // The vast majority of Send nodes are outside any loop context, so\r\n // proactively cache the rendezvous key for the top-level.\r\n GetRendezvousKey(key_prefix_, {0, 0}, &parsed_key_.buf_);\r\n@@ -151,6 +172,7 @@ RecvOp::RecvOp(OpKernelConstruction* ctx) : AsyncOpKernel(ctx) {\r\n OP_REQUIRES_OK(ctx, ctx->GetAttr(\"tensor_name\", &tensor_name));\r\n key_prefix_ = GetRendezvousKeyPrefix(send_device, recv_device,\r\n send_device_incarnation, tensor_name);\r\n+ key_prefix_ = UniqueKeyPrefix(key_prefix_);\r\n // The vast majority of Recv nodes are outside any loop context, so\r\n // proactively cache the rendezvous key for the top-level.\r\n GetRendezvousKey(key_prefix_, {0, 0}, &parsed_key_.buf_);\r\n@@ -185,6 +207,8 @@ Rendezvous::DoneCallback make_recv_callback(OpKernelContext* ctx,\r\n // The runtime checks whether the tensor received here is\r\n // the same type.\r\n if (!is_dead) {\r\n+ VLOG(1) << \"make_recv_callback tensor ptr \" << &val\r\n+ << \" shape \" << val.shape().DebugString();\r\n ctx->set_output(0, val);\r\n }\r\n }\r\n```\r\n\r\n~~Still it makes send recv ops looks a little bit weird since it's clean someone else's mess but it has the minimal damage.If this looks good to you I'll file a PR.~~\r\n\r\nSorry, above patch won't work. Send/Recv nodes are instantiated out of order and it won't be easy to match correct send-recv pair with a global map. 
I think the right path is still try to fix the origin.\r\n\r\nThanks", "another possible fix is to manipulate the generation of `tensor name`\r\n\r\n```diff\r\ndiff --git a/tensorflow/core/common_runtime/eager/execute.cc b/tensorflow/core/common_runtime/eager/execute.cc\r\nindex 28aa93a7bbd..328468ada31 100644\r\n--- a/tensorflow/core/common_runtime/eager/execute.cc\r\n+++ b/tensorflow/core/common_runtime/eager/execute.cc\r\n@@ -890,10 +890,27 @@ Status WrapInCallOp(EagerOperation* op, EagerOperation** wrapped_op) {\r\n NodeDef* ndef = fdef.add_node_def();\r\n ndef->set_op(op->Name());\r\n ndef->set_name(op->Name()); // This could be anything.\r\n+\r\n+ static std::atomic<int64_t> unique_input_id = 0;\r\n+ auto mutable_signature = fdef.mutable_signature();\r\n+ for (size_t i = 0; i < mutable_signature->input_arg_size(); i++) {\r\n+ const std::string& original_name =\r\n+ mutable_signature->mutable_input_arg(i)->name();\r\n+ int64_t uid = unique_input_id.fetch_add(1);\r\n+ mutable_signature->mutable_input_arg(i)->set_name(\r\n+ absl::StrCat(original_name, \"_\", uid));\r\n+ }\r\n+\r\n const auto& signature = fdef.signature();\r\n for (size_t i = 0; i < signature.input_arg_size(); i++) {\r\n ndef->add_input(absl::StrCat(fdef.signature().input_arg(i).name(), \":0\"));\r\n }\r\n+\r\n```\r\n\r\n", "@SuryanarayanaY @sushreebarsa \r\nI filled a PR to avoid the key conflict.\r\n\r\nhttps://github.com/tensorflow/tensorflow/pull/62568\r\n\r\n Thanks", "@SuryanarayanaY @rohan100jain @gbaned \r\n\r\nhttps://colab.research.google.com/drive/1CUiyuG2Aob-Fj8xllrmbx2w3cNBXFYqU?usp=sharing\r\n\r\nHere is a minimum case derived from the [DLRM from NV DLE](https://github.com/NVIDIA/DeepLearningExamples/tree/3f82b7f982a1d9a9c5392bd110a5b93dfa7e80eb/Tensorflow2/Recommendation/DLRM) \r\n\r\nClick run all and you can reproduce the error in a few seconds.\r\n\r\n", "We are currently using TF 2.13.0 and I can confirm a segfault due to a race condition in local rendezvous.\r\nWhen inspecting the `core` file with gdb, this is the obtained stack trace:\r\n```\r\n(gdb) bt full\r\n#0 0x00007f9441158cf0 in tensorflow::LocalRendezvous::~LocalRendezvous() () from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#1 0x00007f94414dff89 in tensorflow::RefCountedIntraProcessRendezvous::~RefCountedIntraProcessRendezvous() () from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#2 0x00007f94414e00d2 in tensorflow::RefCountedIntraProcessRendezvous::~RefCountedIntraProcessRendezvous() () from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#3 0x00007f9441467808 in std::_Function_handler<void (absl::lts_20230125::Status const&), tensorflow::ProcessFunctionLibraryRuntime::ApplyCleanUpToDoneCallback(std::vector<std::unique_ptr<tensorflow::ProcessFunctionLibraryRuntime::CleanUpItem, std::default_delete<tensorflow::ProcessFunctionLibraryRuntime::CleanUpItem> >, std::allocator<std::unique_ptr<tensorflow::ProcessFunctionLibraryRuntime::CleanUpItem, std::default_delete<tensorflow::ProcessFunctionLibraryRuntime::CleanUpItem> > > >*, std::function<void (absl::lts_20230125::Status const&)>, tensorflow::FunctionLibraryRuntime::Options const&, tsl::core::RefCountPtr<tensorflow::Rendezvous>) const::{lambda(absl::lts_20230125::Status const&)#1}>::_M_invoke(std::_Any_data 
const&, absl::lts_20230125::Status const&) ()\r\n from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#4 0x00007f94414600a3 in std::_Function_handler<void (absl::lts_20230125::Status const&), tensorflow::ProcessFunctionLibraryRuntime::Run(tensorflow::FunctionLibraryRuntime::Options const&, unsigned long, absl::lts_20230125::Span<tensorflow::Tensor const>, std::vector<tensorflow::Tensor, std::allocator<tensorflow::Tensor> >*, std::function<void (absl::lts_20230125::Status const&)>) const::{lambda(absl::lts_20230125::Status const&)#1}>::_M_invoke(std::_Any_data const&, absl::lts_20230125::Status const&) ()\r\n from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#5 0x00007f944145f7bc in tensorflow::ReffedStatusCallback::~ReffedStatusCallback() () from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#6 0x00007f9441467f34 in tensorflow::ProcessFunctionLibraryRuntime::RunMultiDeviceAsync(tensorflow::FunctionLibraryRuntime::Options const&, unsigned long, std::vector<std::variant<tensorflow::Tensor, tensorflow::TensorShape>, std::allocator<std::variant<tensorflow::Tensor, tensorflow::TensorShape> > >*, std::vector<std::unique_ptr<tensorflow::ProcessFunctionLibraryRuntime::CleanUpItem, std::default_delete<tensorflow::ProcessFunctionLibraryRuntime::CleanUpItem> >, std::allocator<std::unique_ptr<tensorflow::ProcessFunctionLibraryRuntime::CleanUpItem, std::default_delete<tensorflow::ProcessFunctionLibraryRuntime::CleanUpItem> > > >*, std::function<void (absl::lts_20230125::Status const&)>, std::function<absl::lts_20230125::Status (tensorflow::ProcessFunctionLibraryRuntime::ComponentFunctionData const&, tensorflow::ProcessFunctionLibraryRuntime::InternalArgs*)>) const::{lambda(absl::lts_20230125::Status const&)#2}::operator()(absl::lts_20230125::Status const&) const () from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#7 0x00007f9441461570 in std::_Function_handler<void (absl::lts_20230125::Status const&), tensorflow::(anonymous namespace)::TensorsToFunctionRetsDoneCallback(std::vector<std::variant<tensorflow::Tensor, tensorflow::TensorShape>, std::allocator<std::variant<tensorflow::Tensor, tensorflow::TensorShape> > >*, std::vector<tensorflow::Tensor, std::allocator<tensorflow::Tensor> >*, std::function<void (absl::lts_20230125::Status const&)>)::{lambda(absl::lts_20230125::Status const&)#1}>::_M_invoke(std::_Any_data const&, absl::lts_20230125::Status const&) () from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#8 0x00007f944144db50 in std::_Function_handler<void (absl::lts_20230125::Status const&), tensorflow::FunctionLibraryRuntimeImpl::Run(tensorflow::FunctionLibraryRuntime::Options const&, unsigned long, absl::lts_20230125::Span<tensorflow::Tensor const>, std::vector<tensorflow::Tensor, std::allocator<tensorflow::Tensor> >*, std::function<void (absl::lts_20230125::Status const&)>)::{lambda(absl::lts_20230125::Status const&)#3}>::_M_invoke(std::_Any_data const&, absl::lts_20230125::Status const&) ()\r\n from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info 
available.\r\n#9 0x00007f9441512455 in std::_Function_handler<void (), tensorflow::(anonymous namespace)::ExecutorState<tensorflow::PropagatorState>::Finish()::{lambda()#4}>::_M_invoke(std::_Any_data const&) ()\r\n from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#10 0x00007f9441afe121 in std::_Function_handler<void (), std::_Bind<tensorflow::data::RunnerWithMaxParallelism(std::function<void (std::function<void ()>)>, int)::{lambda(std::function<void (std::function<void ()>)> const&, std::function<void ()>)#1}::operator()(std::function<void (std::function<void ()>)> const&, std::function<void ()>) const::{lambda(std::function<void ()> const&)#1} (std::function<void ()>)> >::_M_invoke(std::_Any_data const&) ()\r\n from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#11 0x00007f94406eb645 in Eigen::ThreadPoolTempl<tsl::thread::EigenEnvironment>::WorkerLoop(int) () from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#12 0x00007f94406ed2b8 in void absl::lts_20230125::internal_any_invocable::RemoteInvoker<false, void, tsl::thread::EigenEnvironment::CreateThread(std::function<void ()>)::{lambda()#1}&>(absl::lts_20230125::internal_any_invocable::TypeErasedState*) ()\r\n from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#13 0x00007f9441344fcf in tsl::(anonymous namespace)::PThread::ThreadFn(void*) () from /usr/local/lib/python3.10/dist-packages/tensorflow/python/platform/../../libtensorflow_framework.so.2\r\nNo symbol table info available.\r\n#14 0x00007f94ec8d4ac3 in start_thread (arg=<optimized out>) at ./nptl/pthread_create.c:442\r\n ret = <optimized out>\r\n pd = <optimized out>\r\n out = <optimized out>\r\n unwind_buf = {cancel_jmp_buf = {{jmp_buf = {140732530118736, 6006152095181854869, 140224407516736, 0, 140277600569296, 140732530119088, -6031673960749836139, -6020192743340746603}, mask_was_saved = 0}}, priv = {pad = {0x0, 0x0, 0x0, 0x0}, data = {prev = 0x0, \r\n cleanup = 0x0, canceltype = 0}}}\r\n not_first_call = <optimized out>\r\n#15 0x00007f94ec965bf4 in epoll_wait (epfd=-699346607, events=0x7f88940327e0, maxevents=-938782704, timeout=-938782712) at ../sysdeps/unix/sysv/linux/epoll_wait.c:30\r\n __arg4 = -938782712\r\n __arg2 = 140224575514592\r\n _a3 = -938782704\r\n _a1 = -699346607\r\n resultvar = <optimized out>\r\n __arg3 = -938782704\r\n __arg1 = -699346607\r\n _a4 = 0\r\n _a2 = 140224575514592\r\n sc_ret = <optimized out>\r\n sc_ret = <optimized out>\r\n#16 0x0000000000000000 in ?? ()\r\nNo symbol table info available.\r\n```\r\n" ]
2023-12-01T09:22:59
2024-01-03T13:36:42
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### TensorFlow version 2.10, 2.12, doesn't really matter ### Custom code Yes ### OS platform and distribution ubuntu 20.04 ### Python version 3.8, 3.9 ### Bazel version 5.3.0 ### GCC/compiler version 9.4 ### Current behavior? The story is a bit long and it took us months to debug this issue. I'll try to keep it short. ### Background and what the problem is We run the [DLRM from NV DLE](https://github.com/NVIDIA/DeepLearningExamples/tree/3f82b7f982a1d9a9c5392bd110a5b93dfa7e80eb/Tensorflow2/Recommendation/DLRM) in our daily CI test. Very rarely, like once every few weeks, the daily test reports an error like this: ` W tensorflow/core/framework/op_kernel.cc:1874] OP_REQUIRES failed at strided_slice_op.cc:112 : INVALID_ARGUMENT: Expected begin, end, and strides to be 1D equal size tensors, but got shapes [65536], [1], and [1] instead. ` The [strided slice op validator](https://github.com/tensorflow/tensorflow/blob/3a029b19c9c156cd68cab671b5ce95bde839f15e/tensorflow/core/util/strided_slice_op.cc#L214) isn't happy about the input parameters. The problem is like a ghost. It appears once every 1 or 2 weeks, which makes it very hard to debug. But one thing is for sure: it's not a random DRAM bit flip caused by cosmic rays, because the error symptom is stable: one of the (begin, end, stride) tensors has its shape incorrectly set to [65536] instead of [1]. ### Evidence I'll skip the lengthy debug process and jump to the last step. ![image](https://github.com/tensorflow/tensorflow/assets/8800468/d73ddf6d-7cc9-4a10-90f8-1e4d3885c91b) ![image](https://github.com/tensorflow/tensorflow/assets/8800468/0b99c252-59b0-490d-8321-924adb70be72) We observed a malfunctioning SEND/RECV pair. The item tensor in the SEND op has a shape of [1], but during the local rendezvous the received tensor got a shape of [65536]. This tensor later flows to the strided slice op and triggers the grumpy validator. ![image](https://github.com/tensorflow/tensorflow/assets/8800468/7626ef2f-0de4-4eb0-8c25-3cfd9618b359) We grep the key hash (red highlighted 1632...9087) in the debug log. There are two threads: 66787 and 63997. Most of the time the rendezvous runs in a SEND-RECV pattern within the same thread. But in the green rectangle you can see the two threads interleave and a SEND-SEND-RECV-RECV pattern is observed. Soon after this the bomb exploded. ### Analysis It seems the root cause is a bug in the rendezvous mechanism. Two unrelated operations generated the same communication rendezvous key. Assuming there are 2 threads, when the CPU load is not heavy, in most cases the scheduling order is - THREAD A: SEND(KEY, VALUE_A) - THREAD A: RECV(KEY) - THREAD B: SEND(KEY, VALUE_B) - THREAD B: RECV(KEY) In this way, everyone will be fine even if the keys are the same. It is equivalent to time-division multiplexing of the same KEY. When the CPU load becomes heavy (which is the case in our daily test scenario), thread scheduling becomes unpredictable. It is possible that such a pattern may occur - THREAD A: SEND(KEY, VALUE_A) - THREAD B: SEND(KEY, VALUE_B) - THREAD A: RECV(KEY) - THREAD B: RECV(KEY) This will cause THREAD A to incorrectly receive the data sent by THREAD B. ### Related python code 1. Thread A is running the normal [training loop.](https://github.com/NVIDIA/DeepLearningExamples/blob/3f82b7f982a1d9a9c5392bd110a5b93dfa7e80eb/Tensorflow2/Recommendation/DLRM/main.py#L281) 2. Thread B is created for the RawBinaryDataset class. The pre-processing creates an [asynchronous thread pool.](https://github.com/NVIDIA/DeepLearningExamples/blob/3f82b7f982a1d9a9c5392bd110a5b93dfa7e80eb/Tensorflow2/Recommendation/DLRM/split_binary_dataset.py#L138) 3. Thread A is running [x = x[self.begin_idx:self.end_idx]](https://github.com/NVIDIA/DeepLearningExamples/blob/3f82b7f982a1d9a9c5392bd110a5b93dfa7e80eb/Tensorflow2/Recommendation/DLRM/model.py#L58), which is translated into a StridedSlice operation by the eager runtime. begin_idx, end_idx and the implicit stride are all constant tensors on the CPU, and StridedSlice is a device operator, so these three tensors need to be sent to the device side. 4. Thread B is running [tensor = tf.expand_dims(tensor, axis=1)](https://github.com/NVIDIA/DeepLearningExamples/blob/3f82b7f982a1d9a9c5392bd110a5b93dfa7e80eb/Tensorflow2/Recommendation/DLRM/split_binary_dataset.py#L204). Similar to thread A, the expand_dims operator runs on the device side while the input is on the CPU, and the tensor needs to be sent to the device. The shape of this tensor is [65536]. 5. Due to the defect in Rendezvous::CreateKey, the keys generated by these two operators in the eager runtime are exactly the same. 6. Multi-threading + hash key collision + SEND-SEND-RECV-RECV, all three together, BOOOM. ### FIX ![image](https://github.com/tensorflow/tensorflow/assets/8800468/f50ed76a-9cef-4bf7-87b2-e4a298236dd9) The key string is /job:localhost/replica:0/task:0/device:CPU:0;ea74dbce35f0ab7e;/job:localhost/replica:0/task:0/device:MLU:0;edge_2_input;0:0 The rendezvous key consists of 5 parts. - src_device: /job:localhost/replica:0/task:0/device:CPU:0; - src_incarnation: ea74dbce35f0ab7e; - dst_device: /job:localhost/replica:0/task:0/device:MLU:0; - name: edge_2_input; - frame_iter: 0:0 In this case, 4 out of 5 (src_device, src_incarnation, dst_device, frame_iter) are naturally indistinguishable. Unless we add a new field to the key, the only field we can play with is `name`. The creation of `edge_2_input` involves another few tons of code. ![image](https://github.com/tensorflow/tensorflow/assets/8800468/b2d85cd7-1e47-492b-be50-1e3d63f13d55) A quick fix for my current problem is to simply append the dst node name to the `tensor_name_attr` during graph partition. The names then become `edge_2_input_strided_slice` and `edge_2_input_expand_dims`, and my problem is solved. ```diff diff --git a/tensorflow/core/graph/graph_partition.cc b/tensorflow/core/graph/graph_partition.cc index a4f09383c63..57a4e919526 100644 --- a/tensorflow/core/graph/graph_partition.cc +++ b/tensorflow/core/graph/graph_partition.cc @@ -1147,7 +1147,8 @@ Status Partition(const PartitionOptions& opts, Graph* g, tensor_name_attr = opts.get_tensor_name_attr(edge); } else { tensor_name_attr = - strings::StrCat("edge_", edge->id(), "_", edge->src()->name()); + strings::StrCat("edge_", edge->id(), "_", edge->src()->name(), + "_", edge->dst()->name()); } ``` But a more appropriate and generic fix might be to have a unique src node name. The source names would then be like input_xxxxxx and input_yyyyyy. But this sounds like a fundamental change and I'm not sure if it would break too many things. I'm also not sure about the right place to make the change, like manipulating the input name a little bit [here](https://github.com/tensorflow/tensorflow/blob/master/tensorflow/core/common_runtime/eager/execute.cc#L988). I'd like to hear your opinion on this problem, and I'd like to file a PR if you could point me to the right place to apply the fix. Cheers Hengwen ### Standalone code to reproduce the issue A minimal case derived from the [DLRM from NV DLE](https://github.com/NVIDIA/DeepLearningExamples/tree/3f82b7f982a1d9a9c5392bd110a5b93dfa7e80eb/Tensorflow2/Recommendation/DLRM) Click run all and you can reproduce the error in a few seconds. https://colab.research.google.com/drive/1CUiyuG2Aob-Fj8xllrmbx2w3cNBXFYqU?usp=sharing
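Editor's note: the SEND-SEND-RECV-RECV hazard described above can be illustrated with a deliberately simplified toy model (a plain dict standing in for TensorFlow's per-key rendezvous queues; this is not TensorFlow code). Once two producers publish different values under the same key before either consumer runs, at least one consumer observes a value it did not send.

```python
import threading

table = {}                        # toy rendezvous table, keyed by the colliding key
both_sent = threading.Barrier(2)  # force the SEND-SEND-RECV-RECV interleaving
received = {}

def worker(name, value, key="edge_2_input"):
    table[key] = value            # SEND under the shared (colliding) key
    both_sent.wait()              # both SENDs finish before any RECV starts
    received[name] = table[key]   # RECV: last write wins, so one thread gets the other's value

t_a = threading.Thread(target=worker, args=("thread_a", "shape_[1]_tensor"))
t_b = threading.Thread(target=worker, args=("thread_b", "shape_[65536]_tensor"))
t_a.start(); t_b.start(); t_a.join(); t_b.join()
print(received)  # both entries hold whichever value was written last
```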
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62523/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62523/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62522
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62522/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62522/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62522/events
https://github.com/tensorflow/tensorflow/issues/62522
2,020,249,774
I_kwDOArmXAs54apCu
62,522
A hang issue in tf.raw_ops.MapUnstage
{ "login": "Zoeeeeey", "id": 99133420, "node_id": "U_kgDOBein7A", "avatar_url": "https://avatars.githubusercontent.com/u/99133420?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Zoeeeeey", "html_url": "https://github.com/Zoeeeeey", "followers_url": "https://api.github.com/users/Zoeeeeey/followers", "following_url": "https://api.github.com/users/Zoeeeeey/following{/other_user}", "gists_url": "https://api.github.com/users/Zoeeeeey/gists{/gist_id}", "starred_url": "https://api.github.com/users/Zoeeeeey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Zoeeeeey/subscriptions", "organizations_url": "https://api.github.com/users/Zoeeeeey/orgs", "repos_url": "https://api.github.com/users/Zoeeeeey/repos", "events_url": "https://api.github.com/users/Zoeeeeey/events{/privacy}", "received_events_url": "https://api.github.com/users/Zoeeeeey/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Lack of parameter checks.\r\n```\r\nimport tensorflow as tf\r\nimport numpy as np \r\narg_0=tf.constant(value=np.random.randint(0,100,size=(2, 2)), shape=(2, 2), dtype=tf.int64)\r\narg_1=tf.constant(value=np.random.randint(0,100,size=(2, 2)), shape=(2, 2), dtype=tf.int32)\r\narg_2=[tf.uint8]\r\narg_3=5\r\narg_4=8\r\narg_5='aaaa'\r\narg_6='aaaa'\r\narg_7='bbb'\r\ntf.raw_ops.MapUnstage(key=arg_0, indices=arg_1, dtypes=arg_2, capacity=arg_3, memory_limit=arg_4, container=arg_5, shared_name=arg_6, name=arg_7)\r\n```\r\nRelevant log output\r\n```\r\n2023-12-04 09:43:19.814479: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations.\r\nTo enable the following instructions: SSE SSE2 SSE3 SSE4.1 SSE4.2 AVX AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\r\n2023-12-04 09:43:19.830681: W tensorflow/core/framework/op_kernel.cc:1828] OP_REQUIRES failed at map_stage_op.cc:590 : INVALID_ARGUMENT: Indices are not strictly ordered\r\nTraceback (most recent call last):\r\n File \"E:/zzy/ErrorFuzz/output/2_13/GotCrashes/tf.raw_ops.MapUnstage-1.py\", line 11, in <module>\r\n tf.raw_ops.MapUnstage(key=arg_0, indices=arg_1, dtypes=arg_2, capacity=arg_3, memory_limit=arg_4, container=arg_5, shared_name=arg_6, name=arg_7)\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\fuzztf13py38\\lib\\site-packages\\tensorflow\\python\\util\\tf_export.py\", line 413, in wrapper\r\n return f(**kwargs)\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\fuzztf13py38\\lib\\site-packages\\tensorflow\\python\\ops\\gen_data_flow_ops.py\", line 1980, in map_unstage\r\n _ops.raise_from_not_ok_status(e, name)\r\n File \"C:\\ProgramData\\Anaconda3\\envs\\fuzztf13py38\\lib\\site-packages\\tensorflow\\python\\framework\\ops.py\", line 6656, in raise_from_not_ok_status\r\n raise core._status_to_exception(e) from None # pylint: disable=protected-access\r\ntensorflow.python.framework.errors_impl.InvalidArgumentError: {{function_node __wrapped__MapUnstage_dtypes_1_device_/job:localhost/replica:0/task:0/device:CPU:0}} Indices are not strictly ordered [Op:MapUnstage] name: bbb\r\n\r\n```", "Hi @Zoeeeeey ,\r\n\r\nI have replicated the reported behaviour with colab using TF v2.14, 2.15 and nightly. Please find the [gist](https://colab.research.google.com/gist/Venkat6871/e69e18612569ba42fb0fd825663c7c03/62522_2-14-v.ipynb), [gist1](https://colab.research.google.com/gist/Venkat6871/dc9425eab892c75a54b56eab0eada065/62522_2-15-v.ipynb), [gist2 ](https://colab.research.google.com/gist/Venkat6871/d916226e20b37eeb57de6fb6c2706cc5/62522_tf-nightly-v.ipynb)here for reference.\r\n\r\nThank you!", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62522\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62522\">No</a>\n" ]
2023-12-01T07:07:52
2024-03-19T01:49:22
2024-03-19T01:49:19
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version tf 2.13 ### Custom code Yes ### OS platform and distribution Windows ### Mobile device _No response_ ### Python version 3.8.11 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? When `tf.raw_ops.MapStage` and `tf.raw_ops.MapUnstage` are called simultaneously, the program hangs. ### Standalone code to reproduce the issue ```shell import tensorflow as tf tf.compat.v1.disable_eager_execution() key = tf.constant(1, dtype=tf.int64) data = tf.random.uniform(shape=[3, 2], dtype=tf.float32) stage_op = tf.raw_ops.MapStage(key=key, indices=[0], values=[data], dtypes=[data.dtype]) unstage_op = tf.raw_ops.MapUnstage(key=key, indices=[0], dtypes=[data.dtype]) with tf.compat.v1.Session() as sess: sess.run(stage_op) result = sess.run(unstage_op) print(result) ``` ### Relevant log output ```shell 2023-12-01 15:06:40.211973: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: SSE SSE2 SSE3 SSE4.1 SSE4.2 AVX AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-12-01 15:06:40.213921: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:375] MLIR V1 optimization pass is not enabled Process finished with exit code -1 ```
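Editor's note (a hedged sketch, not from the original report): one thing worth ruling out here is the empty `shared_name`. The staging ops match each other via `shared_name`, so with it left empty the MapStage and MapUnstage nodes may end up resolving to two different staging maps, in which case MapUnstage blocks forever waiting for a key that never arrives. A variant that pins both ops to the same (hypothetical) name `"repro_map"`:

```python
import tensorflow as tf

tf.compat.v1.disable_eager_execution()

key = tf.constant(1, dtype=tf.int64)
data = tf.random.uniform(shape=[3, 2], dtype=tf.float32)

# Both ops reference the same staging map via an explicit shared_name.
stage_op = tf.raw_ops.MapStage(key=key, indices=[0], values=[data],
                               dtypes=[data.dtype], shared_name="repro_map")
unstage_op = tf.raw_ops.MapUnstage(key=key, indices=[0],
                                   dtypes=[data.dtype], shared_name="repro_map")

with tf.compat.v1.Session() as sess:
    sess.run(stage_op)
    print(sess.run(unstage_op))  # expected: the staged [3, 2] float32 tensor
```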
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62522/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62522/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62521
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62521/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62521/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62521/events
https://github.com/tensorflow/tensorflow/issues/62521
2,020,236,457
I_kwDOArmXAs54alyp
62,521
Inconsistency in XLA Compiled Model with Extra Transpose and Argmax Output Nodes
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs I was able to replicate the issue on both [gpu](https://colab.research.google.com/gist/sushreebarsa/b9173e2ec49832a9e87f78e54da9780d/62521.ipynb) and [cpu](https://colab.research.google.com/gist/sushreebarsa/dc437db57486f1fb8afe56b7f923869e/62521.ipynb).\r\nPlease find the attached gists here. Thank you!", "When you try to compare with additional nodes output to assert_allclose, it gives mismatch and it is working as expected when you remove those nodes as you have mentioned. \r\n\r\n```python\r\nimport tensorflow as tf\r\nimport numpy as np\r\n\r\nclass Model1(tf.keras.Model):\r\n @tf.function(jit_compile=True)\r\n def __call__(self, inp):\r\n # Forward pass logic using TensorFlow operations\r\n # inp: [37, 1, 1, 15, 36] : int8\r\n mul = tf.multiply(inp, inp)\r\n abs = tf.abs(mul)\r\n reduce_max = tf.math.reduce_max(abs, axis=2)\r\n return reduce_max\r\n\r\nclass Model2(tf.keras.Model):\r\n @tf.function(jit_compile=True)\r\n def __call__(self, inp):\r\n # Forward pass logic using TensorFlow operations\r\n # inp: [37, 1, 1, 15, 36] : int8\r\n trans1 = tf.transpose(inp, perm=[1, 0, 2, 3, 4])\r\n trans_mul = tf.multiply(trans1, trans1)\r\n mul = tf.transpose(trans_mul, perm=[1, 0, 2, 3, 4])\r\n abs = tf.abs(mul)\r\n reduce_max = tf.math.reduce_max(abs, axis=2)\r\n argmax = tf.math.argmax(mul, axis=4)\r\n return reduce_max # removing eitheor trans1 or argmax will not trigger error!\r\n\r\ninputs = [\r\ntf.cast(tf.random.uniform(shape=[37, 1, 1, 15, 36], minval=-128, maxval=128, dtype=tf.int32), tf.int8),\r\n]\r\nmodel1 = Model1()\r\nmodel2 = Model2()\r\ndevice = \"cpu\" # \"gpu\" also trigger the error\r\nwith tf.device(device):\r\n tf.config.run_functions_eagerly(True)\r\n out1 = model1(*inputs)\r\n out2 = model2(*inputs)\r\n print(f'=========eager_output(version:{tf.__version__})================')\r\n try :\r\n for i in range(min(len(out1),len(out2))):\r\n np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th')\r\n print(\"XLA_eager does not trigger assertion\")\r\n except AssertionError as e:\r\n print(\"XLA_eager triggers assertion\")\r\n print(e)\r\n tf.config.run_functions_eagerly(False)\r\n out1 = model1(*inputs)\r\n out2 = model2(*inputs)\r\n print(f'=========compiled_output(version:{tf.__version__})================')\r\n try :\r\n for i in range(min(len(out1),len(out2))):\r\n np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th')\r\n print(\"XLA_complie does not trigger assertion\")\r\n except AssertionError as e:\r\n print(\"XLA_complie triggers assertion\")\r\n print(e)\r\n```", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62521\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62521\">No</a>\n" ]
2023-12-01T06:57:13
2024-01-11T01:49:53
2024-01-11T01:49:45
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? In TensorFlow 2.15.0, I've found that an XLA-compiled model that returns two extra nodes produces outputs inconsistent with a simpler model. This inconsistency is observed on both **CPU and GPU devices**. Interestingly, removing one of the extra output nodes from the complex model resolves the discrepancy. ### Standalone code to reproduce the issue ```python import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [37, 1, 1, 15, 36] : int8 mul = tf.multiply(inp, inp) abs = tf.abs(mul) reduce_max = tf.math.reduce_max(abs, axis=2) return reduce_max, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [37, 1, 1, 15, 36] : int8 trans1 = tf.transpose(inp, perm=[1, 0, 2, 3, 4]) trans_mul = tf.multiply(trans1, trans1) mul = tf.transpose(trans_mul, perm=[1, 0, 2, 3, 4]) abs = tf.abs(mul) reduce_max = tf.math.reduce_max(abs, axis=2) argmax = tf.math.argmax(mul, axis=4) return reduce_max, trans1, argmax, # removing either trans1 or argmax will not trigger the error! inputs = [ tf.cast(tf.random.uniform(shape=[37, 1, 1, 15, 36], minval=-128, maxval=128, dtype=tf.int32), tf.int8), ] model1 = Model1() model2 = Model2() device = "cpu" # "gpu" also triggers the error with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 2023-12-01 06:50:46.483140: I external/local_xla/xla/service/service.cc:168] XLA service 0x55c0d1fa47c0 initialized for platform Host (this does not guarantee that XLA will be used). Devices: =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th Mismatched elements: 8812 / 19980 (44.1%) Max absolute difference: 126 Max relative difference: 2. x: array([[[[ 73, 105, 121, ..., -112, -15, -87], [ 121, -79, 89, ..., 100, -111, 100], [ 89, 68, 9, ..., -47, -31, -87],... y: array([[[[ 73, 105, 121, ..., 112, 15, 87], [121, 79, 89, ..., 100, 111, 100], [ 89, 68, 9, ..., 47, 31, 87],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62521/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62521/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62520
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62520/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62520/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62520/events
https://github.com/tensorflow/tensorflow/pull/62520
2,019,819,922
PR_kwDOArmXAs5g17ST
62,520
Create 2.15.0.post1 with tensorrt removed from [and-cuda] install
{ "login": "angerson", "id": 32465472, "node_id": "MDQ6VXNlcjMyNDY1NDcy", "avatar_url": "https://avatars.githubusercontent.com/u/32465472?v=4", "gravatar_id": "", "url": "https://api.github.com/users/angerson", "html_url": "https://github.com/angerson", "followers_url": "https://api.github.com/users/angerson/followers", "following_url": "https://api.github.com/users/angerson/following{/other_user}", "gists_url": "https://api.github.com/users/angerson/gists{/gist_id}", "starred_url": "https://api.github.com/users/angerson/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/angerson/subscriptions", "organizations_url": "https://api.github.com/users/angerson/orgs", "repos_url": "https://api.github.com/users/angerson/repos", "events_url": "https://api.github.com/users/angerson/events{/privacy}", "received_events_url": "https://api.github.com/users/angerson/received_events", "type": "User", "site_admin": false }
[]
closed
false
null
[]
null
[]
2023-12-01T00:57:34
2023-12-01T02:20:38
2023-12-01T02:20:37
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62520", "html_url": "https://github.com/tensorflow/tensorflow/pull/62520", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62520.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62520.patch", "merged_at": "2023-12-01T02:20:37" }
Context: tensorflow[and-cuda]==2.15.0 cannot be installed due to this tensorrt dependency.
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62520/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62520/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62519
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62519/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62519/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62519/events
https://github.com/tensorflow/tensorflow/issues/62519
2,019,264,509
I_kwDOArmXAs54W4f9
62,519
Inconsistency in XLA Compiled TensorFlow Model with `tf.nn.softmax, tf.math.reduce_sum` with Extra Transposed Output
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nI have replicated the reported behaviour with `jit_compile=True`. With `jit_compile=False` the results are same. Attaching [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/9f8b260d81cb22802a436e44ec6129c0/62519.ipynb) for reference. Needs to check for this behaviour." ]
2023-11-30T18:23:14
2023-12-05T04:04:45
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In TensorFlow 2.15.0, a model combining `tf.nn.softmax`, `tf.math.reduce_sum`, and `tf.cast`, with an additional transposed output node, exhibits inconsistent results under XLA compilation compared to eager execution. This inconsistency does not arise when any one of these operations is omitted from the model. This error is seen both on **cpu and gpu.** ### Standalone code to reproduce the issue ```python import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1): softmax = tf.nn.softmax(inp1, axis=0) trans = tf.transpose(softmax, perm=[0, 2, 1]) reduce_sum = tf.math.reduce_sum(trans, axis=0) cast = tf.cast(reduce_sum, dtype=tf.int64) return reduce_sum, cast, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1): softmax = tf.nn.softmax(inp1, axis=0) trans = tf.transpose(softmax, perm=[0, 2, 1]) reduce_sum = tf.math.reduce_sum(trans, axis=0) cast = tf.cast(reduce_sum, dtype=tf.int64) return reduce_sum, cast, trans inputs = [ tf.random.uniform(shape=[5, 5, 5], dtype=tf.float32), ] model1 = Model1() model2 = Model2() device = "cpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 2023-11-30 18:20:25.532764: I external/local_xla/xla/service/service.cc:168] XLA service 0x556187224ad0 initialized for platform Host (this does not guarantee that XLA will be used). Devices: 2023-11-30 18:20:25.532782: I external/local_xla/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version 2023-11-30 18:20:25.535884: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable. WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1701368425.554320 2905470 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. 2023-11-30 18:20:25.555056: E external/local_xla/xla/stream_executor/stream_executor_internal.h:177] SetPriority unimplemented for this stream. 
=========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th Mismatched elements: 3 / 25 (12%) Max absolute difference: 1 Max relative difference: 0. x: array([[0, 1, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 1, 1],... y: array([[0, 0, 1, 1, 1], [1, 1, 1, 1, 1], [1, 1, 1, 0, 1],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62519/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62519/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62518
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62518/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62518/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62518/events
https://github.com/tensorflow/tensorflow/issues/62518
2,019,216,026
I_kwDOArmXAs54Wsqa
62,518
Output Discrepancy in TensorFlow XLA Compilation with Operand Order Swap in Addition with `tf.rev + tf.math.argmax`
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs I was able to replicate this issue on colab, please find the gist [here](https://colab.research.google.com/gist/sushreebarsa/6f76c1eaa0f1d566c7cfea43d2459df7/62518.ipynb). \r\nThank you!", "Hi, \r\n\r\nThis behavior is expected, since the change in order of operation results in the different code path for numerical computations. Thanks! ", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62518\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62518\">No</a>\n" ]
2023-11-30T17:53:59
2024-01-10T01:49:27
2024-01-10T01:49:22
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In TensorFlow 2.15.0, a inconsistency arises when **swapping the order** of operands in an addition operation followed by `tf.math.argmax`, under XLA compilation. This error is seen both on **cpu and gpu.** ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2): mul = tf.matmul(inp2, inp1) rev = tf.reverse(mul, axis=[1]) add = tf.add(rev, mul) argmax = tf.math.argmax(add, axis=1) return add, argmax, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2): mul = tf.matmul(inp2, inp1) rev = tf.reverse(mul, axis=[1]) add = tf.add(mul, rev) # add = tf.add(rev, mul) <- this will not trigger error argmax = tf.math.argmax(add, axis=1) return add, argmax, inputs = [ tf.random.uniform(shape=[1, 18], dtype=tf.float32), tf.random.uniform(shape=[34, 1], dtype=tf.float32), ] model1 = Model1() model2 = Model2() device = "cpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 2023-11-30 17:52:36.159568: I external/local_xla/xla/service/service.cc:168] XLA service 0x55bc4ba863c0 initialized for platform Host (this does not guarantee that XLA will be used). Devices: 2023-11-30 17:52:36.159586: I external/local_xla/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1701366756.171765 2872023 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. 2023-11-30 17:52:36.171907: E external/local_xla/xla/stream_executor/stream_executor_internal.h:177] SetPriority unimplemented for this stream. 
=========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th # indicates that errors happen after applied tf.math.argmax Mismatched elements: 11 / 34 (32.4%) Max absolute difference: 3 Max relative difference: 0.42857143 x: array([ 7, 7, 7, 7, 7, 7, 10, 7, 7, 7, 7, 7, 7, 7, 7, 7, 10, 7, 7, 7, 7, 7, 7, 7, 7, 10, 10, 7, 7, 7, 7, 7, 7, 7]) y: array([ 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 10, 7, 10, 7, 7, 7, 10, 10, 7, 7, 7, 7, 10, 7, 7, 7, 7, 10, 7, 7, 7, 10, 7])```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62518/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62518/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62517
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62517/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62517/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62517/events
https://github.com/tensorflow/tensorflow/issues/62517
2,018,970,117
I_kwDOArmXAs54VwoF
62,517
Inconsistency in XLA Compiled Models with Distributed Multiplications, `tf.abs`, and `tf.clip_by_value`
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nI have replicated the reported behaviour with colab using TF v2.14, 2.15 and nightly. Please find the [gist](https://colab.research.google.com/gist/Venkat6871/9300b2e3055a14d51db78647887cb7d0/62517_2-15_2-14_nightly-v.ipynb) here for reference.\r\n\r\nThank you!", "Hi,\r\n\r\nThis precision loss is due to the different order of multiplication and involving mutiple `tf.multiply `operations will accumulate the precession error. This does not show any unexpected behavior in the implementation.\r\n\r\n```\r\n add = tf.add(inp2, inp1)\r\n mul = tf.multiply(add, add)\r\n abs = tf.abs(mul)\r\n```\r\n\r\nvs \r\n\r\n```\r\n add = tf.add(inp1, inp2)\r\n mul1_1 = tf.multiply(inp1, add)\r\n mul1_2 = tf.multiply(inp2, add)\r\n mul = tf.add(mul1_1, mul1_2)\r\n abs = tf.abs(mul)\r\n```\r\n", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "Since it is not an implementation bug, I will close this issue.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62517\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62517\">No</a>\n" ]
2023-11-30T15:56:38
2023-12-15T07:28:18
2023-12-15T07:28:16
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10.0 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In TensorFlow 2.15.0, I've encountered an inconsistency where models involving distributed multiplications combined with `tf.abs` and `tf.clip_by_value` yield different results under XLA compilation. This behavior only seen on **cpu.** You can reproduce this bug on [colab](https://colab.research.google.com/drive/1i4Et5UB6fEsAQi-E8FNVa7OH1UUqR8WZ?usp=sharing) ### Standalone code to reproduce the issue ```python ## Model overview ## refer colab link to reproduce the error class Model1(tf.keras.Model): def __init__(self): super().__init__() self.p0 = tf.constant(params[0]) # [] int8 @tf.function(jit_compile=True) def __call__(self, inp1, inp2): add = tf.add(inp2, inp1) mul = tf.multiply(add, add) abs = tf.abs(mul) print(abs) clipped = tf.clip_by_value(abs, -1, 1) return abs, clipped, class Model2(tf.keras.Model): def __init__(self): super().__init__() self.p0 = tf.constant(params[0]) # [] int8 @tf.function(jit_compile=True) def __call__(self, inp1, inp2): add = tf.add(inp1, inp2) mul1_1 = tf.multiply(inp1, add) mul1_2 = tf.multiply(inp2, add) mul = tf.add(mul1_1, mul1_2) abs = tf.abs(mul) clipped = tf.clip_by_value(abs, -1, 1) return abs, clipped ,#, v4_0, ``` ### Relevant log output ```shell # On colab =========RUNNING WITH PICKLE FILES=========== =========eager_output(version:2.14.0, cpu:Intel(R) Xeon(R) CPU @ 2.20GHz)================ XLA_eager does not trigger assertion =========compiled_output(version:2.14.0, cpu:Intel(R) Xeon(R) CPU @ 2.20GHz)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 1 / 1 (100%) Max absolute difference: 18 Max relative difference: 0.1512605 x: array(-119, dtype=int8) y: array(119, dtype=int8) Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 1 / 1 (100%) Max absolute difference: 18 Max relative difference: 0.1512605 x: array(-119, dtype=int8) y: array(119, dtype=int8) ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62517/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62517/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62516
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62516/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62516/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62516/events
https://github.com/tensorflow/tensorflow/pull/62516
2,018,886,948
PR_kwDOArmXAs5gyssb
62,516
[ROCM] activated bfloat16 tests for rocm platform
{ "login": "pemeliya", "id": 141146080, "node_id": "U_kgDOCGm34A", "avatar_url": "https://avatars.githubusercontent.com/u/141146080?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pemeliya", "html_url": "https://github.com/pemeliya", "followers_url": "https://api.github.com/users/pemeliya/followers", "following_url": "https://api.github.com/users/pemeliya/following{/other_user}", "gists_url": "https://api.github.com/users/pemeliya/gists{/gist_id}", "starred_url": "https://api.github.com/users/pemeliya/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pemeliya/subscriptions", "organizations_url": "https://api.github.com/users/pemeliya/orgs", "repos_url": "https://api.github.com/users/pemeliya/repos", "events_url": "https://api.github.com/users/pemeliya/events{/privacy}", "received_events_url": "https://api.github.com/users/pemeliya/received_events", "type": "User", "site_admin": false }
[ { "id": 390482148, "node_id": "MDU6TGFiZWwzOTA0ODIxNDg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/awaiting%20review", "name": "awaiting review", "color": "bc3869", "default": false, "description": "Pull request awaiting review" }, { "id": 987666414, "node_id": "MDU6TGFiZWw5ODc2NjY0MTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/ready%20to%20pull", "name": "ready to pull", "color": "2cd643", "default": false, "description": "PR ready for merge process" }, { "id": 1097547538, "node_id": "MDU6TGFiZWwxMDk3NTQ3NTM4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:gpu", "name": "comp:gpu", "color": "0052cc", "default": false, "description": "GPU related issues" }, { "id": 1169365682, "node_id": "MDU6TGFiZWwxMTY5MzY1Njgy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:L", "name": "size:L", "color": "adafea", "default": false, "description": "CL Change Size: Large" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Requested changes are implemented, I wonder if there are any futher issues with this PR ? Thanks!", "I have also fixed a regression bug in tensorflow/core/kernels/matmul_util.cc after XLA changes by one of our collegues here: https://github.com/openxla/xla/pull/4768 " ]
2023-11-30T15:14:23
2023-12-07T05:57:33
2023-12-07T05:57:32
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62516", "html_url": "https://github.com/tensorflow/tensorflow/pull/62516", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62516.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62516.patch", "merged_at": "2023-12-07T05:57:32" }
In this PR, we added one central function **IsBF16NotSupportedInOps** in tensorflow/core/kernels/gpu_utils.cc to decide whether bfloat16 datatypes are directly supported on the target architecture or not. Previously, similar checks were spread across several files. This will facilitate the integration of bfloat16 support into ROCM: currently, bfloat16-to-float conversion is always enabled on ROCM. Besides, we also enabled bfloat16 support in a number of subtests. Finally, we rewrote tensorflow/core/kernels/batch_norm_op_test.cc in a generic form so that it can be tested with different numeric types.
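As a rough Python analogue of the idea described in this PR (the actual change is in C++ inside tensorflow/core/kernels/gpu_utils.cc), one central predicate can decide whether bfloat16 is usable and every caller falls back to float32 otherwise. The compute-capability threshold below is an assumption made for the sketch, not something stated in the PR.

```python
import tensorflow as tf

def bfloat16_supported():
    # Single place that answers "is bfloat16 natively supported here?"
    gpus = tf.config.list_physical_devices("GPU")
    if not gpus:
        return False
    details = tf.config.experimental.get_device_details(gpus[0])
    cc = details.get("compute_capability")  # e.g. (8, 0); may be absent on some backends
    return cc is not None and cc >= (8, 0)  # assumed threshold for the sketch

compute_dtype = tf.bfloat16 if bfloat16_supported() else tf.float32
print("using", compute_dtype)
```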
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62516/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62516/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62515
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62515/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62515/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62515/events
https://github.com/tensorflow/tensorflow/issues/62515
2,018,832,932
I_kwDOArmXAs54VPIk
62,515
Inconsistency with Extra Transpose Node in XLA Compiled Models Using `tf.squeeze` and `tf.abs`
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs I was able to replicate the issue reported here for [cpu](https://colab.research.google.com/gist/sushreebarsa/91ee69d4fd7c3cade0239f4ea0d21e6d/62515.ipynb) and [gpu](https://colab.research.google.com/gist/sushreebarsa/a7f796a67b3e71e5059d5cae430cf3b6/62515.ipynb). Thank you!", "Hi, \r\n\r\nCould you please confirm why you're returning `trans_mul` along with `abs` in the `Model2`, where as `Model1` returns only `abs`. ", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "Hello @sachinprasadhs,\r\nWe were considering the importance of examining the intermediate values in the layer and thought it might be a crucial aspect to look into. Additionally, we think that returning extra output should not impact the model's original return values.", "Hi, \r\n\r\nProviding extra node to the output does not yield in matching results when comparing.\r\nThis behavior is expected behavior. ", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62515\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62515\">No</a>\n" ]
2023-11-30T14:48:59
2024-01-05T01:49:05
2024-01-05T01:49:02
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In TensorFlow 2.15.0, I've encountered a inconsistency where a TensorFlow model using `tf.squeeze` and `tf.abs` with an additional transpose node output compiled with XLA. This behavior occurs on both **GPU and CPU**. ### Additional Context In our efforts to understand and locate the root cause of this issue, we have conducted several tests and experiments. Through this process, we've identified specific conditions under which the discrepancy does not occur : 1. If any one of the operators (tf.squeeze(along with reduced_dim input), tf.abs) is removed from either model, the discrepancy under XLA compilation is no longer triggered. 2. Removing `trans_mul` output from Model2's Return values will not trigger this issue. 3. Changing Input Tensor Data Type: Altering the data type of the input tensor to a type other than int8 also prevents the occurrence of this discrepancy. ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [1, 55, 3, 27] : int8 squeeze = tf.squeeze(inp, axis=0) mul = tf.multiply(squeeze, squeeze) abs = tf.abs(mul) return abs, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [1, 55, 3, 27] : int8 squeeze = tf.squeeze(inp, axis=0) transposed = tf.transpose(squeeze, perm=[1, 0, 2]) trans_mul = tf.multiply(transposed, transposed) mul = tf.transpose(trans_mul, perm=[1, 0, 2]) abs = tf.abs(mul) return abs, trans_mul, inputs = [ tf.cast(tf.random.uniform(shape=[1, 55, 3, 27], minval=-128, maxval=128, dtype=tf.int32), tf.int8), ] model1 = Model1() model2 = Model2() device = "cpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 1980 / 4455 (44.4%) Max absolute difference: 126 Max relative difference: 2. 
x: array([[[ 16, 100, -79, ..., 73, 36, -28], [ 0, -71, 16, ..., 41, 16, 25], [ 49, -31, 113, ..., 64, -60, -15]],... y: array([[[ 16, 100, 79, ..., 73, 36, 28], [ 0, 71, 16, ..., 41, 16, 25], [ 49, 31, 113, ..., 64, 60, 15]],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62515/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62515/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62514
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62514/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62514/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62514/events
https://github.com/tensorflow/tensorflow/issues/62514
2,018,730,718
I_kwDOArmXAs54U2Le
62,514
Output Discrepancy in XLA Compiled Models with `tf.divide, tf.math.reduce_prod, and tf.cos` Involving Extra Node
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nI have replicated the reported difference in results with `jit_compile=True` and with `jit_compile=False` the results are same . Attaching [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/7e02216d3d9fd52c8252992f77078237/62514.ipynb) for reference." ]
2023-11-30T13:59:55
2023-12-05T04:08:23
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In TensorFlow 2.15.0, a model involving `tf.divide`, `tf.math.reduce_prod`, and `tf.cos`, along with an additional concat node returning, exhibits different outputs under XLA compilation compared to a simpler model without the extra node. You can reproduce this issue using the [Colab notebook](https://colab.research.google.com/drive/19bN4XvDdfPLiXDbWakcgxfA4aBVIBreG?usp=sharing) I have prepared. ### Additional Context In our efforts to understand and locate the root cause of this issue, we have conducted several tests and experiments. Removing an Operator: If any one of the operators (tf.divide, tf.math.reduce_prod, tf.cos) is removed from either model, the discrepancy under XLA compilation is no longer triggered. Removing `transposed_concat` or replacing with `concat` from Model2's Return: Omitting or replacing the output `transposed_concat` will not trigger this error. ### Standalone code to reproduce the issue ```python from typing import Dict import tensorflow as tf import pickle import os import numpy as np params = [ tf.random.uniform(shape=[49, 9, 1], dtype=tf.float32), ] class Model1(tf.keras.Model): def __init__(self): super().__init__() # Tensor objects (with comments for shapes) self.p0 = tf.constant(params[0]) # [49, 9, 1] float32 @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [7, 5, 49, 1, 1] : float32 div = tf.divide(inp, self.p0) transposed_div = tf.transpose(div, perm=[0, 1, 3, 2, 4]) red = tf.math.reduce_prod(transposed_div, axis=2) cos = tf.cos(red) return red, cos, class Model2(tf.keras.Model): def __init__(self): super().__init__() # Tensor objects (with comments for shapes) self.p0 = tf.constant(params[0]) # [49, 9, 1] float32 # Layers or other Keras model objects @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [7, 5, 49, 1, 1] : float32 div = tf.divide(inp, self.p0) transposed_div = tf.transpose(div, perm=[0, 1, 3, 2, 4]) concat = tf.concat([transposed_div, transposed_div], axis=2) transposed_concat = tf.transpose(concat, perm=[1, 0, 2, 3, 4]) red = tf.math.reduce_prod(transposed_div, axis=2) cos = tf.cos(red) return red, cos, transposed_concat inputs = [ tf.random.uniform(shape=[7, 5, 49, 1, 1], dtype=tf.float32), ] model1 = Model1() model2 = Model2() device = "cpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, 
err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th Mismatched elements: 77 / 1715 (4.49%) Max absolute difference: 0.85310125 Max relative difference: 32.919815 x: array([[[[-0.067188], [ 0.995112], [ 0.119131],... y: array([[[[-0.067309], [ 0.995112], [ 0.119131],... =========RUNNING WITH PICKLE FILES=========== # on AMD CPU =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th Mismatched elements: 1080 / 1715 (63%) Max absolute difference: 1.9984823 Max relative difference: 360.84747 x: array([[[[ 0.942858], [-0.681655], [-0.800721],... y: array([[[[ 0.738128], [-0.681655], [-0.800721],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62514/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62514/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62513
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62513/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62513/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62513/events
https://github.com/tensorflow/tensorflow/issues/62513
2,018,628,069
I_kwDOArmXAs54UdHl
62,513
A hang issue in tf.raw_ops.Unstage
{ "login": "Zoeeeeey", "id": 99133420, "node_id": "U_kgDOBein7A", "avatar_url": "https://avatars.githubusercontent.com/u/99133420?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Zoeeeeey", "html_url": "https://github.com/Zoeeeeey", "followers_url": "https://api.github.com/users/Zoeeeeey/followers", "following_url": "https://api.github.com/users/Zoeeeeey/following{/other_user}", "gists_url": "https://api.github.com/users/Zoeeeeey/gists{/gist_id}", "starred_url": "https://api.github.com/users/Zoeeeeey/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Zoeeeeey/subscriptions", "organizations_url": "https://api.github.com/users/Zoeeeeey/orgs", "repos_url": "https://api.github.com/users/Zoeeeeey/repos", "events_url": "https://api.github.com/users/Zoeeeeey/events{/privacy}", "received_events_url": "https://api.github.com/users/Zoeeeeey/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5508003926, "node_id": "LA_kwDOArmXAs8AAAABSE14Vg", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.13", "name": "TF 2.13", "color": "B13ACB", "default": false, "description": "For issues related to Tensorflow 2.13" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs I was able to replicate this issue on colab, please find the gist [here](https://colab.research.google.com/gist/sushreebarsa/4c93b168372254d339d217673f424593/62513.ipynb) for reference. Thank you!", "Even in eager mode also, I was able to observe this behavior, it's getting stuck at `unstage_op` \r\n\r\n```\r\nimport tensorflow as tf\r\ndata = tf.random.uniform(shape=[3, 2])\r\nstage_op = tf.raw_ops.Stage(values=[data])\r\nunstage_op = tf.raw_ops.Unstage(dtypes=[data.dtype])\r\n```", "Thank you for your report.\r\n\r\nAs background, [tf.queue](https://www.tensorflow.org/api_docs/python/tf/queue) has similar ops with public API. In contrast, `Stage` and `Unstage` are internal-only ops that are optimized for performance.\r\n\r\nHere's a working example where an `Unstage` op does not have a shared name:\r\n\r\n```\r\nIn [1]: import tensorflow as tf\r\nIn [2]: tf.compat.v1.disable_eager_execution()\r\nIn [3]: data = tf.random.uniform(shape=[3, 2])\r\nIn [4]: stage_op = tf.raw_ops.Stage(values=[data], shared_name=\"Unstage\")\r\nIn [5]: unstage_op = tf.raw_ops.Unstage(dtypes=[data.dtype]) # The name of this op is `Unstage`\r\nIn [6]: with tf.compat.v1.Session() as sess:\r\n ...: sess.run(stage_op)\r\n ...: result = sess.run(unstage_op)\r\n ...: print(result)\r\n ...:\r\n[array([[0.9767339 , 0.24937785],\r\n [0.974072 , 0.40839374],\r\n [0.06740427, 0.6676805 ]], dtype=float32)]\r\n```\r\n\r\nAdding error checking seems infeasible since an Unstage that does not specify shared_name is valid and changing this would break API compatibility. These internal-ops are optimized for performance and the potential to block forever seems intended.\r\n", "Thanks, I got it. :>", "@Zoeeeeey , Could you please close the issue, if you don't have any further question.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62513\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62513\">No</a>\n" ]
2023-11-30T13:04:12
2023-12-19T02:53:44
2023-12-19T02:53:41
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version tf 2.13 ### Custom code Yes ### OS platform and distribution Windows ### Mobile device _No response_ ### Python version 3.8.11 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? The call to tf.raw_ops.Unstage never returns, so the program hangs. The program also hangs when tf.raw_ops.Unstage is called together with tf.raw_ops.Stage. ### Standalone code to reproduce the issue ```python import tensorflow as tf tf.compat.v1.disable_eager_execution() data = tf.random.uniform(shape=[3, 2]) stage_op = tf.raw_ops.Stage(values=[data]) unstage_op = tf.raw_ops.Unstage(dtypes=[data.dtype]) with tf.compat.v1.Session() as sess: sess.run(stage_op) result = sess.run(unstage_op) print(result) ``` ### Relevant log output ```shell 2023-11-30 21:03:36.123836: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructions in performance-critical operations. To enable the following instructions: SSE SSE2 SSE3 SSE4.1 SSE4.2 AVX AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags. 2023-11-30 21:03:36.125717: I tensorflow/compiler/mlir/mlir_graph_optimization_pass.cc:375] MLIR V1 optimization pass is not enabled ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62513/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62513/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62512
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62512/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62512/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62512/events
https://github.com/tensorflow/tensorflow/issues/62512
2,018,490,541
I_kwDOArmXAs54T7it
62,512
Output Discrepancies in TensorFlow Model with Extra Concat Output Node Under XLA Compilation
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nI have replicated the reported behaviour with colab using TF v2.14, 2.15 and nightly. Please find the [gist](https://colab.research.google.com/gist/Venkat6871/1cefccbfed72119f66f57ea30674843f/62512_2-15_2-14-nightly-v.ipynb) here for reference.\r\n\r\nThank you!", "Hi, Could you please explain why `concated` was being returned in `Model2` where as `Model1` doesn't return this. ", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "Hello @sachinprasadhs,\r\nWe were considering the importance of examining the intermediate values in the layer and thought it might be a crucial aspect to look into.", "Hi,\r\n\r\nReturning the different outputs for both the results and then comparing will not yield same result.\r\n\r\nThe behavior is expected and it does not not look like a bug.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62512\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62512\">No</a>\n" ]
2023-11-30T11:54:08
2024-01-11T01:49:56
2024-01-11T01:49:47
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In TensorFlow 2.15.0, I have encountered an issue where adding an **extra concat output node** to a model results in discrepancies in the output when compiled with XLA. This inconsistency is not observed without XLA compilation. ### Additional Context In our efforts to understand and locate the root cause of this issue, we have conducted several tests and experiments. Through this process, we've identified specific conditions under which the discrepancy does not occur: 1. Removing an Operator: If any one of the operators `(expand_dims, multiply, abs)` is removed from either model, the discrepancy under XLA compilation is no longer triggered. 2. Removing `concated` from Model2's Return: Omitting the concated tensor from the output of Model2 results in no discrepancy between the two models under XLA compilation. 3. Changing Input Tensor Data Type: Altering the data type of the input tensor to a type other than `int8` also prevents the occurrence of this discrepancy. ### Standalone code to reproduce the issue ```shell import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): expanded = tf.expand_dims(inp, axis=0) multiplied = tf.multiply(inp, expanded) absed = tf.abs(multiplied) return absed, class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp): expanded = tf.expand_dims(inp, axis=0) multiplied = tf.multiply(expanded, inp) concated = tf.concat([multiplied, multiplied], axis=0) absed = tf.abs(multiplied) return absed, concated inputs = [ tf.cast(tf.random.uniform(shape=[5, 5], minval=-128, maxval=128, dtype=tf.int32), tf.int8), ] model1 = Model1() model2 = Model2() device = "cpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 13 / 25 (52%) Max absolute difference: 126 Max relative difference: 2. x: array([[[ -63, 16, -23, -15, 68], [ -92, -103, -60, 100, 1], [ 9, 16, 36, 33, 64],... 
y: array([[[ 63, 16, 23, 15, 68], [ 92, 103, 60, 100, 1], [ 9, 16, 36, 33, 64],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62512/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62512/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62511
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62511/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62511/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62511/events
https://github.com/tensorflow/tensorflow/issues/62511
2,018,472,865
I_kwDOArmXAs54T3Oh
62,511
Add a csv/plain data input from directory
{ "login": "Formiga57", "id": 34382140, "node_id": "MDQ6VXNlcjM0MzgyMTQw", "avatar_url": "https://avatars.githubusercontent.com/u/34382140?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Formiga57", "html_url": "https://github.com/Formiga57", "followers_url": "https://api.github.com/users/Formiga57/followers", "following_url": "https://api.github.com/users/Formiga57/following{/other_user}", "gists_url": "https://api.github.com/users/Formiga57/gists{/gist_id}", "starred_url": "https://api.github.com/users/Formiga57/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Formiga57/subscriptions", "organizations_url": "https://api.github.com/users/Formiga57/orgs", "repos_url": "https://api.github.com/users/Formiga57/repos", "events_url": "https://api.github.com/users/Formiga57/events{/privacy}", "received_events_url": "https://api.github.com/users/Formiga57/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473173272, "node_id": "MDU6TGFiZWw0NzMxNzMyNzI=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:feature", "name": "type:feature", "color": "159b2e", "default": false, "description": "Feature requests" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi, \r\n\r\nThanks for filing a feature request.\r\n\r\nYou can make use of `tf.data.experimental.CsvDataset` [here](https://www.tensorflow.org/api_docs/python/tf/data/experimental/CsvDataset) to load the CSV data and then it can be used in any of the model building process.\r\n\r\nAlso, if you would like to feed the data to the Keras model, then you can load the CSV file using Pandas and prepare a pipeline as mentioned here https://www.tensorflow.org/tutorials/load_data/csv. \r\n\r\n", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further." ]
2023-11-30T11:43:23
2023-12-21T01:48:35
2023-12-21T01:48:35
NONE
null
null
null
### Issue type Feature Request ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version 2.14.0 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I'm an aerospace engineering student looking for ways to measure sensor influence within a wing with the help of ML. Since a lot of research is based on gathering sensor data stored in .csv or binary files, we thought it would be a nice feature to have a CSV loader (and eventually loaders for other sensor and research data formats), not only for images, audio, or text. As an example, we implemented a "csv_dataset_from_directory()" method to load a directory containing multiple classes of CSV files, and it already gave good results with a tiny portion of our dataset. Please feel free to give any comments or suggestions about this implementation; we only had about 12 hours to put the project together because we're in a hackathon right now. Let us know if you have had trouble feeding this kind of data, since for our aerospace engineering use case this is exactly the feature we're looking for! [Pull Request into Keras](https://github.com/keras-team/keras/pull/18853) [Pull Request into Docs](https://github.com/tensorflow/docs/pull/2289) The analysis is included in the Docs. ### Standalone code to reproduce the issue ```python import keras csv_dataset = keras.utils.csv_dataset_from_directory( "./cases", stride = 1, batch_size=1, ) class_names = csv_dataset.class_names ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62511/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62511/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62510
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62510/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62510/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62510/events
https://github.com/tensorflow/tensorflow/issues/62510
2,018,461,849
I_kwDOArmXAs54T0iZ
62,510
Output Discrepancy in Mathematically Equivalent Models with `tf.abs` Under XLA Compilation
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @GwiHwan-Go ,\r\n\r\nI have replicated the reported behaviour with jit_compile=True. With jit_compile=False the results are same. Attaching [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/a48c48f1fffa79b939676d94ff2e5a0a/62510.ipynb) for reference. Needs to check for this behaviour.", "It seems the problem is with tf.abs Op with XLA which is not converting -ve values to positive for Model1.\r\n\r\nIf we remove this operation then it works as intended and results are same with `jit` also.Refer attached [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/d51757db95de060f6fa8439fef61d0be/62510_r1.ipynb)." ]
2023-11-30T11:36:18
2024-04-19T04:03:36
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In TensorFlow 2.15.0, two models involving `tf.abs` and presenting mathematically equivalent operations are producing different results when compiled with XLA. This inconsistency is not observed in eager execution mode. Both models essentially compute the square of the sum of inp1 and inp2. In Model1, this is done directly by squaring the sum (inp1 + inp2)^2. In Model2, it is achieved by distributing the multiplication across the sum, inp1*(inp1 + inp2) + inp2*(inp1 + inp2), which simplifies to the same result. The inclusion of tf.abs should not affect the mathematical equivalence of these operations. ### Standalone code to reproduce the issue ```python import tensorflow as tf import numpy as np class Model1(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2): # (inp1 + inp2)^2 added = tf.add(inp1, inp2) fin_out = tf.multiply(added, added) absed = tf.abs(fin_out) return fin_out, absed class Model2(tf.keras.Model): @tf.function(jit_compile=True) def __call__(self, inp1, inp2): # inp1*(inp1 + inp2) + inp2*(inp1 + inp2) added = tf.add(inp1, inp2) v5_0 = tf.multiply(inp2, added) v6_0 = tf.multiply(inp1, added) fin_out = tf.add(v6_0, v5_0) absed = tf.abs(fin_out) return fin_out, absed inputs = [ tf.cast(tf.random.uniform(shape=[], minval=-128, maxval=128, dtype=tf.int32), tf.int8), tf.cast(tf.random.uniform(shape=[10, 10, 10], minval=-128, maxval=128, dtype=tf.int32), tf.int8), ] model1 = Model1() model2 = Model2() device = "cpu" with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion I0000 00:00:1701344148.577671 2752804 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. 2023-11-30 11:35:48.578399: E external/local_xla/xla/stream_executor/stream_executor_internal.h:177] SetPriority unimplemented for this stream. =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th Mismatched elements: 445 / 1000 (44.5%) Max absolute difference: 126 Max relative difference: 2. 
x: array([[[ 36, 81, -92, -31, 64, -111, 41, -23, 36, 64], [ 9, 25, 113, -103, 49, 17, 64, -71, -112, -71], [-124, -28, -119, 0, -112, 1, 9, -60, -63, 65],... y: array([[[ 36, 81, 92, 31, 64, 111, 41, 23, 36, 64], [ 9, 25, 113, 103, 49, 17, 64, 71, 112, 71], [124, 28, 119, 0, 112, 1, 9, 60, 63, 65],... ```
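As a side check (not part of the original report), the algebraic identity also holds exactly under int8 wrap-around arithmetic, so the eager result is the one both models should produce; the shapes mirror the report's inputs:

```python
import numpy as np

rng = np.random.default_rng(0)
a = rng.integers(-128, 128, size=(1,), dtype=np.int8)          # broadcasts like the scalar inp1
b = rng.integers(-128, 128, size=(10, 10, 10), dtype=np.int8)  # like inp2

s = a + b                       # int8 addition wraps modulo 256
direct = s * s                  # (inp1 + inp2)^2
distributed = a * s + b * s     # inp1*(inp1 + inp2) + inp2*(inp1 + inp2)

print(np.array_equal(direct, distributed))                   # True
print(np.array_equal(np.abs(direct), np.abs(distributed)))   # True
```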
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62510/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62510/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62509
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62509/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62509/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62509/events
https://github.com/tensorflow/tensorflow/issues/62509
2,018,282,953
I_kwDOArmXAs54TI3J
62,509
Significant Output Discrepancies in `tf.math.reduce_prod + tf.tan` with Extra Transposed Tensor Output
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs I was able to replicate this issue here. Please find the [gist](https://colab.research.google.com/gist/sushreebarsa/a53719eac0a0098382f7bdebc35e9c18/issue_61509.ipynb#scrollTo=pPWJfo6L7wrO). Thank you!" ]
2023-11-30T09:59:49
2023-12-05T23:16:05
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In TensorFlow 2.15.0 and 2.14.0, I've encountered a critical issue where adding an extra output of a transposed tensor in a model leads to **significant discrepancies** in the output under XLA compilation. This inconsistency is not observed in eager execution mode. This error is reproducible using certain input tensors. , you can download the input tensor from [this link](https://github.com/GwiHwan-Go/repo/raw/main/issues/pickles/extra_transpose_output_err.pickle) Alternatively, you can replicate this issue using the [Colab notebook](https://colab.research.google.com/drive/1SZ-UNxfzreeK3llenVmDowSPEqHCJ2EY?usp=sharing) I have prepared. ### Standalone code to reproduce the issue ```python import tensorflow as tf import pickle import os import numpy as np class Model1(tf.keras.Model): def __init__(self): super().__init__() @tf.function(jit_compile=True) def __call__(self, inp1, inp2): conc = tf.concat([inp2, inp1], axis=4) reduced = tf.math.reduce_prod(conc, axis=4) taned = tf.tan(reduced) return taned, class Model2(tf.keras.Model): def __init__(self): super().__init__() @tf.function(jit_compile=True) def __call__(self, inp1, inp2): transposed_inp1 = tf.transpose(inp1, perm=[4, 1, 2, 3, 0]) transposed_inp2 = tf.transpose(inp2, perm=[4, 1, 2, 3, 0]) transposed_conc = tf.concat([transposed_inp2, transposed_inp1], axis=0) conc = tf.transpose(transposed_conc, perm=[4, 1, 2, 3, 0]) reduced = tf.math.reduce_prod(conc, axis=4) taned = tf.tan(reduced) return taned, conc, model1 = Model1() model2 = Model2() device = tf.device(tf.config.list_logical_devices('CPU')[0].name) pickle_file_path = 'extra_transpose_output_err.pickle' #YOUR_PICKLE_FILE_PATH if not os.path.exists(pickle_file_path) : print(f'Pickle file not exist') else : with open('extra_transpose_output_err.pickle', 'rb') as f : nparr = pickle.load(f) inputs = [tf.convert_to_tensor(arr) for arr in nparr] with device: tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model2(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) xla_out1 = model1(*inputs) xla_out2 = model2(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(xla_out1),len(xla_out2))): np.testing.assert_allclose(xla_out1[i].numpy(), xla_out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 2023-11-30 09:56:05.513431: I external/local_xla/xla/service/service.cc:168] XLA service 0x56088e88e500 initialized for platform Host (this does not guarantee that XLA will be used). 
Devices: 2023-11-30 09:56:05.513456: I external/local_xla/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1701338165.528241 2713592 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. 2023-11-30 09:56:05.528431: E external/local_xla/xla/stream_executor/stream_executor_internal.h:177] SetPriority unimplemented for this stream. =========compiled_output(version:2.15.0)================ XLA_eager triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 316 / 378 (83.6%) Max absolute difference: 9560.12305077 Max relative difference: 52884.86608184 x: array([[[[ 1.765686e-01]], [[ 1.316997e+00]],... y: array([[[[-2.580517e-01]], [[-5.574215e-01]],... ## COLAB OUTPUT ## =========eager_output(version:2.14.0)================ XLA_eager does not trigger assertion =========compiled_output(version:2.14.0)================ XLA_eager triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 0th Mismatched elements: 316 / 378 (83.6%) Max absolute difference: 9560.12305077 Max relative difference: 52884.86608184 x: array([[[[ 1.765686e-01]], [[ 1.316997e+00]],... y: array([[[[-2.580517e-01]], [[-5.574215e-01]],... ```
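For reference, a NumPy-only sanity check (not from the original report) that Model2's transpose → concat(axis=0) → transpose round trip is equivalent to Model1's direct concat along axis 4; the random shapes below are assumptions, since the actual inputs come from the pickle file:

```python
import numpy as np

rng = np.random.default_rng(0)
inp1 = rng.standard_normal((2, 3, 1, 1, 4)).astype(np.float32)
inp2 = rng.standard_normal((2, 3, 1, 1, 5)).astype(np.float32)

perm = [4, 1, 2, 3, 0]  # swaps axes 0 and 4, so it is its own inverse
t1 = np.transpose(inp1, perm)
t2 = np.transpose(inp2, perm)
roundtrip = np.transpose(np.concatenate([t2, t1], axis=0), perm)
direct = np.concatenate([inp2, inp1], axis=4)

print(np.allclose(roundtrip, direct))  # True
```

This supports the expectation that both models should yield the same `reduce_prod`/`tan` outputs, so the XLA-compiled discrepancy is not explained by the extra transposes.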
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62509/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62509/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62508
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62508/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62508/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62508/events
https://github.com/tensorflow/tensorflow/issues/62508
2,018,278,400
I_kwDOArmXAs54THwA
62,508
TensorFlow int32 data type memory copy issue
{ "login": "cboss6", "id": 25453568, "node_id": "MDQ6VXNlcjI1NDUzNTY4", "avatar_url": "https://avatars.githubusercontent.com/u/25453568?v=4", "gravatar_id": "", "url": "https://api.github.com/users/cboss6", "html_url": "https://github.com/cboss6", "followers_url": "https://api.github.com/users/cboss6/followers", "following_url": "https://api.github.com/users/cboss6/following{/other_user}", "gists_url": "https://api.github.com/users/cboss6/gists{/gist_id}", "starred_url": "https://api.github.com/users/cboss6/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/cboss6/subscriptions", "organizations_url": "https://api.github.com/users/cboss6/orgs", "repos_url": "https://api.github.com/users/cboss6/repos", "events_url": "https://api.github.com/users/cboss6/events{/privacy}", "received_events_url": "https://api.github.com/users/cboss6/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 736653459, "node_id": "MDU6TGFiZWw3MzY2NTM0NTk=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:eager", "name": "comp:eager", "color": "0052cc", "default": false, "description": "Eager related issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi, just to check if any updates for this issue", "Anyone checked this PR? Any update?" ]
2023-11-30T09:57:19
2024-03-19T09:02:12
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source binary ### TensorFlow version TF2.15 ### Custom code Yes ### OS platform and distribution Linux Ubuntu 22.04 ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version 12.3 ### GPU model and memory _No response_ ### Current behavior? In eager mode, when doing CopyCPUTensorToGPU, if the input is an int32 tensor (allocated on device) held by an _Arg op on the device, this int32 device tensor is treated as a host source. Here is a log demonstrating that behavior (note host_src=0x7f7351400500, which is a device memory pointer): 38974 2023-11-30 08:18:14.566732: I tensorflow/core/common_runtime/copy_tensor.cc:211] Copy edge_2_tensor 38975 2023-11-30 08:18:14.566740: I tensorflow/core/common_runtime/gpu/gpu_util.cc:315] CopyCPUTensorToGPU 38976 2023-11-30 08:18:14.566750: I tensorflow/compiler/xla/stream_executor/stream.cc:1030] [stream=0x7e88fe0,impl=0x83b6310] Called Stream::ThenWaitFor(other=0x7e89090) 38977 2023-11-30 08:18:14.566765: I tensorflow/compiler/xla/stream_executor/stream.cc:2002] [stream=0x7e88fe0,impl=0x83b6310] Called Stream::ThenMemcpy(gpu_dst=0x7f7351400600, host_src=**0x7f7351400500**, size=4) The test case still passes despite this abnormal behavior, but when integrating Next Pluggable Device with PJRT buffers, a device pointer being treated as a host source causes critical value errors. ### Standalone code to reproduce the issue ```shell https://github.com/horovod/horovod/blob/7e4d99386ee4196d767943e30945e45640049ebe/test/parallel/test_tensorflow.py#L3130 Note: this case passes; it is included only to reproduce the abnormal behavior. ``` ### Relevant log output ```shell 38974 2023-11-30 08:18:14.566732: I tensorflow/core/common_runtime/copy_tensor.cc:211] Copy edge_2_tensor 38975 2023-11-30 08:18:14.566740: I tensorflow/core/common_runtime/gpu/gpu_util.cc:315] CopyCPUTensorToGPU 38976 2023-11-30 08:18:14.566750: I tensorflow/compiler/xla/stream_executor/stream.cc:1030] [stream=0x7e88fe0,impl=0x83b6310] Called Stream::ThenWaitFor(other=0x7e89090) 38977 2023-11-30 08:18:14.566765: I tensorflow/compiler/xla/stream_executor/stream.cc:2002] [stream=0x7e88fe0,impl=0x83b6310] Called Stream::ThenMemcpy(gpu_dst=0x7f7351400600, host_src=**0x7f7351400500**, size=4) ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62508/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62508/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62507
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62507/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62507/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62507/events
https://github.com/tensorflow/tensorflow/pull/62507
2,018,004,866
PR_kwDOArmXAs5gvq_D
62,507
validate axis argument of tf.squeeze
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 1169364259, "node_id": "MDU6TGFiZWwxMTY5MzY0MjU5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:XS", "name": "size:XS", "color": "adafea", "default": false, "description": "CL Change Size: Extra Small" }, { "id": 1178505529, "node_id": "MDU6TGFiZWwxMTc4NTA1NTI5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/prtype:bugfix", "name": "prtype:bugfix", "color": "159b2e", "default": false, "description": "PR to fix a bug" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @SuryanarayanaY Can you please check @cantonios's [comments](https://github.com/tensorflow/tensorflow/pull/62507#discussion_r1529000077)? Thank you!", "This PR is stale because it has been open for 14 days with no activity. It will be closed if no further activity occurs. Thank you.", "This PR was closed because it has been inactive for 14 days since being marked as stale. Please reopen if you'd like to work on this further." ]
2023-11-30T06:59:55
2024-05-25T01:49:04
2024-05-25T01:48:54
COLLABORATOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62507", "html_url": "https://github.com/tensorflow/tensorflow/pull/62507", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62507.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62507.patch", "merged_at": null }
In the current implementation of `tf.squeeze`, passing a tensor for the `axis` argument raises an uninformative error: `MemoryError: std::bad_alloc`. This happens because the `Squeeze` op expects `axis` to be a `list`, and there is no check for this at the C++ level. This PR therefore adds a validation check at the Python level itself. A reference [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/06d4e539682780b4326a4f75f654a29e/62504.ipynb#scrollTo=NeGfeG5Jnhfn) is attached. Might fix #62504.
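The kind of Python-level guard the PR body describes can be illustrated with a short, hypothetical sketch. The helper name `_validate_squeeze_axis` and the error messages are illustrative assumptions, not the actual change in the TensorFlow codebase.

```python
# Hypothetical sketch of a Python-level guard for tf.squeeze's `axis` argument;
# names and messages are illustrative, not the code from the linked PR.
import tensorflow as tf


def _validate_squeeze_axis(axis):
    """Reject tensor-valued `axis` before it reaches the C++ Squeeze kernel."""
    if axis is None:
        return None
    if isinstance(axis, tf.Tensor):
        raise ValueError(
            "`axis` must be an int or a list/tuple of ints, not a Tensor; "
            f"got {axis!r}."
        )
    if isinstance(axis, int):
        return [axis]
    if isinstance(axis, (list, tuple)) and all(isinstance(a, int) for a in axis):
        return list(axis)
    raise ValueError(f"`axis` must be an int or a list/tuple of ints; got {axis!r}.")


x = tf.zeros([1, 3, 1])
print(tf.squeeze(x, axis=_validate_squeeze_axis([0, 2])).shape)  # (3,)
```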
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62507/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62507/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62506
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62506/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62506/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62506/events
https://github.com/tensorflow/tensorflow/issues/62506
2,017,758,427
I_kwDOArmXAs54RIzb
62,506
TFLITE: Benchmarking failure on GPT2 quantized autocomplete.tflite
{ "login": "suyash-narain", "id": 78979784, "node_id": "MDQ6VXNlcjc4OTc5Nzg0", "avatar_url": "https://avatars.githubusercontent.com/u/78979784?v=4", "gravatar_id": "", "url": "https://api.github.com/users/suyash-narain", "html_url": "https://github.com/suyash-narain", "followers_url": "https://api.github.com/users/suyash-narain/followers", "following_url": "https://api.github.com/users/suyash-narain/following{/other_user}", "gists_url": "https://api.github.com/users/suyash-narain/gists{/gist_id}", "starred_url": "https://api.github.com/users/suyash-narain/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/suyash-narain/subscriptions", "organizations_url": "https://api.github.com/users/suyash-narain/orgs", "repos_url": "https://api.github.com/users/suyash-narain/repos", "events_url": "https://api.github.com/users/suyash-narain/events{/privacy}", "received_events_url": "https://api.github.com/users/suyash-narain/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "yijie-yang", "id": 47220338, "node_id": "MDQ6VXNlcjQ3MjIwMzM4", "avatar_url": "https://avatars.githubusercontent.com/u/47220338?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yijie-yang", "html_url": "https://github.com/yijie-yang", "followers_url": "https://api.github.com/users/yijie-yang/followers", "following_url": "https://api.github.com/users/yijie-yang/following{/other_user}", "gists_url": "https://api.github.com/users/yijie-yang/gists{/gist_id}", "starred_url": "https://api.github.com/users/yijie-yang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yijie-yang/subscriptions", "organizations_url": "https://api.github.com/users/yijie-yang/orgs", "repos_url": "https://api.github.com/users/yijie-yang/repos", "events_url": "https://api.github.com/users/yijie-yang/events{/privacy}", "received_events_url": "https://api.github.com/users/yijie-yang/received_events", "type": "User", "site_admin": false }
[ { "login": "yijie-yang", "id": 47220338, "node_id": "MDQ6VXNlcjQ3MjIwMzM4", "avatar_url": "https://avatars.githubusercontent.com/u/47220338?v=4", "gravatar_id": "", "url": "https://api.github.com/users/yijie-yang", "html_url": "https://github.com/yijie-yang", "followers_url": "https://api.github.com/users/yijie-yang/followers", "following_url": "https://api.github.com/users/yijie-yang/following{/other_user}", "gists_url": "https://api.github.com/users/yijie-yang/gists{/gist_id}", "starred_url": "https://api.github.com/users/yijie-yang/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/yijie-yang/subscriptions", "organizations_url": "https://api.github.com/users/yijie-yang/orgs", "repos_url": "https://api.github.com/users/yijie-yang/repos", "events_url": "https://api.github.com/users/yijie-yang/events{/privacy}", "received_events_url": "https://api.github.com/users/yijie-yang/received_events", "type": "User", "site_admin": false }, { "login": "pkgoogle", "id": 132095473, "node_id": "U_kgDOB9-d8Q", "avatar_url": "https://avatars.githubusercontent.com/u/132095473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pkgoogle", "html_url": "https://github.com/pkgoogle", "followers_url": "https://api.github.com/users/pkgoogle/followers", "following_url": "https://api.github.com/users/pkgoogle/following{/other_user}", "gists_url": "https://api.github.com/users/pkgoogle/gists{/gist_id}", "starred_url": "https://api.github.com/users/pkgoogle/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pkgoogle/subscriptions", "organizations_url": "https://api.github.com/users/pkgoogle/orgs", "repos_url": "https://api.github.com/users/pkgoogle/repos", "events_url": "https://api.github.com/users/pkgoogle/events{/privacy}", "received_events_url": "https://api.github.com/users/pkgoogle/received_events", "type": "User", "site_admin": false } ]
null
[ "@suyash-narain Could you please make sure you have included the necessary libraries for the \"RegexSplitWithOffsets\" op, such as the TFLite Text library and try to rebuild the runtime with appropriate flag enabled?\r\nAlso try to disable XNNPACK, If you don't need the XNNPACK delegate for performance reasons. Thank you!", "Hi @sushreebarsa, RegexSplitWithOffsets op error comes into picture when I use benchmark model with flex delegate. Is using flex delegate a necessity with autocomplete.tflite model? Why is general tflite nightly benchmark model not able to execute this tflite model?\r\nDo i need to enable tflite task library? i thought thats enabled by default when we build runtime?\r\n\r\ndoes the prebuilt benchmark model from tflite not have support for tensorflow text libraries?\r\n\r\nwhich flag do i need to enable while building tflite runtime for tflite text library as i cannot find any references on either tensorflow github or tflite documentation. ", "@suyash-narain Using the Flex delegate is not strictly necessary with the autocomplete.tflite model, but it is highly recommended for two primary reasons such as for unsupported operations and high performance. Due to some missing dependencies or incompatible versions. General TFLite nightly benchmark model not able to execute the tflite model due to missing dependencies. \r\n\r\nYou don't need to explicitly enable the TensorFlow Lite Task Library in most cases. It is automatically included in the TFLite runtime and utilizes the same inference infrastructure as the standard TFLite API.\r\n\r\n\r\nUnfortunately, the prebuilt benchmark model from TFLite currently does not have native support for TensorFlow Text libraries. \r\n\r\nThank you!", "Hi @sushreebarsa, thanks for your reply.\r\nI still don't understand. Building tflite runtime by default should have the Task Library enabled in most cases. But tflite benchmark model provided as default (even the one with flex delegate) cannot benchmark autocomplete.tflite gpt2 model. \r\nHow do I benchmark it then?\r\nif i build my benchmark model, which flags do i need to enable, if you can let me know? 
or would building default tflite benchmark model will work for tf2.15?", "Hi @LakshmiKalaKadali any updates?", "Hi @pkgoogle,\r\n\r\nPlease look into the issue.\r\n\r\nThank You\r\n", "I was able to replicate on linux x86_64:\r\n\r\n```\r\n./benchmark_model_plus_flex --graph=autocomplete.tflite --num_threads=1 --num_runs=10\r\n\r\nINFO: STARTING!\r\nINFO: Log parameter values verbosely: [0]\r\nINFO: Min num runs: [10]\r\nINFO: Num threads: [1]\r\nINFO: Graph: [autocomplete.tflite]\r\nINFO: #threads used for CPU inference: [1]\r\nINFO: Loaded model autocomplete.tflite\r\nINFO: The input model file size (MB): 129.674\r\nINFO: Initialized session in 36.666ms.\r\nINFO: Running benchmark for at least 1 iterations and at least 0.5 seconds but terminate if exceeding 150 seconds.\r\nINFO: count=23009 first=38091 curr=20 min=18 max=38091 avg=21.434 std=251\r\n\r\nINFO: Created TensorFlow Lite delegate for select TF ops.\r\n2023-12-14 00:29:53.965212: I tensorflow/core/platform/cpu_feature_guard.cc:182] This TensorFlow binary is optimized to use available CPU instructio\r\nns in performance-critical operations.\r\nTo enable the following instructions: SSE3 SSE4.1 SSE4.2 AVX AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.\r\nINFO: TfLiteFlexDelegate delegate: 29 nodes delegated out of 1139 nodes with 14 partitions.\r\n\r\nERROR: Op type not registered 'RegexSplitWithOffsets' in binary running on xxxxxx.xxxxxx.xxxxxx. Make sure the Op and Kernel are registered \r\nin the binary running in this process. Note that if you are loading a saved graph which used ops from tf.contrib (e.g. `tf.contrib.resampler`), acce\r\nssing should be done before importing the graph, as contrib ops are lazily registered when the module is first accessed.\r\nERROR: Op type not registered 'RegexSplitWithOffsets' in binary running on xxxxxxxx.xxxxxx.xxxxxxx. Make sure the Op and Kernel are registered \r\nin the binary running in this process. Note that if you are loading a saved graph which used ops from tf.contrib (e.g. 
`tf.contrib.resampler`), acce\r\nssing should be done before importing the graph, as contrib ops are lazily registered when the module is first accessed.\r\nINFO: Created TensorFlow Lite XNNPACK delegate for CPU.\r\nWARNING: Attempting to use a delegate that only supports static-sized tensors with a graph that has dynamic-sized tensors (tensor#243 is a dynamic-s\r\nized tensor).\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\nERROR: Delegate kernel was not initialized\r\nERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare.\r\n...\r\n<repeated a lot>\r\n```\r\n\r\n@yijie-yang can you please take a look? Thanks.", "Yes, to benchmark the gpt2 model you need some extra dependencies. There are 2 steps needed:\r\n\r\n1. add this library to your workspace: https://github.com/tensorflow/text/tree/master\r\n\r\n2. under `tensorflow/tensorflow/lite/tools/benchmark/BUILD`, add the dependency of `\"//tensorflow_text:ops_lib\",` to your `benchmark_model_plus_flex` binary.\r\n\r\nThen you should be good to go!", "@suyash-narain, can you try the above and let us know if your issue is resolved? Thanks.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "let me try this and get back to you. thanks", "Hi @pkgoogle @yijie-yang \r\n\r\nI was trying to build the benchmark_model_plus_flex binary using the mentioned changes and ran into an error.\r\n\r\n> ERROR: /home/tensorflow/tensorflow_text/core/kernels/BUILD:35:14: no such package '@local_config_tf//': The repository '@local_config_tf' could not be resolved: Repository '@local_config_tf' is not defined and referenced by '//tensorflow_text/core/kernels:boise_offset_converter_kernel'\r\n\r\nI downloaded tensorflow-text from the mentioned link and put it inside my workspace. 
\r\nI made changes to the BUILD as well by adding '//tensorflow_text:ops_lib' dependency.\r\n\r\nI am not sure what the '@local_config_tf' error pertains to here.\r\n\r\nthe log is below:\r\n\r\n> user@user:~/tensorflow$ bazel build -c opt --config=elinux_aarch64 //tensorflow/lite/tools/benchmark:benchmark_model_plus_flex\r\n> INFO: Reading 'startup' options from /home/tensorflow/.bazelrc: --windows_enable_symlinks\r\n> INFO: Options provided by the client:\r\n> Inherited 'common' options: --isatty=1 --terminal_columns=203\r\n> INFO: Reading rc options for 'build' from /home/tensorflow/.bazelrc:\r\n> Inherited 'common' options: --experimental_repo_remote_exec\r\n> INFO: Reading rc options for 'build' from /home/tensorflow/.bazelrc:\r\n> 'build' options: --define framework_shared_object=true --define tsl_protobuf_header_only=true --define=use_fast_cpp_protos=true --define=allow_oversize_protos=true --spawn_strategy=standalone -c opt --announce_rc --define=grpc_no_ares=true --noincompatible_remove_legacy_whole_archive --features=-force_no_whole_archive --enable_platform_specific_config --define=with_xla_support=true --config=short_logs --config=v2 --define=no_aws_support=true --define=no_hdfs_support=true --experimental_cc_shared_library --experimental_link_static_libraries_once=false --incompatible_enforce_config_setting_visibility\r\n> INFO: Found applicable config definition build:short_logs in file /home/tensorflow/.bazelrc: --output_filter=DONT_MATCH_ANYTHING\r\n> INFO: Found applicable config definition build:v2 in file /home/tensorflow/.bazelrc: --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1\r\n> INFO: Found applicable config definition build:elinux_aarch64 in file /home/tensorflow/.bazelrc: --config=elinux --cpu=aarch64\r\n> INFO: Found applicable config definition build:elinux in file /home/tensorflow/.bazelrc: --crosstool_top=@local_config_embedded_arm//:toolchain --host_crosstool_top=@bazel_tools//tools/cpp:toolchain\r\n> INFO: Found applicable config definition build:linux in file /home/tensorflow/.bazelrc: --host_copt=-w --copt=-Wno-all --copt=-Wno-extra --copt=-Wno-deprecated --copt=-Wno-deprecated-declarations --copt=-Wno-ignored-attributes --copt=-Wno-array-bounds --copt=-Wunused-result --copt=-Werror=unused-result --copt=-Wswitch --copt=-Werror=switch --copt=-Wno-error=unused-but-set-variable --define=PREFIX=/usr --define=LIBDIR=$(PREFIX)/lib --define=INCLUDEDIR=$(PREFIX)/include --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include --cxxopt=-std=c++17 --host_cxxopt=-std=c++17 --config=dynamic_kernels --experimental_guard_against_concurrent_changes\r\n> INFO: Found applicable config definition build:dynamic_kernels in file /home/tensorflow/.bazelrc: --define=dynamic_loaded_kernels=true --copt=-DAUTOLOAD_DYNAMIC_KERNELS\r\n> INFO: Repository boringssl instantiated at:\r\n> /home/tensorflow/WORKSPACE:84:14: in <toplevel>\r\n> /home/tensorflow/tensorflow/workspace2.bzl:928:21: in workspace\r\n> /home/tensorflow/tensorflow/workspace2.bzl:469:20: in _tf_repositories\r\n> /home/tensorflow/third_party/repo.bzl:136:21: in tf_http_archive\r\n> Repository rule _tf_http_archive defined at:\r\n> /home/tensorflow/third_party/repo.bzl:89:35: in <toplevel>\r\n> INFO: Repository curl instantiated at:\r\n> /home/tensorflow/WORKSPACE:84:14: in <toplevel>\r\n> /home/tensorflow/tensorflow/workspace2.bzl:928:21: in workspace\r\n> /home/tensorflow/tensorflow/workspace2.bzl:410:20: in _tf_repositories\r\n> /home/tensorflow/third_party/repo.bzl:136:21: in tf_http_archive\r\n> Repository rule 
_tf_http_archive defined at:\r\n> /home/tensorflow/third_party/repo.bzl:89:35: in <toplevel>\r\n> INFO: Repository icu instantiated at:\r\n> /home/tensorflow/WORKSPACE:84:14: in <toplevel>\r\n> /home/tensorflow/tensorflow/workspace2.bzl:921:28: in workspace\r\n> /home/tensorflow/tensorflow/workspace2.bzl:75:8: in _initialize_third_party\r\n> /home/tensorflow/third_party/icu/workspace.bzl:8:20: in repo\r\n> /home/tensorflow/third_party/repo.bzl:136:21: in tf_http_archive\r\n> Repository rule _tf_http_archive defined at:\r\n> /home/tensorflow/third_party/repo.bzl:89:35: in <toplevel>\r\n> INFO: Repository armhf_linux_toolchain instantiated at:\r\n> /home/tensorflow/WORKSPACE:84:14: in <toplevel>\r\n> /home/tensorflow/tensorflow/workspace2.bzl:928:21: in workspace\r\n> /home/tensorflow/tensorflow/workspace2.bzl:258:20: in _tf_repositories\r\n> /home/tensorflow/third_party/repo.bzl:136:21: in tf_http_archive\r\n> Repository rule _tf_http_archive defined at:\r\n> /home/tensorflow/third_party/repo.bzl:89:35: in <toplevel>\r\n> INFO: Repository aarch64_linux_toolchain instantiated at:\r\n> /home/tensorflow/WORKSPACE:84:14: in <toplevel>\r\n> /home/tensorflow/tensorflow/workspace2.bzl:928:21: in workspace\r\n> /home/tensorflow/tensorflow/workspace2.bzl:250:20: in _tf_repositories\r\n> /home/tensorflow/third_party/repo.bzl:136:21: in tf_http_archive\r\n> Repository rule _tf_http_archive defined at:\r\n> /home/tensorflow/third_party/repo.bzl:89:35: in <toplevel>\r\n> INFO: Repository XNNPACK instantiated at:\r\n> /home/tensorflow/WORKSPACE:84:14: in <toplevel>\r\n> /home/tensorflow/tensorflow/workspace2.bzl:928:21: in workspace\r\n> /home/tensorflow/tensorflow/workspace2.bzl:151:20: in _tf_repositories\r\n> /home/tensorflow/third_party/repo.bzl:136:21: in tf_http_archive\r\n> Repository rule _tf_http_archive defined at:\r\n> /home/tensorflow/third_party/repo.bzl:89:35: in <toplevel>\r\n> INFO: Repository double_conversion instantiated at:\r\n> /home/tensorflow/WORKSPACE:84:14: in <toplevel>\r\n> /home/tensorflow/tensorflow/workspace2.bzl:928:21: in workspace\r\n> /home/tensorflow/tensorflow/workspace2.bzl:629:20: in _tf_repositories\r\n> /home/tensorflow/third_party/repo.bzl:136:21: in tf_http_archive\r\n> Repository rule _tf_http_archive defined at:\r\n> /home/tensorflow/third_party/repo.bzl:89:35: in <toplevel>\r\n> ERROR: /home/tensorflow/tensorflow_text/core/kernels/BUILD:35:14: no such package '@local_config_tf//': The repository '@local_config_tf' could not be resolved: Repository '@local_config_tf' is not defined and referenced by '//tensorflow_text/core/kernels:boise_offset_converter_kernel'\r\n> ERROR: Analysis of target '//tensorflow/lite/tools/benchmark:benchmark_model_plus_flex' failed; build aborted: \r\n> INFO: Elapsed time: 1.277s\r\n> INFO: 0 processes.\r\n> FAILED: Build did NOT complete successfully (53 packages loaded, 1743 targets configured)\r\n> currently loading: @gif// ... 
(2 packages)\r\n> Fetching https://storage.googleapis.com/mirror.tensorflow.org/curl.se/download/curl-8.4.0.tar.gz; 3.0 MiB (3,137,536B)\r\n> Fetching https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/XNNPACK/archive/bbbaa7352a3ea729987d3e654d37be93e8009691.zip; 977.6 KiB (1,001,081B)\r\n> Fetching https://storage.googleapis.com/.../developer.arm.com/-/media/Files/downloads/gnu/11.3.rel1/binrel/arm-gnu-toolchain-11.3.rel1-x86_64-aarch64-none-linux-gnu.tar.xz; 365.5 KiB (374,310B)\r\n> Fetching https://storage.googleapis.com/.../developer.arm.com/-/media/Files/downloads/gnu/11.3.rel1/binrel/arm-gnu-toolchain-11.3.rel1-x86_64-arm-none-linux-gnueabihf.tar.xz; 759.0 KiB (777,173B)\r\n> Fetching https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/boringssl/archive/c00d7ca810e93780bd0c8ee4eea28f4f2ea4bcdc.tar.gz; 261.5 KiB (267,782B)\r\n> Fetching https://storage.googleapis.com/mirror.tensorflow.org/github.com/unicode-org/icu/archive/release-69-1.zip\r\n> Fetching https://storage.googleapis.com/mirror.tensorflow.org/github.com/google/double-conversion/archive/v3.2.0.tar.gz\r\n> ", "Hmmm...could you try adding deps `@org_tensorflow_text//tensorflow_text:ops_lib` instead? Some great discussion about the similar issue is found: https://github.com/tensorflow/tensorflow/issues/50924", "Hi @yijie-yang \r\n\r\nThis time i get the error:\r\n\r\n> ERROR: /home/mtk/tensorflow/tensorflow/lite/tools/benchmark/BUILD:74:13: no such package '@org_tensorflow_text//tensorflow_text': The repository '@org_tensorflow_text' could not be resolved: Repository '@org_tensorflow_text' is not defined and referenced by '//tensorflow/lite/tools/benchmark:benchmark_model_plus_flex'\r\n> ERROR: Analysis of target '//tensorflow/lite/tools/benchmark:benchmark_model_plus_flex' failed; build aborted: Analysis failed\r\n> \r\n\r\n\r\nthe BUILD snippet is as below:\r\n\r\n```\r\n tf_cc_binary(\r\n name = \"benchmark_model_plus_flex\",\r\n srcs = [\r\n \"benchmark_plus_flex_main.cc\",\r\n ],\r\n copts = common_copts,\r\n linkopts = tflite_linkopts() + select({\r\n \"//tensorflow:android\": [\r\n \"-pie\", # Android 5.0 and later supports only PIE\r\n \"-lm\", # some builtin ops, e.g., tanh, need -lm\r\n ],\r\n \"//conditions:default\": [],\r\n }),\r\n deps = [\r\n \":benchmark_tflite_model_lib\",\r\n \"//tensorflow/lite/delegates/flex:delegate\",\r\n \"//tensorflow/lite/testing:init_tensorflow\",\r\n \"//tensorflow/lite/tools:logging\",\r\n \"@org_tensorflow_text//tensorflow_text:ops_lib\",\r\n ],\r\n )\r\n```\r\n", "Sorry my mistakes.\r\n\r\n> ERROR: /home/tensorflow/tensorflow_text/core/kernels/BUILD:35:14: no such package '@local_config_tf//': The repository '@local_config_tf' could not be resolved: Repository '@local_config_tf' is not defined and referenced by '//tensorflow_text/core/kernels:boise_offset_converter_kernel'\r\n\r\nIt's likely an error with your repository rules or directory structure. Could you reference to this discussion and make the changes to WORKSPACE and BUILD? https://stackoverflow.com/questions/53254061/failing-to-bazel-build-c-project-with-tensorflow-as-a-dependency", "Hi @yijie-yang \r\nI think I was getting the error because i didn't add tensorflow-text rules to my WORKSPACE. How can I add it to this file?\r\ni copied the tensorflow-text folder into tensorflow directory but not sure how to reference it in workspace file.\r\nHow do i integrate tensorflow-text WORKSPACE file with tensorflow WORKSPACE?\r\nany suggestions? 
", "Hi @yijie-yang,\r\nIn my WORKSPACE file, I add the following:\r\n```\r\nlocal_repository(\r\n name = \"org_tensorflow_text\",\r\n path = \"text/tensorflow_text\",\r\n)\r\n```\r\nI get the below error log. What other changes am I missing here in WORKSPACE file?\r\n\r\n> user@user:~/tensorflow$ bazel build -c opt --config=elinux_aarch64 //tensorflow/lite/tools/benchmark:benchmark_model_plus_flex\r\n> INFO: Reading 'startup' options from /home/tensorflow/.bazelrc: --windows_enable_symlinks\r\n> INFO: Options provided by the client:\r\n> Inherited 'common' options: --isatty=1 --terminal_columns=203\r\n> INFO: Reading rc options for 'build' from /home/tensorflow/.bazelrc:\r\n> Inherited 'common' options: --experimental_repo_remote_exec\r\n> INFO: Reading rc options for 'build' from /home/tensorflow/.bazelrc:\r\n> 'build' options: --define framework_shared_object=true --define tsl_protobuf_header_only=true --define=use_fast_cpp_protos=true --define=allow_oversize_protos=true --spawn_strategy=standalone -c opt --announce_rc --define=grpc_no_ares=true --noincompatible_remove_legacy_whole_archive --features=-force_no_whole_archive --enable_platform_specific_config --define=with_xla_support=true --config=short_logs --config=v2 --define=no_aws_support=true --define=no_hdfs_support=true --experimental_cc_shared_library --experimental_link_static_libraries_once=false --incompatible_enforce_config_setting_visibility\r\n> INFO: Found applicable config definition build:short_logs in file /home/tensorflow/.bazelrc: --output_filter=DONT_MATCH_ANYTHING\r\n> INFO: Found applicable config definition build:v2 in file /home/tensorflow/.bazelrc: --define=tf_api_version=2 --action_env=TF2_BEHAVIOR=1\r\n> INFO: Found applicable config definition build:elinux_aarch64 in file /home/tensorflow/.bazelrc: --config=elinux --cpu=aarch64\r\n> INFO: Found applicable config definition build:elinux in file /home/tensorflow/.bazelrc: --crosstool_top=@local_config_embedded_arm//:toolchain --host_crosstool_top=@bazel_tools//tools/cpp:toolchain\r\n> INFO: Found applicable config definition build:linux in file /home/tensorflow/.bazelrc: --host_copt=-w --copt=-Wno-all --copt=-Wno-extra --copt=-Wno-deprecated --copt=-Wno-deprecated-declarations --copt=-Wno-ignored-attributes --copt=-Wno-array-bounds --copt=-Wunused-result --copt=-Werror=unused-result --copt=-Wswitch --copt=-Werror=switch --copt=-Wno-error=unused-but-set-variable --define=PREFIX=/usr --define=LIBDIR=$(PREFIX)/lib --define=INCLUDEDIR=$(PREFIX)/include --define=PROTOBUF_INCLUDE_PATH=$(PREFIX)/include --cxxopt=-std=c++17 --host_cxxopt=-std=c++17 --config=dynamic_kernels --experimental_guard_against_concurrent_changes\r\n> INFO: Found applicable config definition build:dynamic_kernels in file /home/tensorflow/.bazelrc: --define=dynamic_loaded_kernels=true --copt=-DAUTOLOAD_DYNAMIC_KERNELS\r\n> ERROR: /home/tensorflow/WORKSPACE:72:17: fetching local_repository rule //external:org_tensorflow_text: java.io.IOException: No WORKSPACE file found in /home/.cache/bazel/_bazel/716ac13c348ce3335128b3d9f4131682/external/org_tensorflow_text\r\n> ERROR: /home/tensorflow/tensorflow/lite/tools/benchmark/BUILD:74:13: //tensorflow/lite/tools/benchmark:benchmark_model_plus_flex depends on @org_tensorflow_text//tensorflow_text:ops_lib in repository @org_tensorflow_text which failed to fetch. 
no such package '@org_tensorflow_text//tensorflow_text': No WORKSPACE file found in /home/.cache/bazel/_bazel/716ac13c348ce3335128b3d9f4131682/external/org_tensorflow_text\r\n> ERROR: Analysis of target '//tensorflow/lite/tools/benchmark:benchmark_model_plus_flex' failed; build aborted: Analysis failed\r\n> INFO: Elapsed time: 0.595s\r\n> INFO: 0 processes.\r\n> FAILED: Build did NOT complete successfully (3 packages loaded, 6 targets configured)\r\n> ", "Did you build and intall both tensorflow and tensorflow_text properly?\r\n- Setup TF: https://www.tensorflow.org/install/source\r\n- Install tensorflow_text: https://github.com/tensorflow/text/tree/master?tab=readme-ov-file#installation", "@yijie-yang,\r\n\r\nI have both TF and Tensorflow_text installed via pip. The build from source also simply builds the pip file which is then installed. Is there anything else I need to add to workspace file?", "Let me reproduce your error in my local later today.", "thanks", "Hi @suyash-narain,\r\n\r\nSorry I'm too busy with the current work and don't have enough time to repro your error. Could you refer to this doc (https://bazel.build/reference/be/workspace) for workspace setup?", "Hi @yijie-yang\r\nI did refer to it before but the local_repository rules were still giving me the same error. Do you have an example workspace i can refer to?\r\nNot entirely sure how to add tf-text to tf workspace.\r\ncreating the local_repository gives me same error as beforre\r\nthanks", "Hi @broken,\r\n\r\nCould you provide some insights to this?", "The cause of the issue is that TF Text tries to link with the tensorflow shared library, but since you are building this inside TF, this isn't needed. I think you should use the tensorflow serving (model server) library as an example. After viewing its [workspace.bzl](https://github.com/tensorflow/serving/blob/master/tensorflow_serving/workspace.bzl#L65) file, instead of local_repository, try:\r\n\r\n```\r\n http_archive(\r\n name = \"org_tensorflow_text\",\r\n sha256 = \"4e6ec543a1d70a50f0105e0ea69ea8a1edd0b17a38d0244aa3b14f889b2cf74d\",\r\n strip_prefix = \"text-2.12.1\",\r\n url = \"https://github.com/tensorflow/text/archive/v2.12.1.zip\",\r\n patches = [\"@//third_party/tf_text:tftext.patch\"],\r\n patch_args = [\"-p1\"],\r\n repo_mapping = {\"@com_google_re2\": \"@com_googlesource_code_re2\"},\r\n )\r\n```\r\n\r\nyou can then link in tf text ops with: ` \"@org_tensorflow_text//tensorflow_text:ops_lib\",`\r\n\r\n[serving](https://github.com/tensorflow/serving/tree/master/tensorflow_serving) also has patches, ~but you should be able to ignore this as it is only bc it builds with an older version of c++.~ You should also copy [this directory](https://github.com/tensorflow/serving/blob/master/third_party/tf_text/) into the TF third_party one. It rewrites @local_config_tf to @org_tensorflow. Though, does @org_tensorflow exist for your target? Maybe update the patch so those point correctly by simply removing @org_tensorflow?\r\n\r\nedit:\r\n\r\n- updated notes on the patch\r\n- you may need to update the version in http_archive.\r\n- the \"redo_mapping\" may not be needed. It depends what core TF is using for its re2 lib name.\r\n", "Hi @broken ,\r\n\r\nI am not sure what I am doing wrong here. 
\r\nMy WORKSPACE file is as below:\r\n\r\n> workspace(name = \"org_tensorflow\")\r\n> \r\n>\r\n> load(\"@bazel_tools//tools/build_defs/repo:http.bzl\", \"http_archive\")\r\n> \r\n> http_archive(\r\n> name = \"bazel_skylib\",\r\n> sha256 = \"74d544d96f4a5bb630d465ca8bbcfe231e3594e5aae57e1edbf17a6eb3ca2506\",\r\n> urls = [\r\n> \"https://storage.googleapis.com/mirror.tensorflow.org/github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz\",\r\n> \"https://github.com/bazelbuild/bazel-skylib/releases/download/1.3.0/bazel-skylib-1.3.0.tar.gz\",\r\n> ],\r\n> )\r\n> \r\n> http_archive(\r\n> name = \"rules_python\",\r\n> sha256 = \"9d04041ac92a0985e344235f5d946f71ac543f1b1565f2cdbc9a2aaee8adf55b\",\r\n> strip_prefix = \"rules_python-0.26.0\",\r\n> url = \"https://github.com/bazelbuild/rules_python/releases/download/0.26.0/rules_python-0.26.0.tar.gz\",\r\n> )\r\n> \r\n> http_archive(\r\n> name = \"org_tensorflow_text\",\r\n> sha256 = \"4e6ec543a1d70a50f0105e0ea69ea8a1edd0b17a38d0244aa3b14f889b2cf74d\",\r\n> strip_prefix = \"text-2.12.1\",\r\n> url = \"https://github.com/tensorflow/text/archive/v2.12.1.zip\",\r\n> repo_mapping = {\"@com_google_re2\": \"@com_googlesource_code_re2\"},\r\n> )\r\n> \r\n> load(\"@rules_python//python:repositories.bzl\", \"py_repositories\")\r\n> \r\n> py_repositories()\r\n> \r\n> load(\"@rules_python//python:repositories.bzl\", \"python_register_toolchains\")\r\n> load(\r\n> \"//tensorflow/tools/toolchains/python:python_repo.bzl\",\r\n> \"python_repository\",\r\n> )\r\n> \r\n> python_repository(name = \"python_version_repo\")\r\n> \r\n> load(\"@python_version_repo//:py_version.bzl\", \"HERMETIC_PYTHON_VERSION\")\r\n> \r\n> python_register_toolchains(\r\n> name = \"python\",\r\n> ignore_root_user_error = True,\r\n> python_version = HERMETIC_PYTHON_VERSION,\r\n> )\r\n> \r\n> load(\"@python//:defs.bzl\", \"interpreter\")\r\n> load(\"@rules_python//python:pip.bzl\", \"package_annotation\", \"pip_parse\")\r\n> \r\n> NUMPY_ANNOTATIONS = {\r\n> \"numpy\": package_annotation(\r\n> additive_build_content = \"\"\"\\\r\n> filegroup(\r\n> name = \"includes\",\r\n> srcs = glob([\"site-packages/numpy/core/include/**/*.h\"]),\r\n> )\r\n> cc_library(\r\n> name = \"numpy_headers\",\r\n> hdrs = [\":includes\"],\r\n> strip_include_prefix=\"site-packages/numpy/core/include/\",\r\n> )\r\n> \"\"\",\r\n> ),\r\n> }\r\n> \r\n> pip_parse(\r\n> name = \"pypi\",\r\n> annotations = NUMPY_ANNOTATIONS,\r\n> python_interpreter_target = interpreter,\r\n> requirements = \"//:requirements_lock_\" + HERMETIC_PYTHON_VERSION.replace(\".\", \"_\") + \".txt\",\r\n> )\r\n> \r\n> load(\"@pypi//:requirements.bzl\", \"install_deps\")\r\n> \r\n> install_deps()\r\n> \r\n> \r\n> #load(\"@//text/tensorflow_text:tftext.bzl\",\r\n> #\"tf_text_workspace\")\r\n> #tf_text_workspace()\r\n> \r\n> load(\"@//tensorflow:workspace3.bzl\", \"tf_workspace3\")\r\n> \r\n> tf_workspace3()\r\n> \r\n> load(\"@//tensorflow:workspace2.bzl\", \"tf_workspace2\")\r\n> \r\n> tf_workspace2()\r\n> \r\n> load(\"@//tensorflow:workspace1.bzl\", \"tf_workspace1\")\r\n> \r\n> tf_workspace1()\r\n> \r\n> load(\"@//tensorflow:workspace0.bzl\", \"tf_workspace0\")\r\n> \r\n> tf_workspace0()\r\n\r\non trying to build the benchmark_model_plus_flex binary, i get the error:\r\n```\r\n\r\nERROR: /home/.cache/bazel/_bazel/716ac13c348ce3335128b3d9f4131682/external/org_tensorflow_text/tensorflow_text/core/kernels/sentencepiece/BUILD:178:14: no such package '@local_config_tf//': The repository '@local_config_tf' could not be resolved: 
Repository '@local_config_tf' is not defined and referenced by '@org_tensorflow_text//tensorflow_text/core/kernels/sentencepiece:sentencepiece_tokenizer_kernel'\r\nERROR: Analysis of target '//tensorflow/lite/tools/benchmark:benchmark_model_plus_flex' failed; build aborted: \r\nINFO: Elapsed time: 3.013s\r\nINFO: 0 processes.\r\nFAILED: Build did NOT complete successfully (82 packages loaded, 326 targets configured)\r\n currently loading: @com_google_absl//absl/base ... (6 packages)\r\n\r\n```\r\n\r\nmy file structure is:\r\n\r\n```\r\ntensorflow\r\n |--tensorflow (tf stuff here)\r\n |--text (tensorflow-text cloned inside tf repository)\r\n |--WORKSPACE (tf workspace as it is from tf repo with tf-text changes added)\r\n```\r\nEdit:\r\nDo I need to add tf serving? If so, where should it be added? i am just cloning tensorflow/tensorflow to build benchmark model and cloning tensorlfow-text in the same directory.", "Hi @broken \r\nfollow-up to your edits:\r\nDo I need to add tf serving? If so, where should it be added? i am just cloning tensorflow/tensorflow to build benchmark model and cloning tensorlfow-text in the same directory.", "Apologies, I actually had edited my comment, and maybe you viewed it before the edit. The patch file is necessary, but not tf serving.\r\n\r\nIn your file structure, add a third_party directory, and copy the tf_text directory that I had linked above that contains the patch file. Then update your workspace to include that patch to tf text.", "Hi @broken,\r\n\r\nI tried your suggestions and am no longer getting the @org_tensorflow_text errors. But i started getting build failures related to eigen I am not sure how to proceed with. \r\nI am using bazel v6.1.0\r\nthe error log is attached:\r\n[error.log](https://github.com/tensorflow/tensorflow/files/13924606/error.log)\r\n\r\ni get errors like:\r\n\r\n> ERROR: /home/mtk/tensorflow/tensorflow/c/BUILD:411:11: Compiling tensorflow/c/tf_status.cc failed: (Exit 1): aarch64-none-linux-gnu-gcc failed: error executing command (from target //tensorflow/c:tf_status) /home/mtk/.cache/bazel/_bazel_mtk/716ac13c348ce3335128b3d9f4131682/external/aarch64_linux_toolchain/bin/aarch64-none-linux-gnu-gcc -fstack-protector -g0 -O2 -DNDEBUG -ffunction-sections -fdata-sections ... 
(remaining 144 arguments skipped)\r\n> In file included from external/local_tsl/tsl/platform/types.h:21,\r\n> from external/local_tsl/tsl/platform/default/logging.h:38,\r\n> from external/local_tsl/tsl/platform/logging.h:26,\r\n> from external/local_tsl/tsl/platform/status.h:34,\r\n> from external/local_tsl/tsl/c/tsl_status_internal.h:19,\r\n> from ./tensorflow/c/tf_status_internal.h:19,\r\n> from tensorflow/c/tf_status.cc:20:\r\n> external/local_tsl/tsl/platform/bfloat16.h:24:16: error: 'bfloat16' in namespace 'Eigen' does not name a type\r\n> 24 | typedef Eigen::bfloat16 bfloat16;\r\n> | ^~~~~~~~\r\n> In file included from external/local_tsl/tsl/platform/ml_dtypes.h:19,\r\n> from external/local_tsl/tsl/platform/types.h:22,\r\n> from external/local_tsl/tsl/platform/default/logging.h:38,\r\n> from external/local_tsl/tsl/platform/logging.h:26,\r\n> from external/local_tsl/tsl/platform/status.h:34,\r\n> from external/local_tsl/tsl/c/tsl_status_internal.h:19,\r\n> from ./tensorflow/c/tf_status_internal.h:19,\r\n> from tensorflow/c/tf_status.cc:20:\r\n> bazel-out/aarch64-opt/bin/external/ml_dtypes/_virtual_includes/float8/ml_dtypes/include/float8.h:71:57: error: expected ')' before 'bf16'\r\n> 71 | explicit EIGEN_DEVICE_FUNC float8_base(Eigen::bfloat16 bf16)\r\n> \r\n\r\nmy workspace file has the following changes:\r\n\r\n```\r\nhttp_archive(\r\n name = \"org_tensorflow_text\",\r\n sha256 = \"70838b0474d4e15802f0771bdbbcd82fcce89bf5eccd78f8f9ae10fce520ffa4\",\r\n strip_prefix = \"text-2.15.0\",\r\n url = \"https://github.com/tensorflow/text/archive/v2.15.0.zip\",\r\n patches = [\"@//third_party/tf_text:tftext.patch\"],\r\n patch_args = [\"-p1\"],\r\n )\r\nhttp_archive(\r\n name = \"com_google_sentencepiece\",\r\n strip_prefix = \"sentencepiece-0.1.99\",\r\n sha256 = \"68dbb82ccd8261da7b6088d9da988368798556284f84562e572df9e61e7fd4e2\",\r\n urls = [\r\n \"https://github.com/google/sentencepiece/archive/refs/tags/v0.1.99.zip\",\r\n ],\r\n build_file = \"//third_party/sentencepiece:BUILD\",\r\n )\r\n\r\nhttp_archive(\r\n name = \"com_google_glog\",\r\n sha256 = \"1ee310e5d0a19b9d584a855000434bb724aa744745d5b8ab1855c85bff8a8e21\",\r\n strip_prefix = \"glog-028d37889a1e80e8a07da1b8945ac706259e5fd8\",\r\n urls = [\r\n \"https://mirror.bazel.build/github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz\",\r\n \"https://github.com/google/glog/archive/028d37889a1e80e8a07da1b8945ac706259e5fd8.tar.gz\",\r\n ],\r\n )\r\n\r\nhttp_archive(\r\n name = \"darts_clone\",\r\n build_file = \"//third_party/darts_clone:BUILD\",\r\n sha256 = \"c97f55d05c98da6fcaf7f9ecc6a6dc6bc5b18b8564465f77abff8879d446491c\",\r\n strip_prefix = \"darts-clone-e40ce4627526985a7767444b6ed6893ab6ff8983\",\r\n urls = [\r\n \"https://github.com/s-yata/darts-clone/archive/e40ce4627526985a7767444b6ed6893ab6ff8983.zip\",\r\n ],\r\n )\r\n```\r\nthe complete error log is attached. Do you have any suggestions for these eigen related errors?", "Hi @broken,\r\n\r\ni am not getting eigen issues now, I had to hide /usr/include/Eigen and that solved the error. 
But i am getting some weird sentencepiece errors as below\r\n\r\n```\r\nERROR: /home/.cache/bazel/_bazel/716ac13c348ce3335128b3d9f4131682/external/com_google_sentencepiece/BUILD.bazel:49:11: Compiling src/error.cc failed: (Exit 1): aarch64-none-linux-gnu-gcc failed: error executing command (from target @com_google_sentencepiece//:sentencepiece_processor) /home/.cache/bazel/_bazel_716ac13c348ce3335128b3d9f4131682/external/aarch64_linux_toolchain/bin/aarch64-none-linux-gnu-gcc -fstack-protector -g0 -O2 -DNDEBUG -ffunction-sections -fdata-sections ... (remaining 77 arguments skipped)\r\nIn file included from external/com_google_sentencepiece/src/init.h:25,\r\n from external/com_google_sentencepiece/src/error.cc:18:\r\nexternal/com_google_sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h:79:8: error: redefinition of 'struct google::protobuf::internal::ConstantInitialized'\r\n 79 | struct ConstantInitialized {\r\n | ^~~~~~~~~~~~~~~~~~~\r\nIn file included from external/com_google_protobuf/src/google/protobuf/io/coded_stream.h:134,\r\n from external/com_google_sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h:47,\r\n from external/com_google_sentencepiece/src/init.h:25,\r\n from external/com_google_sentencepiece/src/error.cc:18:\r\nexternal/com_google_protobuf/src/google/protobuf/port.h:64:8: note: previous definition of 'struct google::protobuf::internal::ConstantInitialized'\r\n 64 | struct ConstantInitialized {\r\n | ^~~~~~~~~~~~~~~~~~~\r\nIn file included from external/com_google_sentencepiece/src/init.h:25,\r\n from external/com_google_sentencepiece/src/error.cc:18:\r\nexternal/com_google_sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h:154:1: error: 'PROTOBUF_DISABLE_MSVC_UNION_WARNING' does not name a type\r\n 154 | PROTOBUF_DISABLE_MSVC_UNION_WARNING\r\n | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\nexternal/com_google_sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h:166:1: error: 'PROTOBUF_ENABLE_MSVC_UNION_WARNING' does not name a type\r\n 166 | PROTOBUF_ENABLE_MSVC_UNION_WARNING\r\n | ^~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~\r\nexternal/com_google_sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h: In function 'constexpr const string& google::protobuf::internal::GetEmptyStringAlreadyInited()':\r\nexternal/com_google_sentencepiece/third_party/protobuf-lite/google/protobuf/message_lite.h:174:10: error: 'fixed_address_empty_string' was not declared in this scope\r\n 174 | return fixed_address_empty_string.value;\r\n | ^~~~~~~~~~~~~~~~~~~~~~~~~~\r\nTarget //tensorflow/lite/tools/benchmark:benchmark_model_plus_flex failed to build\r\nUse --verbose_failures to see the command lines of failed build steps.\r\nINFO: Elapsed time: 339.254s, Critical Path: 74.85s\r\nINFO: 473 processes: 9 internal, 464 local.\r\nFAILED: Build did NOT complete successfully\r\n```\r\nany suggestions?" ]
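Separate from the Bazel build work discussed in the thread above, a quick way to sanity-check that the model's TF Text ops (such as `RegexSplitWithOffsets`) resolve at all is the Python TFLite interpreter that ships with the full `tensorflow` pip package, which includes Select TF ops support; importing `tensorflow_text` registers its kernels in the same process. This is a hedged sketch under those assumptions, with the model path as a placeholder, not a step taken in the thread.

```python
# Hedged sanity check: op resolution for a flex + TF Text model in Python.
import tensorflow as tf
import tensorflow_text  # noqa: F401  (side effect: registers TF Text ops, e.g. RegexSplitWithOffsets)

interpreter = tf.lite.Interpreter(model_path="autocomplete.tflite")  # placeholder path
interpreter.allocate_tensors()  # raises here if any op/kernel is still unresolved
print([d["name"] for d in interpreter.get_input_details()])
print([d["name"] for d in interpreter.get_output_details()])
```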
2023-11-30T02:33:49
2024-01-26T01:46:24
null
NONE
null
null
null
### System information - **Have I written custom code (as opposed to using a stock example script provided in TensorFlow)**: No - **OS Platform and Distribution (e.g., Linux Ubuntu 16.04)**: aarch64 linux - **TensorFlow installed from (source or binary)**: binary - **TensorFlow version (use command below)**: tf 2.15 - **Python version**: python3.10.9 - **Exact command to reproduce**: `linux_aarch64_benchmark_model --graph=autocomplete.tflite --num_threads=1 --num_runs=10` ### Describe the problem I am using an aarch64 device like raspberry pi. I created the gpt2 autocomplete.tflite model using the official colab tutorial https://colab.research.google.com/github/tensorflow/codelabs/blob/main/KerasNLP/io2023_workshop.ipynb#scrollTo=uLsz2IcN46eb I was able to create both the quantized and unquantized tflite model. I then tried to benchmark these models on the aarch64 device using the official nightly tflite benchmark model linux_aarch64 using tf 2.15 sourced from https://www.tensorflow.org/lite/performance/measurement#native_benchmark_binary On running the benchmark model using the command: `linux_aarch64_benchmark_model --graph=autocomplete.tflite --num_threads=1 --num_runs=10` I get benchmarking failure errors. I am running the model on CPU itself but it seems the ops are unsupported. I get same benchmarking failure on running the unquantized version of it as well. The logs are below. > root@user:~# ./linux_aarch64_benchmark_model --graph=autocomplete.tflite --num_threads=1 --num_runs=10 > INFO: STARTING! > INFO: Log parameter values verbosely: [0] > INFO: Num threads: [4] > INFO: Graph: [autocomplete.tflite] > INFO: #threads used for CPU inference: [4] > INFO: Loaded model autocomplete.tflite > INFO: Created TensorFlow Lite XNNPACK delegate for CPU. > ERROR: Select TensorFlow op(s), included in the given model, is(are) not support ed by this interpreter. Make sure you apply/link the Flex delegate before infere nce. For the Android, it can be resolved by adding "org.tensorflow:tensorflow-li te-select-tf-ops" dependency. See instructions: https://www.tensorflow.org/lite/ guide/ops_select > ERROR: Node number 2 (FlexMutableHashTableV2) failed to prepare. > ERROR: Select TensorFlow op(s), included in the given model, is(are) not support ed by this interpreter. Make sure you apply/link the Flex delegate before infere nce. For the Android, it can be resolved by adding "org.tensorflow:tensorflow-li te-select-tf-ops" dependency. See instructions: https://www.tensorflow.org/lite/ guide/ops_select > ERROR: Node number 2 (FlexMutableHashTableV2) failed to prepare. > ERROR: Failed to allocate tensors! > ERROR: Benchmarking failed. How do I benchmark this model correctly? Since its a tflite model I should be able to benchmark it using tflite benchmark model. On trying the model with flex delegate benchmark model, it doesn't seem like benchmarking is failing but it does give a lot of error messages along with the INFO last line giving some avg values and exits soon after. the logs are below: > root@user:~# linux_aarch64_benchmark_model_plus_flex --graph=autocomplete.tflite --num_threads=1 > INFO: STARTING! > INFO: Log parameter values verbosely: [0] > INFO: Num threads: [1] > INFO: Graph: [autocomplete.tflite] > INFO: #threads used for CPU inference: [1] > INFO: Loaded model autocomplete.tflite > INFO: Created TensorFlow Lite delegate for select TF ops. > INFO: TfLiteFlexDelegate delegate: 29 nodes delegated out of 1139 nodes with 14 partitions. 
> > ERROR: Op type not registered 'RegexSplitWithOffsets' in binary running on device. Make sure the Op and Kernel are registered in the binary running in this process. Note that if you are loading a saved graph which used ops from tf.contrib (e.g. `tf.contrib.resampler`), accessing should be done before importing the graph, as contrib ops are lazily registered when the module is first accessed. > ERROR: Op type not registered 'RegexSplitWithOffsets' in binary running on device. Make sure the Op and Kernel are registered in the binary running in this process. Note that if you are loading a saved graph which used ops from tf.contrib (e.g. `tf.contrib.resampler`), accessing should be done before importing the graph, as contrib ops are lazily registered when the module is first accessed. > INFO: Created TensorFlow Lite XNNPACK delegate for CPU. > WARNING: Attempting to use a delegate that only supports static-sized tensors with a graph that has dynamic-sized tensors (tensor#243 is a dynamic-sized tensor). > INFO: The input model file size (MB): 129.674 > INFO: Initialized session in 131.484ms. > INFO: Running benchmark for at least 1 iterations and at least 0.5 seconds but terminate if exceeding 150 seconds. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. 
> ERROR: Delegate kernel was not initialized > ERROR: Node number 1139 (TfLiteFlexDelegate) failed to prepare. > INFO: count=113 first=216495 curr=60356 min=22 max=216495 avg=4851.66 std=26505 >
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62506/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62506/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62505
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62505/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62505/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62505/events
https://github.com/tensorflow/tensorflow/pull/62505
2,017,713,023
PR_kwDOArmXAs5gusSn
62,505
Create a property to get a dict with the names and layers of the model.
{ "login": "CaioWing", "id": 98857812, "node_id": "U_kgDOBeRzVA", "avatar_url": "https://avatars.githubusercontent.com/u/98857812?v=4", "gravatar_id": "", "url": "https://api.github.com/users/CaioWing", "html_url": "https://github.com/CaioWing", "followers_url": "https://api.github.com/users/CaioWing/followers", "following_url": "https://api.github.com/users/CaioWing/following{/other_user}", "gists_url": "https://api.github.com/users/CaioWing/gists{/gist_id}", "starred_url": "https://api.github.com/users/CaioWing/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/CaioWing/subscriptions", "organizations_url": "https://api.github.com/users/CaioWing/orgs", "repos_url": "https://api.github.com/users/CaioWing/repos", "events_url": "https://api.github.com/users/CaioWing/events{/privacy}", "received_events_url": "https://api.github.com/users/CaioWing/received_events", "type": "User", "site_admin": false }
[ { "id": 1097546578, "node_id": "MDU6TGFiZWwxMDk3NTQ2NTc4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:keras", "name": "comp:keras", "color": "0052cc", "default": false, "description": "Keras related issues" }, { "id": 1169364259, "node_id": "MDU6TGFiZWwxMTY5MzY0MjU5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:XS", "name": "size:XS", "color": "adafea", "default": false, "description": "CL Change Size: Extra Small" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for your pull request! It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nView this [failed invocation](https://github.com/tensorflow/tensorflow/pull/62505/checks?check_run_id=19162464222) of the CLA check for more information.\n\nFor the most up to date status, view the checks section at the bottom of the pull request.", "Hi @CaioWing It looks like your PR relates to the Keras component. Please submit it to the github.com/keras-team/keras repository instead. Thankyou.\r\n@fchollet, @qlzh727" ]
2023-11-30T01:39:37
2023-11-30T04:37:39
2023-11-30T04:37:37
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62505", "html_url": "https://github.com/tensorflow/tensorflow/pull/62505", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62505.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62505.patch", "merged_at": null }
Trying to solve the *TODO* from fchollet: "We could build a dictionary based on layer names since they are constant, but we have not done that yet." I'd just add a new property to the Model class.
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62505/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62505/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62504
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62504/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62504/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62504/events
https://github.com/tensorflow/tensorflow/issues/62504
2,017,277,371
I_kwDOArmXAs54PTW7
62,504
MemoryError: std::bad_alloc when calling tf.squeeze with a floating tensor axis
{ "login": "drewshark", "id": 128925028, "node_id": "U_kgDOB689ZA", "avatar_url": "https://avatars.githubusercontent.com/u/128925028?v=4", "gravatar_id": "", "url": "https://api.github.com/users/drewshark", "html_url": "https://github.com/drewshark", "followers_url": "https://api.github.com/users/drewshark/followers", "following_url": "https://api.github.com/users/drewshark/following{/other_user}", "gists_url": "https://api.github.com/users/drewshark/gists{/gist_id}", "starred_url": "https://api.github.com/users/drewshark/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/drewshark/subscriptions", "organizations_url": "https://api.github.com/users/drewshark/orgs", "repos_url": "https://api.github.com/users/drewshark/repos", "events_url": "https://api.github.com/users/drewshark/events{/privacy}", "received_events_url": "https://api.github.com/users/drewshark/received_events", "type": "User", "site_admin": false }
[ { "id": 390482148, "node_id": "MDU6TGFiZWwzOTA0ODIxNDg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/awaiting%20review", "name": "awaiting review", "color": "bc3869", "default": false, "description": "Pull request awaiting review" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
open
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Hello, I'm glad you posted this issue. I can try to give you a hand.\r\n\r\nFirst thing first, I would like to better clarify the `tf.squeeze()` function. According to the [documentation](https://www.tensorflow.org/api_docs/python/tf/squeeze), the `tf.squeeze()` function is used to remove dimensions of size 1 from the shape of a tensor. For instance, if you have a tensor `t` with shape `[batch_num, 1, elem_num]`, then `tf.squeeze(t)` will return a tensor with shape `[batch_num, elem_num]`. You can also specify which axis (or axes) to squeeze by passing the `axis` argument.\r\n\r\nHowever, the `axis` argument MUST be integers, not floats. This is because the axis represent the indices of the dimensions to squeeze, and indices can only be integers.\r\n\r\nIn your specific case, the `input` tensor has shape `(3,)`, not `(1, 3)`. Therefore, it doesn't have any dimension of size 1, and you cannot squeeze it at all. The axis argument is irrelevant. I hope this solves your problem. I wish you a wonderful day!\r\n\r\nPost Scriptum: to better check yourself, you can change your code this way:\r\n```import tensorflow as tf\r\ninput = tf.constant([1,2,3], dtype='float32')\r\nout = tf.squeeze(input)\r\n\r\nprint(input.shape)\r\nprint(out.shape)``` ", "Hi @dante-tech \r\n\r\nThanks for your explanation. \r\nMy further inspection finds that the issue is not related to the data type of axis. Instead, this issue occurs as long as I set the axis to be a tensor. For instance, even though I set the axis data type to integer, this issue still occurs:\r\n\r\n```\r\nimport tensorflow as tf\r\ninput = tf.constant([[1,2,3]], dtype='float32')\r\naxis = tf.constant(1, dtype='int32')\r\nout = tf.squeeze(input,axis)\r\n```\r\n\r\nerror message:\r\n```\r\n/usr/local/lib/python3.10/dist-packages/tensorflow/python/ops/gen_array_ops.py in squeeze(input, axis, name)\r\n 10613 if tld.is_eager:\r\n 10614 try:\r\n> 10615 _result = pywrap_tfe.TFE_Py_FastPathExecute(\r\n 10616 _ctx, \"Squeeze\", name, input, \"squeeze_dims\", axis)\r\n 10617 return _result\r\n\r\nMemoryError: std::bad_alloc\r\n```\r\n\r\n\r\n", "Hi @drewshark ,\r\n\r\nThe behaviour is due to the value passed to axis argument. The axis argument expects it as int or list of ints as per [documentation](https://www.tensorflow.org/api_docs/python/tf/squeeze#:~:text=input%20to%20squeeze.-,axis,to%20%5B%5D.%20If%20specified%2C%20only%20squeezes%20the%20dimensions%20listed.%20The,-dimension%20index%20starts).\r\n\r\nYeah, this error is related to Axis argument, but not related to input shape. If you pass invalid shapes or shapes that don't have dim '1' then it will raise intended exception. I have attached a [gist](https://colab.sandbox.google.com/gist/SuryanarayanaY/06d4e539682780b4326a4f75f654a29e/62504.ipynb#scrollTo=NeGfeG5Jnhfn) for reference.\r\n\r\nI will submit a probable fix for this to type check and raise exception.", "Hello @drewshark, \r\n\r\nYes, you are correct. Is there anything else I can help you with? " ]
2023-11-29T19:27:17
2023-11-30T11:27:18
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version tf 2.14.0, tf-nightly ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? When tf.squeeze is called with the axis argument given as a floating-point tensor, it raises a MemoryError. A MemoryError looks dangerous here; proper validation of the axis argument would be better. ### Standalone code to reproduce the issue ```shell import tensorflow as tf input = tf.constant([1,2,3], dtype='float32') axis = tf.constant(1.0, dtype='float32') out = tf.squeeze(input,axis) ``` ### Relevant log output ```shell /usr/local/lib/python3.10/dist-packages/tensorflow/python/ops/gen_array_ops.py in squeeze(input, axis, name) 10614 if tld.is_eager: 10615 try: > 10616 _result = pywrap_tfe.TFE_Py_FastPathExecute( 10617 _ctx, "Squeeze", name, input, "squeeze_dims", axis) 10618 return _result MemoryError: std::bad_alloc ```
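For context, here is a minimal sketch (not part of the original report) of the caller-side guard implied by the discussion above: convert the axis to a plain Python integer before it reaches `tf.squeeze`, since passing any tensor (float or int) as `axis` hits the crashing fast path. The helper name `safe_squeeze` is illustrative only and assumes eager mode.

```python
import tensorflow as tf

def safe_squeeze(x, axis=None):
    """Illustrative wrapper: only ever hand tf.squeeze a plain integer axis."""
    if axis is None:
        return tf.squeeze(x)
    if isinstance(axis, tf.Tensor):
        if not axis.dtype.is_integer:
            raise TypeError(f"axis must be an integer, got dtype {axis.dtype.name}")
        axis = int(axis.numpy())  # assumes eager mode
    return tf.squeeze(x, axis=[int(axis)])

x = tf.constant([[1.0, 2.0, 3.0]])            # shape (1, 3)
print(safe_squeeze(x, tf.constant(0)).shape)  # -> (3,)
```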
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62504/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62504/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62503
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62503/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62503/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62503/events
https://github.com/tensorflow/tensorflow/issues/62503
2,017,199,812
I_kwDOArmXAs54PAbE
62,503
File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/google/protobuf/internal/containers.py", line 70, in __getitem__ return self._values[key] TypeError: list indices must be integers or slices, not str
{ "login": "monajalal", "id": 1892917, "node_id": "MDQ6VXNlcjE4OTI5MTc=", "avatar_url": "https://avatars.githubusercontent.com/u/1892917?v=4", "gravatar_id": "", "url": "https://api.github.com/users/monajalal", "html_url": "https://github.com/monajalal", "followers_url": "https://api.github.com/users/monajalal/followers", "following_url": "https://api.github.com/users/monajalal/following{/other_user}", "gists_url": "https://api.github.com/users/monajalal/gists{/gist_id}", "starred_url": "https://api.github.com/users/monajalal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/monajalal/subscriptions", "organizations_url": "https://api.github.com/users/monajalal/orgs", "repos_url": "https://api.github.com/users/monajalal/repos", "events_url": "https://api.github.com/users/monajalal/events{/privacy}", "received_events_url": "https://api.github.com/users/monajalal/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "@monajalal Please make sure that the key variable is an integer or a slice before passing it to `__getitem__` and try with the latest stable TF version 2.15. Please let us know the outcome?\r\nThank you!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62503\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62503\">No</a>\n" ]
2023-11-29T18:41:21
2023-12-15T01:49:12
2023-12-15T01:49:09
NONE
null
null
null
### Issue type Bug Please note I saw this related issue but there was no response to it https://github.com/tensorflow/tensorflow/issues/32694 ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version nvidia-tensorflow==1.15.4+nv20.12 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04 ### Mobile device _No response_ ### Python version Python 3.8.18 (default, Sep 11 2023, 13:40:15) ### Bazel version _No response_ ### GCC/compiler version GCC 11.2.0 ### CUDA/cuDNN version check below ### GPU model and memory ADA RTX 6000 ### Current behavior? (EfficientPose) mona@ada:~/EfficientPose$ python evaluate.py --phi 0 --weights weights/Weights/Linemod/object_8/phi_0_linemod_best_ADD.h5 --validation-image-save-path val_imgs linemod data/Linemod_preprocessed/ --object-id 8 2023-11-29 13:29:34.439724: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0 WARNING:tensorflow:Deprecation warnings have been disabled. Set TF_ENABLE_DEPRECATION_WARNINGS=1 to re-enable them. WARNING:tensorflow:From evaluate.py:132: The name tf.ConfigProto is deprecated. Please use tf.compat.v1.ConfigProto instead. WARNING:tensorflow:From evaluate.py:134: The name tf.Session is deprecated. Please use tf.compat.v1.Session instead. 2023-11-29 13:29:35.373875: I tensorflow/core/platform/profile_utils/cpu_utils.cc:94] CPU Frequency: 3096000000 Hz 2023-11-29 13:29:35.376509: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x2d38cf0 initialized for platform Host (this does not guarantee that XLA will be used). Devices: 2023-11-29 13:29:35.376534: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): Host, Default Version 2023-11-29 13:29:35.378180: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-29 13:29:35.428770: I tensorflow/compiler/xla/service/service.cc:168] XLA service 0x2635d90 initialized for platform CUDA (this does not guarantee that XLA will be used). 
Devices: 2023-11-29 13:29:35.428838: I tensorflow/compiler/xla/service/service.cc:176] StreamExecutor device (0): NVIDIA RTX 6000 Ada Generation, Compute Capability 8.9 2023-11-29 13:29:35.429576: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1665] Found device 0 with properties: name: NVIDIA RTX 6000 Ada Generation major: 8 minor: 9 memoryClockRate(GHz): 2.505 pciBusID: 0000:52:00.0 2023-11-29 13:29:35.429626: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0 2023-11-29 13:29:35.451229: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11 2023-11-29 13:29:35.454294: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10 2023-11-29 13:29:35.454582: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10 2023-11-29 13:29:35.455238: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.11 2023-11-29 13:29:35.456550: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11 2023-11-29 13:29:35.456820: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8 2023-11-29 13:29:35.457185: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1793] Adding visible gpu devices: 0 2023-11-29 13:29:35.457221: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0 2023-11-29 13:29:35.461017: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1206] Device interconnect StreamExecutor with strength 1 edge matrix: 2023-11-29 13:29:35.461038: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1212] 0 2023-11-29 13:29:35.461047: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1225] 0: N 2023-11-29 13:29:35.461327: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1351] Created TensorFlow device (/job:localhost/replica:0/task:0/device:GPU:0 with 39328 MB memory) -> physical GPU (device: 0, name: NVIDIA RTX 6000 Ada Generation, pci bus id: 0000:52:00.0, compute capability: 8.9) {'dataset_type': 'linemod', 'rotation_representation': 'axis_angle', 'weights': 'weights/Weights/Linemod/object_8/phi_0_linemod_best_ADD.h5', 'batch_size': 1, 'phi': 0, 'gpu': None, 'score_threshold': 0.5, 'validation_image_save_path': 'val_imgs', 'linemod_path': 'data/Linemod_preprocessed/', 'object_id': 8} Creating the Generators... Done! Building the Model... 
Traceback (most recent call last): File "evaluate.py", line 368, in <module> main() File "evaluate.py", line 111, in main _, prediction_model, _ = build_EfficientPose(args.phi, File "/home/mona/EfficientPose/model.py", line 99, in build_EfficientPose image_input = layers.Input(input_shape) File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/keras/engine/input_layer.py", line 265, in Input input_layer = InputLayer(**input_layer_config) File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/keras/engine/input_layer.py", line 121, in __init__ input_tensor = backend.placeholder( File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/keras/backend.py", line 1051, in placeholder x = array_ops.placeholder(dtype, shape=shape, name=name) File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/ops/array_ops.py", line 2619, in placeholder return gen_array_ops.placeholder(dtype=dtype, shape=shape, name=name) File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/ops/gen_array_ops.py", line 6668, in placeholder _, _, _op = _op_def_lib._apply_op_helper( File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/framework/op_def_library.py", line 792, in _apply_op_helper op = g.create_op(op_type_name, inputs, dtypes=None, name=scope, File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/util/deprecation.py", line 513, in new_func return func(*args, **kwargs) File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/framework/ops.py", line 3356, in create_op return self._create_op_internal(op_type, inputs, dtypes, input_types, name, File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/framework/ops.py", line 3411, in _create_op_internal node_def = _NodeDef(op_type, name, device=None, attrs=attrs) File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/tensorflow_core/python/framework/ops.py", line 1552, in _NodeDef node_def.attr[k].CopyFrom(v) File "/home/mona/anaconda3/envs/EfficientPose/lib/python3.8/site-packages/google/protobuf/internal/containers.py", line 70, in __getitem__ return self._values[key] TypeError: list indices must be integers or slices, not str ### Standalone code to reproduce the issue ```shell EfficientPose Repo also here is my environment.yml for conda env (EfficientPose) mona@ada:~/EfficientPose$ cat environment.yml name: EfficientPose channels: - defaults dependencies: - _libgcc_mutex=0.1=main - _openmp_mutex=5.1=1_gnu - ca-certificates=2023.08.22=h06a4308_0 - ld_impl_linux-64=2.38=h1181459_1 - libffi=3.4.4=h6a678d5_0 - libgcc-ng=11.2.0=h1234567_1 - libgomp=11.2.0=h1234567_1 - libstdcxx-ng=11.2.0=h1234567_1 - ncurses=6.4=h6a678d5_0 - openssl=3.0.12=h7f8727e_0 - pip=23.3.1=py38h06a4308_0 - python=3.8.18=h955ad1f_0 - pyyaml=6.0.1=py38h5eee18b_0 - readline=8.2=h5eee18b_0 - setuptools=68.0.0=py38h06a4308_0 - sqlite=3.41.2=h5eee18b_0 - tk=8.6.12=h1ccaba5_0 - wheel=0.41.2=py38h06a4308_0 - xz=5.4.2=h5eee18b_0 - yaml=0.2.5=h7b6447c_0 - zlib=1.2.13=h5eee18b_0 - pip: - absl-py==2.0.0 - astor==0.8.1 - contourpy==1.1.1 - cycler==0.12.1 - cython==3.0.6 - fonttools==4.45.1 - gast==0.2.2 - google-pasta==0.2.0 - grpcio==1.59.3 - h5py==3.10.0 - imageio==2.33.0 - imgaug==0.4.0 - importlib-metadata==6.8.0 - 
importlib-resources==6.1.1 - keras-applications==1.0.8 - keras-preprocessing==1.1.2 - kiwisolver==1.4.5 - lazy-loader==0.3 - markdown==3.5.1 - markupsafe==2.1.3 - matplotlib==3.7.4 - networkx==3.1 - numpy==1.24.4 - nvidia-cublas==11.3.0.106 - nvidia-cuda-cupti==11.1.105 - nvidia-cuda-nvcc==11.1.105 - nvidia-cuda-nvrtc==11.1.105 - nvidia-cuda-runtime==11.1.74 - nvidia-cudnn==8.0.5.43 - nvidia-cufft==10.3.0.105 - nvidia-curand==10.2.2.105 - nvidia-cusolver==11.0.1.105 - nvidia-cusparse==11.3.0.10 - nvidia-dali-cuda110==0.28.0 - nvidia-dali-nvtf-plugin==0.28.0+nv20.12 - nvidia-nccl==2.8.3 - nvidia-pyindex==1.0.9 - nvidia-tensorboard==1.15.0+nv20.12 - nvidia-tensorflow==1.15.4+nv20.12 - nvidia-tensorrt==7.2.2.1 - opencv-python==4.8.1.78 - opt-einsum==3.3.0 - packaging==23.2 - pillow==10.1.0 - plyfile==1.0.2 - protobuf==4.25.1 - pyparsing==3.1.1 - python-dateutil==2.8.2 - pywavelets==1.4.1 - scikit-image==0.21.0 - scipy==1.10.1 - shapely==2.0.2 - six==1.16.0 - tensorboard==1.15.0 - tensorflow-estimator==1.15.1 - termcolor==2.3.0 - tifffile==2023.7.10 - typeguard==4.1.5 - typing-extensions==4.8.0 - webencodings==0.5.1 - werkzeug==3.0.1 - wrapt==1.16.0 - zipp==3.17.0 prefix: /home/mona/anaconda3/envs/EfficientPose ``` ### Relevant log output ```shell (base) mona@ada:~$ lsb_release -a LSB Version: core-11.1.0ubuntu4-noarch:security-11.1.0ubuntu4-noarch Distributor ID: Ubuntu Description: Ubuntu 22.04.3 LTS Release: 22.04 Codename: jammy (base) mona@ada:~$ uname -a Linux ada 6.2.0-37-generic #38~22.04.1-Ubuntu SMP PREEMPT_DYNAMIC Thu Nov 2 18:01:13 UTC 2 x86_64 x86_64 x86_64 GNU/Linux (base) mona@ada:~$ nvcc --version nvcc: NVIDIA (R) Cuda compiler driver Copyright (c) 2005-2022 NVIDIA Corporation Built on Wed_Sep_21_10:33:58_PDT_2022 Cuda compilation tools, release 11.8, V11.8.89 Build cuda_11.8.r11.8/compiler.31833905_0 (base) mona@ada:~$ nvidia-smi Wed Nov 29 13:41:11 2023 +---------------------------------------------------------------------------------------+ | NVIDIA-SMI 535.104.12 Driver Version: 535.104.12 CUDA Version: 12.2 | |-----------------------------------------+----------------------+----------------------+ | GPU Name Persistence-M | Bus-Id Disp.A | Volatile Uncorr. ECC | | Fan Temp Perf Pwr:Usage/Cap | Memory-Usage | GPU-Util Compute M. | | | | MIG M. | |=========================================+======================+======================| | 0 NVIDIA RTX 6000 Ada Gene... 
On | 00000000:52:00.0 On | Off | | 32% 61C P2 75W / 300W | 7324MiB / 49140MiB | 5% Default | | | | N/A | +-----------------------------------------+----------------------+----------------------+ +---------------------------------------------------------------------------------------+ | Processes: | | GPU GI CI PID Type Process name GPU Memory | | ID ID Usage | |=======================================================================================| | 0 N/A N/A 2317 G /usr/lib/xorg/Xorg 740MiB | | 0 N/A N/A 2519 G /usr/bin/gnome-shell 57MiB | | 0 N/A N/A 2994 G ...AAAAAAAACAAAAAAAAAA= --shared-files 93MiB | | 0 N/A N/A 25264 G ...0208189,17325718055376231948,262144 60MiB | | 0 N/A N/A 652962 G ...irefox/3358/usr/lib/firefox/firefox 266MiB | | 0 N/A N/A 703622 G blender 205MiB | | 0 N/A N/A 829624 G /usr/bin/gnome-control-center 79MiB | | 0 N/A N/A 837524 C python 844MiB | | 0 N/A N/A 842408 G ...sion,SpareRendererForSitePerProcess 106MiB | | 0 N/A N/A 847224 C python 1046MiB | | 0 N/A N/A 855952 C python 984MiB | | 0 N/A N/A 856952 C python 914MiB | | 0 N/A N/A 857675 C python 730MiB | | 0 N/A N/A 1068492 G meshlab 12MiB | | 0 N/A N/A 1118791 C python 1046MiB | +---------------------------------------------------------------------------------------+ ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62503/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62503/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62502
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62502/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62502/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62502/events
https://github.com/tensorflow/tensorflow/pull/62502
2,016,925,331
PR_kwDOArmXAs5gr_lV
62,502
Register half and bfloat16 support for tf.truncatemod
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1169364259, "node_id": "MDU6TGFiZWwxMTY5MzY0MjU5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:XS", "name": "size:XS", "color": "adafea", "default": false, "description": "CL Change Size: Extra Small" }, { "id": 1178505529, "node_id": "MDU6TGFiZWwxMTc4NTA1NTI5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/prtype:bugfix", "name": "prtype:bugfix", "color": "159b2e", "default": false, "description": "PR to fix a bug" }, { "id": 1478826728, "node_id": "MDU6TGFiZWwxNDc4ODI2NzI4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:core", "name": "comp:core", "color": "024391", "default": false, "description": "issues related to core part of tensorflow" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @SuryanarayanaY Can you please check @gaurides's [comments](https://github.com/tensorflow/tensorflow/pull/62502#pullrequestreview-1756354941) ? Thank you!", "Hi @SuryanarayanaY Any update on this PR? Please. Thank you!", "Hi @SuryanarayanaY Any update on this PR? Please. Thank you!", "This PR is stale because it has been open for 14 days with no activity. It will be closed if no further activity occurs. Thank you.", "This PR was closed because it has been inactive for 14 days since being marked as stale. Please reopen if you'd like to work on this further." ]
2023-11-29T16:05:20
2024-02-08T19:59:38
2024-02-07T01:46:34
COLLABORATOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62502", "html_url": "https://github.com/tensorflow/tensorflow/pull/62502", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62502.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62502.patch", "merged_at": null }
As per the documentation of `tf.truncatemod`, this op should support the `{int32, int64, bfloat16, half, float, double}` dtypes, but it does not actually support the `half` and `bfloat16` dtypes. As per `math_ops.cc`, the op is registered for the dtypes below. https://github.com/tensorflow/tensorflow/blob/74866075411bd9444246e16a79429b852e4db31c/tensorflow/core/ops/math_ops.cc#L622-L626. However, no kernel registrations exist for the `half` and `bfloat16` dtypes, so this PR adds those types to the kernel registration, assuming `functor::fmod` can handle them. May fix #62070
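For reference, a quick probe script (a sketch, not part of this PR) can make the gap visible from Python by calling `tf.truncatemod` once per documented dtype; the exact error type raised for a missing kernel can vary, so it catches the generic `tf.errors.OpError`.

```python
import tensorflow as tf

# Dtypes listed in the tf.truncatemod documentation.
dtypes = [tf.int32, tf.int64, tf.bfloat16, tf.float16, tf.float32, tf.float64]

for dtype in dtypes:
    x = tf.constant([7], dtype=dtype)
    y = tf.constant([5], dtype=dtype)
    try:
        tf.truncatemod(x, y)
        print(f"{dtype.name}: kernel found")
    except tf.errors.OpError as e:
        print(f"{dtype.name}: failed ({type(e).__name__})")
```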
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62502/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62502/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62501
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62501/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62501/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62501/events
https://github.com/tensorflow/tensorflow/issues/62501
2,016,888,731
I_kwDOArmXAs54N0eb
62,501
Please bring back native Windows CUDA support!
{ "login": "pass-lin", "id": 62837036, "node_id": "MDQ6VXNlcjYyODM3MDM2", "avatar_url": "https://avatars.githubusercontent.com/u/62837036?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pass-lin", "html_url": "https://github.com/pass-lin", "followers_url": "https://api.github.com/users/pass-lin/followers", "following_url": "https://api.github.com/users/pass-lin/following{/other_user}", "gists_url": "https://api.github.com/users/pass-lin/gists{/gist_id}", "starred_url": "https://api.github.com/users/pass-lin/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pass-lin/subscriptions", "organizations_url": "https://api.github.com/users/pass-lin/orgs", "repos_url": "https://api.github.com/users/pass-lin/repos", "events_url": "https://api.github.com/users/pass-lin/events{/privacy}", "received_events_url": "https://api.github.com/users/pass-lin/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1188421838, "node_id": "MDU6TGFiZWwxMTg4NDIxODM4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:windows", "name": "subtype:windows", "color": "b619ea", "default": false, "description": "Windows Build/Installation Issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "This is a duplicate of #59918", "@pass-lin , Could you please close this issue and track the progress in above linked issue since it has more visibility. Thanks!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62501\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62501\">No</a>\n" ]
2023-11-29T15:46:52
2023-12-16T01:48:25
2023-12-16T01:48:22
NONE
null
null
null
### Issue type Feature Request ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version tf2.15 ### Custom code Yes ### OS platform and distribution windows ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I want to use Keras 3.0 on Windows, but it only supports tensorflow1.15, which can't use the GPU ### Standalone code to reproduce the issue ```shell tf.test ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62501/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62501/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62500
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62500/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62500/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62500/events
https://github.com/tensorflow/tensorflow/issues/62500
2,016,609,467
I_kwDOArmXAs54MwS7
62,500
/usr/bin/env: 'python3': No such file or directory
{ "login": "adamjstewart", "id": 12021217, "node_id": "MDQ6VXNlcjEyMDIxMjE3", "avatar_url": "https://avatars.githubusercontent.com/u/12021217?v=4", "gravatar_id": "", "url": "https://api.github.com/users/adamjstewart", "html_url": "https://github.com/adamjstewart", "followers_url": "https://api.github.com/users/adamjstewart/followers", "following_url": "https://api.github.com/users/adamjstewart/following{/other_user}", "gists_url": "https://api.github.com/users/adamjstewart/gists{/gist_id}", "starred_url": "https://api.github.com/users/adamjstewart/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/adamjstewart/subscriptions", "organizations_url": "https://api.github.com/users/adamjstewart/orgs", "repos_url": "https://api.github.com/users/adamjstewart/repos", "events_url": "https://api.github.com/users/adamjstewart/events{/privacy}", "received_events_url": "https://api.github.com/users/adamjstewart/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 1205615612, "node_id": "MDU6TGFiZWwxMjA1NjE1NjEy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:%20ubuntu/linux", "name": "subtype: ubuntu/linux", "color": "b619ea", "default": false, "description": "Ubuntu/Linux Build/Installation Issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Hello, thank you for having the time to submitting this issue. One possible way to solve this problem is by installing \"python-is-python3\" in Ubuntu by running the code `sudo apt update; sudo apt install python-is-python3`. This solved the issue for me. However, even if it worked for me, I can't assure it will work for you as well. I'm just providing you with educational information, I take no responsability. Please, remember that changing the default Python version may have unintended consequences for your system.\r\n\r\nDo research on your own before taking action on anything. Here are some sources you can check out that helped me solve this problem myself: [1](https://askubuntu.com/questions/1296790/python-is-python3-package-in-ubuntu-20-04-what-is-it-and-what-does-it-actually); [2](https://linuxpip.org/python-is-python3/); [3](https://askubuntu.com/questions/1440678/purpose-of-python-is-python3).\r\n\r\nI hope this serves you well, I wish you a wonderful day!\r\n", "I don't have sudo privileges on this system so unfortunately that's not an option for me. ", "I see. Since you don't have sudo privileges on the system, I think a valid alternative would be to create a virtual environment using the following commands in a terminal: `python3 -m venv venv source venv/bin/activate`. This will create a virtual environment called venv and activate it. You can then install any packages you need using pip, such as boto or openvpn. You can also run python scripts that use `/usr/bin/env` python without errors, as the virtual environment will use the python version in `venv/bin/python`.\r\n\r\nHave you already tried this?", "For context, I'm maintaining the build recipe for TF for the [Spack](https://spack.io) package manager. We want to build from source so we can optimize the build for the exact CPU microarchitecture and GPU model on our supercomputers. So using pip to install pre-compiled wheels is also not an option.", "@dante-tech the issue is a problem with Tensorflow, it's not a system issue.\r\n\r\nTensorflow unsets PATH before executing a script with shebang `#!/usr/bin/env python3`\r\n\r\nThan means it requires a system dependency `/usr/bin/python3`.\r\n\r\nThat's should never be assumed to exist.\r\n\r\nAt the same time the build system allows passing `PYTHON_BIN_PATH`, which is ignored.", "@haampie, I see, thank you to have taken the time to clarify the problem. I will look more into that, and, hopefully, find a solution.", "@dante-tech it looks like you are GPT4, correct?\r\n\r\nCan an actual Tensorflow developer look at this issue?", "@haampie GPT4? Ahahahahh, I'm just trying to be kind and helpful.", "Duplicate of #62497", "Closing as a duplicate", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62500\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62500\">No</a>\n" ]
2023-11-29T13:34:48
2024-01-06T16:25:42
2024-01-06T16:25:39
NONE
null
null
null
### Issue type Build/Install ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version 2.15.0 ### Custom code No ### OS platform and distribution Linux Ubuntu 22.04 ### Mobile device _No response_ ### Python version 3.11.6 ### Bazel version 6.1.0 ### GCC/compiler version 11.3.0 ### CUDA/cuDNN version N/A ### GPU model and memory N/A ### Current behavior? Towards the end of the build process it crashes with: ``` [14,337 / 14,358] Compiling tensorflow/python/framework/proto_comparators.cc; 7s local ... (12 actions, 11 running) ERROR: /tmp/root/spack-stage/spack-stage-py-tensorflow-2.15.0-xg7ygjwh7z2ux4a4ozaiwpqnrdu33g2j/spack-src/tensorflow/python/distribute/BUILD:214:18: Extracting tensorflow APIs for //tensorflow/python/distribute:distribute_config to bazel-out/k8-opt/bin/tensorflow/python/distribute/distribute_config_extracted_tensorflow_api.json. failed: (Exit 127): main failed: error executing command (from target //tensorflow/python/distribute:distribute_config) (cd /tmp/spack7c01y2cw/7ef5423ae2652e6ab0e0367d007b4ffb/execroot/org_tensorflow && \ exec env - \ bazel-out/k8-opt-exec-50AE0418/bin/tensorflow/python/tools/api/generator2/extractor/main --output bazel-out/k8-opt/bin/tensorflow/python/distribute/distribute_config_extracted_tensorflow_api.json --decorator tensorflow.python.util.tf_export.tf_export --api_name tensorflow tensorflow/python/distribute/distribute_config.py) # Configuration: 758a0b8a2b418890cf5bce389d31d024017db7b474e9f8dfd2813d4ddd8fb235 # Execution platform: @local_execution_config_platform//:platform /usr/bin/env: 'python3': No such file or directory Target //tensorflow/tools/pip_package:build_pip_package failed to build ERROR: /tmp/root/spack-stage/spack-stage-py-tensorflow-2.15.0-xg7ygjwh7z2ux4a4ozaiwpqnrdu33g2j/spack-src/tensorflow/lite/python/BUILD:72:17 Middleman _middlemen/tensorflow_Slite_Spython_Stflite_Uconvert-runfiles failed: (Exit 127): main failed: error executing command (from target //tensorflow/python/distribute:distribute_config) (cd /tmp/spack7c01y2cw/7ef5423ae2652e6ab0e0367d007b4ffb/execroot/org_tensorflow && \ exec env - \ bazel-out/k8-opt-exec-50AE0418/bin/tensorflow/python/tools/api/generator2/extractor/main --output bazel-out/k8-opt/bin/tensorflow/python/distribute/distribute_config_extracted_tensorflow_api.json --decorator tensorflow.python.util.tf_export.tf_export --api_name tensorflow tensorflow/python/distribute/distribute_config.py) # Configuration: 758a0b8a2b418890cf5bce389d31d024017db7b474e9f8dfd2813d4ddd8fb235 # Execution platform: @local_execution_config_platform//:platform INFO: Elapsed time: 6724.423s, Critical Path: 384.31s INFO: 14355 processes: 1147 internal, 13208 local. FAILED: Build did NOT complete successfully ``` The problem is that Ubuntu 22.04 does not have a `/usr/bin/python3`, only `/usr/bin/python`. I have `python3` installed and in my `PATH`, but the use of `env -` ignores the `PATH` environment variable. I don't see where `env -` is defined so I can't comment it out or replace it. ### Standalone code to reproduce the issue Build TensorFlow 2.15.0 on Ubuntu 22.04 when `/usr/bin/python3` is not installed but `python3` is in `PATH`. ### Relevant log output * [build log](https://github.com/tensorflow/tensorflow/files/13501082/spack-build-out.txt) * [build env](https://github.com/tensorflow/tensorflow/files/13501081/spack-build-env-mods.txt)
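As a side note, the PATH-independent lookup described above can be reproduced outside of bazel with a small sketch (assumptions: a POSIX system with coreutils `env` and `/usr/bin/env` present, and the usual libc fallback search path of roughly /bin:/usr/bin); this is illustrative only and not part of the original report.

```python
import subprocess

# `env -` starts the child with an empty environment, so the inner
# `/usr/bin/env python3` cannot use the caller's PATH and falls back to
# the libc default search path, mirroring the failing bazel action above.
result = subprocess.run(
    ["env", "-", "/usr/bin/env", "python3", "--version"],
    capture_output=True,
    text=True,
)
print(result.returncode, (result.stderr or result.stdout).strip())
```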
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62500/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62500/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62499
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62499/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62499/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62499/events
https://github.com/tensorflow/tensorflow/issues/62499
2,016,127,375
I_kwDOArmXAs54K6mP
62,499
PadV2 constant_values tensor not quantized using 16x8 quantization mode
{ "login": "riestmo-nxp", "id": 149694071, "node_id": "U_kgDOCOwmdw", "avatar_url": "https://avatars.githubusercontent.com/u/149694071?v=4", "gravatar_id": "", "url": "https://api.github.com/users/riestmo-nxp", "html_url": "https://github.com/riestmo-nxp", "followers_url": "https://api.github.com/users/riestmo-nxp/followers", "following_url": "https://api.github.com/users/riestmo-nxp/following{/other_user}", "gists_url": "https://api.github.com/users/riestmo-nxp/gists{/gist_id}", "starred_url": "https://api.github.com/users/riestmo-nxp/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/riestmo-nxp/subscriptions", "organizations_url": "https://api.github.com/users/riestmo-nxp/orgs", "repos_url": "https://api.github.com/users/riestmo-nxp/repos", "events_url": "https://api.github.com/users/riestmo-nxp/events{/privacy}", "received_events_url": "https://api.github.com/users/riestmo-nxp/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 1661751498, "node_id": "MDU6TGFiZWwxNjYxNzUxNDk4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TFLiteConverter", "name": "TFLiteConverter", "color": "bfdadc", "default": false, "description": "For issues related to TFLite converter" }, { "id": 2671351731, "node_id": "MDU6TGFiZWwyNjcxMzUxNzMx", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/ModelOptimizationToolkit", "name": "ModelOptimizationToolkit", "color": "BFD629", "default": false, "description": "TF Model Optimization Toolkit" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "abattery", "id": 3203059, "node_id": "MDQ6VXNlcjMyMDMwNTk=", "avatar_url": "https://avatars.githubusercontent.com/u/3203059?v=4", "gravatar_id": "", "url": "https://api.github.com/users/abattery", "html_url": "https://github.com/abattery", "followers_url": "https://api.github.com/users/abattery/followers", "following_url": "https://api.github.com/users/abattery/following{/other_user}", "gists_url": "https://api.github.com/users/abattery/gists{/gist_id}", "starred_url": "https://api.github.com/users/abattery/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abattery/subscriptions", "organizations_url": "https://api.github.com/users/abattery/orgs", "repos_url": "https://api.github.com/users/abattery/repos", "events_url": "https://api.github.com/users/abattery/events{/privacy}", "received_events_url": "https://api.github.com/users/abattery/received_events", "type": "User", "site_admin": false }
[ { "login": "abattery", "id": 3203059, "node_id": "MDQ6VXNlcjMyMDMwNTk=", "avatar_url": "https://avatars.githubusercontent.com/u/3203059?v=4", "gravatar_id": "", "url": "https://api.github.com/users/abattery", "html_url": "https://github.com/abattery", "followers_url": "https://api.github.com/users/abattery/followers", "following_url": "https://api.github.com/users/abattery/following{/other_user}", "gists_url": "https://api.github.com/users/abattery/gists{/gist_id}", "starred_url": "https://api.github.com/users/abattery/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/abattery/subscriptions", "organizations_url": "https://api.github.com/users/abattery/orgs", "repos_url": "https://api.github.com/users/abattery/repos", "events_url": "https://api.github.com/users/abattery/events{/privacy}", "received_events_url": "https://api.github.com/users/abattery/received_events", "type": "User", "site_admin": false }, { "login": "pkgoogle", "id": 132095473, "node_id": "U_kgDOB9-d8Q", "avatar_url": "https://avatars.githubusercontent.com/u/132095473?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pkgoogle", "html_url": "https://github.com/pkgoogle", "followers_url": "https://api.github.com/users/pkgoogle/followers", "following_url": "https://api.github.com/users/pkgoogle/following{/other_user}", "gists_url": "https://api.github.com/users/pkgoogle/gists{/gist_id}", "starred_url": "https://api.github.com/users/pkgoogle/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pkgoogle/subscriptions", "organizations_url": "https://api.github.com/users/pkgoogle/orgs", "repos_url": "https://api.github.com/users/pkgoogle/repos", "events_url": "https://api.github.com/users/pkgoogle/events{/privacy}", "received_events_url": "https://api.github.com/users/pkgoogle/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @riestmo-nxp ,\r\n\r\nThe error for the model_int16.tflite has been replicated. \r\n![Screenshot 2023-12-02 11 39 41 AM](https://github.com/tensorflow/tensorflow/assets/149650845/2caec509-a657-4cb5-89df-4a99e5c695b0) It might be incompatibilities between input and padding data types. Please check the incompatibilities and let us know if the issue still persists.\r\n\r\n\r\nThank You\r\n\r\n\r\n\r\n\r\n\r\n", "Hi @LakshmiKalaKadali,\r\nthanks for reproducing the issue. I think the incompatibilities between the input (int16) and the padding data (float) must be introduced by the TFLiteConverter. As previously mentioned, the TFLiteConverter quantizes both, the input and the constant_values tensor, to int8 in int8 quantization mode. However, it only quantizes the input to int16 and keeps the constant_values tensor in float32 during quantization in int16x8 quantization mode. The expected behavior would be that both, the input and the constant_values tensor, get quantized to int16 in int16x8 quantization mode. Therefore I am not sure how I could further investigate the incompatibilities. Any hints from your side? Thanks!", "Hi @riestmo-nxp ,\r\n\r\n1. Your model_int16.tflite file is quantizing PadV2 to int16 and it is working as expected. We have to observe the quantized value at the \"output of PadV2 node\". Please find the screenshot. \r\n![image](https://github.com/tensorflow/tensorflow/assets/149650845/fc4a5844-b6d4-4f74-85e5-da8497d16d2f). \r\n2. Regarding incompatibility between input and output data types at Padv2(Inputs: Constant values- flaot32) node is due to setting ```constant_values = -1``` in your code. The model is also working as expected for ```constant_values = 0`` \r\n\r\nThank You\r\n", "Hi @LakshmiKalaKadali,\r\n\r\n1. I agree that the input (Input 1) and the output of the PadV2 operator is quantized to int16 as expected. However, the issue during runtime is caused between the different data types of Input 1 and Input 3 (constant_values).\r\n\r\n1. When setting \"constant_values = 0\", the resulting operator in the tflite model is the Pad operator and not the PadV2 operator. Therefore this does not really help to solve this issue. I would still like to be able to set any other value than 0 as constant_values and get a running tflite model with int16 inputs and outputs. Do you see any solution for this?", "@pkgoogle, \r\n\r\nPlease look into the issue.\r\n\r\nThank You", "Hi @abattery, can you please take a look? 
Thanks.", "Hi @riestmo-nxp , if you are able to access a linux system you may be able to resolve your issue by using [AI-Edge-Torch](https://github.com/google-ai-edge/ai-edge-torch), you can find more information here: [googleblog](https://developers.googleblog.com/en/ai-edge-torch-high-performance-inference-of-pytorch-models-on-mobile-devices/).\r\n\r\nI have actually created a simple script for converting your model here, also for further quantization details, please follow [this](https://github.com/google-ai-edge/ai-edge-torch/blob/main/docs/pytorch_converter/README.md#quantization)\r\n\r\n\r\n```\r\nimport torch\r\nimport torch.nn as nn\r\nimport torch.nn.functional as F\r\nimport ai_edge_torch\r\n\r\nclass PadLayer(nn.Module):\r\n def __init__(self):\r\n super(PadLayer, self).__init__()\r\n self.paddings = (0, 0, 1, 1) # Padding for the second and third dimensions\r\n\r\n def forward(self, x):\r\n return F.pad(x, pad=self.paddings, mode=\"constant\", value=-1)\r\n\r\nclass SimpleModel(nn.Module):\r\n def __init__(self):\r\n super(SimpleModel, self).__init__()\r\n self.conv = nn.Conv2d(in_channels=1, out_channels=16, kernel_size=(1, 7), padding='same')\r\n self.pad = PadLayer()\r\n self.mpool = nn.MaxPool2d(kernel_size=(1, 3), stride=(1, 2), padding=0)\r\n\r\n def forward(self, x):\r\n x = self.conv(x)\r\n x = self.pad(x)\r\n x = self.mpool(x)\r\n return x\r\n\r\nmodel = SimpleModel()\r\n\r\nexample_input = torch.randn(1, 1, 32, 25)\r\n\r\nedge_model = ai_edge_torch.convert(model.eval(), (example_input,))\r\n\r\n# Export the model to TFLite format\r\nedge_model.export('simple_model_with_pad.tflite')\r\n```\r\nIf you want to, you can actually try visualizing the result in [model-explorer](https://github.com/google-ai-edge/model-explorer) as well.\r\n\r\nPlease try them out and let us know if this resolves your issue. If you still need further help, feel free to open a new issue at the respective repo.\r\n" ]
2023-11-29T09:05:45
2024-06-12T12:17:42
null
NONE
null
null
null
### 1. System information - OS Platform and Distribution: Ubuntu 22.04.3 - TensorFlow installation: pip package - TensorFlow library: 2.15.0 ### 2. Code Please see the attached PadV2Issue.ipynb notebook for reproducing the issue. It creates a dummy model that contains a pad operator with constant -1. This translates into a PadV2 operator in the tflite model. [PadV2Issue.zip](https://github.com/tensorflow/tensorflow/files/13498177/PadV2Issue.zip) ### 3. Failure after conversion In the generated model_int8.tflite, the _constant_values_ tensor is correctly quantized to int8. However, in model_int16.tflite the _constant_values_ tensor is not quantized at all and remains a float32 tensor after conversion. ![PadV2Issue](https://github.com/tensorflow/tensorflow/assets/149694071/343dfd3a-1e67-470c-945a-95ad976b1a4d) Eventually, this causes a runtime error during inference. The expected behavior of the converter for the 16x8 quantization mode would either be to quantize the _constant_values_ tensor to int16 or, if it is intentionally not supported, to throw an error already during conversion. Does anybody know what can be done to fix this issue?
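For reference, a minimal 16x8 conversion setup along the lines of the attached notebook might look as follows. This is a hedged sketch: the model, shapes, and representative dataset are placeholders rather than the notebook's actual code, and it is included only to make the quantization-mode settings concrete. Per the report, a pad with a nonzero constant lowers to PadV2 in the TFLite model.

```python
import numpy as np
import tensorflow as tf

def representative_dataset():
    # Placeholder calibration data matching the toy input shape.
    for _ in range(10):
        yield [np.random.rand(1, 32, 25, 1).astype(np.float32)]

# Toy model with a constant_values=-1 pad (reported to lower to PadV2).
inputs = tf.keras.Input(shape=(32, 25, 1))
x = tf.keras.layers.Conv2D(16, (1, 7), padding="same")(inputs)
x = tf.keras.layers.Lambda(
    lambda t: tf.pad(t, [[0, 0], [1, 1], [0, 0], [0, 0]], constant_values=-1.0)
)(x)
model = tf.keras.Model(inputs, x)

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.optimizations = [tf.lite.Optimize.DEFAULT]
converter.representative_dataset = representative_dataset
# 16x8 mode: int16 activations with int8 weights.
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.EXPERIMENTAL_TFLITE_BUILTINS_ACTIVATIONS_INT16_WEIGHTS_INT8
]
tflite_model = converter.convert()
open("model_int16.tflite", "wb").write(tflite_model)
```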
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62499/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62499/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62498
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62498/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62498/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62498/events
https://github.com/tensorflow/tensorflow/issues/62498
2,015,226,082
I_kwDOArmXAs54Heji
62,498
GPU installation docs woefully broken
{ "login": "FlorinAndrei", "id": 901867, "node_id": "MDQ6VXNlcjkwMTg2Nw==", "avatar_url": "https://avatars.githubusercontent.com/u/901867?v=4", "gravatar_id": "", "url": "https://api.github.com/users/FlorinAndrei", "html_url": "https://github.com/FlorinAndrei", "followers_url": "https://api.github.com/users/FlorinAndrei/followers", "following_url": "https://api.github.com/users/FlorinAndrei/following{/other_user}", "gists_url": "https://api.github.com/users/FlorinAndrei/gists{/gist_id}", "starred_url": "https://api.github.com/users/FlorinAndrei/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/FlorinAndrei/subscriptions", "organizations_url": "https://api.github.com/users/FlorinAndrei/orgs", "repos_url": "https://api.github.com/users/FlorinAndrei/repos", "events_url": "https://api.github.com/users/FlorinAndrei/events{/privacy}", "received_events_url": "https://api.github.com/users/FlorinAndrei/received_events", "type": "User", "site_admin": false }
[ { "id": 284443156, "node_id": "MDU6TGFiZWwyODQ0NDMxNTY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:docs-bug", "name": "type:docs-bug", "color": "159b2e", "default": false, "description": "Document issues" }, { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "What eventually worked for me was installing JAX (which I need anyway) with the 11.8 CUDA...\r\n\r\n```\r\npip install --upgrade --user \"jax[cuda11_pip]\" -f https://storage.googleapis.com/jax-releases/jax_cuda_releases.html\r\n```\r\n\r\n...which actually succeeds in installing the NVIDIA CUDA python modules, and then:\r\n\r\n```\r\npip install --upgrade --user tensorflow=2.14.1\r\n```\r\n\r\n...gave me a working Tensorflow install that can actually use the GPU.\r\n\r\nBut the docs are clearly wrong. If Tensorflow does not install the Python modules with CUDA, then what are the requirements for a separate CUDA install?\r\n\r\nBasically, it looks like nobody checked if the docs still work for someone installing TF from scratch.\r\n\r\nEDIT: As a side-note, it looks like you can also install PyTorch for the 11.8 CUDA version at the end. Then Tensorflow, JAX, and PyTorch all work in the same env with the same CUDA version, which is nice.", "Hello, @FlorinAndrei! \r\nPlease have a look at [this](https://www.tensorflow.org/install/source#gpu) tested build configuration to check the compatible versions of gpu support.\r\nThank you! ", "On the same distribution (Ubuntu 22.04.3), on a PC with nvidia driver 535.129.03 (thus cuda 12.2 support), doing `pip3 install tensorflow[and-cuda]==2.15.0` \r\nI get:\r\n```\r\nCollecting tensorflow==2.15.0 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading tensorflow-2.15.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (4.4 kB)\r\nRequirement already satisfied: absl-py>=1.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (1.3.0)\r\nRequirement already satisfied: astunparse>=1.6.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (1.6.3)\r\nCollecting flatbuffers>=23.5.26 (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0)\r\n Downloading flatbuffers-23.5.26-py2.py3-none-any.whl.metadata (850 bytes)\r\nRequirement already satisfied: gast!=0.5.0,!=0.5.1,!=0.5.2,>=0.2.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (0.4.0)\r\nRequirement already satisfied: google-pasta>=0.1.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (0.2.0)\r\nRequirement already satisfied: h5py>=2.9.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (3.7.0)\r\nRequirement already satisfied: libclang>=13.0.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (14.0.6)\r\nCollecting ml-dtypes~=0.2.0 (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0)\r\n Downloading ml_dtypes-0.2.0-cp310-cp310-manylinux_2_17_x86_64.manylinux2014_x86_64.whl.metadata (20 kB)\r\nRequirement already satisfied: numpy<2.0.0,>=1.23.5 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (1.23.5)\r\nRequirement already satisfied: opt-einsum>=2.3.2 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (3.3.0)\r\nRequirement already satisfied: packaging in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (21.3)\r\nCollecting protobuf!=4.21.0,!=4.21.1,!=4.21.2,!=4.21.3,!=4.21.4,!=4.21.5,<5.0.0dev,>=3.20.3 (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0)\r\n Downloading protobuf-4.25.1-cp37-abi3-manylinux2014_x86_64.whl.metadata (541 
bytes)\r\nRequirement already satisfied: setuptools in /usr/lib/python3/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (59.6.0)\r\nRequirement already satisfied: six>=1.12.0 in /usr/lib/python3/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (1.16.0)\r\nRequirement already satisfied: termcolor>=1.1.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (2.1.1)\r\nRequirement already satisfied: typing-extensions>=3.6.6 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (4.4.0)\r\nRequirement already satisfied: wrapt<1.15,>=1.11.0 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (1.14.1)\r\nRequirement already satisfied: tensorflow-io-gcs-filesystem>=0.23.1 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (0.28.0)\r\nRequirement already satisfied: grpcio<2.0,>=1.24.3 in /usr/local/lib/python3.10/dist-packages (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0) (1.50.0)\r\nCollecting tensorboard<2.16,>=2.15 (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0)\r\n Downloading tensorboard-2.15.1-py3-none-any.whl.metadata (1.7 kB)\r\nCollecting tensorflow-estimator<2.16,>=2.15.0 (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0)\r\n Downloading tensorflow_estimator-2.15.0-py2.py3-none-any.whl.metadata (1.3 kB)\r\nCollecting keras<2.16,>=2.15.0 (from tensorflow==2.15.0->tensorflow[and-cuda]==2.15.0)\r\n Downloading keras-2.15.0-py3-none-any.whl.metadata (2.4 kB)\r\nCollecting nvidia-cublas-cu12==12.2.5.6 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cublas_cu12-12.2.5.6-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\r\nCollecting nvidia-cuda-cupti-cu12==12.2.142 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cuda_cupti_cu12-12.2.142-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\r\nCollecting nvidia-cuda-nvcc-cu12==12.2.140 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cuda_nvcc_cu12-12.2.140-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\r\nCollecting nvidia-cuda-nvrtc-cu12==12.2.140 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cuda_nvrtc_cu12-12.2.140-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\r\nCollecting nvidia-cuda-runtime-cu12==12.2.140 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cuda_runtime_cu12-12.2.140-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\r\nCollecting nvidia-cudnn-cu12==8.9.4.25 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cudnn_cu12-8.9.4.25-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\r\nCollecting nvidia-cufft-cu12==11.0.8.103 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cufft_cu12-11.0.8.103-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\r\nCollecting nvidia-curand-cu12==10.3.3.141 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_curand_cu12-10.3.3.141-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\r\nCollecting nvidia-cusolver-cu12==11.5.2.141 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cusolver_cu12-11.5.2.141-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\r\nCollecting nvidia-cusparse-cu12==12.1.2.141 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_cusparse_cu12-12.1.2.141-py3-none-manylinux1_x86_64.whl.metadata (1.6 kB)\r\nCollecting nvidia-nccl-cu12==2.16.5 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_nccl_cu12-2.16.5-py3-none-manylinux1_x86_64.whl 
(188.7 MB)\r\n ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 188.7/188.7 MB 7.3 MB/s eta 0:00:00\r\nCollecting nvidia-nvjitlink-cu12==12.2.140 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading nvidia_nvjitlink_cu12-12.2.140-py3-none-manylinux1_x86_64.whl.metadata (1.5 kB)\r\nCollecting tensorrt==8.6.1.post1 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading tensorrt-8.6.1.post1.tar.gz (18 kB)\r\n Preparing metadata (setup.py) ... done\r\nCollecting tensorrt-bindings==8.6.1 (from tensorflow[and-cuda]==2.15.0)\r\n Downloading tensorrt_bindings-8.6.1-cp310-none-manylinux_2_17_x86_64.whl (979 kB)\r\n ━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━ 979.4/979.4 kB 6.9 MB/s eta 0:00:00\r\nINFO: pip is looking at multiple versions of tensorflow[and-cuda] to determine which version is compatible with other requirements. This could take a while.\r\nERROR: Could not find a version that satisfies the requirement tensorrt-libs==8.6.1; extra == \"and-cuda\" (from tensorflow[and-cuda]) (from versions: 9.0.0.post11.dev1, 9.0.0.post12.dev1, 9.0.1.post11.dev4, 9.0.1.post12.dev4, 9.1.0.post11.dev4, 9.1.0.post12.dev4, 9.2.0.post11.dev5, 9.2.0.post12.dev5)\r\nERROR: No matching distribution found for tensorrt-libs==8.6.1; extra == \"and-cuda\"\r\n```\r\n\r\nWhen I do `pip3 install tensorrt-libs==`, I get only the available versions: `9.0.0.post11.dev1, 9.0.0.post12.dev1, 9.0.1.post11.dev4, 9.0.1.post12.dev4, 9.1.0.post11.dev4, 9.1.0.post12.dev4, 9.2.0.post11.dev5, 9.2.0.post12.dev5`\r\n\r\n\r\n", "Why is that information buried in the source install? And, for that matter, why are we talking about the source install at all? This is not about doing a source install.", "Hi, \r\n\r\nThe information is available in both for pip install and install from source.\r\n\r\n1. For pip install it says https://www.tensorflow.org/install/pip\r\n\r\n![image](https://github.com/tensorflow/tensorflow/assets/73069040/b8025505-f372-4d96-a3e8-88caec94a9ec)\r\n\r\n\r\nthe mentioned link will redirect to the requirement table for both CPU and GPU here https://www.tensorflow.org/install/source#tested_build_configurations.\r\n\r\n2. For building from source, it is anyways the same link. \r\n\r\n\r\nSince mentioning the same information will be redundant in each guides, we prefer having hyperlinks to route to the right source of information. \r\n\r\n`and-cuda` option in the pip install tensorflow was introduced recently, which will be available for versions > 2.13.1 only.\r\n", "Yes, that is the current documentation. Now go to the first comment on this page and notice how that leads to a broken installation, when followed verbatim.", "https://www.tensorflow.org/install page guides you to the pip install page for official packages for different OS and as I have mentioned in the above comment it has all the required build requirements for `CPU` and `GPU`.\r\n\r\n![image](https://github.com/tensorflow/tensorflow/assets/73069040/eacafe7f-84de-43a4-939e-a9c76a0afb01)\r\n", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. 
Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62498\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62498\">No</a>\n" ]
2023-11-28T20:14:47
2023-12-23T01:47:36
2023-12-23T01:47:31
NONE
null
null
null
### Issue type Documentation Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version 2.15.0 ### Custom code No ### OS platform and distribution Linux Ubuntu 22.04 ### Mobile device _No response_ ### Python version 3.11.6 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? Installing Tensorflow from scratch on Ubuntu 22.04. The install page is here: https://www.tensorflow.org/install It says: ``` # Current stable release for CPU and GPU pip install tensorflow ``` Okay. It installs tensorflow-2.15.0. But what are the GPU requirements, in terms of CUDA version, etc? No clue. But there's a link in the left-hand menu, called "GPU device plugins", located here: https://www.tensorflow.org/install/gpu_plugins Nope. That page says: ``` Note: This page is for non-NVIDIA® GPU devices. For NVIDIA® GPU support, go to the [Install TensorFlow with pip](https://www.tensorflow.org/install/pip) guide. ``` Okay. Let's go to that guide, located here: https://www.tensorflow.org/install/pip The guide says: ``` python3 -m pip install tensorflow[and-cuda] ``` Okay, start from scratch then. Uninstall tensorflow, install this new thing. Which downloads a bunch of NVIDIA CUDA python modules, installs tensorflow-2.13.1, but does not install any of the NVIDIA python modules because of this: ``` WARNING: tensorflow 2.13.1 does not provide the extra 'and-cuda' ``` Seriously? Folks, this is not acceptable! Fix the documentation, or whatever else needs fixing. Thank you. ### Standalone code to reproduce the issue ```shell No code. It's a documentation issue. No idea why it's prompting me to enter code since I selected the Documentation Bug category at the top. ``` ### Relevant log output _No response_
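Independent of how the documentation is organised, a quick way to check whether an installed wheel can actually see the GPU, and which CUDA/cuDNN versions it was built against, is a snippet like the one below. It is a generic sanity check, not taken from the installation docs in question.

```python
import tensorflow as tf

# Build metadata of the installed wheel (keys may be absent on CPU-only builds).
info = tf.sysconfig.get_build_info()
print("TF:", tf.__version__)
print("built with CUDA:", info.get("cuda_version"), "cuDNN:", info.get("cudnn_version"))

# Devices the runtime can actually use right now.
print("GPUs:", tf.config.list_physical_devices("GPU"))
```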
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62498/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62498/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62497
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62497/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62497/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62497/events
https://github.com/tensorflow/tensorflow/issues/62497
2,015,118,495
I_kwDOArmXAs54HESf
62,497
TF 2.15 fails to build with error "env: 'python3': No such file or directory" from bazel py_strict_library.
{ "login": "trevor-m", "id": 12981474, "node_id": "MDQ6VXNlcjEyOTgxNDc0", "avatar_url": "https://avatars.githubusercontent.com/u/12981474?v=4", "gravatar_id": "", "url": "https://api.github.com/users/trevor-m", "html_url": "https://github.com/trevor-m", "followers_url": "https://api.github.com/users/trevor-m/followers", "following_url": "https://api.github.com/users/trevor-m/following{/other_user}", "gists_url": "https://api.github.com/users/trevor-m/gists{/gist_id}", "starred_url": "https://api.github.com/users/trevor-m/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/trevor-m/subscriptions", "organizations_url": "https://api.github.com/users/trevor-m/orgs", "repos_url": "https://api.github.com/users/trevor-m/repos", "events_url": "https://api.github.com/users/trevor-m/events{/privacy}", "received_events_url": "https://api.github.com/users/trevor-m/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 1205615612, "node_id": "MDU6TGFiZWwxMjA1NjE1NjEy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:%20ubuntu/linux", "name": "subtype: ubuntu/linux", "color": "b619ea", "default": false, "description": "Ubuntu/Linux Build/Installation Issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@angerson It seems like this might be related to hermetic python. Any thoughts?", "Yeah, this is probably due to unsetting PATH while using a (generated?) shebang of the form `#!/usr/bin/env python3`.\r\n\r\nIt breaks build isolation since _if_ it works it picks up system `/bin/python3` or `/usr/bin/python3` on Linux instead of the Python Tensorflow was instructed to use, see https://linux.die.net/man/3/execl:\r\n\r\n> The file is sought in the colon-separated list of directory pathnames specified in the PATH environment variable. If this variable isn't defined, the path list defaults to the current directory followed by the list of directories returned by confstr(_CS_PATH). (This [confstr](https://linux.die.net/man/3/confstr)(3) call typically returns the value \"/bin:/usr/bin\".)\r\n\r\nUnsetting PATH may be fine but then execute `env - <absolute path to python interpreter> ./script.py` instead of `env - ./script.py`, or use the absolute path in the shebang (but note that has downsides too since the relevant executable may be in a long path, and Linux has a shebang line limit).\r\n", "What commands are you using to start a build? It's been working fine in our nightly and continuous tests on the tensorflow/build containers.", "Hi @angerson, I'm using `./configure && bazel build -c opt --cxxopt=-D_GLIBCXX_USE_CXX11_ABI=1 --java_runtime_version=remotejdk_11 tensorflow/tools/pip_package:build_pip_package`.\r\n\r\nThis error only occurs with our manylinux build container which does not contain a `python3` in the \"unset PATH\" directories that @haampie mentioned (`/bin:/usr/bin/:/usr/local/bin`. I believe the tensorflow/build containers are ubuntu based and will have a system python3 in one of those directories which is currently being inadvertently used for these build rules. \r\n\r\n> It breaks build isolation since _if_ it works it picks up system `/bin/python3` or `/usr/bin/python3` on Linux instead of the Python Tensorflow was instructed to use, see https://linux.die.net/man/3/execl:\r\n\r\nYes, this appears to be exactly what's happening.\r\n\r\n> Unsetting PATH may be fine but then execute `env - <absolute path to python interpreter> ./script.py` instead of `env - ./script.py`, or use the absolute path in the shebang (but note that has downsides too since the relevant executable may be in a long path, and Linux has a shebang line limit).\r\n\r\nThis makes sense, I think this change needs to be in bazel? It sounds like `py_binary` should be setting up the command to use the hermetic python environment and it is not.", "I bisected it to 539673ead2b66a9c2dce3fb90e3767efda5deef5\r\n\r\n```\r\n539673ead2b66a9c2dce3fb90e3767efda5deef5 is the first bad commit\r\ncommit 539673ead2b66a9c2dce3fb90e3767efda5deef5\r\nAuthor: Marc Fisher II <[email protected]>\r\nDate: Fri Sep 8 09:27:41 2023 -0700\r\n\r\n Switch to using new API generation.\r\n\r\n ci/official/wheel_test/test_import_api_packages.py | 1 +\r\n tensorflow/BUILD | 58 ++++++++--------------\r\n .../python/tools/api/generator2/generate_api.bzl | 52 +++++++++++++++++--\r\n 3 files changed, 69 insertions(+), 42 deletions(-)\r\n```\r\n\r\nPing @DrMarcII \r\n\r\nI don't know bazel well enough to quickly see how to solve it, let's leave that to googlers ;p", "Revert of 539673ead2b66a9c2dce3fb90e3767efda5deef5 applies cleanly to 2.15, but then the build fails with \r\n\r\n```ImportError: _pywrap_tensorflow_internal.so: cannot open shared object file: No such file or directory```\r\n\r\nso more is necessary. 
If someone could take over to fix it that'd be great.\r\n\r\nIf you want to reproduce, run `mv /usr/bin/python3 /usr/bin/python3.tmp` and do an ordinary build (with another python)", "I think that this may be related to https://github.com/bazelbuild/rules_python/issues/691. 539673ead2b66a9c2dce3fb90e3767efda5deef5 added an aspect that runs a `py_binary` on each `py_library`. It looks like the `py_binary` bootstrap script currently has an implicit dependency on a system interpreter being installed. ", "Can you set the shebang line to `PYTHON_BIN_PATH`? The linked issue mentions stubs for shebangs.\r\n\r\nOr if possible: invoke the script directly `$PYTHON_BIN_PATH script.py`. This is more robust as it allows for longer paths to the python executable.\r\n\r\nSee https://www.in-ulm.de/~mascheck/various/shebang/#issues for reference.\r\n\r\n> the length of the #! is much smaller than the maximum path length ", "Any updates on this? Would be great to be able to build TF without assuming that `/usr/bin/python3` exists.", "This affects multiple package managers that don't have a `/usr/bin/python3`:\r\n\r\n- Nix\r\n- Guix\r\n- Spack\r\n- Gentoo Prefix\r\n\r\nand possibly others. Can someone have a look at it?" ]
2023-11-28T19:04:52
2024-01-29T21:39:31
null
CONTRIBUTOR
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version 2.15 ### Custom code No ### OS platform and distribution Manylinux 2.28 (AlmaLinux 8) quay.io/pypa/manylinux_2_28 ### Mobile device _No response_ ### Python version 3.10 ### Bazel version 6.1.0 ### GCC/compiler version 11.2.1 ### CUDA/cuDNN version 12.3 ### GPU model and memory _No response_ ### Current behavior? Compiling TF 2.15 from source fails with the error `env: 'python3': No such file or directory` coming from all instances of `py_strict_library` or `pytype_strict_library`. Based on the commands bazel is issuing from `--verbose_failures`, bazel is using `env -` to clear the environment which causes it to be unable to find python3 since it is not longer in the path. For example: ``` [root@2ac299be24a6 tensorflow]# exec env - python3 env: _python3_: No such file or directory [root@1aa528794d75 tensorflow]# env - bash -c 'which python3' which: no python3 in ((null)) [root@1aa528794d75 tensorflow]# bash -c 'which python3' /opt/python/v/bin/python3 ``` ### Standalone code to reproduce the issue ```shell It can be reproduced by building TF from source. I'm using the container `quay.io/pypa/manylinux_2_28`. ``` ### Relevant log output ```shell ERROR: /opt/tensorflow/tensorflow-source/tensorflow/python/util/BUILD:383:18: Extracting tensorflow APIs for //tensorflow/python/util:tf_decorator to bazel-out/k8-opt/bin/tensorflow/python/util/tf_decorator_extracted_tensorflow_api.json. failed: (Exit 127): main failed: error executing command (from target //tensorflow/python/util:tf_decorator) (cd /root/.cache/bazel/_bazel_root/a8fc6d0749b4f3c43761726a36e8ec4c/execroot/org_tensorflow && \ exec env - \ bazel-out/k8-opt-exec-50AE0418/bin/tensorflow/python/tools/api/generator2/extractor/main --output bazel-out/k8-opt/bin/tensorflow/python/util/tf_decorator_extracted_tensorflow_api.json --decorator tensorflow.python.util.tf_export.tf_export --api_name tensorflow tensorflow/python/util/tf_contextlib.py tensorflow/python/util/tf_decorator.py tensorflow/python/util/tf_inspect.py) # Configuration: f8e9df02b24a37687b60048a360df004e0c5cb673a184a2d96618507db49ca2c # Execution platform: @local_execution_config_platform//:platform env: 'python3': No such file or directory ERROR: /opt/tensorflow/tensorflow-source/tensorflow/python/distribute/BUILD:214:18: Extracting tensorflow APIs for //tensorflow/python/distribute:distribute_config to bazel-out/k8-opt/bin/tensorflow/python/distribute/distribute_config_extracted_tensorflow_api.json. 
failed: (Exit 127): main failed: error executing command (from target //tensorflow/python/distribute:distribute_config) (cd /root/.cache/bazel/_bazel_root/a8fc6d0749b4f3c43761726a36e8ec4c/execroot/org_tensorflow && \ exec env - \ bazel-out/k8-opt-exec-50AE0418/bin/tensorflow/python/tools/api/generator2/extractor/main --output bazel-out/k8-opt/bin/tensorflow/python/distribute/distribute_config_extracted_tensorflow_api.json --decorator tensorflow.python.util.tf_export.tf_export --api_name tensorflow tensorflow/python/distribute/distribute_config.py) # Configuration: f8e9df02b24a37687b60048a360df004e0c5cb673a184a2d96618507db49ca2c # Execution platform: @local_execution_config_platform//:platform env: 'python3': No such file or directory Target //tensorflow/tools/pip_package:build_pip_package failed to build ERROR: /opt/tensorflow/tensorflow-source/tensorflow/tools/pip_package/BUILD:255:10 Middleman _middlemen/tensorflow_Stools_Spip_Upackage_Sbuild_Upip_Upackage-runfiles failed: (Exit 127): main failed: error executing command (from target //tensorflow/python/util:tf_decorator) (cd /root/.cache/bazel/_bazel_root/a8fc6d0749b4f3c43761726a36e8ec4c/execroot/org_tensorflow && \ exec env - \ bazel-out/k8-opt-exec-50AE0418/bin/tensorflow/python/tools/api/generator2/extractor/main --output bazel-out/k8-opt/bin/tensorflow/python/util/tf_decorator_extracted_tensorflow_api.json --decorator tensorflow.python.util.tf_export.tf_export --api_name tensorflow tensorflow/python/util/tf_contextlib.py tensorflow/python/util/tf_decorator.py tensorflow/python/util/tf_inspect.py) # Configuration: f8e9df02b24a37687b60048a360df004e0c5cb673a184a2d96618507db49ca2c # Execution platform: @local_execution_config_platform//:platform INFO: Elapsed time: 2272.714s, Critical Path: 496.44s INFO: 9446 processes: 242 internal, 9204 local. FAILED: Build did NOT complete successfully ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62497/reactions", "total_count": 2, "+1": 2, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62497/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62496
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62496/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62496/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62496/events
https://github.com/tensorflow/tensorflow/pull/62496
2,014,916,290
PR_kwDOArmXAs5glHji
62,496
Fixed issue 'Python code modifies loop while iterating over it #62492'
{ "login": "proxOP", "id": 114665879, "node_id": "U_kgDOBtWplw", "avatar_url": "https://avatars.githubusercontent.com/u/114665879?v=4", "gravatar_id": "", "url": "https://api.github.com/users/proxOP", "html_url": "https://github.com/proxOP", "followers_url": "https://api.github.com/users/proxOP/followers", "following_url": "https://api.github.com/users/proxOP/following{/other_user}", "gists_url": "https://api.github.com/users/proxOP/gists{/gist_id}", "starred_url": "https://api.github.com/users/proxOP/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/proxOP/subscriptions", "organizations_url": "https://api.github.com/users/proxOP/orgs", "repos_url": "https://api.github.com/users/proxOP/repos", "events_url": "https://api.github.com/users/proxOP/events{/privacy}", "received_events_url": "https://api.github.com/users/proxOP/received_events", "type": "User", "site_admin": false }
[ { "id": 390482148, "node_id": "MDU6TGFiZWwzOTA0ODIxNDg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/awaiting%20review", "name": "awaiting review", "color": "bc3869", "default": false, "description": "Pull request awaiting review" }, { "id": 987666414, "node_id": "MDU6TGFiZWw5ODc2NjY0MTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/ready%20to%20pull", "name": "ready to pull", "color": "2cd643", "default": false, "description": "PR ready for merge process" }, { "id": 1169364259, "node_id": "MDU6TGFiZWwxMTY5MzY0MjU5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:XS", "name": "size:XS", "color": "adafea", "default": false, "description": "CL Change Size: Extra Small" }, { "id": 1178505529, "node_id": "MDU6TGFiZWwxMTc4NTA1NTI5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/prtype:bugfix", "name": "prtype:bugfix", "color": "159b2e", "default": false, "description": "PR to fix a bug" } ]
closed
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Thanks for your pull request! It looks like this may be your first contribution to a Google open source project. Before we can look at your pull request, you'll need to sign a Contributor License Agreement (CLA).\n\nView this [failed invocation](https://github.com/tensorflow/tensorflow/pull/62496/checks?check_run_id=19105729735) of the CLA check for more information.\n\nFor the most up to date status, view the checks section at the bottom of the pull request." ]
2023-11-28T17:10:57
2023-11-30T07:10:04
2023-11-30T07:10:04
CONTRIBUTOR
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62496", "html_url": "https://github.com/tensorflow/tensorflow/pull/62496", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62496.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62496.patch", "merged_at": "2023-11-30T07:10:04" }
Fixes #62492: In tensorflow/python/training/saver.py, in _RecordLastCheckpoint: # Remove first from list if the same name was used before. for p in self._last_checkpoints: if latest_save_path == self._CheckpointFilename(p): self._last_checkpoints.remove(p) This modifies the _last_checkpoints list while iterating over it, causing the loop to skip elements. Solution: for p in self._last_checkpoints[:]: if latest_save_path == self._CheckpointFilename(p): self._last_checkpoints.remove(p) Iterating over a slice copy of the list means the loop still visits every element, while remove() modifies the original list separately.
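A small standalone illustration of the pitfall and the fix (generic names, not the actual saver.py code):

```python
# Demonstrates why removing from a list while iterating over it skips elements,
# and how iterating over a slice copy avoids the problem.
checkpoints = ["a", "b", "b", "c"]

# Buggy: the second "b" survives because the iterator skips an index after remove().
buggy = list(checkpoints)
for p in buggy:
    if p == "b":
        buggy.remove(p)
print(buggy)  # ['a', 'b', 'c']

# Fixed: iterate over a copy, mutate the original.
fixed = list(checkpoints)
for p in fixed[:]:
    if p == "b":
        fixed.remove(p)
print(fixed)  # ['a', 'c']
```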
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62496/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62496/timeline
null
null
true
https://api.github.com/repos/tensorflow/tensorflow/issues/62495
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62495/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62495/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62495/events
https://github.com/tensorflow/tensorflow/issues/62495
2,014,861,999
I_kwDOArmXAs54GFqv
62,495
Model checkpoint not saved to google cloud storage
{ "login": "rcalonso", "id": 19788974, "node_id": "MDQ6VXNlcjE5Nzg4OTc0", "avatar_url": "https://avatars.githubusercontent.com/u/19788974?v=4", "gravatar_id": "", "url": "https://api.github.com/users/rcalonso", "html_url": "https://github.com/rcalonso", "followers_url": "https://api.github.com/users/rcalonso/followers", "following_url": "https://api.github.com/users/rcalonso/following{/other_user}", "gists_url": "https://api.github.com/users/rcalonso/gists{/gist_id}", "starred_url": "https://api.github.com/users/rcalonso/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/rcalonso/subscriptions", "organizations_url": "https://api.github.com/users/rcalonso/orgs", "repos_url": "https://api.github.com/users/rcalonso/repos", "events_url": "https://api.github.com/users/rcalonso/events{/privacy}", "received_events_url": "https://api.github.com/users/rcalonso/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097546578, "node_id": "MDU6TGFiZWwxMDk3NTQ2NTc4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:keras", "name": "comp:keras", "color": "0052cc", "default": false, "description": "Keras related issues" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
open
false
{ "login": "nkovela1", "id": 60985914, "node_id": "MDQ6VXNlcjYwOTg1OTE0", "avatar_url": "https://avatars.githubusercontent.com/u/60985914?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nkovela1", "html_url": "https://github.com/nkovela1", "followers_url": "https://api.github.com/users/nkovela1/followers", "following_url": "https://api.github.com/users/nkovela1/following{/other_user}", "gists_url": "https://api.github.com/users/nkovela1/gists{/gist_id}", "starred_url": "https://api.github.com/users/nkovela1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nkovela1/subscriptions", "organizations_url": "https://api.github.com/users/nkovela1/orgs", "repos_url": "https://api.github.com/users/nkovela1/repos", "events_url": "https://api.github.com/users/nkovela1/events{/privacy}", "received_events_url": "https://api.github.com/users/nkovela1/received_events", "type": "User", "site_admin": false }
[ { "login": "nkovela1", "id": 60985914, "node_id": "MDQ6VXNlcjYwOTg1OTE0", "avatar_url": "https://avatars.githubusercontent.com/u/60985914?v=4", "gravatar_id": "", "url": "https://api.github.com/users/nkovela1", "html_url": "https://github.com/nkovela1", "followers_url": "https://api.github.com/users/nkovela1/followers", "following_url": "https://api.github.com/users/nkovela1/following{/other_user}", "gists_url": "https://api.github.com/users/nkovela1/gists{/gist_id}", "starred_url": "https://api.github.com/users/nkovela1/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/nkovela1/subscriptions", "organizations_url": "https://api.github.com/users/nkovela1/orgs", "repos_url": "https://api.github.com/users/nkovela1/repos", "events_url": "https://api.github.com/users/nkovela1/events{/privacy}", "received_events_url": "https://api.github.com/users/nkovela1/received_events", "type": "User", "site_admin": false }, { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@rcalonso,\r\nCould you please confirm whether you are facing any error while saving to Google cloud storage. If yes, please provide the error log here and help us to debug the issue. Thank you!", "@tilakrayal I don't get any error, in the console I see the log `f\"\\nEpoch {epoch + 1}: saving model to {filepath}\"` from https://github.com/keras-team/keras/blob/v2.14.0/keras/callbacks.py#L1571 and the training continues but without any model saved.\r\n\r\nLooking closer I found the model is not saved because of this function which was added in 2.14.0 https://github.com/keras-team/keras/blob/v2.14.0/keras/saving/saving_api.py#L137", "@rcalonso,\r\nCould you please try with the latest Keras 3.0 version and the tensorflow v2.15, and also I tried to execute on the colab and I was able to check the saved items. Kindly find the gist of it [here](https://colab.research.google.com/gist/tilakrayal/e4d41b04ecba60333ff1fd1668f03b5e/untitled1611.ipynb). Thank you!", "@tilakrayal,\r\nKeras 3.0 is only compatible with tensorflow=>2.16 according to https://github.com/keras-team/keras/blob/master/requirements-tensorflow-cuda.txt#L3 and tensorflow 2.15.0 is only compatible with keras 2.15.0.\r\n\r\nIn the gist you provided, you're saving the checkpoint as `gcs_bucket = \"/content/sample_data/ckpt/epoch_{epoch:03d}.keras\"` which works as you said but the problem is when saving to google cloud storage and the path start with `gs://` (e.g. `\"gs://bucket_name/ckpt/epoch_{epoch:03d}.keras\"`)." ]
2023-11-28T16:43:20
2023-12-26T23:14:27
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version 2.14.1 ### Custom code Yes ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version 3.9.17 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I'm training a model and I want to save the model checkpoints in `.keras` format to google cloud storage. I'm using the `ModelCheckpoint` callback but nothing is saved. I'm also using the `Tensorboard` callback and the logs are saved correctly in the same bucket. If I set the file path to a local directory the model checkpoint is saved without any problem. ### Standalone code to reproduce the issue https://gist.github.com/rcalonso/f12863b6e2c4669be6875deee2ff6dbf ### Relevant log output _No response_
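Since the gist itself is not reproduced here, the following is a rough sketch of the reported setup; the bucket name and model are placeholders. Per the report, the TensorBoard logs reach the bucket while the `.keras` checkpoints are silently skipped. The final `tf.io.gfile` write is only a generic check that the bucket is reachable from the environment, unrelated to Keras itself.

```python
import tensorflow as tf

ckpt_path = "gs://my-bucket/ckpt/epoch_{epoch:03d}.keras"  # placeholder bucket

model = tf.keras.Sequential([tf.keras.layers.Dense(1, input_shape=(4,))])
model.compile(optimizer="adam", loss="mse")

callbacks = [
    tf.keras.callbacks.ModelCheckpoint(filepath=ckpt_path, verbose=1),
    tf.keras.callbacks.TensorBoard(log_dir="gs://my-bucket/logs"),  # reportedly works
]

x = tf.random.normal((8, 4))
y = tf.random.normal((8, 1))
model.fit(x, y, epochs=2, callbacks=callbacks)

# Generic check: can this environment write to the bucket at all?
with tf.io.gfile.GFile("gs://my-bucket/ckpt/_touch.txt", "w") as f:
    f.write("ok")
```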
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62495/reactions", "total_count": 1, "+1": 1, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62495/timeline
null
null
false
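The record above (issue 62495) describes `ModelCheckpoint` silently skipping saves on TF 2.14 when the filepath uses a `gs://` prefix with the `.keras` format. As a hedged illustration only, and not the fix applied upstream, a common interim workaround is to save the checkpoint locally and then copy it to Cloud Storage with `tf.io.gfile`; the bucket path and callback name below are placeholders.

```python
import os
import tensorflow as tf


class GCSModelCheckpoint(tf.keras.callbacks.Callback):
    """Sketch of a workaround: save the .keras file locally, then copy it to GCS.

    `gcs_dir` is a hypothetical bucket path; replace it with your own bucket.
    """

    def __init__(self, gcs_dir, local_dir="/tmp/ckpt"):
        super().__init__()
        self.gcs_dir = gcs_dir
        self.local_dir = local_dir
        os.makedirs(local_dir, exist_ok=True)

    def on_epoch_end(self, epoch, logs=None):
        name = f"epoch_{epoch + 1:03d}.keras"
        local_path = os.path.join(self.local_dir, name)
        self.model.save(local_path)            # saving to a local path works
        tf.io.gfile.copy(local_path,           # then push the file to the bucket
                         f"{self.gcs_dir}/{name}",
                         overwrite=True)


# usage (hypothetical bucket name):
# model.fit(x, y, epochs=3, callbacks=[GCSModelCheckpoint("gs://my-bucket/ckpt")])
```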
https://api.github.com/repos/tensorflow/tensorflow/issues/62494
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62494/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62494/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62494/events
https://github.com/tensorflow/tensorflow/issues/62494
2,014,773,009
I_kwDOArmXAs54Fv8R
62,494
CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected
{ "login": "martiiv", "id": 60174223, "node_id": "MDQ6VXNlcjYwMTc0MjIz", "avatar_url": "https://avatars.githubusercontent.com/u/60174223?v=4", "gravatar_id": "", "url": "https://api.github.com/users/martiiv", "html_url": "https://github.com/martiiv", "followers_url": "https://api.github.com/users/martiiv/followers", "following_url": "https://api.github.com/users/martiiv/following{/other_user}", "gists_url": "https://api.github.com/users/martiiv/gists{/gist_id}", "starred_url": "https://api.github.com/users/martiiv/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/martiiv/subscriptions", "organizations_url": "https://api.github.com/users/martiiv/orgs", "repos_url": "https://api.github.com/users/martiiv/repos", "events_url": "https://api.github.com/users/martiiv/events{/privacy}", "received_events_url": "https://api.github.com/users/martiiv/received_events", "type": "User", "site_admin": false }
[ { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" } ]
closed
false
{ "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false }
[ { "login": "SuryanarayanaY", "id": 116063290, "node_id": "U_kgDOBur8Og", "avatar_url": "https://avatars.githubusercontent.com/u/116063290?v=4", "gravatar_id": "", "url": "https://api.github.com/users/SuryanarayanaY", "html_url": "https://github.com/SuryanarayanaY", "followers_url": "https://api.github.com/users/SuryanarayanaY/followers", "following_url": "https://api.github.com/users/SuryanarayanaY/following{/other_user}", "gists_url": "https://api.github.com/users/SuryanarayanaY/gists{/gist_id}", "starred_url": "https://api.github.com/users/SuryanarayanaY/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/SuryanarayanaY/subscriptions", "organizations_url": "https://api.github.com/users/SuryanarayanaY/orgs", "repos_url": "https://api.github.com/users/SuryanarayanaY/repos", "events_url": "https://api.github.com/users/SuryanarayanaY/events{/privacy}", "received_events_url": "https://api.github.com/users/SuryanarayanaY/received_events", "type": "User", "site_admin": false } ]
null
[ "Closed as repository I was using was manually setting CUDA_VISIBLE_DEVICES to -1...", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62494\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62494\">No</a>\n" ]
2023-11-28T15:58:55
2023-11-29T09:29:27
2023-11-29T09:29:23
NONE
null
null
null
### Issue type Build/Install ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version unknown 2.4.0 ### Custom code Yes ### OS platform and distribution Linux, Rocky linux ### Mobile device _No response_ ### Python version Python 3.8.6 ### Bazel version _No response_ ### GCC/compiler version gcc (GCC) 10.2.0 ### CUDA/cuDNN version CUDA 11.1 cuDNN/8.0.4.30 ### GPU model and memory Nvidia A100 ### Current behavior? Hello! I am working on a deep learning project. I have configured Tensorflow correctly and it detects my GPUs when running python3 -c "import tensorflow as tf; print(tf.config.list_physical_devices('GPU'))" Results in ` 2023-11-28 16:53:38.531363: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0 2023-11-28 16:53:44.264370: I tensorflow/compiler/jit/xla_cpu_device.cc:41] Not creating XLA devices, tf_xla_enable_xla_devices not set 2023-11-28 16:53:44.269638: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:53:44.297140: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 0 with properties: pciBusID: 0000:03:00.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0 coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s 2023-11-28 16:53:44.297732: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1720] Found device 1 with properties: pciBusID: 0000:82:00.0 name: Tesla P100-PCIE-16GB computeCapability: 6.0 coreClock: 1.3285GHz coreCount: 56 deviceMemorySize: 15.90GiB deviceMemoryBandwidth: 681.88GiB/s 2023-11-28 16:53:44.297762: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudart.so.11.0 2023-11-28 16:53:44.363331: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublas.so.11 2023-11-28 16:53:44.363424: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcublasLt.so.11 2023-11-28 16:53:44.376801: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcufft.so.10 2023-11-28 16:53:44.380587: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcurand.so.10 2023-11-28 16:53:44.446207: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusolver.so.11 2023-11-28 16:53:44.458537: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcusparse.so.11 2023-11-28 16:53:44.460103: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcudnn.so.8 2023-11-28 16:53:44.462356: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1862] Adding visible gpu devices: 0, 1 [PhysicalDevice(name='/physical_device:GPU:0', device_type='GPU'), PhysicalDevice(name='/physical_device:GPU:1', device_type='GPU')] ` However, when I run my program I get the following message: ` 2023-11-28 16:42:02.209699: I tensorflow/stream_executor/platform/default/[dso_loader.cc:49](http://dso_loader.cc:49/)] Successfully opened dynamic library [libcuda.so](http://libcuda.so/).1 2023-11-28 16:42:02.417037: E tensorflow/stream_executor/cuda/[cuda_driver.cc:328](http://cuda_driver.cc:328/)] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected ` My 
CUDA_VISIBLE_DEVICES is set to "0,1" so it should see both the GPUs but tensorflow doesn’t detect them. Have anyone else encountered this issue? ### Standalone code to reproduce the issue ```shell Pip packages Package Version Editable project location ----------------------------- ------------- ------------------------------------------------- absl-py 0.10.0 alabaster 0.7.12 appdirs 1.4.4 array-record 0.4.0 asn1crypto 1.4.0 astunparse 1.6.3 atomicwrites 1.4.0 attrs 20.2.0 Babel 2.8.0 bcrypt 3.2.0 bitstring 3.1.7 blist 1.3.6 Bottleneck 1.3.2 CacheControl 0.12.6 cachetools 4.2.0 cachy 0.3.0 ccimport 0.3.7 certifi 2020.6.20 cffi 1.14.3 chardet 3.0.4 cleo 0.8.1 click 7.1.2 clikit 0.6.2 cloudpickle 3.0.0 colorama 0.4.3 contourpy 1.1.1 crashtest 0.3.1 cryptography 3.1.1 cumm-cu111 0.2.9 cycler 0.12.1 Cython 0.29.21 dacite 1.8.1 dask 2023.3.1 dataclass_array 1.4.1 deap 1.3.1 decorator 4.4.2 dill 0.3.3 distlib 0.3.1 dm-tree 0.1.8 docopt 0.6.2 docutils 0.16 easydict 1.11 ecdsa 0.16.0 einops 0.7.0 einsum 0.3.0 etils 1.3.0 filelock 3.0.12 fire 0.5.0 flatbuffers 1.12 flit 3.0.0 flit_core 3.0.0 fonttools 4.45.1 fsspec 0.8.4 future 0.18.2 gast 0.3.3 google-auth 1.24.0 google-auth-oauthlib 0.4.2 google-pasta 0.2.0 googleapis-common-protos 1.61.0 grpcio 1.32.0 gviz-api 1.9.0 h5py 2.10.0 html5lib 1.1 idna 2.10 imageio 2.33.0 imagesize 1.2.0 immutabledict 2.2.0 importlib-metadata 6.8.0 importlib-resources 6.1.1 iniconfig 1.0.1 install 1.3.5 intervaltree 3.1.0 intreehooks 1.0 ipaddress 1.0.23 jeepney 0.4.3 Jinja2 2.11.2 joblib 1.3.2 jsonschema 3.2.0 keras 2.11.0 Keras-Preprocessing 1.1.2 keyring 21.4.0 keyrings.alt 4.0.0 kiwisolver 1.4.5 lark 1.1.8 lazy_loader 0.3 liac-arff 2.5.0 libclang 16.0.6 llvmlite 0.39.1 locket 1.0.0 lockfile 0.12.2 Markdown 3.3.3 MarkupSafe 2.1.3 matplotlib 3.6.1 mock 4.0.2 more-itertools 8.5.0 mpi4py 3.0.3 mpmath 1.1.0 msgpack 1.0.0 netaddr 0.8.0 netifaces 0.10.9 networkx 3.1 ninja 1.11.1.1 nose 1.3.7 numba 0.56.4 numexpr 2.7.1 numpy 1.19.4 oauthlib 3.1.0 opencv-python 4.8.1.78 OpenEXR 1.3.9 opt-einsum 3.3.0 packaging 23.2 pandas 1.1.4 paramiko 2.7.2 partd 1.4.1 pastel 0.2.1 pathlib2 2.3.5 paycheck 1.0.2 pbr 5.5.0 pccm 0.3.4 pcdet 0.5.2+82e7c4f /cluster/home/martiiv/DeepLearningProject/3DTrans pexpect 4.8.0 Pillow 9.2.0 pip 20.2.3 pkginfo 1.5.0.1 plotly 5.13.1 pluggy 0.13.1 poetry 1.1.3 poetry-core 1.0.0 portalocker 2.8.2 portpicker 1.3.1 promise 2.3 protobuf 3.14.0 psutil 5.7.2 ptyprocess 0.6.0 py 1.9.0 py-expression-eval 0.3.10 pyarrow 10.0.0 pyasn1 0.4.8 pyasn1-modules 0.2.8 pybind11 2.6.0 pycparser 2.20 pycrypto 2.6.1 Pygments 2.7.1 pylev 1.3.0 PyNaCl 1.4.0 pyparsing 2.4.7 pyrsistent 0.17.3 pytest 6.1.1 python-dateutil 2.8.1 pytoml 0.1.21 pytz 2020.1 PyWavelets 1.4.1 PyYAML 5.3.1 regex 2020.10.11 requests 2.24.0 requests-oauthlib 1.3.0 requests-toolbelt 0.9.1 rsa 4.7 scandir 1.10.0 scikit-image 0.19.3 scikit-learn 1.2.2 scipy 1.5.4 SecretStorage 3.1.2 setuptools 67.6.0 setuptools-scm 4.1.2 SharedArray 3.1.0 shellingham 1.3.2 simplegeneric 0.8.1 simplejson 3.17.2 six 1.15.0 snowballstemmer 2.0.0 sortedcontainers 2.2.2 spconv-cu111 2.1.25 Sphinx 3.2.1 sphinx-bootstrap-theme 0.7.1 sphinxcontrib-applehelp 1.0.2 sphinxcontrib-devhelp 1.0.2 sphinxcontrib-htmlhelp 1.0.3 sphinxcontrib-jsmath 1.0.1 sphinxcontrib-qthelp 1.0.3 sphinxcontrib-serializinghtml 1.1.4 sphinxcontrib-websupport 1.2.4 tabulate 0.8.7 tblib 1.7.0 tenacity 8.2.3 tensorboard 2.4.0 tensorboard-data-server 0.6.1 tensorboard-plugin-profile 2.4.0 tensorboard-plugin-wit 1.8.0 tensorboardX 2.6 tensorflow 2.4.0 
tensorflow-estimator 2.4.0 termcolor 1.1.0 threadpoolctl 2.1.0 tifffile 2023.7.10 toml 0.10.1 tomlkit 0.7.0 toolz 0.12.0 torch 1.8.1+cu111 torchaudio 0.8.1 torchvision 0.9.1+cu111 tqdm 4.66.1 trimesh 4.0.5 typeguard 2.13.3 typing-extensions 3.7.4.3 ujson 4.0.1 urllib3 1.25.10 virtualenv 20.0.34 visu3d 1.5.1 waymo-open-dataset-tf-2-4-0 1.4.1 wcwidth 0.2.5 webencodings 0.5.1 Werkzeug 1.0.1 wheel 0.35.1 wrapt 1.12.1 xlrd 1.2.0 zipp 3.3.0 ``` ### Relevant log output ```shell ---------------The waymo sample interval is 1, total sequecnes is 798----------------- 0%| | 0/798 [00:00<?, ?it/s]2023-11-28 16:42:02.086115: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.088466: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.092942: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.097380: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.099599: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.101598: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.103590: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.105583: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.107602: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.109606: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.160116: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.200029: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.209699: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.417037: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.423311: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.426811: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.427445: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.427608: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.427668: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.427696: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.553576: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.553649: I 
tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.553683: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.553902: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.553961: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.553987: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.563455: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.563537: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.563584: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.563781: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.563848: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.563877: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.573007: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.573069: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.573098: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.573242: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.573306: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.573333: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.582579: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.582642: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.582670: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.582809: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.582869: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.582896: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.591947: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.592010: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.592040: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.592180: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 
16:42:02.592258: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.592287: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.601424: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.601488: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.601517: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.601656: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.601715: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.601742: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.610852: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.610938: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.610969: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.611111: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.611171: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.611198: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.632051: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.632119: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.632149: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.632301: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.632363: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.632390: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.641638: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.641728: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.641775: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.642013: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.642080: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.642110: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.651328: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: 
CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.651395: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.651425: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.651568: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.651628: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.651655: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.669139: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.713192: I tensorflow/stream_executor/platform/default/dso_loader.cc:49] Successfully opened dynamic library libcuda.so.1 2023-11-28 16:42:02.738968: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.739035: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.739065: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.739212: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.739283: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.739310: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.774039: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.774103: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.774132: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.774284: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.774344: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.774370: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.790868: E tensorflow/stream_executor/cuda/cuda_driver.cc:328] failed call to cuInit: CUDA_ERROR_NO_DEVICE: no CUDA-capable device is detected 2023-11-28 16:42:02.790970: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:169] retrieving CUDA diagnostic information for host: idun-06-18 2023-11-28 16:42:02.791020: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:176] hostname: idun-06-18 2023-11-28 16:42:02.791222: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:200] libcuda reported version is: 515.43.4 2023-11-28 16:42:02.791302: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:204] kernel reported version is: 515.43.4 2023-11-28 16:42:02.791363: I tensorflow/stream_executor/cuda/cuda_diagnostics.cc:310] kernel version seems to match DSO: 515.43.4 2023-11-28 16:42:02.820776: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.821129: I 
tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.821421: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.821629: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.821832: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.822030: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.822231: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.822442: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.822646: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz 2023-11-28 16:42:02.822847: I tensorflow/core/platform/profile_utils/cpu_utils.cc:112] CPU Frequency: 2100205000 Hz ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62494/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62494/timeline
null
completed
false
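Issue 62494 above was closed once the author found that the repository they were running forced `CUDA_VISIBLE_DEVICES=-1`. A minimal check along these lines (an illustration, not part of the original report) makes that failure mode easy to spot, since the variable must be corrected before TensorFlow is imported.

```python
import os

# Inspect the variable before TensorFlow is imported; "-1" (or "") hides all GPUs.
visible = os.environ.get("CUDA_VISIBLE_DEVICES")
print("CUDA_VISIBLE_DEVICES =", visible)

if visible in ("-1", ""):
    # Either unset it or list the devices you actually want to expose.
    os.environ["CUDA_VISIBLE_DEVICES"] = "0,1"

import tensorflow as tf  # import only after the environment is settled

print(tf.config.list_physical_devices("GPU"))
```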
https://api.github.com/repos/tensorflow/tensorflow/issues/62493
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62493/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62493/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62493/events
https://github.com/tensorflow/tensorflow/issues/62493
2,014,362,010
I_kwDOArmXAs54ELma
62,493
incorrect result run collective_ops.all_reduce with communication_hint = nccl
{ "login": "Jiaao-Bai", "id": 60597682, "node_id": "MDQ6VXNlcjYwNTk3Njgy", "avatar_url": "https://avatars.githubusercontent.com/u/60597682?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Jiaao-Bai", "html_url": "https://github.com/Jiaao-Bai", "followers_url": "https://api.github.com/users/Jiaao-Bai/followers", "following_url": "https://api.github.com/users/Jiaao-Bai/following{/other_user}", "gists_url": "https://api.github.com/users/Jiaao-Bai/gists{/gist_id}", "starred_url": "https://api.github.com/users/Jiaao-Bai/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Jiaao-Bai/subscriptions", "organizations_url": "https://api.github.com/users/Jiaao-Bai/orgs", "repos_url": "https://api.github.com/users/Jiaao-Bai/repos", "events_url": "https://api.github.com/users/Jiaao-Bai/events{/privacy}", "received_events_url": "https://api.github.com/users/Jiaao-Bai/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 996845227, "node_id": "MDU6TGFiZWw5OTY4NDUyMjc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:dist-strat", "name": "comp:dist-strat", "color": "0052cc", "default": false, "description": "Distribution Strategy related issues" }, { "id": 3797168204, "node_id": "LA_kwDOArmXAs7iVDBM", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.8", "name": "TF 2.8", "color": "5DC9D0", "default": false, "description": "" } ]
closed
false
{ "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false }
[ { "login": "sushreebarsa", "id": 84765720, "node_id": "MDQ6VXNlcjg0NzY1NzIw", "avatar_url": "https://avatars.githubusercontent.com/u/84765720?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sushreebarsa", "html_url": "https://github.com/sushreebarsa", "followers_url": "https://api.github.com/users/sushreebarsa/followers", "following_url": "https://api.github.com/users/sushreebarsa/following{/other_user}", "gists_url": "https://api.github.com/users/sushreebarsa/gists{/gist_id}", "starred_url": "https://api.github.com/users/sushreebarsa/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sushreebarsa/subscriptions", "organizations_url": "https://api.github.com/users/sushreebarsa/orgs", "repos_url": "https://api.github.com/users/sushreebarsa/repos", "events_url": "https://api.github.com/users/sushreebarsa/events{/privacy}", "received_events_url": "https://api.github.com/users/sushreebarsa/received_events", "type": "User", "site_admin": false } ]
null
[ "run this demo in two process plz", "@Jiaao-Bai Could you please verify that the NCCL library is installed correctly and that its version is compatible with the TensorFlow version. Check for any environment variables or configuration settings that might affect NCCL communication.\r\nPlease try with the latest tensorflow version as older versions are not recommended as well. Thank you!", "i add control_dependencies and result is correct, thanks\r\n\r\n```\r\n\"\"\"Illustrate AllReduce\"\"\"\r\n\r\nimport multiprocessing as mp\r\nimport logging\r\nimport time\r\nimport threading\r\nimport tensorflow as tf\r\nfrom tensorflow.python.ops import collective_ops\r\nimport os\r\nimport json\r\nimport datetime\r\nfrom tensorflow.python.framework import ops\r\nfrom typing import Callable, List, Optional, Union\r\nfrom tensorflow.python.types import core\r\nfrom tensorflow.python.client import timeline\r\n\r\nFORMAT = '%(asctime)s, %(levelname)-4s [%(filename)s:%(lineno)d] %(message)s'\r\nlogging.basicConfig(format=FORMAT, level=logging.INFO)\r\ntf.compat.v1.disable_eager_execution()\r\n\r\nos.environ['TF_CONFIG'] = json.dumps({\r\n 'cluster': {\r\n 'worker': [\"jscs-ai-nn-28:12345\",\"jscs-ai-nn-28:23456\"]\r\n },\r\n 'task': {'type': 'worker', 'index': 0}\r\n})\r\n\r\nMP_METHOD = 'fork'\r\nNUM_PROCESSES = 2\r\nCHIEF_INDEX = 0\r\nJOB = 'worker'\r\nDELAY = 0.01\r\n\r\ndef control_input(control_input: Union[core.TensorLike,\r\n ops.Operation]):\r\n if control_input is not None:\r\n return ops.control_dependencies([control_input])\r\n return ops.NullContextmanager()\r\n\r\n\r\ndef process_fn():\r\n \"\"\"allreduce process\"\"\"\r\n cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver()\r\n cluster_spec = cluster_resolver.cluster_spec()\r\n task_index = cluster_resolver.task_id\r\n\r\n\r\n config = tf.compat.v1.ConfigProto()\r\n config.experimental.collective_group_leader = \"/job:worker/replica:0/task:0\"\r\n server = tf.compat.v1.train.Server(cluster_spec,\r\n config=config,\r\n job_name='worker',\r\n task_index=task_index)\r\n with tf.Graph().as_default():\r\n sync_ops = []\r\n for i in range(100):\r\n token = tf.compat.v1.get_variable(name = str(i), shape = [128, 4])\r\n if sync_ops:\r\n depend = sync_ops[-1]\r\n else:\r\n depend = None\r\n with control_input(depend):\r\n sync_op = collective_ops.all_reduce(\r\n token, NUM_PROCESSES, 0, i, 'Add', 'Div', (0,), 'nccl')\r\n sync_ops.append(sync_op)\r\n\r\n with tf.compat.v1.Session(\r\n target=server.target,\r\n config = config) \\\r\n as mon_sess:\r\n time.sleep((task_index + 1) * DELAY)\r\n run_metadata = tf.compat.v1.RunMetadata()\r\n profile_options = tf.compat.v1.RunOptions(trace_level=tf.compat.v1.RunOptions.FULL_TRACE)\r\n\r\n gvi = tf.compat.v1.global_variables_initializer()\r\n mon_sess.run(gvi)\r\n batch = 0\r\n start_time = datetime.datetime.now()\r\n print(\"begin to run\")\r\n while 1:\r\n ret = mon_sess.run(sync_ops, options=profile_options, run_metadata=run_metadata) # successful\r\n batch += 1\r\n if batch % 1 == 0:\r\n tl = timeline.Timeline(run_metadata.step_stats)\r\n ctf = tl.generate_chrome_trace_format()\r\n timline_json = \"./timeline_{}_{}.json\".format(task_index, batch)\r\n with open(timline_json, 'w+') as f:\r\n f.write(ctf)\r\n delta = datetime.datetime.now() - start_time\r\n start_time = datetime.datetime.now()\r\n print('batch {}, delta {}.'.format(batch, delta.total_seconds()))\r\n return\r\n else:\r\n print('batch {}.'.format(batch))\r\n\r\ndef start_process():\r\n \"\"\"start process\"\"\"\r\n 
process_fn()\r\n\r\nif __name__ == '__main__':\r\n start_process()\r\n```", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62493\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62493\">No</a>\n", "@Jiaao-Bai Glad the issue has been resolved!\r\nThank you for the confirmation." ]
2023-11-28T12:46:36
2023-12-04T04:17:17
2023-12-04T04:07:23
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source binary ### TensorFlow version tf2.8 ### Custom code Yes ### OS platform and distribution ubuntu 20.04 ### Mobile device not mobile device ### Python version 3.10.6 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? the result is wrong should be [0.0, 1.0, 2.0, 3.0, 4.0, 5.0, 6.0, 7.0, 8.0, 9.0] ### Standalone code to reproduce the issue ```shell """Illustrate AllReduce""" import multiprocessing as mp import logging import time import threading import tensorflow as tf from tensorflow.python.ops import collective_ops import os import json import datetime FORMAT = '%(asctime)s, %(levelname)-4s [%(filename)s:%(lineno)d] %(message)s' logging.basicConfig(format=FORMAT, level=logging.INFO) tf.compat.v1.disable_eager_execution() os.environ['TF_CONFIG'] = json.dumps({ 'cluster': { #change this 'worker': ["jscs-ai-nn-28:12345","jscs-ai-nn-28:23456"] }, #change this 'task': {'type': 'worker', 'index': 0} }) MP_METHOD = 'fork' NUM_PROCESSES = 2 CHIEF_INDEX = 0 JOB = 'worker' DELAY = 0.01 def process_fn(): """allreduce process""" cluster_resolver = tf.distribute.cluster_resolver.TFConfigClusterResolver() cluster_spec = cluster_resolver.cluster_spec() task_index = cluster_resolver.task_id config = tf.compat.v1.ConfigProto() config.experimental.collective_group_leader = "/job:worker/replica:0/task:0" server = tf.compat.v1.train.Server(cluster_spec, config=config, job_name='worker', task_index=task_index) with tf.Graph().as_default(): sync_ops = [] for i in range(10): # token = tf.compat.v1.get_variable(name = str(i), shape = [2, 3]) token = tf.constant(i, dtype=tf.float32, name = str(i)) sync_op = collective_ops.all_reduce_v2( token, NUM_PROCESSES, 0, i, 'Add', 'Div', 'nccl') sync_ops.append(sync_op) with tf.compat.v1.Session( target=server.target, config = config) \ as mon_sess: time.sleep((task_index + 1) * DELAY) gvi = tf.compat.v1.global_variables_initializer() mon_sess.run(gvi) batch = 0 start_time = datetime.datetime.now() print("begin to run") while 1: ret = mon_sess.run(sync_ops) # successful print(ret) batch += 1 if batch % 1 == 0: delta = datetime.datetime.now() - start_time start_time = datetime.datetime.now() print('batch {}, delta {}.'.format(batch, delta.total_seconds())) return else: print('batch {}.'.format(batch)) def start_process(): """start process""" process_fn() if __name__ == '__main__': start_process() ``` ### Relevant log output ```shell begin to run [0.5, 4.0, 2.5, 6.0, 3.0, 2.5, 5.0, 6.0, 7.0, 8.5] batch 1, delta 1.517457. ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62493/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62493/timeline
null
completed
false
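The resolution posted in issue 62493 above chains the NCCL all-reduce ops with control dependencies so that every worker launches the collectives in the same order. A stripped-down sketch of that ordering pattern is shown below; it only builds the graph, and actually running it still requires the two-process cluster and session setup from the original report.

```python
import tensorflow as tf
from tensorflow.python.framework import ops
from tensorflow.python.ops import collective_ops

tf.compat.v1.disable_eager_execution()

NUM_WORKERS = 2
sync_ops = []
for i in range(10):
    token = tf.constant(float(i), name=str(i))
    # Force each collective to start only after the previous one, so all
    # workers issue the NCCL kernels in the same order.
    deps = [sync_ops[-1]] if sync_ops else []
    with ops.control_dependencies(deps):
        sync_ops.append(
            collective_ops.all_reduce_v2(
                token, NUM_WORKERS, group_key=0, instance_key=i,
                merge_op="Add", final_op="Div",
                communication_hint="nccl"))
```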
https://api.github.com/repos/tensorflow/tensorflow/issues/62492
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62492/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62492/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62492/events
https://github.com/tensorflow/tensorflow/issues/62492
2,014,349,779
I_kwDOArmXAs54EInT
62,492
Python code modifies loop while iterating over it
{ "login": "Sjord", "id": 113030, "node_id": "MDQ6VXNlcjExMzAzMA==", "avatar_url": "https://avatars.githubusercontent.com/u/113030?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Sjord", "html_url": "https://github.com/Sjord", "followers_url": "https://api.github.com/users/Sjord/followers", "following_url": "https://api.github.com/users/Sjord/following{/other_user}", "gists_url": "https://api.github.com/users/Sjord/gists{/gist_id}", "starred_url": "https://api.github.com/users/Sjord/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Sjord/subscriptions", "organizations_url": "https://api.github.com/users/Sjord/orgs", "repos_url": "https://api.github.com/users/Sjord/repos", "events_url": "https://api.github.com/users/Sjord/events{/privacy}", "received_events_url": "https://api.github.com/users/Sjord/received_events", "type": "User", "site_admin": false }
[ { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" } ]
closed
false
{ "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false }
[ { "login": "Venkat6871", "id": 147127861, "node_id": "U_kgDOCMT-NQ", "avatar_url": "https://avatars.githubusercontent.com/u/147127861?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Venkat6871", "html_url": "https://github.com/Venkat6871", "followers_url": "https://api.github.com/users/Venkat6871/followers", "following_url": "https://api.github.com/users/Venkat6871/following{/other_user}", "gists_url": "https://api.github.com/users/Venkat6871/gists{/gist_id}", "starred_url": "https://api.github.com/users/Venkat6871/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Venkat6871/subscriptions", "organizations_url": "https://api.github.com/users/Venkat6871/orgs", "repos_url": "https://api.github.com/users/Venkat6871/repos", "events_url": "https://api.github.com/users/Venkat6871/events{/privacy}", "received_events_url": "https://api.github.com/users/Venkat6871/received_events", "type": "User", "site_admin": false } ]
null
[ "Doing this will pass a copy of the list for checking then modyfing the original list seperately:\r\nfor p in self._last_checkpoints[:]:\r\n if latest_save_path == self._CheckpointFilename(p):\r\n self._last_checkpoints.remove(p)\r\n", "can in be assigned this issue\r\n", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62492\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62492\">No</a>\n" ]
2023-11-28T12:39:28
2023-11-30T07:10:08
2023-11-30T07:10:05
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? The code is present on the master branch, but I found this bug by looking at the code, not from executing TensorFlow. ### Source source ### TensorFlow version master ### Custom code No ### OS platform and distribution _No response_ ### Mobile device _No response_ ### Python version _No response_ ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? In tensorflow/python/training/saver.py, in _RecordLastCheckpoint: ``` # Remove first from list if the same name was used before. for p in self._last_checkpoints: if latest_save_path == self._CheckpointFilename(p): self._last_checkpoints.remove(p) ``` This modifies the `_last_checkpoints` list while iterating over it, causing the loop to skip steps. This seems like a bug, from looking at the code. This should probably be something like: ``` self._last_checkpoints = [p for p in self._last_checkpoints if latest_save_path != self._CheckpointFilename(p)] ``` ### Standalone code to reproduce the issue ```shell Example program: l = [1, 1, 1, 1, 1, 1, 1, 1] for e in l: if e == 1: l.remove(e) print(l) ``` Output ``` [1, 1, 1, 1] ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62492/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62492/timeline
null
completed
false
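Issue 62492 above already names the fix (a list comprehension, or iterating over a copy, as the first comment suggests). For completeness, a small self-contained demo, independent of the TensorFlow code path, contrasts the skipping behaviour with the two safe variants.

```python
# Buggy: mutating the list while iterating over it makes the iterator skip items.
items = [1, 1, 1, 1, 1, 1, 1, 1]
for e in items:
    if e == 1:
        items.remove(e)
print(items)  # [1, 1, 1, 1] -- half the elements survive

# Safe variant 1: rebuild the list with a comprehension.
items = [1, 1, 1, 1, 1, 1, 1, 1]
items = [e for e in items if e != 1]
print(items)  # []

# Safe variant 2: iterate over a copy while removing from the original.
items = [1, 1, 1, 1, 1, 1, 1, 1]
for e in items[:]:
    if e == 1:
        items.remove(e)
print(items)  # []
```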
https://api.github.com/repos/tensorflow/tensorflow/issues/62491
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62491/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62491/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62491/events
https://github.com/tensorflow/tensorflow/issues/62491
2,014,335,155
I_kwDOArmXAs54EFCz
62,491
Keras namespace stubs are gone after upgrading to 2.15
{ "login": "tbhaxor", "id": 28386721, "node_id": "MDQ6VXNlcjI4Mzg2NzIx", "avatar_url": "https://avatars.githubusercontent.com/u/28386721?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tbhaxor", "html_url": "https://github.com/tbhaxor", "followers_url": "https://api.github.com/users/tbhaxor/followers", "following_url": "https://api.github.com/users/tbhaxor/following{/other_user}", "gists_url": "https://api.github.com/users/tbhaxor/gists{/gist_id}", "starred_url": "https://api.github.com/users/tbhaxor/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tbhaxor/subscriptions", "organizations_url": "https://api.github.com/users/tbhaxor/orgs", "repos_url": "https://api.github.com/users/tbhaxor/repos", "events_url": "https://api.github.com/users/tbhaxor/events{/privacy}", "received_events_url": "https://api.github.com/users/tbhaxor/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097546578, "node_id": "MDU6TGFiZWwxMDk3NTQ2NTc4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:keras", "name": "comp:keras", "color": "0052cc", "default": false, "description": "Keras related issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@tbhaxor,\r\nHi,\r\n\r\nThanks for reporting the issue.\r\n\r\nSince the migration, there were many changes made, the error you are getting is due to one such change here https://github.com/keras-team/tf-keras/commit/cff6ac903e2b8a0dde2a469d949f0f0ce3b5f282.\r\n\r\nTo get rid of the error, you need to install tf-keras-nightly as well as tf-nightly.\r\n\r\nNote that, tf-keras-nightly is legacy Keras code, to use the Keras 3 with multi-backend support, use keras-nightly and import Keras directly.\r\n\r\nAlso the fix will be available once both Tensorflow and tf-keras do the next stable release, until then continue using as suggested. Thank you!", "Hi @tilakrayal Thanks for your response. Unfortunately it didn't work for me. \r\n\r\nThough it was not working with our process (we are using pypoetry). I have tried using virtualenv \r\n\r\n```console\r\ncd $(mktemp -d)\r\nvirtualenv venv && source venv/bin/activate\r\npip install -U pip\r\npip install tf-keras-nightly tf-nightly\r\n\r\ncode .\r\n```\r\n\r\nOn `tf.ker` it shows same output as uploaded above.", "Hi, \r\n\r\nKeras is migrated to Keras 3 with multi backend support.\r\n\r\nCould you please install Keras separately using `pip install keras==3.0.0` and `import keras` directly and let us know the outcome with `keras.layers` etc", "Yes, we have already migrated to Keras 3 and it's working fine. Thanks for your response.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62491\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62491\">No</a>\n" ]
2023-11-28T12:30:46
2023-12-06T05:36:01
2023-12-06T05:35:58
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version v2.15.0-rc1-8-g6887368d6d4 ### Custom code Yes ### OS platform and distribution Arch Linux x64 ### Mobile device _No response_ ### Python version 3.11 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version _No response_ ### GPU model and memory _No response_ ### Current behavior? I upgraded it from 2.14 to 2.15 and the keras intellisense stub in the vscode is missing. ![image](https://github.com/tensorflow/tensorflow/assets/28386721/8b9f63eb-3012-4758-b5de-a325ac9bfc83) I restarted the LSP, still it doesn't show `keras` namespace. However, on checking in the REPL it shows the completion ![image](https://github.com/tensorflow/tensorflow/assets/28386721/a11bf08d-0c0c-466a-b647-d216be5a5b2e) ### Standalone code to reproduce the issue ```shell import tensorflow as tf ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62491/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62491/timeline
null
completed
false
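Per the resolution of issue 62491 above, the missing editor completions were addressed by installing Keras 3 and importing it directly rather than going through `tf.keras`. A minimal sanity check is sketched below; the layer sizes are arbitrary and the printed versions are simply whatever is installed locally.

```python
# pip install "keras>=3.0.0"
import keras
import tensorflow as tf

print(keras.__version__)  # standalone Keras 3.x
print(tf.__version__)

# Importing keras directly lets editors resolve the package's own symbols.
model = keras.Sequential([
    keras.Input(shape=(4,)),
    keras.layers.Dense(2, activation="softmax"),
])
model.summary()
```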
https://api.github.com/repos/tensorflow/tensorflow/issues/62490
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62490/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62490/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62490/events
https://github.com/tensorflow/tensorflow/issues/62490
2,014,161,874
I_kwDOArmXAs54DavS
62,490
[AARCH64] Building TF 2.15.0 from sources failed with undefined __Int8x8_t
{ "login": "smuzaffar", "id": 4115138, "node_id": "MDQ6VXNlcjQxMTUxMzg=", "avatar_url": "https://avatars.githubusercontent.com/u/4115138?v=4", "gravatar_id": "", "url": "https://api.github.com/users/smuzaffar", "html_url": "https://github.com/smuzaffar", "followers_url": "https://api.github.com/users/smuzaffar/followers", "following_url": "https://api.github.com/users/smuzaffar/following{/other_user}", "gists_url": "https://api.github.com/users/smuzaffar/gists{/gist_id}", "starred_url": "https://api.github.com/users/smuzaffar/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/smuzaffar/subscriptions", "organizations_url": "https://api.github.com/users/smuzaffar/orgs", "repos_url": "https://api.github.com/users/smuzaffar/repos", "events_url": "https://api.github.com/users/smuzaffar/events{/privacy}", "received_events_url": "https://api.github.com/users/smuzaffar/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473173351, "node_id": "MDU6TGFiZWw0NzMxNzMzNTE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:build/install", "name": "type:build/install", "color": "159b2e", "default": false, "description": "Build and install issues" }, { "id": 1205615612, "node_id": "MDU6TGFiZWwxMjA1NjE1NjEy", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/subtype:%20ubuntu/linux", "name": "subtype: ubuntu/linux", "color": "b619ea", "default": false, "description": "Ubuntu/Linux Build/Installation Issues" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "I have the same build problem on the Arm64 with CPU:\r\n`processor\t: 0\r\nmodel name\t: ARMv8 Processor rev 1 (v8l)\r\nBogoMIPS\t: 62.50\r\nFeatures\t: fp asimd evtstrm aes pmull sha1 sha2 crc32 atomics fphp asimdhp cpuid asimdrdm lrcpc dcpop asimddp uscat ilrcpc flagm\r\nCPU implementer\t: 0x41\r\nCPU architecture: 8\r\nCPU variant\t: 0x0\r\nCPU part\t: 0xd42\r\nCPU revision\t: 1\r\n`\r\n\r\nThe CPU doesn't have the NEON feature at all. It needs to turn off Arm-neon support at this platform. Any suggestion to turn off?", "Still broken on 2.16.1", "This seems to work around the issue for me - built on grace hopper, TF 2.16.1 using spack.\r\n\r\nChange to TF:\r\n```diff\r\ndiff --git a/third_party/absl/workspace.bzl b/third_party/absl/workspace.bzl\r\nindex 06f75166ce4b..56d146d65abe 100644\r\n--- a/third_party/absl/workspace.bzl\r\n+++ b/third_party/absl/workspace.bzl\r\n@@ -42,6 +42,7 @@ def repo():\r\n build_file = \"//third_party/absl:com_google_absl.BUILD\",\r\n system_build_file = \"//third_party/absl:system.BUILD\",\r\n system_link_files = SYS_LINKS,\r\n+ patch_file = [\"//third_party/absl:absl_neon.patch\"],\r\n strip_prefix = \"abseil-cpp-{commit}\".format(commit = ABSL_COMMIT),\r\n urls = tf_mirror_urls(\"https://github.com/abseil/abseil-cpp/archive/{commit}.tar.gz\".format(commit = ABSL_COMMIT)),\r\n )\r\n```\r\n\r\nPatch for absl:\r\n```diff\r\ndiff --git a/absl/base/config.h b/absl/base/config.h\r\nindex 5fa9f0efe5a4..741e320fe40c 100644\r\n--- a/absl/base/config.h\r\n+++ b/absl/base/config.h\r\n@@ -962,7 +962,7 @@ static_assert(ABSL_INTERNAL_INLINE_NAMESPACE_STR[0] != 'h' ||\r\n // https://llvm.org/docs/CompileCudaWithLLVM.html#detecting-clang-vs-nvcc-from-code\r\n #ifdef ABSL_INTERNAL_HAVE_ARM_NEON\r\n #error ABSL_INTERNAL_HAVE_ARM_NEON cannot be directly set\r\n-#elif defined(__ARM_NEON) && !defined(__CUDA_ARCH__)\r\n+#elif defined(__ARM_NEON) && !defined(__CUDACC__)\r\n #define ABSL_INTERNAL_HAVE_ARM_NEON 1\r\n #endif\r\n```\r\n" ]
2023-11-28T10:51:12
2024-04-25T16:02:46
null
CONTRIBUTOR
null
null
null
### Issue type Build/Install ### Have you reproduced the bug with TensorFlow Nightly? No ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution RHEL 8 ### Mobile device _No response_ ### Python version 3.9 ### Bazel version 6.1.0 ### GCC/compiler version GCC 12.3 ### CUDA/cuDNN version Cuda 12.2 , cuDNN 8.8.0 ### GPU model and memory _No response_ ### Current behavior? Building TF 2.15.0 from sources for aarch64 fails with error like [a]. Note that building TF 2.15.0 from sources for x86_64 worked fine. [a] ``` ERROR: <path>/tensorflow-2.15.0/tensorflow/core/kernels/BUILD:5131:18: Compiling tensorflow/core/kernels/sparse_tensor_dense_matmul_op_gpu.cu.cc failed: (Exit 4): crosstool_wrapper_driver_is_not_gcc failed: error executing command (from target //tensorflow/core/kernels:sparse_tensor_dense_matmul_op_gpu) TF2_BEHAVIOR=1 \ TF_CUDA_COMPUTE_CAPABILITIES=compute_60,compute_70,compute_75,compute_80,compute_89 \ TF_CUDA_PATHS=<path>/cudnn/8.8.0.121-7bc7095db72117b743b32c95e6e3687e \ TF_CUDA_VERSION=12.2 \ TF_SYSTEM_LIBS=absl_py,boringssl,com_github_grpc_grpc,curl,cython,eigen_archive,flatbuffers,gif,libjpeg_turbo,org_sqlite,pasta,png,pybind11,zlib \ external/local_config_cuda/crosstool/clang/bin/crosstool_wrapper_driver_is_not_gcc -MD -MF bazel-out/aarch64-opt/bin/tensorflow/core/kernels/_objs/sparse_tensor_dense_matmul_op_gpu/sparse_tensor_dense_matmul_op_gpu.cu.pic.d '-frandom-seed=bazel-out/aarch64-opt/bin/tensorflow/core/kernels/_objs/sparse_tensor_dense_matmul_op_gpu/sparse_tensor_dense_matmul_op_gpu.cu.pic.o' -DEIGEN_MPL2_ONLY '-DEIGEN_MAX_ALIGN_BYTES=64' -DHAVE_SYS_UIO_H -DTF_USE_SNAPPY '-DBAZEL_CURRENT_REPOSITORY=""' -iquote . -iquote bazel-out/aarch64-opt/bin -iquote external/com_google_absl -iquote bazel-out/aarch64-opt/bin/external/com_google_absl -iquote external/nsync -iquote bazel-out/aarch64-opt/bin/external/nsync -iquote external/com_google_protobuf -iquote bazel-out/aarch64-opt/bin/external/com_google_protobuf -iquote external/local_tsl -iquote bazel-out/aarch64-opt/bin/external/local_tsl -iquote external/com_googlesource_code_re2 -iquote bazel-out/aarch64-opt/bin/external/com_googlesource_code_re2 -iquote external/farmhash_archive -iquote bazel-out/aarch64-opt/bin/external/farmhash_archive -iquote external/fft2d -iquote bazel-out/aarch64-opt/bin/external/fft2d -iquote external/highwayhash -iquote bazel-out/aarch64-opt/bin/external/highwayhash -iquote external/gif -iquote bazel-out/aarch64-opt/bin/external/gif -iquote external/libjpeg_turbo -iquote bazel-out/aarch64-opt/bin/external/libjpeg_turbo -iquote external/zlib -iquote bazel-out/aarch64-opt/bin/external/zlib -iquote external/eigen_archive -iquote bazel-out/aarch64-opt/bin/external/eigen_archive -iquote external/ml_dtypes -iquote bazel-out/aarch64-opt/bin/external/ml_dtypes -iquote external/local_config_cuda -iquote bazel-out/aarch64-opt/bin/external/local_config_cuda -iquote external/snappy -iquote bazel-out/aarch64-opt/bin/external/snappy -iquote external/double_conversion -iquote bazel-out/aarch64-opt/bin/external/double_conversion -iquote external/nccl_archive -iquote bazel-out/aarch64-opt/bin/external/nccl_archive -iquote external/local_config_rocm -iquote bazel-out/aarch64-opt/bin/external/local_config_rocm -iquote external/local_config_tensorrt -iquote bazel-out/aarch64-opt/bin/external/local_config_tensorrt -iquote external/local_xla -iquote bazel-out/aarch64-opt/bin/external/local_xla -Ibazel-out/aarch64-opt/bin/external/ml_dtypes/_virtual_includes/float8 
-Ibazel-out/aarch64-opt/bin/external/ml_dtypes/_virtual_includes/int4 -Ibazel-out/aarch64-opt/bin/external/local_config_cuda/cuda/_virtual_includes/cuda_headers_virtual -Ibazel-out/aarch64-opt/bin/external/nccl_archive/_virtual_includes/nccl_config -Ibazel-out/aarch64-opt/bin/external/local_config_tensorrt/_virtual_includes/tensorrt_headers -isystem external/nsync/public -isystem bazel-out/aarch64-opt/bin/external/nsync/public -isystem external/com_google_protobuf/src -isystem bazel-out/aarch64-opt/bin/external/com_google_protobuf/src -isystem external/farmhash_archive/src -isystem bazel-out/aarch64-opt/bin/external/farmhash_archive/src -isystem external/gif/include -isystem bazel-out/aarch64-opt/bin/external/gif/include -isystem external/libjpeg_turbo/include -isystem bazel-out/aarch64-opt/bin/external/libjpeg_turbo/include -isystem external/zlib -isystem bazel-out/aarch64-opt/bin/external/zlib -isystem external/eigen_archive/include/eigen3 -isystem bazel-out/aarch64-opt/bin/external/eigen_archive/include/eigen3 -isystem external/ml_dtypes -isystem bazel-out/aarch64-opt/bin/external/ml_dtypes -isystem external/ml_dtypes/ml_dtypes -isystem bazel-out/aarch64-opt/bin/external/ml_dtypes/ml_dtypes -isystem external/local_config_cuda/cuda -isystem bazel-out/aarch64-opt/bin/external/local_config_cuda/cuda -isystem external/local_config_cuda/cuda/cuda/include -isystem bazel-out/aarch64-opt/bin/external/local_config_cuda/cuda/cuda/include -isystem external/local_config_rocm/rocm -isystem bazel-out/aarch64-opt/bin/external/local_config_rocm/rocm -isystem external/local_config_rocm/rocm/rocm/include -isystem bazel-out/aarch64-opt/bin/external/local_config_rocm/rocm/rocm/include -isystem external/local_config_rocm/rocm/rocm/include/rocrand -isystem bazel-out/aarch64-opt/bin/external/local_config_rocm/rocm/rocm/include/rocrand -isystem external/local_config_rocm/rocm/rocm/include/roctracer -isystem bazel-out/aarch64-opt/bin/external/local_config_rocm/rocm/rocm/include/roctracer -Wno-builtin-macro-redefined '-D__DATE__="redacted"' '-D__TIMESTAMP__="redacted"' '-D__TIME__="redacted"' -fPIC -U_FORTIFY_SOURCE '-D_FORTIFY_SOURCE=1' -fstack-protector -Wall -fno-omit-frame-pointer -no-canonical-prefixes -fno-canonical-system-headers -DNDEBUG -g0 -O2 -ffunction-sections -fdata-sections -Wno-all -Wno-extra -Wno-deprecated -Wno-deprecated-declarations -Wno-ignored-attributes -Wno-array-bounds -Wunused-result '-Werror=unused-result' -Wswitch '-Werror=switch' '-Wno-error=unused-but-set-variable' -DAUTOLOAD_DYNAMIC_KERNELS '-march=armv8-a' -mno-outline-atomics -Wno-sign-compare '-std=c++17' '-std=c++17' -x cuda '-DGOOGLE_CUDA=1' '--cuda-include-ptx=sm_60' '--cuda-gpu-arch=sm_60' '--cuda-include-ptx=sm_70' '--cuda-gpu-arch=sm_70' '--cuda-include-ptx=sm_75' '--cuda-gpu-arch=sm_75' '--cuda-include-ptx=sm_80' '--cuda-gpu-arch=sm_80' '--cuda-include-ptx=sm_89' '--cuda-gpu-arch=sm_89' '-Xcuda-fatbinary=--compress-all' '-nvcc_options=expt-relaxed-constexpr' -DEIGEN_AVOID_STL_ARRAY -Iexternal/gemmlowp -Wno-sign-compare '-ftemplate-depth=900' -fno-exceptions '-DGOOGLE_CUDA=1' '-DTENSORFLOW_USE_NVCC=1' -pthread '-nvcc_options=relaxed-constexpr' '-nvcc_options=ftz=true' -c tensorflow/core/kernels/sparse_tensor_dense_matmul_op_gpu.cu.cc -o bazel-out/aarch64-opt/bin/tensorflow/core/kernels/_objs/sparse_tensor_dense_matmul_op_gpu/sparse_tensor_dense_matmul_op_gpu.cu.pic.o) # Configuration: b904123c2caf5d17d0cded6e4d2ae3e922ea3a81ef7f2bc493d95e1ad6f05410 # Execution platform: @local_execution_config_platform//:platform In file 
included from external/local_xla/xla/stream_executor/device_options.h:27, from external/local_xla/xla/stream_executor/platform.h:27, from external/local_xla/xla/stream_executor/cuda/cuda_platform_id.h:19, from ./tensorflow/core/platform/stream_executor.h:19, from ./tensorflow/core/util/gpu_launch_config.h:27, from ./tensorflow/core/util/gpu_kernel_helper.h:28, from tensorflow/core/kernels/sparse_tensor_dense_matmul_op_gpu.cu.cc:24: external/com_google_absl/absl/log/check.h:57: warning: "CHECK" redefined 57 | #define CHECK(condition) ABSL_CHECK_IMPL((condition), #condition) | In file included from external/local_tsl/tsl/platform/logging.h:26, from external/local_tsl/tsl/platform/refcount.h:23, from ./tensorflow/core/platform/refcount.h:20, from ./tensorflow/core/lib/core/refcount.h:19, from ./tensorflow/core/framework/resource_base.h:23, from ./tensorflow/core/framework/resource_handle.h:21, from ./tensorflow/core/framework/register_types.h:21, from tensorflow/core/kernels/sparse_tensor_dense_matmul_op_gpu.cu.cc:21: external/local_tsl/tsl/platform/default/logging.h:308: note: this is the location of the previous definition 308 | #define CHECK(condition) \ | external/com_google_absl/absl/log/check.h:65: warning: "QCHECK" redefined 65 | #define QCHECK(condition) ABSL_QCHECK_IMPL((condition), #condition) | external/local_tsl/tsl/platform/default/logging.h:542: note: this is the location of the previous definition 542 | #define QCHECK(condition) CHECK(condition) | external/com_google_absl/absl/log/check.h:88: warning: "DCHECK" redefined 88 | #define DCHECK(condition) ABSL_DCHECK_IMPL((condition), #condition) .... .... <gcc-12.3>/bin/../lib/gcc/aarch64-redhat-linux-gnu/12.3.1/include/arm_neon.h(40): error: identifier "__Int8x8_t" is undefined typedef __Int8x8_t int8x8_t; ^ <gcc-12.3>/bin/../lib/gcc/aarch64-redhat-linux-gnu/12.3.1/include/arm_neon.h(41): error: identifier "__Int16x4_t" is undefined typedef __Int16x4_t int16x4_t; ^ <gcc-12.3>/bin/../lib/gcc/aarch64-redhat-linux-gnu/12.3.1/include/arm_neon.h(42): error: identifier "__Int32x2_t" is undefined typedef __Int32x2_t int32x2_t; ^ <gcc-12.3>/bin/../lib/gcc/aarch64-redhat-linux-gnu/12.3.1/include/arm_neon.h(43): error: identifier "__Int64x1_t" is undefined typedef __Int64x1_t int64x1_t; ``` ### Standalone code to reproduce the issue ```shell it is a build/configure issue ``` ### Relevant log output _No response_
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62490/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62490/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62489
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62489/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62489/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62489/events
https://github.com/tensorflow/tensorflow/issues/62489
2,013,115,806
I_kwDOArmXAs53_bWe
62,489
Op type not registered 'DisableCopyOnRead'
{ "login": "pedro21900", "id": 61558221, "node_id": "MDQ6VXNlcjYxNTU4MjIx", "avatar_url": "https://avatars.githubusercontent.com/u/61558221?v=4", "gravatar_id": "", "url": "https://api.github.com/users/pedro21900", "html_url": "https://github.com/pedro21900", "followers_url": "https://api.github.com/users/pedro21900/followers", "following_url": "https://api.github.com/users/pedro21900/following{/other_user}", "gists_url": "https://api.github.com/users/pedro21900/gists{/gist_id}", "starred_url": "https://api.github.com/users/pedro21900/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/pedro21900/subscriptions", "organizations_url": "https://api.github.com/users/pedro21900/orgs", "repos_url": "https://api.github.com/users/pedro21900/repos", "events_url": "https://api.github.com/users/pedro21900/events{/privacy}", "received_events_url": "https://api.github.com/users/pedro21900/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473184161, "node_id": "MDU6TGFiZWw0NzMxODQxNjE=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:support", "name": "type:support", "color": "159b2e", "default": false, "description": "Support issues" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1097547147, "node_id": "MDU6TGFiZWwxMDk3NTQ3MTQ3", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:ops", "name": "comp:ops", "color": "0052cc", "default": false, "description": "OPs related issues" }, { "id": 5922361893, "node_id": "LA_kwDOArmXAs8AAAABYQASJQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF2.14", "name": "TF2.14", "color": "b60205", "default": false, "description": "For issues related to Tensorflow 2.14.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@pedro21900 \r\nIf you're loading a SavedModel that contains operations from TensorFlow Contrib, you need to ensure that TensorFlow Contrib is installed and accessible. \r\nIn order to expedite the trouble-shooting process here,Could you please fill the issue [template](https://github.com/tensorflow/tensorflow/issues/new/choose),\r\nThank you!", "I didn't use tf contrib.\r\n\r\nthis is my code in python\r\n\r\n```python\r\n\r\nimport pandas as pd\r\n\r\nimport seaborn as sns\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\n\r\nimport tensorflow as tf\r\n\r\n\r\n# Carregar dados localmente de um arquivo Excel\r\nsheet = pd.read_excel(\"/content/drive/MyDrive/dados_treinamento_ml/evidencias.xlsx\")\r\nsheet.columns= ['task','difficulty','activity','desc']\r\n\r\ndf = sheet\r\n#['task','difficulty','activity','desc']\r\n# Definir a função concat_names antes de usá-la\r\ndef concat_names(row):\r\n\r\n # Verificar se cada coluna não é nula antes de concatenar\r\n task = str(row['task']) if not pd.isnull(row['task']) else ''\r\n difficulty = str(row['difficulty']) if not pd.isnull(row['difficulty']) else ''\r\n if pd.notna(row['activity']) and not str(row['activity']).isdigit():\r\n return None # Retorna None para indicar que a linha deve ser ignorada\r\n\r\n activity = str(int(row['activity'])) if not pd.isnull(row['activity']) else ''\r\n\r\n\r\n # Realizar a concatenação\r\n return task + ' - ' + difficulty + ' - ' + activity\r\n\r\n# Aplicar a função à nova coluna 'label'\r\ndf['label'] = df.apply(concat_names, axis=1)\r\n\r\n\r\n\r\n# Remover colunas desnecessárias\r\ndf.drop(['task', 'difficulty', 'activity'], axis=1, inplace=True)\r\ndf.columns = ['text','label']\r\n# # Exibir o DataFrame resultante\r\ndf.head()\r\n\r\n\r\n!pip install unidecode\r\n\r\nfrom unidecode import unidecode\r\nfrom nltk.corpus import stopwords\r\nimport nltk\r\n\r\nnltk.download('stopwords')\r\nnltk.download('rslp')\r\n\r\ndef preprocess_text(text):\r\n #remove acentos\r\n text = unidecode(text)\r\n # Converter para minúsculas\r\n text = text.lower()\r\n # Remover stopwords\r\n stop_words = set(stopwords.words('portuguese'))\r\n text = ' '.join(word for word in text.split() if word not in stop_words)\r\n return text\r\n\r\n\r\n# Aplicar pré-processamento ao DataFrame\r\ndf['text'] = df['text'].apply(preprocess_text)\r\n\r\n\r\nfrom tensorflow.keras.preprocessing.text import Tokenizer\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\n\r\nMAX_SEQUENCE_LENGTH = 400\r\ntokenizer = Tokenizer()\r\ntokenizer.fit_on_texts(df['text'])\r\nX = tokenizer.texts_to_sequences(df['text'])\r\nX = pad_sequences(X, maxlen=MAX_SEQUENCE_LENGTH)\r\nX.shape\r\n\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom tensorflow.keras.utils import to_categorical\r\n\r\nlabel_encoder = LabelEncoder()\r\nY = label_encoder.fit_transform(df['label'])\r\n\r\n# Converter rótulos para one-hot encoding\r\nY = to_categorical(Y)\r\n\r\nfrom sklearn.model_selection import train_test_split\r\nX_train, X_test, y_train, y_test = train_test_split(X, Y, test_size = 0.2, random_state = 5)\r\n\r\n\r\nfrom gensim.models import KeyedVectors\r\nfrom tensorflow.keras.layers import Embedding\r\n\r\n# Caminho para o arquivo GloVe pré-treinado\r\nglove_file = '/content/drive/MyDrive/ml_libs/glove_s300.txt'\r\n# Carregar o modelo GloVe\r\nglove_model = KeyedVectors.load_word2vec_format(glove_file)\r\n\r\n# Dimensão do embedding\r\nembedding_dim = 300\r\n\r\n\r\n# Criar uma matriz de embedding para o seu 
vocabulário\r\nembedding_matrix = np.zeros((len(tokenizer.word_index) + 1, embedding_dim))\r\nfor word, i in tokenizer.word_index.items():\r\n if word in glove_model:\r\n embedding_vector = glove_model[word]\r\n embedding_matrix[i] = embedding_vector\r\n\r\n\r\n\r\nmodelo = tf.keras.models.Sequential()\r\n# Adicionar camada de embedding ao modelo Keras\r\nmodelo.add(Embedding(len(tokenizer.word_index) + 1,\r\n embedding_dim,\r\n weights=[embedding_matrix],\r\n input_length=MAX_SEQUENCE_LENGTH,\r\n trainable=False)) # Defina como True se desejar treinar os embeddings\r\nmodelo.add(tf.keras.layers.Dropout(0.5)) # Adicionando Dropout para evitar overfitting\r\nmodelo.add(tf.keras.layers.Conv1D(64, 5, padding=\"valid\", activation=\"relu\", strides=2))\r\nmodelo.add(tf.keras.layers.GlobalMaxPooling1D())\r\nmodelo.add(tf.keras.layers.Dense(Y.shape[1], activation='softmax'))\r\n# Compilação do modelo\r\nmodelo.compile(optimizer='adam', loss='categorical_crossentropy', metrics=['accuracy'])\r\n\r\n# Treinamento do modelo\r\nepochs_hist = modelo.fit(X_train, y_train, epochs=50, batch_size = 50, validation_split=0.2)\r\n\r\n\r\n# Pré-processamento do texto de exemplo\r\ntexto_tokenizado = tokenizer.texts_to_sequences([\"Ajuste no banco de dados\"])\r\ntexto_pad = pad_sequences(texto_tokenizado, maxlen=MAX_SEQUENCE_LENGTH)\r\n\r\n# Aplicação do modelo para fazer a previsão\r\nprevisao_probabilidades = modelo.predict(texto_pad)\r\npredicted_labels = np.argmax(previsao_probabilidades, axis=1)\r\npredicted_labels_original = label_encoder.inverse_transform(predicted_labels)\r\n# Agora, você pode acessar os valores reais das labels originais\r\nprint(predicted_labels_original)\r\n```\r\nbut my error is in java", "@pedro21900 Could you please let us know the TF version you are using here?\r\nI tried to replicate the issue and faced another [error](https://colab.research.google.com/gist/sushreebarsa/159fb268b06d7e90f30d20452758dc08/62489.ipynb) so could you please provide all the dependencies to replicate the issue reported. 
Thank you!", "The version I'm using is tensorflow is 2.14.0.\r\n\r\nThese are all imports:\r\n\r\n```python\r\nimport pandas as pd\r\n\r\nimport seaborn as sns\r\n\r\nimport matplotlib.pyplot as plt\r\n\r\nimport numpy as np\r\n\r\nimport tensorflow as tf\r\n\r\n\r\nfrom unidecode import unidecode\r\nfrom nltk.corpus import stopwords\r\nimport nltk\r\n\r\nfrom tensorflow.keras.preprocessing.text import Tokenizer\r\nfrom tensorflow.keras.preprocessing.sequence import pad_sequences\r\n\r\nfrom sklearn.preprocessing import LabelEncoder\r\nfrom tensorflow.keras.utils import to_categorical\r\n\r\nfrom gensim.models import KeyedVectors\r\nfrom tensorflow.keras.layers import Embedding\r\n\r\n```\r\nand in java it is used\r\n\r\n```xml\r\n\t\t<dependency>\r\n\t\t\t<groupId>org.tensorflow</groupId>\r\n\t\t\t<artifactId>tensorflow</artifactId>\r\n\t\t\t<version>1.15.0</version>\r\n\t\t</dependency>\r\n\r\n```\r\ncode in java \r\n\r\n```java\r\npackage site.horizon.fillevidence.service;\r\n\r\nimport lombok.extern.slf4j.Slf4j;\r\nimport org.springframework.stereotype.Service;\r\nimport org.tensorflow.*;\r\n\r\nimport java.nio.charset.StandardCharsets;\r\nimport java.util.Arrays;\r\n\r\n@Slf4j\r\n@Service\r\npublic class TensorFlowInferenceService {\r\n\r\n public void exec() {\r\n try {\r\n\r\n // Carregar o modelo SavedModel\r\n SavedModelBundle model = SavedModelBundle.load(\"ml_fill_evidence\", \"serve\");\r\n Session sess = model.session();\r\n\r\n // Texto de entrada (substitua isso pela sua entrada)\r\n String entrada = \"AJUSTES NA VIEW DE POPULAÇÃO DE BANCO LOCAL - BANCO\";\r\n\r\n // Pré-processamento da entrada\r\n byte[] textoBytes = entrada.getBytes(StandardCharsets.UTF_8);\r\n Tensor<String> inputTensor = Tensor.create(textoBytes, String.class);\r\n\r\n\r\n // Execute a inferência\r\n Tensor<?> result = sess.runner()\r\n .feed(\"embedding_input\", inputTensor) // Use o nome correto da entrada\r\n .fetch(\"dense\") // Use o nome correto da saída\r\n .run()\r\n .get(0);\r\n\r\n // Obtenha o resultado como um array de bytes\r\n byte[] resultadoBytes = new byte[(int) result.numElements()];\r\n result.copyTo(resultadoBytes);\r\n\r\n // Converta os bytes de volta para uma String\r\n String resultadoString = new String(resultadoBytes, StandardCharsets.UTF_8);\r\n\r\n // Pós-processamento\r\n // A parte equivalente ao np.argmax no Python\r\n // (neste exemplo, apenas exibe a classe prevista como string)\r\n String[] classes = resultadoString.split(\",\"); // Supondo que as classes são separadas por vírgula\r\n String predictedLabel = Arrays.stream(classes)\r\n .max(String::compareTo)\r\n .orElse(\"Classe não encontrada\");\r\n\r\n // Exiba o resultado\r\n System.out.println(\"Resultado: \" + predictedLabel);\r\n\r\n // Feche o modelo\r\n model.close();\r\n } catch (Exception e) {\r\n e.printStackTrace();\r\n }\r\n }\r\n}\r\n\r\n```\r\n\r\n", "Any ideas what to do", "Hi @pedro21900 could you make sure that the Java TF version is set to 0.5.0 or later (corresponds with TF > 2.10)? https://github.com/tensorflow/java/#tensorflowjava-version-support", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "```xml \r\n<dependency>\r\n\t\t\t<groupId>org.tensorflow</groupId>\r\n\t\t\t<artifactId>tensorflow</artifactId>\r\n\t\t\t<version>1.15.0</version>\r\n</dependency>\r\n```", "My guess is that you have multiple TensorFlow binaries, at least one used by Python and one by Java, and they are different versions. 
There were changes to TensorFlow in April 2022 related to `DisableCopyOnRead`. My guess is version 2.13 is the first release with this. If the version of TensorFlow used when running under Java was old, it would not have this op. Consistently using the same version, whether an older or a newer version should fix this. Rebuilding the one that generates the error so it uses a newer version is a possible solution.\r\n\r\nPlease comment again if this doesn't match your problem.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62489\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62489\">No</a>\n" ]
2023-11-27T21:15:23
2024-04-04T01:48:03
2024-04-04T01:48:00
NONE
null
null
null
I'm running my model in java and I get this error. My project was all done in Python, what should I do to solve this problem? Do I register this in Python or adjust it in Java? Restoring SavedModel bundle. ```python 2023-11-27 16:43:21.653275: I tensorflow/cc/saved_model/loader.cc:151] Running initialization op on SavedModel bundle at path: /home/lenonn/git/fill-evidence/src/main/resources/ml_fill_evidence 2023-11-27 16:43:21.656519: I tensorflow/cc/saved_model/loader.cc:311] SavedModel load for tags { serve }; Status: success. Took 46958 microseconds. org.tensorflow.TensorFlowException: Op type not registered 'DisableCopyOnRead' in binary running on rpawsds077523. Make sure the Op and Kernel are registered in the binary running in this process. Note that if you are loading a saved graph which used ops from tf.contrib, accessing (e.g.) `tf.contrib.resampler` should be done before importing the graph, as contrib ops are lazily registered when the module is first accessed. at org.tensorflow.SavedModelBundle.load(Native Method) at org.tensorflow.SavedModelBundle.access$000(SavedModelBundle.java:27) at org.tensorflow.SavedModelBundle$Loader.load(SavedModelBundle.java:32) at org.tensorflow.SavedModelBundle.load(SavedModelBundle.java:95) at site.horizon.fillevidence.service.TensorFlowInferenceService.exec(TensorFlowInferenceService.java:28) at site.horizon.fillevidence.FillEvidenceApplication.run(FillEvidenceApplication.java:28) at org.springframework.boot.SpringApplication.callRunner(SpringApplication.java:768) at org.springframework.boot.SpringApplication.callRunners(SpringApplication.java:752) at org.springframework.boot.SpringApplication.run(SpringApplication.java:314) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1303) at org.springframework.boot.SpringApplication.run(SpringApplication.java:1292) at site.horizon.fillevidence.FillEvidenceApplication.main(FillEvidenceApplication.java:21) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke0(Native Method) at java.base/jdk.internal.reflect.NativeMethodAccessorImpl.invoke(NativeMethodAccessorImpl.java:77) at java.base/jdk.internal.reflect.DelegatingMethodAccessorImpl.invoke(DelegatingMethodAccessorImpl.java:43) at java.base/java.lang.reflect.Method.invoke(Method.java:568) at org.springframework.boot.devtools.restart.RestartLauncher.run(RestartLauncher.java:50) ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62489/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62489/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62488
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62488/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62488/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62488/events
https://github.com/tensorflow/tensorflow/issues/62488
2,012,878,769
I_kwDOArmXAs53-hex
62,488
GPU-Specific Inconsistency in XLA Compiled Model with `tf.nn.atrous_conv2d`, `tf.cos`, and `tf.multiply`
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1097547538, "node_id": "MDU6TGFiZWwxMDk3NTQ3NTM4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:gpu", "name": "comp:gpu", "color": "0052cc", "default": false, "description": "GPU related issues" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi **@GwiHwan-Go** ,\r\nI tried to reproduce above in colab, I downloaded the pickle file too. But this file is not opening could you provide proper link to me?\r\n\r\nThank you!", "Hi @Venkat6871 ,\r\nMy apologies for any inconvenience caused. I have prepared a [Colab notebook](https://colab.research.google.com/drive/1JihfqsRpAGTkgV2QGWV4HrSuOzhKuE4r?usp=sharing) to address this issue. However, as I don't have Colab Pro access, I couldn't test it on a V100 GPU. Could you please verify if the issue persists on a V100 GPU? Thank you\r\n", "Hi, \r\n\r\nCould you please provide the code in a simplified format without the need of pickle file, it would help us to debug faster. Thanks!", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "Hi, @sachinprasadhs \r\nThis issue can only be reproduced with the specific inputs." ]
2023-11-27T18:42:58
2023-12-27T22:41:44
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? There are significant discrepancies in the model's output under XLA compilation when running on a GPU device, as indicated by the failed assertions. The differences are not minor, suggesting a deeper issue in TensorFlow's compiled execution path, specifically in the context of GPU-accelerated computation. Before run below stand-alone code, please download [the pickle file](https://github.com/GwiHwan-Go/repo/raw/main/issues/pickles/ast_cos_mul.pickle) and replace YOUR_PICKLE_FILE_PATH with youre pickle file path. ### Standalone code to reproduce the issue ```python import tensorflow as tf import pickle import os import numpy as np class Model1(tf.keras.Model): def __init__(self): super().__init__() self.p0 = tf.random.uniform(shape=[18, 14, 14, 4], dtype=tf.float32) @tf.function(jit_compile=True) def __call__(self, inp, inp1): astconv = tf.nn.atrous_conv2d(self.p0, inp1, rate=1, padding="VALID") _cos = tf.cos(astconv) mul = tf.multiply(_cos, astconv) return astconv, _cos, mul model1 = Model1() device = "gpu" print(f'=========RUNNING WITH PICKLE FILES===========') pickle_file_path = 'ast_cos_mul.pickle' #YOUR_PICKLE_FIlE_PATH if not os.path.exists(pickle_file_path) : print(f'Pickle file not exist') else : with open(pickle_file_path, 'rb') as f : inputs = pickle.load(f) inputs = [tf.convert_to_tensor(arr) for arr in inputs] with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model1(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model1(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 2023-11-27 18:39:00.683604: I external/local_xla/xla/service/service.cc:168] XLA service 0x55a6d3345b50 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices: 2023-11-27 18:39:00.683633: I external/local_xla/xla/service/service.cc:176] StreamExecutor device (0): Tesla V100S-PCIE-32GB, Compute Capability 7.0 2023-11-27 18:39:00.687057: I tensorflow/compiler/mlir/tensorflow/utils/dump_mlir_util.cc:269] disabling MLIR crash reproducer, set env var `MLIR_CRASH_REPRODUCER_DIRECTORY` to enable. WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1701110341.197440 963817 device_compiler.h:186] Compiled cluster using XLA! 
This line is logged at most once for the lifetime of the process. =========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 2th # indicates that removing one of operators will not trigger the error Mismatched elements: 40 / 13104 (0.305%) Max absolute difference: 0.14808655 Max relative difference: 0.0466534 x: array([[[[ 4.319121e+02, 5.756997e+01, -1.646362e+02, ..., 1.929362e+02, 4.481723e+01, 5.383195e+02], [ 2.320642e+02, -6.552701e+01, 1.128223e+02, ...,... y: array([[[[ 4.319121e+02, 5.756836e+01, -1.646362e+02, ..., 1.929362e+02, 4.479507e+01, 5.383195e+02], [ 2.320521e+02, -6.552701e+01, 1.128223e+02, ...,... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62488/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62488/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62487
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62487/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62487/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62487/events
https://github.com/tensorflow/tensorflow/issues/62487
2,012,858,833
I_kwDOArmXAs53-cnR
62,487
Inconsistent Outputs from `tf.nn.atrous_conv2d` multiplied with `tf.cos` on XLA compiled model
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" } ]
closed
false
{ "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false }
[ { "login": "tilakrayal", "id": 81610181, "node_id": "MDQ6VXNlcjgxNjEwMTgx", "avatar_url": "https://avatars.githubusercontent.com/u/81610181?v=4", "gravatar_id": "", "url": "https://api.github.com/users/tilakrayal", "html_url": "https://github.com/tilakrayal", "followers_url": "https://api.github.com/users/tilakrayal/followers", "following_url": "https://api.github.com/users/tilakrayal/following{/other_user}", "gists_url": "https://api.github.com/users/tilakrayal/gists{/gist_id}", "starred_url": "https://api.github.com/users/tilakrayal/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/tilakrayal/subscriptions", "organizations_url": "https://api.github.com/users/tilakrayal/orgs", "repos_url": "https://api.github.com/users/tilakrayal/repos", "events_url": "https://api.github.com/users/tilakrayal/events{/privacy}", "received_events_url": "https://api.github.com/users/tilakrayal/received_events", "type": "User", "site_admin": false } ]
null
[ "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62487\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62487\">No</a>\n" ]
2023-11-27T18:30:28
2023-11-27T18:32:08
2023-11-27T18:32:04
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device No response ### Python version 3.10 ### Bazel version No response ### GCC/compiler version No response ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? I've identified a critical issue in TensorFlow 2.15.0 where the combination of tf.nn.conv2d and tf.cos in an XLA compiled model produces significantly different outputs compared to the eager execution mode. This inconsistency is particularly concerning given that the data type used is float32, which is a standard in many applications. This issue only occurs with certain input data on a GPU device. To reproduce this, please first download the [pickle file](https://github.com/GwiHwan-Go/repo/raw/main/conv_cos_bug_inputs.pickle) and replace YOUR_PICKLE_FILE_PATH with your pickle file path.
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62487/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62487/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62486
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62486/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62486/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62486/events
https://github.com/tensorflow/tensorflow/issues/62486
2,012,657,045
I_kwDOArmXAs539rWV
62,486
Inconsistent Outputs from `tf.nn.atrous_conv2d + tf.round` with XLA compiled model on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 404586594, "node_id": "MDU6TGFiZWw0MDQ1ODY1OTQ=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20tensorflower", "name": "stat:awaiting tensorflower", "color": "f4b400", "default": false, "description": "Status - Awaiting response from tensorflower" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
open
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "@sachinprasadhs Hey! Have you reproduced this issue?", "Hi, Could you please simplify the code without pickle file to debug in detail about the issue. Thanks", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "Hi, @sachinprasadhs \r\nHere is the colab notebook [link](https://colab.research.google.com/drive/15fYjGKA8XmBns1GgzR7MXglAJci6PQ_1?usp=sharing), Please note that the bug can only be reproduced when using a Tesla V100 GPU." ]
2023-11-27T16:30:24
2023-12-27T22:40:59
null
NONE
null
null
null
### Issue type Bug ### Have you reproduced the bug with TensorFlow Nightly? Yes ### Source source ### TensorFlow version 2.15.0 ### Custom code Yes ### OS platform and distribution Ubuntu 22.04.3 LTS ### Mobile device _No response_ ### Python version 3.10 ### Bazel version _No response_ ### GCC/compiler version _No response_ ### CUDA/cuDNN version cuda : 12.2 / cudnn 8.9.04 ### GPU model and memory Tesla V100S-PCIE-32GB ### Current behavior? I've identified a issue in TensorFlow 2.15.0 where the combination of `tf.nn.atrous_conv2d` and `tf.round` in a XLA compiled model produces different outputs compared to the eager execution mode. This error is only seen on **gpu** To reproduce this, please download first [pickle file](https://github.com/GwiHwan-Go/repo/raw/main/issues/pickles/ast_round.pickle) and replace `YOUR_PICKLE_FILE_PATH` with your pickle file path. ### Standalone code to reproduce the issue ```python from typing import Dict import tensorflow as tf import pickle import os import numpy as np class Model1(tf.keras.Model): def __init__(self): super().__init__() # Tensor objects (with comments for shapes) self.p1 = tf.Variable(tf.random.uniform(shape=[2, 34, 35, 25], dtype=tf.float32)) # [2, 34, 35, 25] float32 # Layers or other Keras model objects @tf.function(jit_compile=True) def __call__(self, inp): # Forward pass logic using TensorFlow operations # inp: [14, 2, 25, 53] : float32 astconv = tf.nn.atrous_conv2d(self.p1, inp, rate=2, padding="VALID") round_ast = tf.round(astconv) return astconv, round_ast inputs = [ tf.random.uniform(shape=[14, 2, 25, 53], dtype=tf.float32), ] model1 = Model1() device = "gpu" pickle_file_path = "ast_round.pickle" #YOUR_PICKLE_FILE_PATH if not os.path.exists(pickle_file_path) : print(f'Pickle file not exist') else : with open(pickle_file_path, 'rb') as f : oracle = pickle.load(f) inputs = [tf.convert_to_tensor(arr) for arr in oracle.values()] with tf.device(device): tf.config.run_functions_eagerly(True) out1 = model1(*inputs) out2 = model1(*inputs) print(f'=========eager_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_eager does not trigger assertion") except AssertionError as e: print("XLA_eager triggers assertion") print(e) tf.config.run_functions_eagerly(False) out1 = model1(*inputs) out2 = model1(*inputs) print(f'=========compiled_output(version:{tf.__version__})================') try : for i in range(min(len(out1),len(out2))): np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th') print("XLA_complie does not trigger assertion") except AssertionError as e: print("XLA_complie triggers assertion") print(e) ``` ### Relevant log output ```shell =========eager_output(version:2.15.0)================ XLA_eager does not trigger assertion 2023-11-27 16:28:59.925669: I external/local_xla/xla/service/service.cc:168] XLA service 0x55968b3b66f0 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices: 2023-11-27 16:28:59.925702: I external/local_xla/xla/service/service.cc:176] StreamExecutor device (0): Tesla V100S-PCIE-32GB, Compute Capability 7.0 WARNING: All log messages before absl::InitializeLog() is called are written to STDERR I0000 00:00:1701102540.412886 749930 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process. 
=========compiled_output(version:2.15.0)================ XLA_complie triggers assertion Not equal to tolerance rtol=0.001, atol=0.001 at checking 1th # indicates that astrous_conv2d alone will not result in different behavior. Mismatched elements: 2 / 27984 (0.00715%) Max absolute difference: 1. Max relative difference: 0.00327869 x: array([[[[ 1120., -118., 834., ..., -443., -693., -699.], [ 1132., 717., 1710., ..., -1091., -1453., -906.], [ 1380., 757., 1576., ..., -918., -495., -634.],... y: array([[[[ 1120., -118., 834., ..., -443., -693., -699.], [ 1132., 717., 1710., ..., -1091., -1453., -906.], [ 1380., 757., 1576., ..., -918., -495., -634.],... ```
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62486/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62486/timeline
null
null
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62485
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62485/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62485/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62485/events
https://github.com/tensorflow/tensorflow/issues/62485
2,012,628,631
I_kwDOArmXAs539kaX
62,485
Inconsistency in XLA Compiled Model Output for `tf.nn.conv2d` Combined with `tf.tan` on GPU
{ "login": "Gwihwan-Go", "id": 83144588, "node_id": "MDQ6VXNlcjgzMTQ0NTg4", "avatar_url": "https://avatars.githubusercontent.com/u/83144588?v=4", "gravatar_id": "", "url": "https://api.github.com/users/Gwihwan-Go", "html_url": "https://github.com/Gwihwan-Go", "followers_url": "https://api.github.com/users/Gwihwan-Go/followers", "following_url": "https://api.github.com/users/Gwihwan-Go/following{/other_user}", "gists_url": "https://api.github.com/users/Gwihwan-Go/gists{/gist_id}", "starred_url": "https://api.github.com/users/Gwihwan-Go/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/Gwihwan-Go/subscriptions", "organizations_url": "https://api.github.com/users/Gwihwan-Go/orgs", "repos_url": "https://api.github.com/users/Gwihwan-Go/repos", "events_url": "https://api.github.com/users/Gwihwan-Go/events{/privacy}", "received_events_url": "https://api.github.com/users/Gwihwan-Go/received_events", "type": "User", "site_admin": false }
[ { "id": 386191887, "node_id": "MDU6TGFiZWwzODYxOTE4ODc=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stat:awaiting%20response", "name": "stat:awaiting response", "color": "f4b400", "default": false, "description": "Status - Awaiting response from author" }, { "id": 473172988, "node_id": "MDU6TGFiZWw0NzMxNzI5ODg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/type:bug", "name": "type:bug", "color": "159b2e", "default": false, "description": "Bug" }, { "id": 474725938, "node_id": "MDU6TGFiZWw0NzQ3MjU5Mzg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/stale", "name": "stale", "color": "d4c5f9", "default": false, "description": "This label marks the issue/pr stale - to be closed automatically if no activity" }, { "id": 1133285679, "node_id": "MDU6TGFiZWwxMTMzMjg1Njc5", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:xla", "name": "comp:xla", "color": "0052cc", "default": false, "description": "XLA" }, { "id": 6218999181, "node_id": "LA_kwDOArmXAs8AAAABcq5ljQ", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/TF%202.15", "name": "TF 2.15", "color": "9162CB", "default": false, "description": "For issues related to 2.15.x" } ]
closed
false
{ "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false }
[ { "login": "sachinprasadhs", "id": 73069040, "node_id": "MDQ6VXNlcjczMDY5MDQw", "avatar_url": "https://avatars.githubusercontent.com/u/73069040?v=4", "gravatar_id": "", "url": "https://api.github.com/users/sachinprasadhs", "html_url": "https://github.com/sachinprasadhs", "followers_url": "https://api.github.com/users/sachinprasadhs/followers", "following_url": "https://api.github.com/users/sachinprasadhs/following{/other_user}", "gists_url": "https://api.github.com/users/sachinprasadhs/gists{/gist_id}", "starred_url": "https://api.github.com/users/sachinprasadhs/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/sachinprasadhs/subscriptions", "organizations_url": "https://api.github.com/users/sachinprasadhs/orgs", "repos_url": "https://api.github.com/users/sachinprasadhs/repos", "events_url": "https://api.github.com/users/sachinprasadhs/events{/privacy}", "received_events_url": "https://api.github.com/users/sachinprasadhs/received_events", "type": "User", "site_admin": false } ]
null
[ "This is another code that trigger the same bug : \r\n```python\r\nfrom typing import Dict\r\nimport tensorflow as tf\r\nimport pickle\r\nimport os\r\nimport numpy as np\r\n\r\n###updated at 23.12.06\r\np0 = tf.random.uniform(shape=[6, 21, 59, 6], dtype=tf.float32)\r\np1 = tf.random.uniform(shape=[1, 54, 6, 6], dtype=tf.float32)\r\n\r\nclass Model1(tf.keras.Model):\r\n def __init__(self):\r\n super().__init__()\r\n self.p0 = p0\r\n self.p1 = p1\r\n###updated at 23.12.06\r\n @tf.function(jit_compile=True)\r\n def __call__(self, inp):\r\n conv2 = tf.nn.conv2d(self.p0, inp, strides=1, padding=\"SAME\", dilations=(3, 3))\r\n _tan = tf.tan(conv2)\r\n return conv2, _tan\r\n\r\ninputs = [\r\ntf.random.uniform(shape=[18, 54, 6, 6], dtype=tf.float32),\r\n]\r\nmodel1 = Model1()\r\ndevice = \"gpu\"\r\nwith tf.device(device):\r\n tf.config.run_functions_eagerly(True)\r\n out1 = model1(*inputs)\r\n out2 = model1(*inputs)\r\n print(f'=========eager_output(version:{tf.__version__})================')\r\n try :\r\n for i in range(min(len(out1),len(out2))):\r\n np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th')\r\n print(\"XLA_eager does not trigger assertion\")\r\n except AssertionError as e:\r\n print(\"XLA_eager triggers assertion\")\r\n print(e)\r\n tf.config.run_functions_eagerly(False)\r\n out1 = model1(*inputs)\r\n out2 = model1(*inputs)\r\n print(f'=========compiled_output(version:{tf.__version__})================')\r\n try :\r\n for i in range(min(len(out1),len(out2))):\r\n np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0.001, err_msg=f'at checking {i}th')\r\n print(\"XLA_complie does not trigger assertion\")\r\n except AssertionError as e:\r\n print(\"XLA_complie triggers assertion\")\r\n print(e)\r\n```\r\nand the log output \r\n```\r\n=========compiled_output(version:2.15.0)================\r\nXLA_complie triggers assertion\r\n\r\nNot equal to tolerance rtol=0.001, atol=0.001\r\nat checking 1th\r\nMismatched elements: 89 / 44604 (0.2%)\r\nMax absolute difference: 634.3164\r\nMax relative difference: 0.1164573\r\n x: array([[[[-5.409578e-01, -2.587222e-01, 1.120472e+00, 1.204898e-01,\r\n -5.236940e+00, -7.960033e-01],\r\n [-6.590452e-02, 1.740580e+00, 1.766167e+00, 1.575795e+00,...\r\n y: array([[[[-5.409578e-01, -2.587222e-01, 1.120472e+00, 1.204898e-01,\r\n -5.236940e+00, -7.960033e-01],\r\n [-6.590452e-02, 1.740580e+00, 1.766167e+00, 1.575795e+00,...\r\n```", "@sachinprasadhs I was able to replicate this issue on colab, please find the gist [here](https://colab.research.google.com/gist/sushreebarsa/b21c94c48966c50c740215442c8cce0f/62485.ipynb). Thank you!", "Hi,\r\n\r\nSince the code you have mentioned has random number generation, the outputs can not be guaranteed for each run.\r\nI have tried by setting seed and enabling op determinism and still produces random results.\r\n\r\n```\r\ntf.random.set_seed(42)\r\ntf.config.experimental.enable_op_determinism() \r\n```", "Hi, @sachinprasadhs \r\nThank you for pointing out the error in my code in **comment**. I'll update my comment accordingly. Additionally, there still exist bugs related to misbehavior in the XLA compiled model output when using tf.nn.conv2d combined with tf.tan on a GPU.", "This issue is stale because it has been open for 7 days with no activity. It will be closed if no further activity occurs. Thank you.", "This issue was closed because it has been inactive for 7 days since being marked as stale. 
Please reopen if you'd like to work on this further.", "Are you satisfied with the resolution of your issue?\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=Yes&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62485\">Yes</a>\n<a href=\"https://docs.google.com/forms/d/e/1FAIpQLSfaP12TRhd9xSxjXZjcZFNXPGk4kc1-qMdv3gc6bEP90vY1ew/viewform?entry.85265664=No&entry.2137816233=https://github.com/tensorflow/tensorflow/issues/62485\">No</a>\n" ]
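Editor's note: the first comment above gives an alternative reproduction, and a later comment suggests setting a seed and enabling op determinism. The following is a minimal sketch (not from the thread) of that suggestion applied to a two-run comparison of the jit-compiled function; the function and variable names are mine, and the shapes are taken from the first comment.

```python
import numpy as np
import tensorflow as tf

# Determinism setup suggested in the comments: fix the RNG seed and request
# deterministic kernel implementations before any ops run.
tf.random.set_seed(42)
tf.config.experimental.enable_op_determinism()

# Generate the operands once so both runs see byte-identical inputs.
p0 = tf.random.uniform(shape=[6, 21, 59, 6], dtype=tf.float32)   # conv input
inp = tf.random.uniform(shape=[18, 54, 6, 6], dtype=tf.float32)  # conv filter

@tf.function(jit_compile=True)
def conv_then_tan(x):
    conv = tf.nn.conv2d(p0, x, strides=1, padding="SAME", dilations=(3, 3))
    return conv, tf.tan(conv)

with tf.device("/GPU:0"):
    out1 = conv_then_tan(inp)  # first compiled run
    out2 = conv_then_tan(inp)  # second compiled run, identical inputs

# With determinism enabled, two compiled runs should agree within tolerance.
for i, (a, b) in enumerate(zip(out1, out2)):
    np.testing.assert_allclose(a.numpy(), b.numpy(), rtol=0.001, atol=0.001,
                               err_msg=f"at checking {i}th")
```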
2023-11-27T16:14:55
2023-12-22T01:48:31
2023-12-22T01:48:28
NONE
null
null
null
### Issue type

Bug

### Have you reproduced the bug with TensorFlow Nightly?

Yes

### Source

source

### TensorFlow version

2.15.0

### Custom code

Yes

### OS platform and distribution

Ubuntu 22.04.3 LTS

### Mobile device

_No response_

### Python version

3.10.0

### Bazel version

_No response_

### GCC/compiler version

_No response_

### CUDA/cuDNN version

cuda : 12.2 / cudnn 8.9.04

### GPU model and memory

Tesla V100S-PCIE-32GB

### Current behavior?

In TensorFlow 2.15.0, an inconsistency arises when using `tf.nn.conv2d` followed by `tf.tan` in an XLA-compiled model. The output differs significantly when comparing the XLA-compiled execution to the eager-mode execution.

### Standalone code to reproduce the issue

```python
from typing import Dict
import tensorflow as tf
import pickle
import os
import numpy as np

class Model1(tf.keras.Model):
    def __init__(self):
        super().__init__()

    @tf.function(jit_compile=True)
    def __call__(self, inp1, inp2):
        # inp1: [1, 8147, 1, 1] : float32
        # inp2: [1, 1, 8160, 1] : float32
        nnconv = tf.nn.conv2d(inp2, inp1, strides=1, padding="VALID", dilations=(2, 1))
        nnconvtan = tf.tan(nnconv)
        return nnconv, nnconvtan

inputs = [
    tf.random.uniform(shape=[1, 8147, 1, 1], dtype=tf.float32),
    tf.random.uniform(shape=[1, 1, 8160, 1], dtype=tf.float32),
]
model1 = Model1()
device = "gpu"
with tf.device(device):
    tf.config.run_functions_eagerly(True)
    out1 = model1(*inputs)
    out2 = model1(*inputs)
    print(f'=========eager_output(version:{tf.__version__})================')
    try :
        for i in range(min(len(out1), len(out2))):
            np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0, err_msg=f'at checking {i}th')
        print("XLA_eager does not trigger assertion")
    except AssertionError as e:
        print("XLA_eager triggers assertion")
        print(e)
    tf.config.run_functions_eagerly(False)
    out1 = model1(*inputs)
    out2 = model1(*inputs)
    print(f'=========compiled_output(version:{tf.__version__})================')
    try :
        for i in range(min(len(out1), len(out2))):
            np.testing.assert_allclose(out1[i].numpy(), out2[i].numpy(), rtol=0.001, atol=0, err_msg=f'at checking {i}th')
        print("XLA_complie does not trigger assertion")
    except AssertionError as e:
        print("XLA_complie triggers assertion")
        print(e)
```

### Relevant log output

```shell
To enable the following instructions: AVX2 FMA, in other operations, rebuild TensorFlow with the appropriate compiler flags.
2023-11-27 16:13:45.155776: W tensorflow/compiler/tf2tensorrt/utils/py_utils.cc:38] TF-TRT Warning: Could not find TensorRT
2023-11-27 16:13:51.062493: I tensorflow/core/common_runtime/gpu/gpu_device.cc:1929] Created device /job:localhost/replica:0/task:0/device:GPU:0 with 30946 MB memory: -> device: 0, name: Tesla V100S-PCIE-32GB, pci bus id: 0000:01:00.0, compute capability: 7.0
2023-11-27 16:13:52.134495: I external/local_xla/xla/stream_executor/cuda/cuda_dnn.cc:454] Loaded cuDNN version 8904
=========eager_output(version:2.15.0)================
XLA_eager does not trigger assertion
2023-11-27 16:13:54.695301: I external/local_xla/xla/service/service.cc:168] XLA service 0x561d3b24cf00 initialized for platform CUDA (this does not guarantee that XLA will be used). Devices:
2023-11-27 16:13:54.695336: I external/local_xla/xla/service/service.cc:176]   StreamExecutor device (0): Tesla V100S-PCIE-32GB, Compute Capability 7.0
2023-11-27 16:13:54.695345: I external/local_xla/xla/service/service.cc:176]   StreamExecutor
WARNING: All log messages before absl::InitializeLog() is called are written to STDERR
I0000 00:00:1701101635.181623  741835 device_compiler.h:186] Compiled cluster using XLA! This line is logged at most once for the lifetime of the process.
=========compiled_output(version:2.15.0)================
XLA_complie triggers assertion

Not equal to tolerance rtol=0.001, atol=0
at checking 1th
Mismatched elements: 2 / 14 (14.3%)
Max absolute difference: 0.0102663
Max relative difference: 0.00553954
 x: array([[[[-3.548327],
         [ 0.044403],
         [-2.706214],...
 y: array([[[[-3.548327],
         [ 0.044158],
         [-2.706214],...
```
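Editor's note: a debugging sketch, not from the issue thread. Dumping the HLO that XLA actually compiles can help confirm what the compiled `conv2d` + `tan` cluster looks like across runs; the dump directory below is an arbitrary placeholder.

```python
# XLA_FLAGS must be set before TensorFlow initializes XLA, so set it before
# importing tensorflow.
import os
os.environ["XLA_FLAGS"] = "--xla_dump_to=/tmp/xla_dump"

import tensorflow as tf  # import only after the flag is set
# ...then run the reproduction script from the body above; the HLO text files
# written to /tmp/xla_dump describe the compiled cluster.
```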
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62485/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62485/timeline
null
completed
false
https://api.github.com/repos/tensorflow/tensorflow/issues/62484
https://api.github.com/repos/tensorflow/tensorflow
https://api.github.com/repos/tensorflow/tensorflow/issues/62484/labels{/name}
https://api.github.com/repos/tensorflow/tensorflow/issues/62484/comments
https://api.github.com/repos/tensorflow/tensorflow/issues/62484/events
https://github.com/tensorflow/tensorflow/pull/62484
2,012,334,944
PR_kwDOArmXAs5gcRvA
62,484
lite: add tensorflowlite_flex to minimal example
{ "login": "aflaischer", "id": 26463144, "node_id": "MDQ6VXNlcjI2NDYzMTQ0", "avatar_url": "https://avatars.githubusercontent.com/u/26463144?v=4", "gravatar_id": "", "url": "https://api.github.com/users/aflaischer", "html_url": "https://github.com/aflaischer", "followers_url": "https://api.github.com/users/aflaischer/followers", "following_url": "https://api.github.com/users/aflaischer/following{/other_user}", "gists_url": "https://api.github.com/users/aflaischer/gists{/gist_id}", "starred_url": "https://api.github.com/users/aflaischer/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/aflaischer/subscriptions", "organizations_url": "https://api.github.com/users/aflaischer/orgs", "repos_url": "https://api.github.com/users/aflaischer/repos", "events_url": "https://api.github.com/users/aflaischer/events{/privacy}", "received_events_url": "https://api.github.com/users/aflaischer/received_events", "type": "User", "site_admin": false }
[ { "id": 390482148, "node_id": "MDU6TGFiZWwzOTA0ODIxNDg=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/awaiting%20review", "name": "awaiting review", "color": "bc3869", "default": false, "description": "Pull request awaiting review" }, { "id": 750616506, "node_id": "MDU6TGFiZWw3NTA2MTY1MDY=", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/comp:lite", "name": "comp:lite", "color": "0052cc", "default": false, "description": "TF Lite related issues" }, { "id": 1169364458, "node_id": "MDU6TGFiZWwxMTY5MzY0NDU4", "url": "https://api.github.com/repos/tensorflow/tensorflow/labels/size:S", "name": "size:S", "color": "adafea", "default": false, "description": "CL Change Size: Small" } ]
open
false
{ "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false }
[ { "login": "gbaned", "id": 48215717, "node_id": "MDQ6VXNlcjQ4MjE1NzE3", "avatar_url": "https://avatars.githubusercontent.com/u/48215717?v=4", "gravatar_id": "", "url": "https://api.github.com/users/gbaned", "html_url": "https://github.com/gbaned", "followers_url": "https://api.github.com/users/gbaned/followers", "following_url": "https://api.github.com/users/gbaned/following{/other_user}", "gists_url": "https://api.github.com/users/gbaned/gists{/gist_id}", "starred_url": "https://api.github.com/users/gbaned/starred{/owner}{/repo}", "subscriptions_url": "https://api.github.com/users/gbaned/subscriptions", "organizations_url": "https://api.github.com/users/gbaned/orgs", "repos_url": "https://api.github.com/users/gbaned/repos", "events_url": "https://api.github.com/users/gbaned/events{/privacy}", "received_events_url": "https://api.github.com/users/gbaned/received_events", "type": "User", "site_admin": false } ]
null
[ "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @aflaischer Can you please check @LukeBoyer's [comments](https://github.com/tensorflow/tensorflow/pull/62484#discussion_r1433142751) and keep us posted? Thank you!", "Hello @gbaned, I've updated the review\r\n", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "hi @gbaned, is there something I can do regarding the pull request?", "> hi @gbaned, is there something I can do regarding the pull request?\r\n\r\nHi @aflaischer Sorry for the delay, this PR is awaiting review status and will be processed further once it is approved. Nothing is pending on your end. Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!", "Hi @LukeBoyer Can you please review this PR ? Thank you!" ]
2023-11-27T13:56:33
2024-06-05T08:20:24
null
NONE
null
false
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/pulls/62484", "html_url": "https://github.com/tensorflow/tensorflow/pull/62484", "diff_url": "https://github.com/tensorflow/tensorflow/pull/62484.diff", "patch_url": "https://github.com/tensorflow/tensorflow/pull/62484.patch", "merged_at": null }
Add an option to link tensorflowlite_flex when building the minimal example.
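Editor's note: a sketch, not taken from the PR, of how a model that needs tensorflowlite_flex is produced. Enabling `SELECT_TF_OPS` lets the converter fall back to TF (Flex) kernels, and a binary such as the `minimal` example can only run such a model if it links the tensorflowlite_flex library; the toy model and file name below are placeholders.

```python
import tensorflow as tf

# A tiny Keras model just to drive the converter.
model = tf.keras.Sequential([
    tf.keras.Input(shape=(8,)),
    tf.keras.layers.Dense(4),
])

converter = tf.lite.TFLiteConverter.from_keras_model(model)
converter.target_spec.supported_ops = [
    tf.lite.OpsSet.TFLITE_BUILTINS,  # use built-in TFLite kernels where possible
    tf.lite.OpsSet.SELECT_TF_OPS,    # allow falling back to TF (Flex) kernels
]
tflite_model = converter.convert()

with open("model_with_flex_fallback.tflite", "wb") as f:
    f.write(tflite_model)
```

Whether the converted file actually contains Flex ops depends on the ops in the model; when it does, running it with the minimal example requires the flex library to be linked, which is what the build option in this PR enables.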
{ "url": "https://api.github.com/repos/tensorflow/tensorflow/issues/62484/reactions", "total_count": 0, "+1": 0, "-1": 0, "laugh": 0, "hooray": 0, "confused": 0, "heart": 0, "rocket": 0, "eyes": 0 }
https://api.github.com/repos/tensorflow/tensorflow/issues/62484/timeline
null
null
true