final tweaks
app.py
CHANGED
@@ -144,10 +144,10 @@ FIG_DPI = 300
 
 def get_plot(model_name, plot_eager, generate_type):
     df = pd.DataFrame(BENCHMARK_DATA[generate_type][model_name])
-    df["framework"] = ["PyTorch", "TF (Eager
+    df["framework"] = ["PyTorch", "TF (Eager Execution)", "TF (XLA)"]
     df = pd.melt(df, id_vars=["framework"], value_vars=["T4", "3090", "A100"])
     if plot_eager == "No":
-        df = df[df["framework"] != "TF (Eager
+        df = df[df["framework"] != "TF (Eager Execution)"]
 
     g = sns.catplot(
         data=df,
@@ -155,7 +155,7 @@ def get_plot(model_name, plot_eager, generate_type):
         x="variable",
         y="value",
         hue="framework",
-        palette={"PyTorch": "blue", "TF (Eager
+        palette={"PyTorch": "blue", "TF (Eager Execution)": "orange", "TF (XLA)": "red"},
         alpha=.9,
     )
     g.despine(left=True)
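For reference, the melt-and-catplot pattern in get_plot can be reproduced standalone. The sketch below uses made-up benchmark numbers and assumes a bar-style plot (the kind argument is not visible in this diff); only the column names, hue labels, and palette come from the hunks above.

# Minimal standalone sketch of get_plot's melt + catplot pattern.
# The numbers are placeholders, not real benchmark results.
import pandas as pd
import seaborn as sns

# One column per GPU, one row per framework, mirroring the shape of BENCHMARK_DATA.
df = pd.DataFrame({"T4": [10.0, 25.0, 5.0], "3090": [6.0, 15.0, 3.0], "A100": [4.0, 10.0, 2.0]})
df["framework"] = ["PyTorch", "TF (Eager Execution)", "TF (XLA)"]

# Wide -> long: one row per (framework, GPU) pair, so seaborn can group bars by hue.
df = pd.melt(df, id_vars=["framework"], value_vars=["T4", "3090", "A100"])

g = sns.catplot(
    data=df,
    kind="bar",  # assumption: the plot kind is set outside the hunks shown here
    x="variable",   # GPU
    y="value",      # measured value
    hue="framework",
    palette={"PyTorch": "blue", "TF (Eager Execution)": "orange", "TF (XLA)": "red"},
    alpha=.9,
)
g.despine(left=True)
g.savefig("benchmark.png", dpi=300)  # FIG_DPI = 300 appears in app.py; saving to a file here is illustrative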
@@ -177,7 +177,7 @@ with demo:
     """
     # TensorFlow XLA Text Generation Benchmark
     Instructions:
-    1. Pick a tab for the type of generation (or
+    1. Pick a tab for the type of generation (or for benchmark information);
     2. Select a model from the dropdown menu;
     3. Optionally omit results from TensorFlow Eager Execution, if you wish to better compare the performance of
     PyTorch to TensorFlow with XLA.
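The instruction list above describes the UI flow: one tab per generation type, a model dropdown, and a toggle to omit Eager Execution results. The app's layout code is not part of this diff, so the following is a hypothetical minimal gradio Blocks wiring of that flow; the tab name, model choices, and the stub get_plot are illustrative only.

# Hypothetical minimal Blocks layout matching the instructions above.
# Tab and model names and the stub plot are illustrative, not copied from app.py.
import gradio as gr
import matplotlib.pyplot as plt

def get_plot(model_name, plot_eager, generate_type):
    # Stand-in for the real get_plot: returns a labelled empty figure.
    fig, ax = plt.subplots()
    ax.set_title(f"{generate_type} - {model_name} (plot eager: {plot_eager})")
    return fig

with gr.Blocks() as demo:
    gr.Markdown("# TensorFlow XLA Text Generation Benchmark")
    with gr.Tab("Greedy Search"):  # one tab per generation type
        model = gr.Dropdown(["GPT2", "T5 Small"], value="GPT2", label="Model")
        eager = gr.Radio(["Yes", "No"], value="Yes", label="Plot TF Eager Execution?")
        plot = gr.Plot()
        btn = gr.Button("Plot benchmark")
        btn.click(lambda m, e: get_plot(m, e, "Greedy Search"), [model, eager], plot)

demo.launch()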
@@ -269,7 +269,7 @@ with demo:
         gr.Dataframe(
             headers=["Parameter", "Value"],
             value=[
-                ["Transformers Version", "4.
+                ["Transformers Version", "4.21"],
                 ["TensorFlow Version", "2.9.1"],
                 ["Pytorch Version", "1.11.0"],
                 ["OS", "22.04 LTS (3090) / Debian 10 (other GPUs)"],
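The benchmark-information table above is a static gr.Dataframe. As a self-contained sketch (the surrounding tab and Blocks code is not shown in this diff, so that wrapper is assumed), the same table renders on its own like this:

# Self-contained sketch of the static benchmark-information table;
# the Blocks/launch wrapper is assumed, the rows are copied from the hunk above.
import gradio as gr

with gr.Blocks() as demo:
    gr.Dataframe(
        headers=["Parameter", "Value"],
        value=[
            ["Transformers Version", "4.21"],
            ["TensorFlow Version", "2.9.1"],
            ["Pytorch Version", "1.11.0"],
            ["OS", "22.04 LTS (3090) / Debian 10 (other GPUs)"],
        ],
    )

demo.launch()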