v3
app.py CHANGED
@@ -5,7 +5,6 @@ import io
 from typing import Any, Tuple
 import os
 
-
 class Client:
     def __init__(self, server_url: str):
         self.server_url = server_url
@@ -40,7 +39,8 @@ with gr.Blocks() as demo:
     model_selector = gr.Dropdown(
         choices=[
             "facebook/opt-1.3b",
-            "
+            "TheBloke/Llama-2-7B-fp16"
+            # "facebook/opt-2.7b",
             # "microsoft/Phi-3-mini-128k-instruct"
         ],
         value="facebook/opt-1.3b",
@@ -50,8 +50,8 @@ with gr.Blocks() as demo:
         choices=[
             "Layer wise non-linearity (with first layer)",
             "Next-token prediction from intermediate representations",
-            "Contextualization
-            "Layerwise predictions
+            "Contextualization measurement",
+            "Layerwise predictions (logit lens)",
             "Tokenwise loss without i-th layer"
         ],
         value="Layer wise non-linearity (with first layer)",
@@ -78,15 +78,15 @@ with gr.Blocks() as demo:
             return "token-wise"
         if task_name == "Next-token prediction from intermediate representations":
             return "token-wise"
-        if task_name == "Contextualization
+        if task_name == "Contextualization measurement":
             return "global"
-        if task_name == "Layerwise predictions
+        if task_name == "Layerwise predictions (logit lens)":
             return "global"
         if task_name == "Tokenwise loss without i-th layer":
             return "token-wise"
 
     def check_normalization(task_name: str, normalization_name) -> Tuple[str, str]:
-        if task_name == "Contextualization
+        if task_name == "Contextualization measurement" and normalization_name == "token-wise":
             return ("global", "\nALERT: Cannot apply token-wise normalization to one sentence, setting global normalization\n")
         return (normalization_name, "")
 
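Below is a minimal, self-contained sketch (no Gradio server required) of the task-dependent checks this commit touches. Only check_normalization and the raw if/return bodies appear in the diff; the wrapper name infer_normalization_mode, the TASKS list, and the __main__ demo are assumptions added here for illustration.

from typing import Tuple

# The five task labels offered in the task dropdown after this commit.
TASKS = [
    "Layer wise non-linearity (with first layer)",
    "Next-token prediction from intermediate representations",
    "Contextualization measurement",
    "Layerwise predictions (logit lens)",
    "Tokenwise loss without i-th layer",
]

def infer_normalization_mode(task_name: str) -> str:
    # Hypothetical wrapper around the branches shown in the diff:
    # sentence-level tasks use a single global normalization, while
    # per-token tasks keep token-wise normalization.
    if task_name in ("Contextualization measurement", "Layerwise predictions (logit lens)"):
        return "global"
    return "token-wise"

def check_normalization(task_name: str, normalization_name: str) -> Tuple[str, str]:
    # Mirrors the new check in the diff: token-wise normalization is not
    # meaningful for a single sentence, so fall back to global and warn.
    if task_name == "Contextualization measurement" and normalization_name == "token-wise":
        return ("global", "\nALERT: Cannot apply token-wise normalization to one sentence, setting global normalization\n")
    return (normalization_name, "")

if __name__ == "__main__":
    # Ask for token-wise normalization on every task to show which ones
    # are forced back to global.
    for task in TASKS:
        mode, note = check_normalization(task, "token-wise")
        print(f"{task} -> {mode}{note}")

Run as a plain script, this prints the normalization each task ends up with; only "Contextualization measurement" triggers the fallback message, matching the behavior added in the diff.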