import streamlit as st
import torch
from transformers import AutoConfig, AutoTokenizer, AutoModel
from huggingface_hub import login, hf_hub_download
import re
import copy
from modeling_st2 import ST2ModelV2, SignalDetector
from safetensors.torch import load_file
hf_token = st.secrets["HUGGINGFACE_TOKEN"]
login(token=hf_token)
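
# The token above is read from Streamlit's secrets store. A minimal sketch of the expected
# entry, assuming the default .streamlit/secrets.toml location (on Hugging Face Spaces this
# is configured as a Space secret instead); the value shown is a placeholder:
#
#   # .streamlit/secrets.toml
#   HUGGINGFACE_TOKEN = "hf_..."
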
# Load model & tokenizer once (cached for efficiency)
@st.cache_resource
def load_model():
    config = AutoConfig.from_pretrained("roberta-large")
    tokenizer = AutoTokenizer.from_pretrained("roberta-large", use_fast=True, add_prefix_space=True)

    class Args:
        def __init__(self):
            self.dropout = 0.1
            self.signal_classification = True
            self.pretrained_signal_detector = False

    args = Args()
    model = ST2ModelV2(args)

    repo_id = "anamargarida/SpanExtractionWithSignalCls_2"
    filename = "model.safetensors"

    # Download the model file
    model_path = hf_hub_download(repo_id=repo_id, filename=filename)

    # Load the model weights
    state_dict = load_file(model_path)
    model.load_state_dict(state_dict)

    return tokenizer, model

# Load the model and tokenizer
tokenizer, model = load_model()
model.eval() # Set model to evaluation mode
def extract_arguments(text, tokenizer, model, beam_search=True):
    class Args:
        def __init__(self):
            self.signal_classification = True
            self.pretrained_signal_detector = False

    args = Args()

    inputs = tokenizer(text, return_tensors="pt")

    # Get tokenized words (for reconstruction later)
    word_ids = inputs.word_ids()

    with torch.no_grad():
        outputs = model(**inputs)

    # Extract logits
    start_cause_logits = outputs["start_arg0_logits"][0]
    end_cause_logits = outputs["end_arg0_logits"][0]
    start_effect_logits = outputs["start_arg1_logits"][0]
    end_effect_logits = outputs["end_arg1_logits"][0]
    start_signal_logits = outputs["start_sig_logits"][0]
    end_signal_logits = outputs["end_sig_logits"][0]

    # Set the first and last token logits to a very low value to ignore them
    start_cause_logits[0] = -1e-4
    end_cause_logits[0] = -1e-4
    start_effect_logits[0] = -1e-4
    end_effect_logits[0] = -1e-4
    start_cause_logits[len(inputs["input_ids"][0]) - 1] = -1e-4
    end_cause_logits[len(inputs["input_ids"][0]) - 1] = -1e-4
    start_effect_logits[len(inputs["input_ids"][0]) - 1] = -1e-4
    end_effect_logits[len(inputs["input_ids"][0]) - 1] = -1e-4

    # Beam Search for position selection
    if beam_search:
        indices1, indices2, _, _, _ = model.beam_search_position_selector(
            start_cause_logits=start_cause_logits,
            end_cause_logits=end_cause_logits,
            start_effect_logits=start_effect_logits,
            end_effect_logits=end_effect_logits,
            topk=5
        )
        start_cause1, end_cause1, start_effect1, end_effect1 = indices1
        start_cause2, end_cause2, start_effect2, end_effect2 = indices2
    else:
        start_cause1 = start_cause_logits.argmax().item()
        end_cause1 = end_cause_logits.argmax().item()
        start_effect1 = start_effect_logits.argmax().item()
        end_effect1 = end_effect_logits.argmax().item()
        start_cause2, end_cause2, start_effect2, end_effect2 = None, None, None, None

    has_signal = 1
    if args.signal_classification:
        if not args.pretrained_signal_detector:
            has_signal = outputs["signal_classification_logits"].argmax().item()
        else:
            # This branch assumes a pretrained SignalDetector instance named `signal_detector`;
            # it is never taken here because pretrained_signal_detector is False above.
            has_signal = signal_detector.predict(text=text)
    if has_signal:
        start_signal_logits[0] = -1e-4
        end_signal_logits[0] = -1e-4
        start_signal_logits[len(inputs["input_ids"][0]) - 1] = -1e-4
        end_signal_logits[len(inputs["input_ids"][0]) - 1] = -1e-4

        start_signal = start_signal_logits.argmax().item()
        end_signal_logits[:start_signal] = -1e4
        end_signal_logits[start_signal + 5:] = -1e4
        end_signal = end_signal_logits.argmax().item()

    if not has_signal:
        start_signal, end_signal = None, None

    tokens = tokenizer.convert_ids_to_tokens(inputs["input_ids"][0])
    token_ids = inputs["input_ids"][0]

    #st.write("Token Positions, IDs, and Corresponding Tokens:")
    #for position, (token_id, token) in enumerate(zip(token_ids, tokens)):
    #    st.write(f"Position: {position}, ID: {token_id}, Token: {token}")

st.write(f"Start Cause 1: {start_cause1}, End Cause: {end_cause1}")
st.write(f"Start Effect 1: {start_effect1}, End Cause: {end_effect1}")
st.write(f"Start Signal: {start_signal}, End Signal: {end_signal}")
    def extract_span(start, end):
        return tokenizer.convert_tokens_to_string(tokens[start:end + 1]) if start is not None and end is not None else ""

    cause1 = extract_span(start_cause1, end_cause1)
    cause2 = extract_span(start_cause2, end_cause2)
    effect1 = extract_span(start_effect1, end_effect1)
    effect2 = extract_span(start_effect2, end_effect2)

    if has_signal:
        signal = extract_span(start_signal, end_signal)
    if not has_signal:
        signal = 'NA'

    list1 = [start_cause1, end_cause1, start_effect1, end_effect1, start_signal, end_signal]
    list2 = [start_cause2, end_cause2, start_effect2, end_effect2, start_signal, end_signal]

    #return cause1, cause2, effect1, effect2, signal, list1, list2
    #return start_cause1, end_cause1, start_cause2, end_cause2, start_effect1, end_effect1, start_effect2, end_effect2, start_signal, end_signal

    # Add the argument tags in the sentence directly
    def add_tags(original_text, word_ids, start_cause, end_cause, start_effect, end_effect, start_signal, end_signal):
        space_splitted_tokens = original_text.split(" ")
        this_space_splitted_tokens = copy.deepcopy(space_splitted_tokens)

        def safe_insert(tag, position, start=True):
            """Safely insert a tag, checking for None values and index validity."""
            if position is not None and word_ids[position] is not None:
                word_index = word_ids[position]
                # Ensure word_index is within range
                if 0 <= word_index < len(this_space_splitted_tokens):
                    if start:
                        this_space_splitted_tokens[word_index] = tag + this_space_splitted_tokens[word_index]
                    else:
                        this_space_splitted_tokens[word_index] += tag

        # Add argument tags safely
        safe_insert('<ARG0>', start_cause, start=True)
        safe_insert('</ARG0>', end_cause, start=False)
        safe_insert('<ARG1>', start_effect, start=True)
        safe_insert('</ARG1>', end_effect, start=False)

        # Add signal tags safely (if signal exists)
        if start_signal is not None and end_signal is not None:
            safe_insert('<SIG0>', start_signal, start=True)
            safe_insert('</SIG0>', end_signal, start=False)

        # Join tokens back into a string
        return ' '.join(this_space_splitted_tokens)

# Apply the tags to the sentence tokens
tagged_sentence1 = add_tags(input_text, word_ids, start_cause1, end_cause1, start_effect1, end_effect1, start_signal, end_signal)
tagged_sentence2 = add_tags(input_text, word_ids, start_cause2, end_cause2, start_effect2, end_effect2, start_signal, end_signal)
return tagged_sentence1, tagged_sentence2
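
# Illustrative example of the tagged format produced by extract_arguments (not real model
# output; the spans and signal word are made up):
#   "<ARG0>Heavy rain</ARG0> <SIG0>caused</SIG0> <ARG1>severe flooding</ARG1> downtown."
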
def mark_text_by_position(original_text, start_idx, end_idx, color):
"""Marks text in the original string based on character positions."""
if start_idx is not None and end_idx is not None and start_idx <= end_idx:
return (
original_text[:start_idx]
+ f"<mark style='background-color:{color}; padding:2px; border-radius:4px;'>"
+ original_text[start_idx:end_idx]
+ "</mark>"
+ original_text[end_idx:]
)
return original_text # Return unchanged if indices are invalidt # Return unchanged text if no span is found
def mark_text_by_tokens(tokenizer, tokens, start_idx, end_idx, color):
"""Highlights a span in tokenized text using HTML."""
highlighted_tokens = copy.deepcopy(tokens) # Avoid modifying original tokens
if start_idx is not None and end_idx is not None and start_idx <= end_idx:
highlighted_tokens[start_idx] = f"<span style='background-color:{color}; padding:2px; border-radius:4px;'>{highlighted_tokens[start_idx]}"
highlighted_tokens[end_idx] = f"{highlighted_tokens[end_idx]}</span>"
return tokenizer.convert_tokens_to_string(highlighted_tokens)
def mark_text_by_word_ids(original_text, token_ids, start_word_id, end_word_id, color):
"""Marks words in the original text based on word IDs from tokenized input."""
words = original_text.split() # Split text into words
if start_word_id is not None and end_word_id is not None and start_word_id <= end_word_id:
words[start_word_id] = f"<mark style='background-color:{color}; padding:2px; border-radius:4px;'>{words[start_word_id]}"
words[end_word_id] = f"{words[end_word_id]}</mark>"
return " ".join(words)
st.title("Causal Relation Extraction")
input_text = st.text_area("Enter your text here:", height=300)
beam_search = st.radio("Enable Beam Search?", ('No', 'Yes')) == 'Yes'
if st.button("Add Argument Tags"):
    if input_text:
        tagged_sentence1, tagged_sentence2 = extract_arguments(input_text, tokenizer, model, beam_search=True)
        st.write("**Tagged Sentence_1:**")
        st.write(tagged_sentence1)
        st.write("**Tagged Sentence_2:**")
        st.write(tagged_sentence2)
    else:
        st.warning("Please enter some text to analyze.")

if st.button("Extract"):
    if input_text:
        # NOTE: this handler expects extract_arguments to return the six span indices
        # (the return variant that is currently commented out inside that function).
        start_cause_id, end_cause_id, start_effect_id, end_effect_id, start_signal_id, end_signal_id = extract_arguments(input_text, tokenizer, model, beam_search=beam_search)
        # Re-tokenize here so the token ids are available in this scope
        inputs = tokenizer(input_text, return_tensors="pt")
        cause_text = mark_text_by_word_ids(input_text, inputs["input_ids"][0], start_cause_id, end_cause_id, "#FFD700")  # Gold for cause
        effect_text = mark_text_by_word_ids(input_text, inputs["input_ids"][0], start_effect_id, end_effect_id, "#90EE90")  # Light green for effect
        signal_text = mark_text_by_word_ids(input_text, inputs["input_ids"][0], start_signal_id, end_signal_id, "#FF6347")  # Tomato red for signal
        st.markdown(f"**Cause:**<br>{cause_text}", unsafe_allow_html=True)
        st.markdown(f"**Effect:**<br>{effect_text}", unsafe_allow_html=True)
        st.markdown(f"**Signal:**<br>{signal_text}", unsafe_allow_html=True)
    else:
        st.warning("Please enter some text before extracting.")

if st.button("Extract1"):
    if input_text:
        # NOTE: this handler expects the ten-index return of extract_arguments
        # (the return variant that is currently commented out inside that function).
        start_cause1, end_cause1, start_cause2, end_cause2, start_effect1, end_effect1, start_effect2, end_effect2, start_signal, end_signal = extract_arguments(input_text, tokenizer, model, beam_search=beam_search)

        # Convert text to tokenized format
        tokenized_input = tokenizer.tokenize(input_text)

        cause_text1 = mark_text_by_tokens(tokenizer, tokenized_input, start_cause1, end_cause1, "#FFD700")  # Gold for cause
        effect_text1 = mark_text_by_tokens(tokenizer, tokenized_input, start_effect1, end_effect1, "#90EE90")  # Light green for effect
        signal_text = mark_text_by_tokens(tokenizer, tokenized_input, start_signal, end_signal, "#FF6347")  # Tomato red for signal

        # Display first relation
        st.markdown("<strong>Relation 1:</strong>", unsafe_allow_html=True)
        st.markdown(f"**Cause:** {cause_text1}", unsafe_allow_html=True)
        st.markdown(f"**Effect:** {effect_text1}", unsafe_allow_html=True)
        st.markdown(f"**Signal:** {signal_text}", unsafe_allow_html=True)

        # Display second relation if beam search is enabled
        if beam_search:
            cause_text2 = mark_text_by_tokens(tokenizer, tokenized_input, start_cause2, end_cause2, "#FFD700")
            effect_text2 = mark_text_by_tokens(tokenizer, tokenized_input, start_effect2, end_effect2, "#90EE90")
            st.markdown("<strong>Relation 2:</strong>", unsafe_allow_html=True)
            st.markdown(f"**Cause:** {cause_text2}", unsafe_allow_html=True)
            st.markdown(f"**Effect:** {effect_text2}", unsafe_allow_html=True)
            st.markdown(f"**Signal:** {signal_text}", unsafe_allow_html=True)
    else:
        st.warning("Please enter some text before extracting.")