openfree commited on
Commit
6845a85
·
verified ·
1 Parent(s): a12abfd

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +38 -15
app.py CHANGED
@@ -78,22 +78,46 @@ def predict(title, abstract):
78
  abstract = abstract.replace("\n", " ").strip().replace("’", "'")
79
  global model, tokenizer
80
  if model is None:
81
- model = AutoModelForSequenceClassification.from_pretrained(
82
- model_path,
83
- num_labels=1,
84
- torch_dtype=torch.float32 if device == 'cpu' else torch.float16
85
- )
 
 
 
 
 
 
 
 
 
 
 
 
 
 
86
  tokenizer = AutoTokenizer.from_pretrained(model_path)
87
- model.to(device)
88
  model.eval()
 
89
  text = f'''Given a certain paper, Title: {title}\n Abstract: {abstract}. \n Predict its normalized academic impact (between 0 and 1):'''
90
- inputs = tokenizer(text, return_tensors="pt").to(device)
91
- with torch.no_grad():
92
- outputs = model(**inputs)
93
- probability = torch.sigmoid(outputs.logits).item()
94
- if probability + 0.05 >= 1.0:
95
- return round(1, 4)
96
- return round(probability + 0.05, 4)
 
 
 
 
 
 
 
 
 
 
97
 
98
  def get_grade_and_emoji(score):
99
  if score >= 0.900: return "AAA 🌟"
@@ -224,8 +248,7 @@ css = """
224
  with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:
225
  gr.Markdown(
226
  """
227
- # PaperImpact: AI-Powered Research Impact Predictor {.main-title}
228
- ### Estimate the future academic impact from the title and abstract with advanced AI analysis {.sub-title}
229
  """
230
  )
231
 
 
78
  abstract = abstract.replace("\n", " ").strip().replace("’", "'")
79
  global model, tokenizer
80
  if model is None:
81
+ try:
82
+ # First try loading without quantization
83
+ model = AutoModelForSequenceClassification.from_pretrained(
84
+ model_path,
85
+ num_labels=1,
86
+ device_map='auto',
87
+ torch_dtype=torch.float32 if device == 'cpu' else torch.float16
88
+ )
89
+ except Exception as e:
90
+ print(f"Standard loading failed, trying without device mapping: {str(e)}")
91
+ # Fallback to basic loading
92
+ model = AutoModelForSequenceClassification.from_pretrained(
93
+ model_path,
94
+ num_labels=1,
95
+ torch_dtype=torch.float32
96
+ )
97
+ if torch.cuda.is_available():
98
+ model = model.cuda()
99
+
100
  tokenizer = AutoTokenizer.from_pretrained(model_path)
 
101
  model.eval()
102
+
103
  text = f'''Given a certain paper, Title: {title}\n Abstract: {abstract}. \n Predict its normalized academic impact (between 0 and 1):'''
104
+
105
+ try:
106
+ inputs = tokenizer(text, return_tensors="pt")
107
+ if torch.cuda.is_available():
108
+ inputs = {k: v.cuda() for k, v in inputs.items()}
109
+
110
+ with torch.no_grad():
111
+ outputs = model(**inputs)
112
+ probability = torch.sigmoid(outputs.logits).item()
113
+
114
+ if probability + 0.05 >= 1.0:
115
+ return round(1, 4)
116
+ return round(probability + 0.05, 4)
117
+
118
+ except Exception as e:
119
+ print(f"Prediction error: {str(e)}")
120
+ return 0.0 # Return default value in case of error
121
 
122
  def get_grade_and_emoji(score):
123
  if score >= 0.900: return "AAA 🌟"
 
248
  with gr.Blocks(theme=gr.themes.Default(), css=css) as iface:
249
  gr.Markdown(
250
  """
251
+ # PaperImpact: AI-Powered Research Impact Predictor
 
252
  """
253
  )
254