IAMTFRMZA committed (verified)
Commit: 624ce0c
Parent(s): bf7ece0
Files changed (1): app.py (+13 -11)
app.py CHANGED
@@ -7,11 +7,14 @@ from PyPDF2 import PdfReader
 import docx
 from pptx import Presentation
 import openpyxl
+from transformers import AutoModelForSeq2SeqLM, AutoTokenizer
 
-# Load the model
-model = pipeline("question-answering", model="facebook/llama-7b-hf")
+# Load the RAG model
+model_name = "facebook/llama-7b-hf"
+rag_tokenizer = AutoTokenizer.from_pretrained(model_name)
+rag_model = AutoModelForSeq2SeqLM.from_pretrained(model_name)
 
-# Function to read text from uploaded documents
+# Define a function to read text from uploaded documents
 def read_text_from_document(file):
     if file.name.endswith('.txt'):
         text = file.read().decode('utf-8')
@@ -41,7 +44,7 @@ def read_text_from_document(file):
                 text += str(cell.value) + ' '
     return text
 
-# Function to scrape URL
+# Define a function to scrape URL
 def scrape_url(url):
     try:
         response = requests.get(url)
@@ -51,14 +54,13 @@ def scrape_url(url):
     except Exception as e:
         return str(e)
 
-# Function to answer questions based on input data
+# Define a function to answer questions based on input data using RAG
 def answer_questions(data, question):
     if data:
-        try:
-            result = model(question=question, context=data)
-            return result['answer']
-        except Exception as e:
-            return str(e)
+        inputs = rag_tokenizer.encode("Question: " + question + " Context: " + data, return_tensors="pt")
+        outputs = rag_model.generate(inputs, max_length=100)
+        answer = rag_tokenizer.decode(outputs[0], skip_special_tokens=True)
+        return answer
     else:
         return "No data provided"
 
@@ -71,7 +73,7 @@ demo = gr.Interface(
         gr.Textbox(label="Ask a question")
     ],
     outputs=gr.Textbox(label="Answer"),
-    title="LLM Chatbot",
+    title="RAG Chat",
     description="Upload a document or enter a URL and ask a question"
 )
 
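For reference, below is a minimal, self-contained sketch of the tokenize -> generate -> decode flow that the new answer_questions() follows. It uses google/flan-t5-small purely as an illustrative seq2seq checkpoint (an assumption for the sake of a small, runnable example, not the model named in the commit), and the answer() helper and its prompt string mirror the committed code's shape rather than being part of it.

# Sketch of the encode -> generate -> decode pattern introduced in answer_questions().
# Assumption: google/flan-t5-small stands in as a small seq2seq model for illustration.
from transformers import AutoModelForSeq2SeqLM, AutoTokenizer

model_name = "google/flan-t5-small"
tokenizer = AutoTokenizer.from_pretrained(model_name)
model = AutoModelForSeq2SeqLM.from_pretrained(model_name)

def answer(context: str, question: str) -> str:
    # Same prompt shape as the committed code: the question followed by the context.
    inputs = tokenizer.encode(
        "Question: " + question + " Context: " + context,
        return_tensors="pt",
        truncation=True,  # keep long scraped/extracted text within the model's input limit
    )
    outputs = model.generate(inputs, max_length=100)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

print(answer("Gradio builds simple web UIs for Python functions.", "What does Gradio build?"))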