abinashnp committed on
Commit c45dc39 · 1 Parent(s): b590e5f

Initial Space setup

Files changed (1)
  1. app.py +47 -13
app.py CHANGED
@@ -2,37 +2,71 @@ import gradio as gr
 from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
 from peft import PeftModel
 
-# 1) Load the original base model & tokenizer
+# 1) Base model & tokenizer
 BASE_MODEL = "facebook/blenderbot-400M-distill"
 tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
 base_model = AutoModelForSeq2SeqLM.from_pretrained(BASE_MODEL)
 
-# 2) Load your fine-tuned LoRA adapter on top
+# 2) Attach your LoRA adapter
 ADAPTER_REPO = "abinashnp/bayedger-chatbot"
 model = PeftModel.from_pretrained(base_model, ADAPTER_REPO)
 
-# 3) Wrap that in a text2text pipeline
+# 3) Build the text2text pipeline (no explicit device arg)
 chatbot = pipeline(
     "text2text-generation",
     model=model,
     tokenizer=tokenizer,
-    device_map="auto",   # leave out device arg when using accelerate device_map
+    # device_map="auto"  # only if you use Accelerate; otherwise remove
+)
+
+# 4) System prompt (context) that always precedes user questions
+SYSTEM_PROMPT = (
+    "You are BayEdger’s AI assistant. You only answer FAQs about BayEdger’s "
+    "services, pricing, and contact info. If you don’t know the answer, "
+    "you must say exactly:\n"
+    '"Sorry, I don’t have that info—please contact [email protected]."\n\n'
+    "Here is what you should know about BayEdger:\n"
+    "- AI-powered websites and automation\n"
+    "- Chatbots, email agents, process automation, analytics, content gen\n"
+    "- Clear pricing tiers: Basic site ($400), Chatbot ($750+50/mo), Email ($1k+100/mo), etc.\n"
+    "- Starter/Growth/Premium bundles\n"
+    "- Contact: [email protected], +1-234-559-87994, 13 Madison St, NY\n\n"
 )
 
 def respond(query):
+    # 5) Compose the full prompt (explicit "+" needed: a variable name
+    # followed by a string literal is a SyntaxError, not concatenation)
+    prompt = (
+        SYSTEM_PROMPT
+        + f"question: {query}\n"
+        + "answer:"
+    )
+
+    # 6) Generate deterministically (beam search, no sampling)
     out = chatbot(
-        f"question: {query} answer:",
-        max_new_tokens=150,
-        temperature=1.0,
-        top_p=0.9,
-        repetition_penalty=1.1,
-        num_beams=1
+        prompt,
+        max_new_tokens=128,
+        do_sample=False,
+        num_beams=2,
+        early_stopping=True,
+        pad_token_id=tokenizer.eos_token_id
     )[0]["generated_text"]
-    return out
+
+    # 7) Strip off everything up through our "answer:" marker, if echoed
+    if "answer:" in out:
+        reply = out.split("answer:", 1)[1].strip()
+    else:
+        reply = out.strip()
+
+    # 8) Fallback: if the model didn’t produce anything substantial
+    if len(reply) < 15 or "don't know" in reply.lower() or "sorry" in reply.lower():
+        return "Sorry, I don’t have that info—please contact [email protected]."
+
+    return reply
 
+# 9) Gradio UI
 with gr.Blocks() as demo:
-    gr.Markdown("# 🤖 Bayedger FAQ Chatbot")
-    txt = gr.Textbox(label="Ask me anything")
+    gr.Markdown("# 🤖 BayEdger FAQ Chatbot")
+    txt = gr.Textbox(placeholder="Ask me about BayEdger…", label="Your question")
     out = gr.Textbox(label="Answer")
     txt.submit(respond, txt, out)
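
Note: the hunk ends at txt.submit(...) with no demo.launch() in view; anything past old line 38 / new line 72 is simply outside the diff context. If app.py really ends here, the Blocks UI is defined but never served, since a Gradio script only starts its server when launch() is called. A minimal sketch of the assumed file tail (hypothetical, not part of this commit):

# Assumed tail of app.py: without launch(), the Blocks app is built but never served.
demo.launch()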
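
For anyone who wants to exercise the adapter outside the Space, here is a minimal local smoke test. It mirrors the setup in the diff; the one addition is PEFT's merge_and_unload(), which folds the LoRA weights back into the base model. The example question is illustrative, and the adapter repo is assumed to be publicly readable:

# Local smoke test: same base model + LoRA adapter as in app.py.
# merge_and_unload() bakes the adapter into the base weights, returning a
# plain seq2seq model (no PEFT wrapper on the forward pass).
from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, pipeline
from peft import PeftModel

BASE_MODEL = "facebook/blenderbot-400M-distill"
ADAPTER_REPO = "abinashnp/bayedger-chatbot"

tokenizer = AutoTokenizer.from_pretrained(BASE_MODEL)
base = AutoModelForSeq2SeqLM.from_pretrained(BASE_MODEL)
model = PeftModel.from_pretrained(base, ADAPTER_REPO).merge_and_unload()

chatbot = pipeline("text2text-generation", model=model, tokenizer=tokenizer)

result = chatbot(
    "question: What services does BayEdger offer?\nanswer:",  # illustrative query
    max_new_tokens=64,
    do_sample=False,
)[0]["generated_text"]
print(result)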