Guhanselvam commited on
Commit
ea31fb2
·
verified ·
1 Parent(s): 410eefd

Create app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -0
app.py ADDED
@@ -0,0 +1,61 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import os

import requests
import streamlit as st
from transformers import AutoModelForCausalLM, AutoTokenizer
4
+
5
# Load the Llama 3.2 model and tokenizer.
model_name = "your-llama-3-2-model"  # TODO: replace with the real Hugging Face Hub model id


@st.cache_resource
def _load_llm(name: str):
    """Load the tokenizer and causal-LM once.

    Streamlit re-executes the whole script on every user interaction; without
    caching, the multi-GB model would be re-downloaded/re-instantiated on each
    rerun. ``st.cache_resource`` keeps a single shared instance per process.
    """
    return (
        AutoTokenizer.from_pretrained(name),
        AutoModelForCausalLM.from_pretrained(name),
    )


# Keep the original module-level names so the rest of the file is unchanged.
tokenizer, model = _load_llm(model_name)
9
+
10
def search_api_call(query: str):
    """Fetch live search results for *query* from the Serper API.

    Returns the list stored under ``'organic_results'`` on success, or
    ``None`` on any network/HTTP/parse failure (the error is surfaced in the
    Streamlit UI via ``st.error``).
    """
    # SECURITY: the API key was previously hard-coded here and committed to
    # the repository — that key is leaked and must be rotated. Prefer the
    # environment; the old literal remains only as a backward-compatible
    # fallback until rotation.
    api_key = os.environ.get(
        "SERPER_API_KEY", "614e992d87c496fb15a81a2039e00e6a42530f5c"
    )
    url = "https://api.serper.dev/search"

    try:
        # params= URL-encodes the query (the old f-string interpolation broke
        # on spaces/'&'); timeout= keeps the Streamlit worker from hanging.
        response = requests.get(
            url, params={"api_key": api_key, "q": query}, timeout=10
        )
        response.raise_for_status()  # surface HTTP errors instead of parsing an error page
        data = response.json()
        # NOTE(review): key assumed from the original code — confirm against
        # the actual Serper response schema.
        return data["organic_results"]
    except (requests.RequestException, ValueError, KeyError) as exc:
        # ValueError: response body was not JSON; KeyError: schema mismatch.
        st.error("Error fetching from the API: " + str(exc))
        return None
21
+
22
def is_llm_insufficient(llm_response: str) -> bool:
    """Return True when the model's reply is blank or an "I don't know" refusal."""
    stripped = llm_response.strip()
    return not stripped or "I don't know" in llm_response
24
+
25
def summarize_search_results(results):
    """Render search hits as a newline-terminated markdown bullet list.

    Each entry becomes ``- <title> (<link>)``; an empty input yields ``""``.
    """
    return "".join(
        f"- {hit['title']} ({hit['link']})\n" for hit in results
    )
30
+
31
def generate_llm_response(user_query: str) -> str:
    """Run the module-level causal LM on *user_query* and return the decoded text.

    Generation is capped at 150 tokens; special tokens are stripped from the
    decoded output.
    """
    encoded = tokenizer(user_query, return_tensors="pt")
    generated = model.generate(**encoded, max_length=150)
    first_sequence = generated[0]
    return tokenizer.decode(first_sequence, skip_special_tokens=True)
35
+
36
# Streamlit user interface. Streamlit executes this script top-to-bottom on
# every interaction, so the UI lives at module level — no main() entry point.
st.title("Real-Time Factual Information Fetcher")
user_query = st.text_input("Enter your query:")

if st.button("Submit"):
    if user_query:
        llm_response = generate_llm_response(user_query)

        # Augment with a live web search when the LLM answer looks
        # insufficient, or when the user explicitly asks about "recent" info.
        if is_llm_insufficient(llm_response) or 'recent' in user_query.lower():
            search_results = search_api_call(user_query)
            if search_results:
                search_summary = summarize_search_results(search_results)
                combined_response = f"{llm_response}\n\nHere are some recent findings:\n{search_summary.strip()}"
            else:
                combined_response = llm_response  # Fallback to the original LLM response
        else:
            combined_response = llm_response

        st.markdown("### Response:")
        st.markdown(combined_response)
    else:
        st.warning("Please enter a query.")

# BUGFIX: the file previously ended with
#     if __name__ == "__main__":
#         import streamlit as st
# which was a dead no-op (streamlit is already imported at the top and the
# guard executed nothing useful). Removed.