import os

import streamlit as st
import torch
from groq import Groq
from langdetect import detect
from transformers import pipeline

# Log the PyTorch version and check whether a GPU is available
print(torch.__version__)
print("CUDA available:", torch.cuda.is_available())

# st.set_page_config must be the first Streamlit call in the script
st.set_page_config(page_title="Homeo Advisor", page_icon="🌿")

# Load Hugging Face token from environment
HF_TOKEN = os.environ.get("homeo_doc")
if not HF_TOKEN:
    st.error("Missing Hugging Face API token. Set 'homeo_doc' in environment variables.")
    st.stop()

# Language code mapping: langdetect ISO codes -> NLLB-200 codes
LANG_CODE_MAP = {
    'en': 'eng_Latn',  # English
    'ur': 'urd_Arab',  # Urdu
    'ar': 'arb_Arab',  # Arabic
    'es': 'spa_Latn',  # Spanish
    'hi': 'hin_Deva',  # Hindi
    'fr': 'fra_Latn'   # French
}


@st.cache_resource
def load_translator():
    """Initialize the NLLB-200 translation pipeline (cached so the model is not reloaded on every rerun)."""
    return pipeline("translation", model="facebook/nllb-200-distilled-600M", token=HF_TOKEN)


@st.cache_resource
def load_groq_client():
    """Initialize the Groq client for homeopathic advice (cached across reruns)."""
    return Groq(api_key=os.environ.get("GROQ_API_KEY"))


translator = load_translator()
groq_client = load_groq_client()


def translate_text(text, target_lang='eng_Latn'):
    """Translate text into the target language using NLLB-200."""
    try:
        source_lang = detect(text)
        source_code = LANG_CODE_MAP.get(source_lang, 'eng_Latn')
        translation = translator(text, src_lang=source_code, tgt_lang=target_lang)[0]['translation_text']
        return translation
    except Exception as e:
        st.error(f"Translation error: {str(e)}")
        return text


def get_homeopathic_advice(symptoms):
    """Get homeopathic advice in English using the Groq API."""
    try:
        response = groq_client.chat.completions.create(
            model="llama3-70b-8192",
            messages=[{
                "role": "user",
                "content": f"Act as a homeopathic expert. Suggest remedies for: {symptoms}"
            }],
            temperature=0.3
        )
        return response.choices[0].message.content
    except Exception as e:
        return f"Error: {str(e)}"


# Streamlit UI
st.title("🌍 Multilingual Homeopathic Advisor")

# Chat interface
if "messages" not in st.session_state:
    st.session_state.messages = []

for message in st.session_state.messages:
    with st.chat_message(message["role"]):
        st.markdown(message["content"])

if prompt := st.chat_input("Describe symptoms in any language"):
    st.session_state.messages.append({"role": "user", "content": prompt})
    with st.chat_message("user"):
        st.markdown(prompt)

    # Process input
    with st.spinner("Analyzing..."):
        # Detect the user's language so the advice can be translated back
        try:
            source_lang = detect(prompt)
        except Exception:
            source_lang = 'en'

        # Translate input to English
        english_input = translate_text(prompt)

        # Get medical advice
        english_advice = get_homeopathic_advice(english_input)

        # Translate the advice back to the original language
        translated_advice = translate_text(
            english_advice,
            target_lang=LANG_CODE_MAP.get(source_lang, 'eng_Latn')
        )

        # Format response
        final_response = f"""
**English Recommendation:**
{english_advice}

**Translated Recommendation ({source_lang.upper()}):**
{translated_advice}
"""

    # Display response
    with st.chat_message("assistant"):
        st.markdown(final_response)
    st.session_state.messages.append({"role": "assistant", "content": final_response})

# Disclaimer
st.caption("⚠️ This is not medical advice. Consult a professional.")
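
# --- Running the app ---
# A minimal sketch of how this script could be launched, assuming it is saved
# as app.py (the filename is an assumption, not stated in the source). Both
# environment variables below are the ones the script reads at startup:
#
#   export homeo_doc="<your Hugging Face token>"
#   export GROQ_API_KEY="<your Groq API key>"
#   streamlit run app.py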