Tonic committed on
Commit
29dc181
·
1 Parent(s): 65e7e18

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -69
app.py CHANGED
@@ -76,10 +76,9 @@ languages = {
76
  components = {}
77
  dotenv.load_dotenv()
78
  seamless_client = Client("facebook/seamless_m4t")
 
79
  HuggingFace_Token = os.getenv("HuggingFace_Token")
80
  hf_token = os.getenv("HuggingFace_Token")
81
- base_model_id = os.getenv('BASE_MODEL_ID', 'default_base_model_id')
82
- model_directory = os.getenv('MODEL_DIRECTORY', 'default_model_directory')
83
  device = "cuda" if torch.cuda.is_available() else "cpu"
84
 
85
  image_description = ""
@@ -364,75 +363,17 @@ def query_vectara(text):
364
  return f"Error: {response.status_code}"
365
 
366
 
367
- # Functions to Wrap the Prompt Correctly
368
- def wrap_text(text, width=90):
369
- lines = text.split('\n')
370
- wrapped_lines = [textwrap.fill(line, width=width) for line in lines]
371
- wrapped_text = '\n'.join(wrapped_lines)
372
- return wrapped_text
373
-
374
-
375
- def multimodal_prompt(user_input, system_prompt="You are an expert medical analyst:"):
376
-
377
- # Combine user input and system prompt
378
- formatted_input = f"{user_input}{system_prompt}"
379
-
380
- # Encode the input text
381
- encodeds = tokenizer(formatted_input, return_tensors="pt", add_special_tokens=False)
382
- model_inputs = encodeds.to(device)
383
-
384
- # Generate a response using the model //MODEL UNDEFINED, using peft_model instead.
385
- output = peft_model.generate(
386
- **model_inputs,
387
- max_length=512,
388
- use_cache=True,
389
- early_stopping=True,
390
- bos_token_id=peft_model.config.bos_token_id,
391
- eos_token_id=peft_model.config.eos_token_id,
392
- pad_token_id=peft_model.config.eos_token_id,
393
- temperature=0.1,
394
- do_sample=True
395
- )
396
-
397
- # Decode the response
398
- response_text = tokenizer.decode(output[0], skip_special_tokens=True)
399
-
400
- return response_text
401
-
402
-
403
- # Instantiate the Tokenizer
404
- tokenizer = AutoTokenizer.from_pretrained("stabilityai/stablelm-3b-4e1t", token=hf_token, trust_remote_code=True, padding_side="left")
405
- # tokenizer = AutoTokenizer.from_pretrained("Tonic/stablemed", trust_remote_code=True, padding_side="left")
406
- tokenizer.pad_token = tokenizer.eos_token
407
- tokenizer.padding_side = 'left'
408
-
409
- # Load the PEFT model
410
- peft_config = PeftConfig.from_pretrained("Tonic/stablemed", token=hf_token)
411
- peft_model = AutoModelForCausalLM.from_pretrained("stabilityai/stablelm-3b-4e1t", token=hf_token, trust_remote_code=True)
412
- peft_model = PeftModel.from_pretrained(peft_model, "Tonic/stablemed", token=hf_token)
413
-
414
-
415
- class ChatBot:
416
- def __init__(self):
417
- self.history = []
418
-
419
- @staticmethod
420
- def doctor(user_input, system_prompt="You are an expert medical analyst:"):
421
- formatted_input = f"{system_prompt}{user_input}"
422
- user_input_ids = tokenizer.encode(formatted_input, return_tensors="pt")
423
- response = peft_model.generate(input_ids=user_input_ids, max_length=512, pad_token_id=tokenizer.eos_token_id)
424
- response_text = tokenizer.decode(response[0], skip_special_tokens=True)
425
- return response_text
426
-
427
-
428
- bot = ChatBot()
429
-
430
-
431
  def process_summary_with_stablemed(summary):
432
- system_prompt = "You are a medical instructor . Assess and describe the proper options to your students in minute detail. Propose a course of action for them to base their recommendations on based on your description."
433
- response_text = bot.doctor(summary, system_prompt)
 
 
 
 
 
 
 
434
  return response_text
435
-
436
 
437
  # Main function to handle the Gradio interface logic
438
 
 
76
  components = {}
77
  dotenv.load_dotenv()
78
  seamless_client = Client("facebook/seamless_m4t")
79
+ mistralmed_client = Client("https://tonic1-mistralmed-chat.hf.space/--replicas/crzkn/")
80
  HuggingFace_Token = os.getenv("HuggingFace_Token")
81
  hf_token = os.getenv("HuggingFace_Token")
 
 
82
  device = "cuda" if torch.cuda.is_available() else "cpu"
83
 
84
  image_description = ""
 
363
  return f"Error: {response.status_code}"
364
 
365
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
366
  def process_summary_with_stablemed(summary):
367
+ system_prompt = "You are a medical instructor. Assess and describe the proper options to your students in minute detail. Propose a course of action for them to base their recommendations on based on your description."
368
+ # Use the Mistral Med Gradio client API call
369
+ result = mistralmed_client.predict(
370
+ summary, # Summary text
371
+ system_prompt, # System prompt
372
+ api_name="/predict"
373
+ )
374
+ # Assuming the result is the response text
375
+ response_text = result if isinstance(result, str) else "Error in processing"
376
  return response_text
 
377
 
378
  # Main function to handle the Gradio interface logic
379