import { HfInference } from '@huggingface/inference'

// Hugging Face Inference client, authenticated with the HUGGINGFACE_API_KEY environment variable
const hf = new HfInference(process.env.HUGGINGFACE_API_KEY)

// POST handler: takes patient symptoms and medical history, returns a model-generated recommendation
export async function POST(request: Request) {
  try {
    const { symptoms, medicalHistory } = await request.json()

    // Reject incomplete requests instead of interpolating "undefined" into the prompt
    if (!symptoms || !medicalHistory) {
      return Response.json({ error: 'symptoms and medicalHistory are required' }, { status: 400 })
    }

    const prompt = `Given the following patient information:
    Symptoms: ${symptoms}
    Medical History: ${medicalHistory}

    Please recommend appropriate medications and treatments. Consider potential drug interactions and contraindications.`

    // Text generation against the hosted Inference API.
    // Note: a max_new_tokens value this large may be rejected or truncated by the provider,
    // depending on the model's context window and the API's per-request limits.
    const response = await hf.textGeneration({
      model: "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
      inputs: prompt,
      parameters: {
        max_new_tokens: 32000,
        temperature: 0.7
      }
    })

    return Response.json({ recommendation: response.generated_text })
  } catch (error) {
    // Log the underlying error server-side; return a generic message to the client
    console.error(error)
    return Response.json({ error: 'Failed to generate recommendation' }, { status: 500 })
  }
}
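
For reference, a minimal sketch of calling this route from client code. The '/api/recommend' path and the getRecommendation helper name are assumptions; the actual URL depends on where this route file sits in the Next.js app directory.

// Usage sketch (assumption: the route above is served at /api/recommend)
async function getRecommendation(symptoms: string, medicalHistory: string): Promise<string> {
  const res = await fetch('/api/recommend', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ symptoms, medicalHistory })
  })
  if (!res.ok) {
    throw new Error(`Recommendation request failed with status ${res.status}`)
  }
  const data = await res.json()
  return data.recommendation as string
}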