// Source provenance (originally scraped page chrome, preserved as a comment):
// repo owner: fullstuckdev · commit: "first init" (7ce19a5) · 896 bytes
import { HfInference } from '@huggingface/inference'
// Shared Hugging Face Inference client for this route.
// NOTE(review): if HUGGINGFACE_API_KEY is unset this constructs an
// unauthenticated client rather than failing fast — confirm that is intended.
const hf = new HfInference(process.env.HUGGINGFACE_API_KEY)
/**
 * POST handler: generates a medication/treatment recommendation from
 * patient symptoms and medical history via a Hugging Face text-generation
 * model.
 *
 * Request body (JSON): `{ symptoms: string, medicalHistory: string }`.
 * Responses:
 *   200 — `{ recommendation: string }`
 *   400 — `{ error: string }` when either required field is missing
 *   500 — `{ error: string }` when parsing or generation fails
 */
export async function POST(request: Request) {
  try {
    const { symptoms, medicalHistory } = await request.json()

    // Reject incomplete requests up front instead of sending a degenerate
    // prompt (with "undefined" interpolated) to the model.
    if (!symptoms || !medicalHistory) {
      return Response.json(
        { error: 'Both symptoms and medicalHistory are required' },
        { status: 400 }
      )
    }

    const prompt = `Given the following patient information:
Symptoms: ${symptoms}
Medical History: ${medicalHistory}
Please recommend appropriate medications and treatments. Consider potential drug interactions and contraindications.`

    const response = await hf.textGeneration({
      model: "nvidia/Llama-3.1-Nemotron-70B-Instruct-HF",
      inputs: prompt,
      parameters: {
        // Was 32000, which exceeds the serverless Inference API's output
        // limits and would be rejected or truncated; 1024 tokens is ample
        // for a single recommendation.
        max_new_tokens: 1024,
        temperature: 0.7
      }
    })

    return Response.json({ recommendation: response.generated_text })
  } catch (error) {
    // Log the underlying cause for observability; the client still gets a
    // generic 500 so no internal details leak.
    console.error('Failed to generate recommendation:', error)
    return Response.json({ error: 'Failed to generate recommendation' }, { status: 500 })
  }
}