text-to-panorama / src /app /engine /getPanoramaFlux.ts
jbilcke-hf's picture
jbilcke-hf HF Staff
exhumation of this old project 🧟
f42fa3f
raw
history blame
1.7 kB
"use server"
import { HfInference, HfInferenceEndpoint } from '@huggingface/inference'
import { filterOutBadWords } from "./censorship"
/**
 * Generates a 360° panorama image from a text prompt via the Hugging Face
 * Inference API and returns it as a base64-encoded data URI.
 *
 * The prompt is passed through `filterOutBadWords` and decorated with
 * panorama-oriented keywords before being sent to the model.
 *
 * @param prompt - user-supplied scene description (must be non-empty)
 * @returns a `data:<mime>;base64,...` string (defaults to image/jpeg when
 *          the API does not report a MIME type)
 * @throws Error if the prompt is empty or HF_API_KEY is not set
 */
export async function getPanoramaFlux({
  prompt,
}: {
  prompt: string
}): Promise<string> {
  if (!prompt) {
    console.error(`cannot call the rendering API without a prompt, aborting..`)
    throw new Error(`cannot call the rendering API without a prompt, aborting..`)
  }

  // Fail fast: interpolating an unset env var would send the literal
  // string "undefined" as the API token and fail opaquely downstream.
  const apiKey = process.env.HF_API_KEY
  if (!apiKey) {
    throw new Error(`missing HF_API_KEY environment variable, aborting..`)
  }

  // Build a decorated copy instead of mutating the caller-visible parameter.
  const decoratedPrompt = [
    `hdri view`,
    `highly detailed`,
    `intricate details`,
    filterOutBadWords(prompt)
  ].join(', ')

  console.log(`calling API with prompt: ${decoratedPrompt}`)

  // Let TS infer the concrete HfInference type rather than annotating with
  // the unrelated HfInferenceEndpoint.
  const hf = new HfInference(apiKey)

  const blob: Blob = await hf.textToImage({
    model: "<put a 360° flux model here>",
    inputs: decoratedPrompt,
    parameters: {
      height: 1024,
      // 2:1 aspect ratio — presumably expected for an equirectangular
      // panorama; confirm against the chosen model's requirements.
      width: 2048,
      // this triggers the following exception:
      // Error: __call__() got an unexpected keyword argument 'negative_prompt'
      // negative_prompt: request.prompts.image.negative || '',
      /**
       * The number of denoising steps. More denoising steps usually lead to a higher quality image at the expense of slower inference.
       */
      // num_inference_steps?: number;
      /**
       * Guidance scale: Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, usually at the expense of lower image quality.
       */
      // guidance_scale?: number;
    },
  })

  // Convert the binary image to a base64 data URI usable directly as an
  // <img src>; fall back to image/jpeg when the blob carries no MIME type.
  const buffer = Buffer.from(await blob.arrayBuffer())
  return `data:${blob.type || 'image/jpeg'};base64,${buffer.toString('base64')}`
}