import streamlit as st
from PIL import Image
from transformers import pipeline
from diffusers import StableDiffusionPipeline
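
# Caption-and-blend app: generate a caption for each uploaded image with an
# image-to-text model, join the captions into a single prompt, and feed that
# prompt to Stable Diffusion to produce a blended image.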

# Load the Stable Diffusion pipeline once and cache it across Streamlit reruns,
# so the model is not reloaded on every widget interaction
@st.cache_resource
def load_sd_pipeline():
    return StableDiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4")

pipe = load_sd_pipeline()

captions = []

with st.sidebar:
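    # With accept_multiple_files=True, file_uploader returns a list of
    # uploaded files (empty until the user uploads something)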
    files = st.file_uploader("Upload images to blend", accept_multiple_files=True)
    st.divider()
    caption_model = st.selectbox("Caption Model", [
        "ydshieh/vit-gpt2-coco-en",
        "Salesforce/blip-image-captioning-large",
        "nlpconnect/vit-gpt2-image-captioning",
        "microsoft/git-base"
    ])
    st.divider()
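    # Guidance scale: how strongly generation follows the prompt;
    # inference steps: number of denoising steps (more = slower, finer detail)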
    image_gen_guidance = st.slider("Stable Diffusion: Guidance Scale", min_value=1.0, max_value=20.0, value=7.5)
    image_gen_steps = st.slider("Stable Diffusion: Inference Steps", min_value=1, max_value=150, value=50)

col1, col2 = st.columns(2)
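# Left column: uploaded images with their captions; right column: the blended result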

with col1:
    if files:
        # Build the captioning pipeline once rather than once per image
        captioner = pipeline("image-to-text", model=caption_model)

    for uploaded_file in files:
        image = Image.open(uploaded_file)

        with st.spinner('Captioning provided image'):
            # The pipeline returns a list of dicts, e.g. [{"generated_text": "..."}]
            caption = captioner(image)[0]["generated_text"]

        captions.append(caption)
        st.image(image, caption=caption)

with col2:
    if len(captions) > 0:
        # Blend every caption into a single Stable Diffusion prompt
        description = ' '.join(captions)

        with st.spinner(f'Generating image for: {description}'):
            images = pipe(
                description,
                guidance_scale=image_gen_guidance,
                num_inference_steps=image_gen_steps,
            ).images

        for image in images:
            st.image(image, caption=description)
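
# Launch locally with: streamlit run <path-to-this-file>.py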