import gradio as gr

import torch
from diffusers import StableDiffusionPipeline
import boto3
from io import BytesIO
import os
import botocore
from time import sleep
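
# This script generates cover images for MDX blog posts stored in S3: every
# post under public/mdx/ that has a 'resumen' (summary) metadata entry but no
# matching image yet gets one generated with Stable Diffusion and uploaded as
# public/<slug>.webp.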

AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
S3_BUCKET_NAME = 'pineblogs101145-dev'

model_id = "CompVis/stable-diffusion-v1-4"
device = "cuda" if torch.cuda.is_available() else "cpu"

# Load the Stable Diffusion v1.4 pipeline once at startup; float32 keeps it
# runnable on CPU as well as on GPU.
pipe = StableDiffusionPipeline.from_pretrained(
    model_id, torch_dtype=torch.float32)

pipe = pipe.to(device)

# Module-level S3 handles: the resource is used for existence checks,
# the client for reading object metadata via head_object.
s3 = boto3.resource('s3')

s3_client = boto3.client('s3')


def text_to_image(prompt, save_as, key_id):
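    # Generate an image for `prompt` with Stable Diffusion and upload it to S3
    # as "<save_as slug>.webp". key_id acts as a simple shared secret: it must
    # match the configured AWS access key id or the request is rejected.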

    if AWS_ACCESS_KEY_ID != key_id:
        return "permission denied"

    # Create an instance of the S3 client
    s3 = boto3.client('s3',
                      aws_access_key_id=AWS_ACCESS_KEY_ID,
                      aws_secret_access_key=AWS_SECRET_ACCESS_KEY)

    # Turn the title into a slug, e.g. "my post title" -> "my-post-title.webp"
    image_name = '-'.join(save_as.split()) + ".webp"

    def save_image_to_s3(image):
        # Create a BytesIO object to store the image.
        image_buffer = BytesIO()
        image.save(image_buffer, format='WEBP')
        image_buffer.seek(0)

        # Full path of the file in the bucket
        s3_key = "public/" + image_name

        # Upload the image to the S3 bucket
        s3.upload_fileobj(image_buffer, S3_BUCKET_NAME, s3_key)

    def generator_image(prompt):
        # Run the diffusion pipeline and keep the first generated image
        image = pipe(prompt).images[0]

        # Save the image in S3
        save_image_to_s3(image)

    generator_image(prompt)
    return image_name



# iface = gr.Interface(
#     fn=text_to_image,
#     inputs=[gr.Textbox(label="prompt"), gr.Textbox(label="s3_save_as"), gr.Textbox(label="aws_key_id")],
#     outputs="text")
# iface.launch()
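# A minimal usage sketch (example values, not from the original code):
#   text_to_image("a watercolor pine forest", "my post title", AWS_ACCESS_KEY_ID)
# would upload public/my-post-title.webp to the bucket and return that file name.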


def check_if_exist(bucket_name, key):
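    # Return True if the S3 object exists, False on a 404; re-raise other errors.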

    try:
        s3.Object(bucket_name, key).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            # The object does not exist.
            return False
        else:
            # Something else has gone wrong.
            raise
    else:
        return True


def list_s3_files(bucket_name, folder):
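    # Walk the objects under `folder` and, for each post that has a 'resumen'
    # (summary) metadata entry but no public/<name>.webp yet, generate and
    # upload a cover image from that summary.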

    my_bucket = s3.Bucket(bucket_name)

    for objects in my_bucket.objects.filter(Prefix=folder):
        print(objects.key)

        filename_ext = os.path.basename(objects.key)
        filename = os.path.splitext(filename_ext)[0]
        s3image = 'public/%s.webp' % filename

        if check_if_exist(bucket_name, s3image):
            print('Image %s already exists!' % s3image)
        else:
            response = s3_client.head_object(Bucket=bucket_name, Key=objects.key)
            metadata = response['Metadata']
            print(metadata)
            if 'resumen' in metadata:
                print('Has a summary, ready to create image!')
                print('Start creating image.. %s ' % s3image)
                resumen = metadata['resumen']
                # Generate and upload the cover image for this post
                text_to_image(resumen, filename, AWS_ACCESS_KEY_ID)
            else:
                print('There is NO summary, skipping..')

        # Throttle between objects (500 ms)
        sleep(500 / 1000)


bucket_name = 'pineblogs101145-dev'
folder = 'public/mdx/'

list_s3_files(bucket_name, folder)