alfrds committed
Commit 4603c75 · 1 Parent(s): 9dbb565
Files changed (3)
  1. app.py +69 -3
  2. generate.py +6 -8
  3. old_app.py +59 -0
app.py CHANGED
@@ -6,10 +6,12 @@ from diffusers import StableDiffusionPipeline
 import boto3
 from io import BytesIO
 import os
+import botocore
+from time import sleep
 
 AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
 AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
-S3_BUCKET_NAME = os.getenv("BUCKET_NAME")
+S3_BUCKET_NAME = 'pineblogs101145-dev'
 
 model_id = "CompVis/stable-diffusion-v1-4"
 device = "cuda" if torch.cuda.is_available() else "cpu"
@@ -19,6 +21,18 @@ pipe = StableDiffusionPipeline.from_pretrained(
 
 pipe = pipe.to(device)
 
+s3 = boto3.resource('s3',
+                    aws_access_key_id=AWS_ACCESS_KEY_ID,
+                    aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
+
+s3_client = boto3.client('s3',
+                         aws_access_key_id=AWS_ACCESS_KEY_ID,
+                         aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
+
+bucket_name = 'pineblogs101145-dev'
+folder = 'public/mdx/'
+
+
 def text_to_image(prompt, save_as, key_id):
 
     if AWS_ACCESS_KEY_ID != key_id:
@@ -39,13 +53,21 @@ def text_to_image(prompt, save_as, key_id):
 
         # Full path of the file in the bucket
         s3_key = "public/" + image_name
+        print('Saving image to s3')
 
         # Upload the image to the S3 bucket
         s3.upload_fileobj(image_buffer, S3_BUCKET_NAME, s3_key)
+        print('Image saved to s3')
 
     def generator_image(prompt):
         prompt = prompt
-        image = pipe(prompt).images[0]
+        print('Starting to generate the image ...')
+        try:
+            image = pipe(prompt).images[0]
+        except Exception as e:
+            print('Error: ', e)
+
+        print('Image generation completed')
 
         # Save the image in S3
         save_image_to_s3(image)
@@ -54,6 +76,50 @@ def text_to_image(prompt, save_as, key_id):
     return image_name
 
 
+def check_if_exist(bucket_name, key):
+
+    try:
+        s3.Object(bucket_name, key).load()
+    except botocore.exceptions.ClientError as e:
+        if e.response['Error']['Code'] == "404":
+            # The object does not exist.
+            return False
+        else:
+            # Something else has gone wrong.
+            raise
+    else:
+        return True
+
+
+def list_s3_files(bucket_name, folder):
+
+    my_bucket = s3.Bucket(bucket_name)
+
+    for objects in my_bucket.objects.filter(Prefix=folder):
+        print(objects.key)
+
+        filename_ext = '%s' % os.path.basename(objects.key)
+        filename = os.path.splitext(filename_ext)[0]
+        s3image = 'public/%s.webp' % filename
+
+        if check_if_exist(bucket_name, s3image):
+            print('Image %s already exists!' % s3image)
+        else:
+            response = s3_client.head_object(Bucket=bucket_name, Key=objects.key)
+            metadata = response['Metadata']
+            print(metadata)
+            if 'resumen' in metadata:
+                print('Has resume, ready to create image!')
+                print('Start creating image.. %s ' % s3image)
+                resumen = metadata['resumen']
+            else:
+                print('There is NOT resume, skipping..')
+
+            sleep(500/1000)
+            text_to_image(resumen, filename, AWS_ACCESS_KEY_ID)
+
+
+list_s3_files(bucket_name, folder)
 
-iface = gr.Interface(fn=text_to_image, inputs=[Textbox(label="prompt"), Textbox(label="s3_save_as"), Textbox(label="aws_key_id")], outputs="text")
+iface = gr.Interface(fn=list_s3_files, inputs=[Textbox(label="bucket_name"), Textbox(label="folder")], outputs="text")
 iface.launch()
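
Note on the metadata contract the new code relies on: list_s3_files only generates an image for an .mdx object whose S3 user metadata contains a 'resumen' key, which head_object returns and which is then passed to text_to_image as the prompt. A minimal, hypothetical sketch of tagging a post with that metadata so the batch pass picks it up (the local filename, key, and summary text below are illustrative, not part of this commit):

import os
import boto3

s3_client = boto3.client('s3',
                         aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
                         aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"))

# Upload (or re-upload) the .mdx post with the 'resumen' user metadata that
# head_object later returns under response['Metadata']['resumen'].
with open('my-first-post.mdx', 'rb') as f:        # hypothetical local file
    s3_client.put_object(
        Bucket='pineblogs101145-dev',
        Key='public/mdx/my-first-post.mdx',       # hypothetical key under the scanned prefix
        Body=f,
        Metadata={'resumen': 'A short post summary used as the image prompt'},
    )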
generate.py CHANGED
@@ -29,6 +29,9 @@ s3_client = boto3.client('s3',
                          aws_access_key_id=AWS_ACCESS_KEY_ID,
                          aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
 
+bucket_name = 'pineblogs101145-dev'
+folder = 'public/mdx/'
+
 
 def text_to_image(prompt, save_as, key_id):
 
@@ -73,11 +76,6 @@ def text_to_image(prompt, save_as, key_id):
     return image_name
 
 
-
-# iface = gr.Interface(fn=text_to_image, inputs=[Textbox(label="prompt"), Textbox(label="s3_save_as"), Textbox(label="aws_key_id")], outputs="text")
-# iface.launch()
-
-
 def check_if_exist(bucket_name, key):
 
     try:
@@ -121,7 +119,7 @@ def list_s3_files(bucket_name, folder):
             text_to_image(resumen, filename, AWS_ACCESS_KEY_ID)
 
 
-bucket_name = 'pineblogs101145-dev'
-folder = 'public/mdx/'
-
 list_s3_files(bucket_name, folder)
+
+iface = gr.Interface(fn=list_s3_files, inputs=[Textbox(label="bucket_name"), Textbox(label="folder")], outputs="text")
+iface.launch()
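
For a quick standalone check of the existence test that both scripts now share, the snippet below mirrors check_if_exist and probes a single key; the bucket and key names are placeholders, and credentials come from the same environment variables the scripts read:

import os
import boto3
import botocore

s3 = boto3.resource('s3',
                    aws_access_key_id=os.getenv("AWS_ACCESS_KEY_ID"),
                    aws_secret_access_key=os.getenv("AWS_SECRET_ACCESS_KEY"))

def check_if_exist(bucket_name, key):
    # Same approach as generate.py/app.py: a HEAD request where a 404 means "missing".
    try:
        s3.Object(bucket_name, key).load()
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            return False
        raise
    return True

print(check_if_exist('pineblogs101145-dev', 'public/my-first-post.webp'))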
old_app.py ADDED
@@ -0,0 +1,59 @@
+import gradio as gr
+from gradio.inputs import Textbox
+
+import torch
+from diffusers import StableDiffusionPipeline
+import boto3
+from io import BytesIO
+import os
+
+AWS_ACCESS_KEY_ID = os.getenv("AWS_ACCESS_KEY_ID")
+AWS_SECRET_ACCESS_KEY = os.getenv("AWS_SECRET_ACCESS_KEY")
+S3_BUCKET_NAME = os.getenv("BUCKET_NAME")
+
+model_id = "CompVis/stable-diffusion-v1-4"
+device = "cuda" if torch.cuda.is_available() else "cpu"
+
+pipe = StableDiffusionPipeline.from_pretrained(
+    model_id, torch_dtype=torch.float32)
+
+pipe = pipe.to(device)
+
+def text_to_image(prompt, save_as, key_id):
+
+    if AWS_ACCESS_KEY_ID != key_id:
+        return "not permition"
+
+    # Create an instance of the S3 client
+    s3 = boto3.client('s3',
+                      aws_access_key_id=AWS_ACCESS_KEY_ID,
+                      aws_secret_access_key=AWS_SECRET_ACCESS_KEY)
+
+    image_name = '-'.join(save_as.split()) + ".webp"
+
+    def save_image_to_s3(image):
+        # Create a BytesIO object to store the image.
+        image_buffer = BytesIO()
+        image.save(image_buffer, format='WEBP')
+        image_buffer.seek(0)
+
+        # Full path of the file in the bucket
+        s3_key = "public/" + image_name
+
+        # Upload the image to the S3 bucket
+        s3.upload_fileobj(image_buffer, S3_BUCKET_NAME, s3_key)
+
+    def generator_image(prompt):
+        prompt = prompt
+        image = pipe(prompt).images[0]
+
+        # Save the image in S3
+        save_image_to_s3(image)
+
+    generator_image(prompt)
+    return image_name
+
+
+
+iface = gr.Interface(fn=text_to_image, inputs=[Textbox(label="prompt"), Textbox(label="s3_save_as"), Textbox(label="aws_key_id")], outputs="text")
+iface.launch()
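
old_app.py keeps the original single-prompt flow, where save_as is normalised into the uploaded object name. A small illustration of that naming step in isolation (the helper name is ours and the sample titles are made up):

def to_image_name(save_as: str) -> str:
    # The same expression text_to_image uses: '-'.join(save_as.split()) + ".webp"
    return '-'.join(save_as.split()) + ".webp"

print(to_image_name("my first post"))      # -> my-first-post.webp
print(to_image_name("  spaced   title "))  # -> spaced-title.webp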