Ffftdtd5dtft committed
Commit: 182a3f4 (verified)
Parent(s): 8d09718

Update app.py

Files changed (1): app.py (+4, -15)
app.py CHANGED
@@ -5,10 +5,10 @@ import torch
 from PIL import Image
 from diffusers import StableDiffusionPipeline, StableDiffusionImg2ImgPipeline, FluxPipeline, DiffusionPipeline, DPMSolverMultistepScheduler
 from diffusers.utils import export_to_video
-from transformers import pipeline as transformers_pipeline, AutoModelForCausalLM, AutoTokenizer, TrainingArguments, Trainer
+from transformers import pipeline as transformers_pipeline, TrainingArguments, Trainer
 from audiocraft.models import MusicGen
 import gradio as gr
-from huggingface_hub import snapshot_download, HfApi, HfFolder
+from huggingface_hub import HfFolder
 import multiprocessing
 import io
 import time
@@ -27,14 +27,12 @@ def connect_to_redis():
             redis_client.ping()
             return redis_client
         except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError, BrokenPipeError) as e:
-            print(f"Connection to Redis failed: {e}. Retrying in 1 second...")
             time.sleep(1)
 
 def reconnect_if_needed(redis_client):
     try:
         redis_client.ping()
     except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError, BrokenPipeError):
-        print("Reconnecting to Redis...")
         return connect_to_redis()
     return redis_client
 
@@ -45,7 +43,6 @@ def load_object_from_redis(key):
         obj_data = redis_client.get(key)
         return pickle.loads(obj_data) if obj_data else None
     except (pickle.PickleError, redis.exceptions.RedisError) as e:
-        print(f"Failed to load object from Redis: {e}")
         return None
 
 def save_object_to_redis(key, obj):
@@ -64,7 +61,6 @@ def get_model_or_download(model_id, redis_key, loader_func):
         model = loader_func(model_id, torch_dtype=torch.float16)
         save_object_to_redis(redis_key, model)
     except Exception as e:
-        print(f"Failed to load or save model: {e}")
         return None
 
 def generate_image(prompt):
@@ -75,7 +71,6 @@ def generate_image(prompt):
             image = text_to_image_pipeline(prompt).images[0]
             save_object_to_redis(redis_key, image)
         except Exception as e:
-            print(f"Failed to generate image: {e}")
             return None
     return image
 
@@ -87,7 +82,6 @@ def edit_image_with_prompt(image, prompt, strength=0.75):
             edited_image = img2img_pipeline(prompt=prompt, init_image=image.convert("RGB"), strength=strength).images[0]
             save_object_to_redis(redis_key, edited_image)
         except Exception as e:
-            print(f"Failed to edit image: {e}")
             return None
     return edited_image
 
@@ -99,7 +93,6 @@ def generate_song(prompt, duration=10):
             song = music_gen.generate(prompt, duration=duration)
             save_object_to_redis(redis_key, song)
         except Exception as e:
-            print(f"Failed to generate song: {e}")
             return None
     return song
 
@@ -108,10 +101,10 @@ def generate_text(prompt):
     text = load_object_from_redis(redis_key)
     if not text:
         try:
+            # Replace "bigcode/starcoder" with another text generation model
             text = text_gen_pipeline([{"role": "user", "content": prompt}], max_new_tokens=256)[0]["generated_text"].strip()
             save_object_to_redis(redis_key, text)
         except Exception as e:
-            print(f"Failed to generate text: {e}")
             return None
     return text
 
@@ -129,7 +122,6 @@ def generate_flux_image(prompt):
             ).images[0]
             save_object_to_redis(redis_key, flux_image)
         except Exception as e:
-            print(f"Failed to generate flux image: {e}")
             return None
     return flux_image
 
@@ -143,7 +135,6 @@ def generate_code(prompt):
             code = starcoder_tokenizer.decode(outputs[0])
             save_object_to_redis(redis_key, code)
         except Exception as e:
-            print(f"Failed to generate code: {e}")
             return None
     return code
 
@@ -158,7 +149,6 @@ def generate_video(prompt):
             video = export_to_video(pipe(prompt, num_inference_steps=25).frames)
             save_object_to_redis(redis_key, video)
         except Exception as e:
-            print(f"Failed to generate video: {e}")
             return None
     return video
 
@@ -174,7 +164,6 @@ def test_model_meta_llama():
             response = meta_llama_pipeline(messages, max_new_tokens=256)[0]["generated_text"].strip()
             save_object_to_redis(redis_key, response)
         except Exception as e:
-            print(f"Failed to test Meta-Llama: {e}")
             return None
     return response
 
@@ -219,7 +208,7 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 text_to_image_pipeline = get_model_or_download("stabilityai/stable-diffusion-2", "text_to_image_model", StableDiffusionPipeline.from_pretrained)
 img2img_pipeline = get_model_or_download("CompVis/stable-diffusion-v1-4", "img2img_model", StableDiffusionImg2ImgPipeline.from_pretrained)
 flux_pipeline = get_model_or_download("black-forest-labs/FLUX.1-schnell", "flux_model", FluxPipeline.from_pretrained)
-text_gen_pipeline = transformers_pipeline("text-generation", model="bigcode/starcoder", tokenizer="bigcode/starcoder", device=0)
+text_gen_pipeline = transformers_pipeline("text-generation", model="google/flan-t5-xl", tokenizer="google/flan-t5-xl", device=device)
 music_gen = load_object_from_redis("music_gen") or MusicGen.from_pretrained('melody')
 meta_llama_pipeline = get_model_or_download("meta-llama/Meta-Llama-3.1-8B-Instruct", "meta_llama_model", transformers_pipeline)
 
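The helpers touched above all share one Redis-backed caching pattern: connect with retry, then pickle objects in and out of Redis under a fixed key. Below is a minimal standalone sketch of that pattern, not the app's exact implementation; the function names, host, port, and key are assumptions for illustration.

# Sketch of the caching pattern used throughout app.py (names, host, port, and key are assumed).
import pickle
import time

import redis

def connect_with_retry(host="localhost", port=6379):
    # Keep retrying until the Redis server answers a PING.
    while True:
        try:
            client = redis.Redis(host=host, port=port)
            client.ping()
            return client
        except (redis.exceptions.ConnectionError, redis.exceptions.TimeoutError, BrokenPipeError):
            time.sleep(1)

def save_object(client, key, obj):
    # Pickling lets arbitrary Python objects (images, audio, strings) be cached.
    client.set(key, pickle.dumps(obj))

def load_object(client, key):
    data = client.get(key)
    return pickle.loads(data) if data else None

client = connect_with_retry()
save_object(client, "example_key", "hello from the cache")
print(load_object(client, "example_key"))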
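For reference, a minimal sketch of loading the swapped-in text pipeline on its own, outside the Redis cache. google/flan-t5-xl is a sequence-to-sequence model, so the matching transformers task is "text2text-generation", and it takes a plain string prompt rather than the chat-style message list used for the Llama pipeline; the prompt below is only an example.

# Standalone sketch: instantiating a flan-t5 text pipeline (example prompt is assumed).
import torch
from transformers import pipeline as transformers_pipeline

# pipeline() also accepts a device index: 0 for the first GPU, -1 for CPU.
device_index = 0 if torch.cuda.is_available() else -1

text_gen = transformers_pipeline(
    "text2text-generation",  # flan-t5 is seq2seq, not a causal LM
    model="google/flan-t5-xl",
    tokenizer="google/flan-t5-xl",
    device=device_index,
)

result = text_gen("Summarize: Redis keeps generated outputs cached between requests.", max_new_tokens=256)
print(result[0]["generated_text"].strip())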