multimodalart committed
Commit 31061ea · Parent: f611983

Update script.py

Files changed (1)
  1. script.py +41 -34
script.py CHANGED
@@ -3,6 +3,7 @@ import subprocess
 from safetensors.torch import load_file
 from diffusers import AutoPipelineForText2Image
 from datasets import load_dataset
+from huggingface_hub.repocard import RepoCard
 import torch
 import re
 import argparse
@@ -22,46 +23,52 @@ def do_train(script_args):
     subprocess.run(['python', 'trainer.py'] + script_args)
 
 def do_inference(dataset_name, output_dir, num_tokens):
-    print("Starting inference to generate example images...")
-    dataset = load_dataset(dataset_name)
-    pipe = AutoPipelineForText2Image.from_pretrained(
-        "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
-    )
-    pipe = pipe.to("cuda")
-    pipe.load_lora_weights(f'{output_dir}/pytorch_lora_weights.safetensors')
-
-    prompts = dataset["train"]["prompt"]
-    card_string = ''
-    if(num_tokens > 0):
-        tokens_sequence = ''.join(f'<s{i}>' for i in range(num_tokens))
-        tokens_list = [f'<s{i}>' for i in range(num_tokens)]
-
-        state_dict = load_file(f"{output_dir}/embeddings.safetensors")
-        pipe.load_textual_inversion(state_dict["clip_l"], token=tokens_list, text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
-        pipe.load_textual_inversion(state_dict["clip_g"], token=tokens_list, text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
-
-        prompts = [prompt.replace("TOK", tokens_sequence) for prompt in prompts]
-
-    for i, prompt in enumerate(prompts):
-        image = pipe(prompt, num_inference_steps=25, guidance_scale=7.5).images[0]
-        filename = f"image-{i}.png"
-        image.save(f"{output_dir}/filename")
-        card_string += f"""
- - text: '{prompt}'
-   output:
-     url:
-       '{filename}'"""
-    with open(f'{output_dir}/README.md', 'r') as file:
-        readme_content = file.read()
-
-    updated_readme_content = re.sub(r'(widget:\n)(.*?)(?=\n\S+:)', f'\\1{card_string}', readme_content, flags=re.DOTALL)
-    print("Images generated!")
-    with open('README.md', 'w') as file:
-        file.write(updated_readme_content)
+    try:
+        print("Starting inference to generate example images...")
+        dataset = load_dataset(dataset_name)
+        pipe = AutoPipelineForText2Image.from_pretrained(
+            "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+        )
+        pipe = pipe.to("cuda")
+        pipe.load_lora_weights(f'{output_dir}/pytorch_lora_weights.safetensors')
+
+        prompts = dataset["train"]["prompt"]
+        widget_content = []
+        if(num_tokens > 0):
+            tokens_sequence = ''.join(f'<s{i}>' for i in range(num_tokens))
+            tokens_list = [f'<s{i}>' for i in range(num_tokens)]
+
+            state_dict = load_file(f"{output_dir}/embeddings.safetensors")
+            pipe.load_textual_inversion(state_dict["clip_l"], token=tokens_list, text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
+            pipe.load_textual_inversion(state_dict["clip_g"], token=tokens_list, text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
+
+            prompts = [prompt.replace("TOK", tokens_sequence) for prompt in prompts]
+
+        for i, prompt in enumerate(prompts):
+            image = pipe(prompt, num_inference_steps=25, guidance_scale=7.5).images[0]
+            filename = f"image-{i}.png"
+            image.save(f"{output_dir}/{filename}")
+            card_dict = {
+                "text": prompt,
+                "output": {
+                    "url": filename
+                }
+            }
+            widget_content.append(card_dict)
+        with open(f'{output_dir}/README.md', 'r') as file:
+            readme_content = file.read()
+
+        card = RepoCard(readme_content)
+        card.data["widget"] = widget_content
+        card.save(f'{output_dir}/README.md')
+    except Exception as e:
+        print("Something went wrong with generating images, specifically: ", e)
+
 from huggingface_hub import HfApi
 api = HfApi()
 username = api.whoami()["name"]
 print("Starting upload...")
+api.create_repo(f"{username}/{output_dir}", exist_ok=True)
 api.upload_folder(
     folder_path=output_dir,
     repo_id=f"{username}/{output_dir}",
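
The main change swaps the regex edit of the README's widget: block (re.sub over raw text) for huggingface_hub's RepoCard helper, which parses the YAML front matter and rewrites it structurally. It also fixes the saved-image path (the old f"{output_dir}/filename" wrote every image to a literal file named "filename"), saves the updated card back into output_dir instead of the working directory, and wraps inference in a try/except so a failed preview generation no longer aborts the upload. A minimal sketch of the RepoCard pattern the new code relies on (the path and widget entry below are illustrative placeholders, not from the commit):

from huggingface_hub.repocard import RepoCard

# Read the model card written by the trainer (path is a placeholder).
with open("my-lora-output/README.md", "r") as f:
    card = RepoCard(f.read())

# card.data holds the parsed YAML front matter; assigning "widget" replaces
# the whole block, instead of patching the raw text with a regex.
card.data["widget"] = [
    {"text": "a photo of <s0><s1>", "output": {"url": "image-0.png"}},
]
card.save("my-lora-output/README.md")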
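
The added api.create_repo(..., exist_ok=True) call accounts for upload_folder expecting the target repo to already exist, and exist_ok=True keeps the call safe to repeat on later runs. A sketch of that upload flow (repo and folder names are placeholders):

from huggingface_hub import HfApi

api = HfApi()
username = api.whoami()["name"]

# Idempotent: creates the model repo on the first run, no-op afterwards.
api.create_repo(f"{username}/my-lora-output", exist_ok=True)

# Push every file in the output folder (weights, embeddings, README, images).
api.upload_folder(folder_path="my-lora-output", repo_id=f"{username}/my-lora-output")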