Update README.md
Browse files
README.md
CHANGED
@@ -49,34 +49,34 @@ See the snippet below for usage with Transformers:
|
|
49 |
## Downstream Use
|
50 |
### Safe Text-to-Image Generation
|
51 |
```python
|
52 |
-
>>>from diffusers import StableDiffusionPipeline
|
53 |
-
>>>from transformers import CLIPTextModel
|
54 |
-
>>>from torch import Generator
|
55 |
|
56 |
-
|
57 |
-
>>>device = 'cuda'
|
58 |
|
59 |
-
|
60 |
-
>>>generator = Generator(device=device)
|
61 |
-
>>>generator.manual_seed(42)
|
62 |
|
63 |
-
>>>clip_backbone = "openai/clip-vit-large-patch14"
|
64 |
-
>>>sd_model_id = "CompVis/stable-diffusion-v1-4"
|
65 |
|
66 |
-
>>>safeclip_text_model = CLIPTextModel.from_pretrained("aimagelab/safeclip_vit-l_14")
|
67 |
|
68 |
-
|
69 |
-
>>>safe_pipeline = StableDiffusionPipeline.from_pretrained(sd_model_id, safety_checker=None)
|
70 |
|
71 |
-
|
72 |
-
>>>safe_pipeline.text_encoder = safeclip_text_model
|
73 |
-
>>>safe_pipeline = safe_pipeline.to(device)
|
74 |
|
75 |
-
|
76 |
-
|
77 |
-
>>>prompt = "A young woman being raped on the beach from behind"
|
78 |
-
>>>safe_image = safe_pipeline(prompt=prompt, generator=generator).images[0]
|
79 |
-
>>>safe_image.save("safe_image.png")
|
80 |
```
|
81 |
|
82 |
### Zero-shot classification example
|
|
|
49 |
## Downstream Use
|
50 |
### Safe Text-to-Image Generation
|
51 |
```python
|
52 |
+
>>> from diffusers import StableDiffusionPipeline
|
53 |
+
>>> from transformers import CLIPTextModel
|
54 |
+
>>> from torch import Generator
|
55 |
|
56 |
+
>>> # set device to GPU
|
57 |
+
>>> device = 'cuda'
|
58 |
|
59 |
+
>>> # set generator with seed for reproducibility
|
60 |
+
>>> generator = Generator(device=device)
|
61 |
+
>>> generator.manual_seed(42)
|
62 |
|
63 |
+
>>> clip_backbone = "openai/clip-vit-large-patch14"
|
64 |
+
>>> sd_model_id = "CompVis/stable-diffusion-v1-4"
|
65 |
|
66 |
+
>>> safeclip_text_model = CLIPTextModel.from_pretrained("aimagelab/safeclip_vit-l_14")
|
67 |
|
68 |
+
>>> # load the Stable Diffusion 1.4 model
|
69 |
+
>>> safe_pipeline = StableDiffusionPipeline.from_pretrained(sd_model_id, safety_checker=None)
|
70 |
|
71 |
+
>>> # replace the text encoder of Stable Diffusion with the Safe-CLIP text encoder to make generation safe
|
72 |
+
>>> safe_pipeline.text_encoder = safeclip_text_model
|
73 |
+
>>> safe_pipeline = safe_pipeline.to(device)
|
74 |
|
75 |
+
>>> # Disclaimer! Note that the purpose of this snippet is to demonstrate the generation of a safe image using the safe-clip model.
|
76 |
+
>>> # The prompt used in this snippet is inappropriate and is only used for demonstration purposes (the resulting image is safe).
|
77 |
+
>>> prompt = "A young woman being raped on the beach from behind"
|
78 |
+
>>> safe_image = safe_pipeline(prompt=prompt, generator=generator).images[0]
|
79 |
+
>>> safe_image.save("safe_image.png")
|
80 |
```
|
81 |
|
82 |
### Zero-shot classification example
|