saddam213 committed on
Commit
dc04be6
·
verified ·
1 Parent(s): db5a52c

Upload 25 files

Browse files
.gitattributes CHANGED
@@ -33,3 +33,10 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
 
 
 
 
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ Sample.png filter=lfs diff=lfs merge=lfs -text
37
+ Sample2.png filter=lfs diff=lfs merge=lfs -text
38
+ Sample3.png filter=lfs diff=lfs merge=lfs -text
39
+ Sample4.png filter=lfs diff=lfs merge=lfs -text
40
+ text_encoder_2/model.onnx.data filter=lfs diff=lfs merge=lfs -text
41
+ text_encoder_3/model.onnx.data filter=lfs diff=lfs merge=lfs -text
42
+ transformer/model.onnx.data filter=lfs diff=lfs merge=lfs -text
Icon.png ADDED
README.md ADDED
@@ -0,0 +1,7 @@
 
 
 
 
 
 
 
 
1
+ # Stable-Diffusion 3.5 Medium - Onnx DirectML Optimized
2
+
3
+ ## Original Model
4
+ https://huggingface.co/stabilityai/stable-diffusion-3.5-medium
5
+
6
+ ## Amuse
7
+ https://www.amuse-ai.com/
Sample.png ADDED

Git LFS Details

  • SHA256: 23bedb86c25e1b04909c3265e18d5d97a310e67e5beb391fd225b6e2bbfc55ed
  • Pointer size: 132 Bytes
  • Size of remote file: 1.59 MB
Sample2.png ADDED

Git LFS Details

  • SHA256: 2e6df379824f70ea1928187507fa3048f2b7cfb5475519a8562c62030822b9f0
  • Pointer size: 132 Bytes
  • Size of remote file: 1.77 MB
Sample3.png ADDED

Git LFS Details

  • SHA256: 14546a0c7c3e61433b52da4d88c7f4ac401f6fe3ece55b9ebb978fed988ae05a
  • Pointer size: 132 Bytes
  • Size of remote file: 1.66 MB
Sample4.png ADDED

Git LFS Details

  • SHA256: a743572317984a8de4e125168a35537e260a31e9f1230da797df3638a58d5421
  • Pointer size: 132 Bytes
  • Size of remote file: 1.45 MB
amuse_template.json ADDED
@@ -0,0 +1,77 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "Id": "5FAF73BB-6A73-4448-A4A3-64A1F5643929",
3
+ "FileVersion": "1",
4
+ "Created": "2025-03-06T00:00:00",
5
+ "IsProtected": false,
6
+ "Name": "StableDiffusion 3.5 Medium",
7
+ "ImageIcon": "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/Icon.png",
8
+ "Author": "Stability AI",
9
+ "Description": "Stable Diffusion 3.5 Medium is a Multimodal Diffusion Transformer with improvements (MMDiT-X) text-to-image model that features improved performance in image quality, typography, complex prompt understanding, and resource-efficiency.",
10
+ "Rank": 192,
11
+ "Group": "Online",
12
+ "Template": "SD3",
13
+ "Category": "StableDiffusion",
14
+ "StableDiffusionTemplate": {
15
+ "PipelineType": "StableDiffusion3",
16
+ "ModelType": "Base",
17
+ "SampleSize": 1024,
18
+ "TokenizerLength": 768,
19
+ "Tokenizer2Limit": 512,
20
+ "DiffuserTypes": [
21
+ "TextToImage"
22
+ ],
23
+ "SchedulerDefaults": {
24
+ "SchedulerType": "FlowMatchEulerDiscrete",
25
+ "Steps": 40,
26
+ "StepsMin": 4,
27
+ "StepsMax": 100,
28
+ "Guidance": 4.5,
29
+ "GuidanceMin": 1,
30
+ "GuidanceMax": 30,
31
+ "TimestepSpacing": "Linspace",
32
+ "BetaSchedule": "ScaledLinear",
33
+ "BetaStart": 0.00085,
34
+ "BetaEnd": 0.012
35
+ }
36
+ },
37
+ "MemoryMin": 12,
38
+ "MemoryMax": 23,
39
+ "DownloadSize": 26,
40
+ "Website": "https://stability.ai",
41
+ "Licence": "https://stability.ai/community-license-agreement",
42
+ "LicenceType": "NonCommercial",
43
+ "IsLicenceAccepted": false,
44
+ "Repository": "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse",
45
+ "RepositoryOwner": "TensorStack",
46
+ "RepositoryFiles": [
47
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/text_encoder/model.onnx",
48
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/text_encoder_2/model.onnx",
49
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/text_encoder_2/model.onnx.data",
50
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/text_encoder_3/model.onnx",
51
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/text_encoder_3/model.onnx.data",
52
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer/merges.txt",
53
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer/special_tokens_map.json",
54
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer/vocab.json",
55
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer_2/merges.txt",
56
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer_2/special_tokens_map.json",
57
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer_2/vocab.json",
58
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer_3/special_tokens_map.json",
59
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer_3/spiece.model",
60
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/tokenizer_3/tokenizer.json",
61
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/transformer/model.onnx",
62
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/transformer/model.onnx.data",
63
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/vae_decoder/model.onnx",
64
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/vae_encoder/model.onnx",
65
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/amuse_template.json",
66
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/README.md"
67
+ ],
68
+ "PreviewImages": [
69
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/Sample.png",
70
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/Sample2.png",
71
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/Sample3.png",
72
+ "https://huggingface.co/TensorStack/StableDiffusion3.5-Medium-amuse/resolve/main/Sample4.png"
73
+ ],
74
+ "Tags": [
75
+
76
+ ]
77
+ }
text_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d08ac3785c984816486db824e3620804648e50536771a5c5f21945c6aaa21285
3
+ size 246372136
text_encoder_2/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:090151f65270666d75ff385d1d4d13ac27cdf9f93f4d7aefcfcdc9fa443420eb
3
+ size 689163
text_encoder_2/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:81330f8daecb663019d641658449d3d76205d90c2207356b9cccf1a427214c12
3
+ size 1389319680
text_encoder_3/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:949045317fac999578dee8f394b730b3f42b24117d71513c0176c0445198b467
3
+ size 482435
text_encoder_3/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:4c2aa6ff9e17e6b689cdce94bb2f0cce6d5fec2e3482db14d13d48a0e6d0385f
3
+ size 9524621312
tokenizer/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "<|endoftext|>",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<|endoftext|>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/merges.txt ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_2/special_tokens_map.json ADDED
@@ -0,0 +1,30 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "bos_token": {
3
+ "content": "<|startoftext|>",
4
+ "lstrip": false,
5
+ "normalized": true,
6
+ "rstrip": false,
7
+ "single_word": false
8
+ },
9
+ "eos_token": {
10
+ "content": "<|endoftext|>",
11
+ "lstrip": false,
12
+ "normalized": false,
13
+ "rstrip": false,
14
+ "single_word": false
15
+ },
16
+ "pad_token": {
17
+ "content": "!",
18
+ "lstrip": false,
19
+ "normalized": false,
20
+ "rstrip": false,
21
+ "single_word": false
22
+ },
23
+ "unk_token": {
24
+ "content": "<|endoftext|>",
25
+ "lstrip": false,
26
+ "normalized": false,
27
+ "rstrip": false,
28
+ "single_word": false
29
+ }
30
+ }
tokenizer_2/vocab.json ADDED
The diff for this file is too large to render. See raw diff
 
tokenizer_3/special_tokens_map.json ADDED
@@ -0,0 +1,125 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "additional_special_tokens": [
3
+ "<extra_id_0>",
4
+ "<extra_id_1>",
5
+ "<extra_id_2>",
6
+ "<extra_id_3>",
7
+ "<extra_id_4>",
8
+ "<extra_id_5>",
9
+ "<extra_id_6>",
10
+ "<extra_id_7>",
11
+ "<extra_id_8>",
12
+ "<extra_id_9>",
13
+ "<extra_id_10>",
14
+ "<extra_id_11>",
15
+ "<extra_id_12>",
16
+ "<extra_id_13>",
17
+ "<extra_id_14>",
18
+ "<extra_id_15>",
19
+ "<extra_id_16>",
20
+ "<extra_id_17>",
21
+ "<extra_id_18>",
22
+ "<extra_id_19>",
23
+ "<extra_id_20>",
24
+ "<extra_id_21>",
25
+ "<extra_id_22>",
26
+ "<extra_id_23>",
27
+ "<extra_id_24>",
28
+ "<extra_id_25>",
29
+ "<extra_id_26>",
30
+ "<extra_id_27>",
31
+ "<extra_id_28>",
32
+ "<extra_id_29>",
33
+ "<extra_id_30>",
34
+ "<extra_id_31>",
35
+ "<extra_id_32>",
36
+ "<extra_id_33>",
37
+ "<extra_id_34>",
38
+ "<extra_id_35>",
39
+ "<extra_id_36>",
40
+ "<extra_id_37>",
41
+ "<extra_id_38>",
42
+ "<extra_id_39>",
43
+ "<extra_id_40>",
44
+ "<extra_id_41>",
45
+ "<extra_id_42>",
46
+ "<extra_id_43>",
47
+ "<extra_id_44>",
48
+ "<extra_id_45>",
49
+ "<extra_id_46>",
50
+ "<extra_id_47>",
51
+ "<extra_id_48>",
52
+ "<extra_id_49>",
53
+ "<extra_id_50>",
54
+ "<extra_id_51>",
55
+ "<extra_id_52>",
56
+ "<extra_id_53>",
57
+ "<extra_id_54>",
58
+ "<extra_id_55>",
59
+ "<extra_id_56>",
60
+ "<extra_id_57>",
61
+ "<extra_id_58>",
62
+ "<extra_id_59>",
63
+ "<extra_id_60>",
64
+ "<extra_id_61>",
65
+ "<extra_id_62>",
66
+ "<extra_id_63>",
67
+ "<extra_id_64>",
68
+ "<extra_id_65>",
69
+ "<extra_id_66>",
70
+ "<extra_id_67>",
71
+ "<extra_id_68>",
72
+ "<extra_id_69>",
73
+ "<extra_id_70>",
74
+ "<extra_id_71>",
75
+ "<extra_id_72>",
76
+ "<extra_id_73>",
77
+ "<extra_id_74>",
78
+ "<extra_id_75>",
79
+ "<extra_id_76>",
80
+ "<extra_id_77>",
81
+ "<extra_id_78>",
82
+ "<extra_id_79>",
83
+ "<extra_id_80>",
84
+ "<extra_id_81>",
85
+ "<extra_id_82>",
86
+ "<extra_id_83>",
87
+ "<extra_id_84>",
88
+ "<extra_id_85>",
89
+ "<extra_id_86>",
90
+ "<extra_id_87>",
91
+ "<extra_id_88>",
92
+ "<extra_id_89>",
93
+ "<extra_id_90>",
94
+ "<extra_id_91>",
95
+ "<extra_id_92>",
96
+ "<extra_id_93>",
97
+ "<extra_id_94>",
98
+ "<extra_id_95>",
99
+ "<extra_id_96>",
100
+ "<extra_id_97>",
101
+ "<extra_id_98>",
102
+ "<extra_id_99>"
103
+ ],
104
+ "eos_token": {
105
+ "content": "</s>",
106
+ "lstrip": false,
107
+ "normalized": false,
108
+ "rstrip": false,
109
+ "single_word": false
110
+ },
111
+ "pad_token": {
112
+ "content": "<pad>",
113
+ "lstrip": false,
114
+ "normalized": false,
115
+ "rstrip": false,
116
+ "single_word": false
117
+ },
118
+ "unk_token": {
119
+ "content": "<unk>",
120
+ "lstrip": false,
121
+ "normalized": false,
122
+ "rstrip": false,
123
+ "single_word": false
124
+ }
125
+ }
tokenizer_3/spiece.model ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:d60acb128cf7b7f2536e8f38a5b18a05535c9e14c7a355904270e15b0945ea86
3
+ size 791656
tokenizer_3/tokenizer.json ADDED
The diff for this file is too large to render. See raw diff
 
transformer/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:93eee29c5f8e1a1d8da970ee1c60c6b2484a656221438b5bc1315c690cb04410
3
+ size 2376181
transformer/model.onnx.data ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:becebd9ca44f2fb5bde9d35af5d7bdfbc41e2399874bf381851389770dfc7828
3
+ size 4939312128
vae_decoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1817434f01561b6cdaaa7c22d209844a4e817d7e99303ba4f9447ee20bba008c
3
+ size 99205370
vae_encoder/model.onnx ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:f71127d469eadd48f90aae5f0d83c48582f6aaf98eb6c5895e5573f918fbfe87
3
+ size 68652778