Thomas G. Lopes committed on
Commit aa2789c · unverified · 2 Parent(s): 6f0b9b9 36ed2d0

Provider selector (#60)

Files changed (34)
  1. package.json +3 -0
  2. pnpm-lock.yaml +94 -0
  3. src/lib/components/Avatar.svelte +12 -4
  4. src/lib/components/Icons/IconExternal.svelte +7 -0
  5. src/lib/components/Icons/IconProvider.svelte +40 -0
  6. src/lib/components/InferencePlayground/InferencePlayground.svelte +66 -140
  7. src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte +163 -447
  8. src/lib/components/InferencePlayground/InferencePlaygroundConversation.svelte +2 -3
  9. src/lib/components/InferencePlayground/InferencePlaygroundConversationHeader.svelte +19 -24
  10. src/lib/components/InferencePlayground/InferencePlaygroundGenerationConfig.svelte +2 -60
  11. src/lib/components/InferencePlayground/InferencePlaygroundMessage.svelte +2 -2
  12. src/lib/components/InferencePlayground/InferencePlaygroundModelSelector.svelte +25 -29
  13. src/lib/components/InferencePlayground/InferencePlaygroundModelSelectorModal.svelte +18 -26
  14. src/lib/components/InferencePlayground/InferencePlaygroundProviderSelect.svelte +102 -0
  15. src/lib/components/InferencePlayground/inferencePlaygroundUtils.ts +107 -16
  16. src/lib/components/InferencePlayground/types.ts +0 -26
  17. src/lib/index.ts +0 -1
  18. src/lib/stores/models.ts +8 -0
  19. src/lib/stores/session.ts +103 -0
  20. src/lib/stores/token.ts +34 -0
  21. src/lib/types.ts +158 -0
  22. src/lib/utils/array.ts +7 -0
  23. src/lib/utils/cn.ts +6 -0
  24. src/lib/utils/effect.ts +51 -0
  25. src/lib/utils/lifecycle.ts +17 -0
  26. src/lib/utils/model.ts +5 -0
  27. src/lib/utils/noop.ts +6 -0
  28. src/lib/utils/object.ts +14 -0
  29. src/lib/utils/platform.ts +3 -0
  30. src/lib/utils/search.ts +64 -0
  31. src/lib/utils/store.ts +9 -0
  32. src/routes/+page.server.ts +6 -7
  33. src/routes/+page.svelte +1 -2
  34. tsconfig.json +2 -1
package.json CHANGED
@@ -20,6 +20,7 @@
 		"@tailwindcss/postcss": "^4.0.9",
 		"@typescript-eslint/eslint-plugin": "^6.21.0",
 		"@typescript-eslint/parser": "^6.21.0",
+		"clsx": "^2.1.1",
 		"eslint": "^8.57.1",
 		"eslint-config-prettier": "^8.10.0",
 		"eslint-plugin-svelte": "^2.44.0",
@@ -30,6 +31,7 @@
 		"prettier-plugin-tailwindcss": "^0.6.11",
 		"svelte": "^4.2.7",
 		"svelte-check": "^3.6.0",
+		"tailwind-merge": "^3.0.2",
 		"tailwindcss": "^4.0.9",
 		"tslib": "^2.4.1",
 		"typescript": "^5.6.2",
@@ -40,6 +42,7 @@
 		"@huggingface/hub": "^1.0.1",
 		"@huggingface/inference": "^3.5.1",
 		"@huggingface/tasks": "^0.17.1",
+		"@melt-ui/svelte": "^0.86.3",
 		"@tailwindcss/container-queries": "^0.1.1"
 	}
 }
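
The clsx and tailwind-merge pair added above is the usual backing for a single class-combining helper, and this changeset does add src/lib/utils/cn.ts (+6 lines, body not shown in this view). A plausible sketch, assuming the conventional pattern:

    // Hypothetical reconstruction of src/lib/utils/cn.ts; the actual file body is not rendered here.
    // clsx collapses conditional class values, twMerge resolves conflicting Tailwind utilities
    // (for example "p-2 p-4" becomes "p-4").
    import { clsx, type ClassValue } from "clsx";
    import { twMerge } from "tailwind-merge";

    export function cn(...inputs: ClassValue[]) {
    	return twMerge(clsx(inputs));
    }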
pnpm-lock.yaml CHANGED
@@ -17,6 +17,9 @@ importers:
       '@huggingface/tasks':
         specifier: ^0.17.1
         version: 0.17.1
+      '@melt-ui/svelte':
+        specifier: ^0.86.3
+        version: 0.86.3(svelte@4.2.19)
       '@tailwindcss/container-queries':
         specifier: ^0.1.1
         version: 0.1.1(tailwindcss@4.0.9)
@@ -42,6 +45,9 @@ importers:
       '@typescript-eslint/parser':
         specifier: ^6.21.0
         version: 6.21.0(eslint@8.57.1)(typescript@…)
+      clsx:
+        specifier: ^2.1.1
+        version: 2.1.1
       eslint:
         specifier: ^8.57.1
         version: 8.57.1
@@ -72,6 +78,9 @@ importers:
       svelte-check:
         specifier: ^3.6.0
+      tailwind-merge:
+        specifier: ^3.0.2
+        version: 3.0.2
       tailwindcss:
         specifier: ^4.0.9
         version: 4.0.9
@@ -251,6 +260,15 @@ packages:
     resolution: {integrity: sha512-d9zaMRSTIKDLhctzH12MtXvJKSSUhaHcjV+2Z+GK+EEY7XKpP5yR4x+N3TAcHTcu963nIr+TMcCb4DBCYX1z6Q==}
     engines: {node: ^12.22.0 || ^14.17.0 || >=16.0.0}
 
+  '@floating-ui/core@1.6.9':
+    resolution: {integrity: sha512-uMXCuQ3BItDUbAMhIXw7UPXRfAlOAvZzdK9BWpE60MCn+Svt3aLn9jsPTi/WNGlRUu2uI0v5S7JiIUsbsvh3fw==}
+
+  '@floating-ui/dom@1.6.13':
+    resolution: {integrity: sha512-umqzocjDgNRGTuO7Q8CU32dkHkECqI8ZdMZ5Swb6QAM0t5rnlrN3lGo1hdpscRd3WS8T6DKYK4ephgIH9iRh3w==}
+
+  '@floating-ui/utils@0.2.9':
+    resolution: {integrity: sha512-MDWhGtE+eHw5JW7lq4qhc5yRLS11ERl1c7Z6Xd0a58DozHES6EnNNwUWbMiG4J9Cgj053Bhk8zvlhFYKVhULwg==}
+
   '@huggingface/hub@1.0.1':
     resolution: {integrity: sha512-wogGVETaNUV/wYBkny0uQD48L0rK9cttVtbaA1Rw/pGCuSYoZ8YlvTV6zymsGJfXaxQU8zup0aOR2XLIf6HVfg==}
     engines: {node: '>=18'}
@@ -278,6 +296,9 @@ packages:
     resolution: {integrity: sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==}
     deprecated: Use @eslint/object-schema instead
 
+  '@internationalized/date@3.7.0':
+    resolution: {integrity: sha512-VJ5WS3fcVx0bejE/YHfbDKR/yawZgKqn/if+oEeLqNwBtPzVB06olkfcnojTmEMX+gTpH+FlQ69SHNitJ8/erQ==}
+
   '@jridgewell/gen-mapping@0.3.8':
     resolution: {integrity: sha512-imAbBGkb+ebQyxKgzv5Hu2nmROxoDOXHh80evxdoXNOrvAnVx7zimzc1Oo5h9RlfV4vPXaE2iM5pOFbvOCClWA==}
     engines: {node: '>=6.0.0'}
@@ -296,6 +317,11 @@ packages:
   '@jridgewell/trace-mapping@0.3.25':
     resolution: {integrity: sha512-vNk6aEwybGtawWmy/PzwnGDOjCkLWSD2wqvjGGAgOAwCGWySYXfYoxt00IJkTF+8Lb57DwOb3Aa0o9CApepiYQ==}
 
+  '@melt-ui/svelte@0.86.3':
+    resolution: {integrity: sha512-ZsWmHGd6P636mws1CgatlX7JtLkWoUBPXeNzPzvHYgZdagp8io8MPFotDIfRyKwTEQFUqF9fhBks6CWr0Nupuw==}
+    peerDependencies:
+      svelte: ^3.0.0 || ^4.0.0 || ^5.0.0-next.118
+
   '@nodelib/fs.scandir@2.1.5':
     resolution: {integrity: sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==}
     engines: {node: '>= 8'}
@@ -476,6 +502,9 @@ packages:
       svelte: ^4.0.0 || ^5.0.0-next.0
       vite: ^5.0.0
 
+  '@swc/helpers@0.5.15':
+    resolution: {integrity: sha512-JQ5TuMi45Owi4/BIMAJBoSQoOJu12oOk/gADqlcUL9JEdHB8vyjUSsxqeNXnmXHjYKMi2WcYtezGEEhqUI/E2g==}
+
   '@tailwindcss/container-queries@0.1.1':
     resolution: {integrity: sha512-p18dswChx6WnTSaJCSGx6lTmrGzNNvm2FtXmiO6AuA1V4U5REyoqwmT6kgAsIMdjo07QdAfYXHJ4hnMtfHzWgA==}
     peerDependencies:
@@ -709,6 +738,10 @@ packages:
     resolution: {integrity: sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==}
     engines: {node: '>= 8.10.0'}
 
+  clsx@2.1.1:
+    resolution: {integrity: sha512-eYm0QWBtUrBWZWG0d386OGAw16Z995PiOVo2B7bjWSbHedGl5e0ZWaq65kOGgUSNesEIDkB9ISbTg/JK9dhCZA==}
+    engines: {node: '>=6'}
+
     resolution: {integrity: sha512-7qJWqItLA8/VPVlKJlFXU+NBlo/qyfs39aJcuMT/2ere32ZqvF5OSxgdM5xOfJJ7O429gg2HM47y8v9P+9wrNw==}
@@ -758,6 +791,10 @@ packages:
     resolution: {integrity: sha512-3sUqbMEc77XqpdNO7FRyRog+eW3ph+GYCbj+rK+uYyRMuwsVy0rMiVtPn+QJlKFvWP/1PYpapqYn0Me2knFn+A==}
     engines: {node: '>=0.10.0'}
 
+  dequal@2.0.3:
+    resolution: {integrity: sha512-0je+qPKHEMohvfRTCEo3CrPG6cAzAYgmzKyxRiYSSDkS6eGJdyVJm7WaYA5ECaAD9wLB2T4EEeymA5aFVcYXCA==}
+    engines: {node: '>=6'}
+
     resolution: {integrity: sha512-reYkTUJAZb9gUuZ2RvVCNhVHdg62RHnJ7WJl8ftMi4diZ6NWlciOzQN88pUhSELEwflJht4oQDv0F0BMlwaYtA==}
     engines: {node: '>=8'}
@@ -902,6 +939,9 @@ packages:
     resolution: {integrity: sha512-GX+ysw4PBCz0PzosHDepZGANEuFCMLrnRTiEy9McGjmkCQYwRq4A/X786G/fjM/+OjsWSU1ZrY5qyARZmO/uwg==}
 
+  focus-trap@7.6.4:
+    resolution: {integrity: sha512-xx560wGBk7seZ6y933idtjJQc1l+ck+pI3sKvhKozdBV1dRZoKhkW5xoCaFv9tQiX5RH1xfSxjuNu6g+lmN/gw==}
+
     resolution: {integrity: sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==}
@@ -1170,6 +1210,11 @@ packages:
     engines: {node: ^10 || ^12 || ^13.7 || ^14 || >=15.0.1}
     hasBin: true
 
+  nanoid@5.1.2:
+    resolution: {integrity: sha512-b+CiXQCNMUGe0Ri64S9SXFcP9hogjAJ2Rd6GdVxhPLRm7mhGaM7VgOvCAJ1ZshfHbqVDI3uqTI5C8/GaKuLI7g==}
+    engines: {node: ^18 || >=20}
+    hasBin: true
+
     resolution: {integrity: sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==}
@@ -1506,6 +1551,12 @@ packages:
     resolution: {integrity: sha512-IY1rnGr6izd10B0A8LqsBfmlT5OILVuZ7XsI0vdGPEvuonFV7NYEUK4dAkm9Zg2q0Um92kYjTpS1CAP3Nh/KWw==}
     engines: {node: '>=16'}
 
+  tabbable@6.2.0:
+    resolution: {integrity: sha512-Cat63mxsVJlzYvN51JmVXIgNoUokrIaT2zLclCXjRd8boZ0004U4KCs/sToJ75C6sdlByWxpYnb5Boif1VSFew==}
+
+  tailwind-merge@3.0.2:
+    resolution: {integrity: sha512-l7z+OYZ7mu3DTqrL88RiKrKIqO3NcpEO8V/Od04bNpvk0kiIFndGEoqfuzvj4yuhRkHKjRkII2z+KS2HfPcSxw==}
+
     resolution: {integrity: sha512-12laZu+fv1ONDRoNR9ipTOpUD7RN9essRVkX36sjxuRUInpN7hIiHN4lBd/SIFjbISvnXzp8h/hXzmU8SQQYhw==}
@@ -1717,6 +1768,17 @@ snapshots:
 
   '@eslint/js@8.57.1': {}
 
+  '@floating-ui/core@1.6.9':
+    dependencies:
+      '@floating-ui/utils': 0.2.9
+
+  '@floating-ui/dom@1.6.13':
+    dependencies:
+      '@floating-ui/core': 1.6.9
+      '@floating-ui/utils': 0.2.9
+
+  '@floating-ui/utils@0.2.9': {}
+
   '@huggingface/hub@1.0.1':
     dependencies:
       '@huggingface/tasks': 0.15.9
@@ -1741,6 +1803,10 @@ snapshots:
 
   '@humanwhocodes/module-importer@1.0.1': {}
 
+  '@internationalized/date@3.7.0':
+    dependencies:
+      '@swc/helpers': 0.5.15
+
   '@jridgewell/gen-mapping@0.3.8':
     dependencies:
       '@jridgewell/set-array': 1.2.1
@@ -1758,6 +1824,16 @@ snapshots:
       '@jridgewell/resolve-uri': 3.1.2
       '@jridgewell/sourcemap-codec': 1.5.0
 
+  '@melt-ui/svelte@0.86.3(svelte@4.2.19)':
+    dependencies:
+      '@floating-ui/core': 1.6.9
+      '@floating-ui/dom': 1.6.13
+      '@internationalized/date': 3.7.0
+      dequal: 2.0.3
+      focus-trap: 7.6.4
+      nanoid: 5.1.2
+      svelte: 4.2.19
+
   '@nodelib/fs.scandir@2.1.5':
     dependencies:
       '@nodelib/fs.stat': 2.0.5
@@ -1918,6 +1994,10 @@ snapshots:
     transitivePeerDependencies:
       - supports-color
 
+  '@swc/helpers@0.5.15':
+    dependencies:
+      tslib: 2.8.1
+
     dependencies:
       tailwindcss: 4.0.9
@@ -2154,6 +2234,8 @@ snapshots:
     optionalDependencies:
       fsevents: 2.3.3
 
+  clsx@2.1.1: {}
+
     dependencies:
       '@jridgewell/sourcemap-codec': 1.5.0
@@ -2195,6 +2277,8 @@ snapshots:
 
+  dequal@2.0.3: {}
+
@@ -2391,6 +2475,10 @@ snapshots:
 
+  focus-trap@7.6.4:
+    dependencies:
+      tabbable: 6.2.0
+
@@ -2608,6 +2696,8 @@ snapshots:
 
+  nanoid@5.1.2: {}
+
@@ -2881,6 +2971,10 @@ snapshots:
       magic-string: 0.30.17
       periscopic: 3.1.0
 
+  tabbable@6.2.0: {}
+
+  tailwind-merge@3.0.2: {}
+
src/lib/components/Avatar.svelte CHANGED
@@ -1,11 +1,15 @@
 <script lang="ts">
-	export let orgName: string;
+	import { browser } from "$app/environment";
+
+	export let orgName: string | undefined;
 	export let size: "sm" | "md" = "md";
 
-	const sizeClass = size === "sm" ? "size-3" : "size-4";
+	$: sizeClass = size === "sm" ? "size-3" : "size-4";
 
-	async function getAvatarUrl(orgName: string) {
+	async function getAvatarUrl(orgName?: string) {
+		if (!orgName) return;
 		const url = `https://huggingface.co/api/organizations/${orgName}/avatar`;
+		if (!browser) return;
 		const res = await fetch(url);
 		if (!res.ok) {
 			console.error(`Error getting avatar url for org: ${orgName}`, res.status, res.statusText);
@@ -20,7 +24,11 @@
 {#await getAvatarUrl(orgName)}
 	<div class="{sizeClass} flex-none rounded-sm bg-gray-200"></div>
 {:then avatarUrl}
-	<img class="{sizeClass} flex-none rounded-sm bg-gray-200 object-cover" src={avatarUrl} alt="{orgName} avatar" />
+	{#if avatarUrl}
+		<img class="{sizeClass} flex-none rounded-sm bg-gray-200 object-cover" src={avatarUrl} alt="{orgName} avatar" />
+	{:else}
+		<div class="{sizeClass} flex-none rounded-sm bg-gray-200"></div>
+	{/if}
 {:catch}
 	<div class="{sizeClass} flex-none rounded-sm bg-gray-200"></div>
 {/await}
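
With orgName now optional and the fetch gated on browser, Avatar degrades to its gray placeholder during SSR, on fetch errors, and when no organization is known. A usage sketch (the call site and orgName expression are illustrative, not from this PR):

    <!-- Derives the org from a model id such as "meta-llama/Llama-3.3-70B-Instruct". -->
    <Avatar orgName={model.id.split("/")[0]} size="sm" />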
src/lib/components/Icons/IconExternal.svelte ADDED
@@ -0,0 +1,7 @@
+<script lang="ts">
+	export let classNames = "";
+</script>
+
+<svg class={classNames} xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 32 32"
+	><path fill="currentColor" d="M10 6v2h12.59L6 24.59L7.41 26L24 9.41V22h2V6H10z" /></svg
+>
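
This extracts the external-link arrow that was previously inlined in InferencePlayground.svelte (see that file's diff below, where it is rendered as a bare <IconExternal />). classNames is forwarded to the SVG so callers control sizing, for example:

    <IconExternal classNames="text-xs" />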
src/lib/components/Icons/IconProvider.svelte ADDED
@@ -0,0 +1,40 @@
+<script lang="ts">
+	export let provider: string | undefined;
+	const icons = {
+		"sambanova":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><path d="M23.9395 23H22.8288V9.38464C22.8288 6.5566 20.4901 4.11242 17.6039 4.11242H10.3219C7.43574 4.11242 5.10488 6.44692 5.10488 9.2828C5.10488 12.1108 7.43574 14.4454 10.3219 14.4454H11.0493C12.2695 14.4454 13.2707 15.4481 13.2707 16.6702C13.2707 17.8923 12.2695 18.8872 11.0493 18.8872H3.93945V17.7826H10.9946C11.6047 17.7826 12.1053 17.2812 12.1053 16.725C12.1053 16.114 11.6047 15.6674 10.9946 15.6674H10.2672C6.82565 15.6126 3.93945 12.7767 3.93945 9.2828C3.93945 5.78104 6.82565 3 10.3219 3H17.6587C21.1002 3 23.9395 5.94555 23.9395 9.38464V23Z" fill="#EE7624"></path><path d="M11.1041 13.6069C12.6606 13.6617 14.1624 15.0562 14.1624 16.717C14.1624 18.3856 12.7701 19.827 11.0493 19.827H3.93945V20.9394H10.9946C13.3255 20.9394 15.2652 19.0515 15.2652 16.717C15.2652 16.1137 15.1557 15.5575 14.882 15.0013C14.6551 14.5 14.2719 13.9986 13.8808 13.6069C13.435 13.223 12.9344 12.941 12.379 12.7217C11.7142 12.5023 11.0493 12.5571 10.3219 12.5023C9.93085 12.5023 9.047 12.2751 8.54641 11.9461C8.04583 11.6092 7.65474 11.1627 7.43574 10.6692C7.26366 10.2226 7.15416 9.7761 7.15416 9.27473C7.15416 7.55127 8.54641 6.16466 10.2672 6.16466H17.5961C19.3168 6.16466 20.7091 7.66878 20.7091 9.32957V22.9919H21.8198V9.3844C21.8198 7.05773 19.9348 5.05225 17.5961 5.05225H10.3219C7.99108 5.05225 6.0513 6.94022 6.0513 9.27473C6.0513 9.88577 6.16081 10.442 6.43456 10.9982C6.66139 11.4996 6.9899 12.0009 7.43574 12.3848C7.82682 12.7765 8.38216 13.0507 8.88275 13.2779C9.1565 13.3875 9.43808 13.4424 9.76659 13.4972C10.5488 13.552 11.0493 13.6069 11.1041 13.6069Z" fill="#EE7624"></path><path d="M10.9946 23H3.93945V21.8876H10.9946C13.8808 21.8876 16.2116 19.5531 16.2116 16.7172C16.2116 13.8892 13.8808 11.5546 10.9946 11.5546H10.2672C9.047 11.5546 8.04583 10.5519 8.04583 9.32981C8.04583 8.10772 9.047 7.10498 10.2672 7.10498H17.6039C18.8241 7.10498 19.8253 8.16256 19.8253 9.38465V22.9922H18.7146V9.38465C18.7146 8.78144 18.214 8.22523 17.6587 8.22523H10.3219C9.71184 8.22523 9.27383 8.7266 9.27383 9.27498C9.27383 9.83118 9.77442 10.3326 10.3845 10.3326H11.1041C14.6004 10.3326 17.4396 13.1606 17.4396 16.6075C17.3849 20.1641 14.4909 22.9922 10.9946 22.9922V23Z" fill="#EE7624"></path></svg>',
+		"fal":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><path fill-rule="evenodd" clip-rule="evenodd" d="M16.5899 2.37891C16.9579 2.37891 17.2529 2.67812 17.2881 3.04443C17.6019 6.31174 20.2023 8.91191 23.4698 9.22569C23.8361 9.26089 24.1353 9.55582 24.1353 9.92378V16.0761C24.1353 16.4441 23.8361 16.739 23.4698 16.7742C20.2023 17.088 17.6019 19.6881 17.2881 22.9555C17.2529 23.3218 16.9579 23.621 16.5899 23.621H10.4373C10.0692 23.621 9.77432 23.3218 9.73912 22.9555C9.42534 19.6881 6.82494 17.088 3.5574 16.7742C3.19109 16.739 2.89185 16.4441 2.89185 16.0761V9.92378C2.89185 9.55582 3.19109 9.26089 3.55741 9.22569C6.82494 8.91191 9.42534 6.31174 9.73912 3.04443C9.77432 2.67812 10.0692 2.37891 10.4373 2.37891H16.5899ZM7.15714 12.982C7.15714 16.5163 10.0192 19.3814 13.5498 19.3814C17.0804 19.3814 19.9426 16.5163 19.9426 12.982C19.9426 9.44762 17.0804 6.58248 13.5498 6.58248C10.0192 6.58248 7.15714 9.44762 7.15714 12.982Z" fill="currentColor"></path></svg>',
+		"cerebras":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26" fill="none"><path d="M15 22C10.0294 22 6 17.9706 6 13C6 8.02939 10.0294 4 15 4M10.3635 18.5622C7.2966 15.989 6.89677 11.417 9.46998 8.35026C12.0432 5.28338 16.6151 4.88355 19.6819 7.45675M12.4088 17.8643C9.72407 16.447 8.69627 13.1212 10.1136 10.4368C11.5308 7.75157 14.8559 6.72427 17.5411 8.14156M15 16.746C12.9314 16.746 11.2543 15.0689 11.2543 13.0003C11.2543 10.9316 12.9314 9.25454 15 9.25454" stroke="#F15A29" stroke-width="1.5" stroke-miterlimit="10"></path></svg>',
+		"replicate":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><rect x="3.14136" y="2.43652" width="21.1274" height="21.1274" rx="3.54011" fill="url(#paint0_linear_171_78)"></rect><path fill-rule="evenodd" clip-rule="evenodd" d="M19.9161 7.72173V6.18848H7.49072V19.8116H9.21034V7.72173H19.9161ZM19.919 9.09575V10.629H12.4584V19.8109H10.7388V9.09575H19.919ZM19.9161 11.9922V13.5342H15.7008V19.8082H13.9811V11.9922H19.9161Z" fill="white"></path><defs><linearGradient id="paint0_linear_171_78" x1="22.9091" y1="3.17345" x2="4.19652" y2="22.4427" gradientUnits="userSpaceOnUse"><stop stop-color="#EBFF18"></stop><stop offset="0.5" stop-color="#EB40F0"></stop><stop offset="1" stop-color="#BE0000"></stop></linearGradient></defs></svg>',
+		"black-forest-labs":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><path fill-rule="evenodd" clip-rule="evenodd" d="M13.1146 5L22.5938 18.9541L20.7344 18.9687L13.1146 7.54511L6.55208 17.528H14.6458L16.1042 18.9687C16.1042 19.0468 4 18.9541 4 18.9541L13.1146 5ZM21.3906 9.46122C21.3979 9.47585 21.6969 9.95853 22.0615 10.5436C22.4188 11.1287 22.7615 11.6918 22.9583 12.0063H19.8229L20.2458 11.3262C20.2458 11.3262 20.8365 10.3827 21.026 10.0463C21.2229 9.70988 21.3833 9.44659 21.3906 9.46122Z" fill="currentColor"></path><path d="M19.6305 18.9541H17.917L13.4326 12.0794H15.2555L19.6305 18.9541Z" fill="currentColor"></path><path d="M13.224 15.9556H10.1979L11.6563 13.5787L13.224 15.9556Z" fill="currentColor"></path></svg>',
+		"fireworks-ai":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><path d="M19.7941 2.5H6.20588C4.15918 2.5 2.5 4.15918 2.5 6.20588V19.7941C2.5 21.8408 4.15918 23.5 6.20588 23.5H19.7941C21.8408 23.5 23.5 21.8408 23.5 19.7941V6.20588C23.5 4.15918 21.8408 2.5 19.7941 2.5Z" fill="#5019C5"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M12.9917 14.8005C12.4958 14.8005 12.0508 14.5061 11.861 14.0503L9.57335 8.58789H10.9123L12.9995 13.5848L15.0847 8.58789H16.4237L14.1223 14.0523C13.9316 14.5061 13.4875 14.8005 12.9917 14.8005ZM15.9767 17.4106C15.4828 17.4106 15.0398 17.1181 14.8481 16.6663C14.6554 16.2105 14.7551 15.6902 15.1034 15.3371L19.2699 11.1168L19.7902 12.3442L15.9758 16.2007L21.4128 16.1704L21.9331 17.3979L15.9777 17.4125L15.9758 17.4106H15.9767ZM4.58722 16.1684L4.06689 17.3959L4.06885 17.394L10.0242 17.4076C10.5162 17.4076 10.9612 17.1162 11.1529 16.6633C11.3466 16.2085 11.2458 15.6863 10.8977 15.3342L6.73113 11.1138L6.2108 12.3413L10.0242 16.1988L4.58722 16.1684Z" fill="white"></path></svg>',
+		"together":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><g clip-path="url(#clip0_55_1726)"><path d="M19.925 2.5H6.33674C4.29004 2.5 2.63086 4.15918 2.63086 6.20588V19.7941C2.63086 21.8408 4.29004 23.5 6.33674 23.5H19.925C21.9717 23.5 23.6309 21.8408 23.6309 19.7941V6.20588C23.6309 4.15918 21.9717 2.5 19.925 2.5Z" fill="#F1EFED"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M17.6087 12.5368C19.6554 12.5368 21.3146 10.8776 21.3146 8.83088C21.3146 6.78418 19.6554 5.125 17.6087 5.125C15.562 5.125 13.9028 6.78418 13.9028 8.83088C13.9028 10.8776 15.562 12.5368 17.6087 12.5368ZM17.6087 21.1842C19.6554 21.1842 21.3146 19.525 21.3146 17.4783C21.3146 15.4316 19.6554 13.7725 17.6087 13.7725C15.562 13.7725 13.9028 15.4316 13.9028 17.4783C13.9028 19.525 15.562 21.1842 17.6087 21.1842ZM12.6676 17.4783C12.6676 19.525 11.0084 21.1842 8.96174 21.1842C6.91504 21.1842 5.25586 19.525 5.25586 17.4783C5.25586 15.4316 6.91504 13.7725 8.96174 13.7725C11.0084 13.7725 12.6676 15.4316 12.6676 17.4783Z" fill="#D3D1D1"></path><path d="M8.96174 12.5368C11.0084 12.5368 12.6676 10.8776 12.6676 8.83088C12.6676 6.78418 11.0084 5.125 8.96174 5.125C6.91504 5.125 5.25586 6.78418 5.25586 8.83088C5.25586 10.8776 6.91504 12.5368 8.96174 12.5368Z" fill="#0F6FFF"></path></g><defs><clipPath id="clip0_55_1726"><rect width="21" height="21" fill="white" transform="translate(2.63086 2.5)"></rect></clipPath></defs></svg>',
+		"nebius":
+			'<svg width="1em" height="1em" viewBox="0 0 26 26" class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" preserveAspectRatio="xMidYMid meet"><rect x="3.14136" y="2.43652" width="21.1274" height="21.1274" rx="3.54011" fill="#D9FE00"></rect><path fill-rule="evenodd" clip-rule="evenodd" d="M22 5.2226H19V18.4781C19 18.4781 22 18.2885 22 14.6817V5.2226ZM4 11.3183V20.7701H7V7.5146C7 7.5146 8.80257 7.25164 9.75584 9.56444L13.5339 18.6933C14.1519 20.1708 15.1636 21 16.5923 21C18.021 21 19 19.7855 19 18.4842C19 18.4842 17.1974 18.7471 16.2383 16.4356L12.4661 7.30668C11.8481 5.82923 10.8364 5 9.40771 5C7.97897 5 7 6.21327 7 7.5146C6.99416 7.5146 4 7.71029 4 11.3183Z" fill="#002C44"></path></svg>',
+		"hyperbolic":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><path d="M3.4211 4.72C3.66077 4.42 4.22 3.9 4.65939 3.58C5.09879 3.26 5.71793 3 6.03749 3C6.397 3 6.69659 3.16 6.87634 3.46C7.03612 3.7 7.67524 5.14 8.27442 6.64C8.89356 8.16 9.39287 9.58 9.39287 9.8C9.39287 10.18 9.35293 10.2 8.15458 9.9C7.45554 9.72 6.35705 9.34 5.69796 9.02C5.03887 8.7 4.16008 8.06 3.76063 7.62C3.16145 6.98 3.00167 6.64 3.00167 6.06C2.9817 5.58 3.14148 5.1 3.4211 4.72Z" fill="#594CE9"></path><path d="M17.7813 6.4C18.3406 5.02 18.9397 3.7 19.0995 3.46C19.2793 3.16 19.5988 3 19.9384 3C20.2379 3 20.8371 3.24 21.2765 3.56C21.7159 3.88 22.2552 4.34 22.4749 4.6C22.7545 4.92 22.8743 5.32 22.8743 6C22.8743 6.84 22.7745 7.08 22.1753 7.7C21.7958 8.1 20.937 8.68 20.2779 9C19.6188 9.32 18.5003 9.72 16.4831 10.2L16.6029 9.54C16.6828 9.2 17.2021 7.78 17.7813 6.4Z" fill="#594CE9"></path><path d="M4.71931 10.8C4.5795 10.3 4.39975 9.72 4.31986 9.52C4.23997 9.24 4.27991 9.16 4.45967 9.24C4.5795 9.32 5.23859 9.6 5.89769 9.86C6.55678 10.14 7.81505 10.52 8.69384 10.7C9.75238 10.92 11.2104 11.04 12.9879 11.04C14.7455 11.04 16.2434 10.92 17.282 10.7C18.1608 10.52 19.5189 10.1 20.2779 9.78C21.0568 9.48 21.6959 9.24 21.7359 9.26C21.7559 9.28 21.616 9.66 21.4363 10.1C21.1966 10.66 21.0968 11.48 21.0768 12.9C21.0768 14.36 21.1767 15.14 21.4363 15.8C21.636 16.3 21.7559 16.72 21.7359 16.74C21.6959 16.76 21.0568 16.52 20.2779 16.22C19.5189 15.9 18.1608 15.48 17.282 15.3C16.2235 15.06 14.7655 14.96 12.9879 14.96C11.2104 14.96 9.75238 15.06 8.69384 15.3C7.81505 15.48 6.47689 15.9 5.69796 16.22C4.93901 16.52 4.27991 16.76 4.25994 16.74C4.23997 16.72 4.39975 16.2 4.59947 15.6C4.83914 14.94 4.99892 13.94 4.99892 13.1C4.99892 12.34 4.87909 11.3 4.71931 10.8Z" fill="#594CE9"></path><path d="M5.69796 17C6.35705 16.68 7.43557 16.3 8.07469 16.14C9.13323 15.88 9.27304 15.9 9.33296 16.18C9.39287 16.36 9.05334 17.44 8.59397 18.6C8.13461 19.76 7.53543 21.2 7.23584 21.8C6.79645 22.7 6.59672 22.9 6.15733 22.96C5.83777 22.98 5.29851 22.82 4.95898 22.62C4.59947 22.42 4.0003 21.92 3.66077 21.52C3.14148 20.96 3.00167 20.62 3.00167 20C3.00167 19.36 3.14148 19.04 3.76063 18.38C4.16008 17.94 5.03887 17.3 5.69796 17Z" fill="#594CE9"></path><path d="M17.7813 19.6C17.2021 18.22 16.7028 16.84 16.6629 16.52L16.583 15.94L17.4817 16.06C17.981 16.14 18.9797 16.44 19.7386 16.74C20.6174 17.1 21.4163 17.62 22.0754 18.24C23.074 19.2 23.074 19.22 22.9342 20.16C22.8543 20.68 22.6346 21.28 22.4349 21.48C22.2352 21.68 21.7159 22.12 21.2765 22.44C20.8371 22.76 20.2379 23 19.9384 23C19.5788 23 19.2793 22.84 19.0995 22.56C18.9397 22.3 18.3406 20.98 17.7813 19.6Z" fill="#594CE9"></path></svg>',
+		"novita":
+			'<svg width="1em" height="1em" viewBox="0 0 26 26" class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" preserveAspectRatio="xMidYMid meet" version="1.2"><rect x="2.43628" y="2.43652" width="21.1274" height="21.1274" rx="3.54011" fill="black"></rect><path d="M10.7187 5.79061C10.6923 5.80858 10.6791 6.78313 10.6835 8.13942L10.6923 10.4568C4.90331 16.3759 3.23298 18.105 3.24617 18.1274C3.25496 18.1454 4.93408 18.1589 6.97804 18.1589H10.6923C10.6923 14.5391 10.7055 13.4792 10.7275 13.4703C10.7451 13.4568 11.7956 14.5077 13.066 15.8056L15.3736 18.1589C21.1143 18.1589 22.789 18.1454 22.7978 18.123C22.811 18.105 20.1077 15.3161 16.789 11.9253C13.4703 8.53463 10.7407 5.77265 10.7187 5.79061Z" fill="#26D57A"></path></svg>',
+		"cohere":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><path fill-rule="evenodd" clip-rule="evenodd" d="M9.48 14.92C10.0133 14.92 11.08 14.8933 12.5733 14.28C14.3067 13.56 17.72 12.28 20.2 10.9467C21.9333 10.0133 22.68 8.78667 22.68 7.13333C22.68 4.86667 20.84 3 18.5467 3H8.94667C5.66667 3 3 5.66667 3 8.94667C3 12.2267 5.50667 14.92 9.48 14.92Z" fill="#39594D"></path><path fill-rule="evenodd" clip-rule="evenodd" d="M11.1066 19C11.1066 17.4 12.0666 15.9333 13.5599 15.32L16.5732 14.0666C19.6399 12.8133 22.9999 15.0533 22.9999 18.36C22.9999 20.92 20.9199 23 18.3599 23H15.0799C12.8932 23 11.1066 21.2133 11.1066 19Z" fill="#D18EE2"></path><path d="M6.44 15.6934C4.54667 15.6934 3 17.24 3 19.1334V19.5867C3 21.4534 4.54667 23 6.44 23C8.33333 23 9.88 21.4534 9.88 19.56V19.1067C9.85333 17.24 8.33333 15.6934 6.44 15.6934Z" fill="#FF7759"></path></svg>',
+		"hf-inference":
+			'<svg class="text-lg" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 26 26"><rect x="3.34856" y="3.02654" width="19.9474" height="19.9474" rx="2.95009" fill="#FFD21E" stroke="#FFB41E" stroke-width="1.18004"></rect><path fill-rule="evenodd" clip-rule="evenodd" d="M7.69336 9.74609V16.9754H9.32329V13.9595H11.8181V16.9754H13.4591V9.74609H11.8181V12.5292H9.32329V9.74609H7.69336ZM15.1646 9.74609V16.9754H16.7945V14.1702H19.3004V12.7953H16.7945V11.121H19.7217V9.74609H15.1646Z" fill="#814D00"></path></svg>',
+	};
+
+	$: icon = provider && provider in icons ? icons[provider as keyof typeof icons] : null;
+</script>
+
+{#if icon}
+	<span class="inline-block">{@html icon}</span>
+{:else}
+	<!-- Allow passing custom fallback -->
+	<slot>
+		<div class="size-4 flex-none rounded-sm bg-gray-200"></div>
+	</slot>
+{/if}
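
IconProvider resolves a provider id to its bundled logo and renders its slot otherwise, so call sites can supply their own fallback. A usage sketch (the fallback markup is illustrative; conversation.provider mirrors its use elsewhere in this PR):

    <IconProvider provider={conversation.provider}>
    	<!-- rendered when the provider has no bundled icon -->
    	<div class="size-4 flex-none rounded-sm bg-gray-200"></div>
    </IconProvider>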
src/lib/components/InferencePlayground/InferencePlayground.svelte CHANGED
@@ -1,18 +1,18 @@
1
  <script lang="ts">
2
- import type { Conversation, ConversationMessage, ModelEntryWithTokenizer, Session } from "./types";
3
 
4
- import { page } from "$app/stores";
5
- import { defaultGenerationConfig } from "./generationConfigSettings";
6
  import {
7
- createHfInference,
8
- FEATURED_MODELS_IDS,
9
  handleNonStreamingResponse,
10
  handleStreamingResponse,
11
  isSystemPromptSupported,
12
  } from "./inferencePlaygroundUtils";
13
 
14
- import { goto } from "$app/navigation";
15
- import { onDestroy, onMount } from "svelte";
 
 
 
 
16
  import IconCode from "../Icons/IconCode.svelte";
17
  import IconCompare from "../Icons/IconCompare.svelte";
18
  import IconDelete from "../Icons/IconDelete.svelte";
@@ -20,93 +20,57 @@
20
  import IconThrashcan from "../Icons/IconThrashcan.svelte";
21
  import PlaygroundConversation from "./InferencePlaygroundConversation.svelte";
22
  import PlaygroundConversationHeader from "./InferencePlaygroundConversationHeader.svelte";
23
- import GenerationConfig, { defaultSystemMessage } from "./InferencePlaygroundGenerationConfig.svelte";
24
  import HFTokenModal from "./InferencePlaygroundHFTokenModal.svelte";
25
  import ModelSelector from "./InferencePlaygroundModelSelector.svelte";
26
  import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
27
-
28
- export let models: ModelEntryWithTokenizer[];
29
 
30
  const startMessageUser: ConversationMessage = { role: "user", content: "" };
31
- const modelIdsFromQueryParam = $page.url.searchParams.get("modelId")?.split(",");
32
- const modelsFromQueryParam = modelIdsFromQueryParam?.map(id => models.find(model => model.id === id));
33
- const systemMessage: ConversationMessage = {
34
- role: "system",
35
- content: modelIdsFromQueryParam ? (defaultSystemMessage?.[modelIdsFromQueryParam[0]] ?? "") : "",
36
- };
37
-
38
- let session: Session = {
39
- conversations: [
40
- {
41
- model: models.find(m => FEATURED_MODELS_IDS.includes(m.id)) ?? models[0],
42
- config: { ...defaultGenerationConfig },
43
- messages: [{ ...startMessageUser }],
44
- systemMessage,
45
- streaming: true,
46
- },
47
- ],
48
- };
49
-
50
- if (modelsFromQueryParam?.length) {
51
- const conversations = modelsFromQueryParam.map(model => {
52
- return {
53
- model,
54
- config: { ...defaultGenerationConfig },
55
- messages: [{ ...startMessageUser }],
56
- systemMessage,
57
- streaming: true,
58
- };
59
- }) as [Conversation] | [Conversation, Conversation];
60
- session.conversations = conversations;
61
- session = session;
62
- }
63
 
64
- let hfToken = "";
65
  let viewCode = false;
66
  let viewSettings = false;
67
- let showTokenModal = false;
68
  let loading = false;
69
  let abortControllers: AbortController[] = [];
70
  let waitForNonStreaming = true;
71
- let storeLocallyHfToken = true;
72
  let selectCompareModelOpen = false;
73
 
74
  interface GenerationStatistics {
75
  latency: number;
76
  generatedTokensCount: number;
77
  }
78
- let generationStats = session.conversations.map(_ => ({ latency: 0, generatedTokensCount: 0 })) as
79
  | [GenerationStatistics]
80
  | [GenerationStatistics, GenerationStatistics];
81
 
82
- const hfTokenLocalStorageKey = "hf_token";
83
-
84
- $: systemPromptSupported = session.conversations.some(conversation => isSystemPromptSupported(conversation.model));
85
- $: compareActive = session.conversations.length === 2;
86
 
87
  function addMessage(conversationIdx: number) {
88
- const conversation = session.conversations[conversationIdx];
 
 
89
  conversation.messages = [
90
- ...conversation.messages,
91
  {
92
- role: conversation.messages.at(-1)?.role === "user" ? "assistant" : "user",
93
  content: "",
94
  },
95
  ];
96
- session = session;
97
  }
98
 
99
  function deleteMessage(conversationIdx: number, idx: number) {
100
- session.conversations[conversationIdx].messages.splice(idx, 1)[0];
101
- session = session;
102
  }
103
 
104
  function reset() {
105
- session.conversations.map(conversation => {
106
  conversation.systemMessage.content = "";
107
  conversation.messages = [{ ...startMessageUser }];
108
  });
109
- session = session;
110
  }
111
 
112
  function abort() {
@@ -120,15 +84,9 @@
120
  waitForNonStreaming = false;
121
  }
122
 
123
- function resetToken() {
124
- hfToken = "";
125
- localStorage.removeItem(hfTokenLocalStorageKey);
126
- showTokenModal = true;
127
- }
128
-
129
  async function runInference(conversation: Conversation, conversationIdx: number) {
130
  const startTime = performance.now();
131
- const hf = createHfInference(hfToken);
132
 
133
  if (conversation.streaming) {
134
  let addStreamingMessage = true;
@@ -146,8 +104,9 @@
146
  conversation.messages = [...conversation.messages, streamingMessage];
147
  addStreamingMessage = false;
148
  }
149
- session = session;
150
- generationStats[conversationIdx].generatedTokensCount += 1;
 
151
  }
152
  },
153
  abortController
@@ -161,24 +120,26 @@
161
  // check if the user did not abort the request
162
  if (waitForNonStreaming) {
163
  conversation.messages = [...conversation.messages, newMessage];
164
- generationStats[conversationIdx].generatedTokensCount += newTokensCount;
 
165
  }
166
  }
167
 
168
  const endTime = performance.now();
169
- generationStats[conversationIdx].latency = Math.round(endTime - startTime);
 
170
  }
171
 
172
  async function submit() {
173
- if (!hfToken) {
174
- showTokenModal = true;
175
  return;
176
  }
177
 
178
- for (const [idx, conversation] of session.conversations.entries()) {
179
  if (conversation.messages.at(-1)?.role === "assistant") {
180
  let prefix = "";
181
- if (session.conversations.length === 2) {
182
  prefix = `Error on ${idx === 0 ? "left" : "right"} conversation. `;
183
  }
184
  return alert(`${prefix}Messages must alternate between user/assistant roles.`);
@@ -189,21 +150,19 @@
189
  loading = true;
190
 
191
  try {
192
- const promises = session.conversations.map((conversation, idx) => runInference(conversation, idx));
193
  await Promise.all(promises);
194
  } catch (error) {
195
- for (const conversation of session.conversations) {
196
  if (conversation.messages.at(-1)?.role === "assistant" && !conversation.messages.at(-1)?.content?.trim()) {
197
  conversation.messages.pop();
198
  conversation.messages = [...conversation.messages];
199
  }
200
- session = session;
201
  }
202
  if (error instanceof Error) {
203
  if (error.message.includes("token seems invalid")) {
204
- hfToken = "";
205
- localStorage.removeItem(hfTokenLocalStorageKey);
206
- showTokenModal = true;
207
  }
208
  if (error.name !== "AbortError") {
209
  alert("error: " + error.message);
@@ -229,63 +188,30 @@
229
  const submittedHfToken = (formData.get("hf-token") as string).trim() ?? "";
230
  const RE_HF_TOKEN = /\bhf_[a-zA-Z0-9]{34}\b/;
231
  if (RE_HF_TOKEN.test(submittedHfToken)) {
232
- hfToken = submittedHfToken;
233
- if (storeLocallyHfToken) {
234
- localStorage.setItem(hfTokenLocalStorageKey, JSON.stringify(hfToken));
235
- }
236
  submit();
237
- showTokenModal = false;
238
  } else {
239
  alert("Please provide a valid HF token.");
240
  }
241
  }
242
 
243
- function addCompareModel(modelId: ModelEntryWithTokenizer["id"]) {
244
- const model = models.find(m => m.id === modelId);
245
- if (!model || session.conversations.length === 2) {
246
  return;
247
  }
248
- const newConversation = { ...JSON.parse(JSON.stringify(session.conversations[0])), model };
249
- session.conversations = [...session.conversations, newConversation];
250
  generationStats = [generationStats[0], { latency: 0, generatedTokensCount: 0 }];
251
-
252
- // update query param
253
- const url = new URL($page.url);
254
- const queryParamValue = `${session.conversations[0].model.id},${modelId}`;
255
- url.searchParams.set("modelId", queryParamValue);
256
-
257
- const parentOrigin = "https://huggingface.co";
258
- window.parent.postMessage({ queryString: `modelId=${queryParamValue}` }, parentOrigin);
259
- goto(url.toString(), { replaceState: true });
260
  }
261
 
262
  function removeCompareModal(conversationIdx: number) {
263
- session.conversations.splice(conversationIdx, 1)[0];
264
- session = session;
265
  generationStats.splice(conversationIdx, 1)[0];
266
  generationStats = generationStats;
267
-
268
- // update query param
269
- const url = new URL($page.url);
270
- const queryParamValue = url.searchParams.get("modelId");
271
- if (queryParamValue) {
272
- const modelIds = queryParamValue.split(",") as [string, string];
273
- const newQueryParamValue = conversationIdx === 1 ? modelIds[0] : modelIds[1];
274
- url.searchParams.set("modelId", newQueryParamValue);
275
-
276
- const parentOrigin = "https://huggingface.co";
277
- window.parent.postMessage({ queryString: `modelId=${newQueryParamValue}` }, parentOrigin);
278
- goto(url.toString(), { replaceState: true });
279
- }
280
  }
281
 
282
- onMount(() => {
283
- const storedHfToken = localStorage.getItem(hfTokenLocalStorageKey);
284
- if (storedHfToken !== null) {
285
- hfToken = JSON.parse(storedHfToken);
286
- }
287
- });
288
-
289
  onDestroy(() => {
290
  for (const abortController of abortControllers) {
291
  abortController.abort();
@@ -293,8 +219,12 @@
293
  });
294
  </script>
295
 
296
- {#if showTokenModal}
297
- <HFTokenModal bind:storeLocallyHfToken on:close={() => (showTokenModal = false)} on:submit={handleTokenSubmit} />
 
 
 
 
298
  {/if}
299
 
300
  <!-- svelte-ignore a11y-no-static-element-interactions -->
@@ -316,12 +246,12 @@
316
  placeholder={systemPromptSupported
317
  ? "Enter a custom prompt"
318
  : "System prompt is not supported with the chosen model."}
319
- value={systemPromptSupported ? session.conversations[0].systemMessage.content : ""}
320
  on:input={e => {
321
- for (const conversation of session.conversations) {
322
  conversation.systemMessage.content = e.currentTarget.value;
323
  }
324
- session = session;
325
  }}
326
  class="absolute inset-x-0 bottom-0 h-full resize-none bg-transparent px-3 pt-10 text-sm outline-hidden"
327
  ></textarea>
@@ -331,11 +261,10 @@
331
  <div
332
  class="flex h-[calc(100dvh-5rem-120px)] divide-x divide-gray-200 overflow-x-auto overflow-y-hidden *:w-full max-sm:w-dvw md:h-[calc(100dvh-5rem)] md:pt-3 dark:divide-gray-800"
333
  >
334
- {#each session.conversations as conversation, conversationIdx}
335
  <div class="max-sm:min-w-full">
336
  {#if compareActive}
337
  <PlaygroundConversationHeader
338
- {models}
339
  {conversationIdx}
340
  bind:conversation
341
  on:close={() => removeCompareModal(conversationIdx)}
@@ -345,7 +274,6 @@
345
  {loading}
346
  {conversation}
347
  {viewCode}
348
- {hfToken}
349
  {compareActive}
350
  on:addMessage={() => addMessage(conversationIdx)}
351
  on:deleteMessage={e => deleteMessage(conversationIdx, e.detail)}
@@ -403,7 +331,7 @@
403
  {#if loading}
404
  <div class="flex flex-none items-center gap-[3px]">
405
  <span class="mr-2">
406
- {#if session.conversations[0].streaming || session.conversations[1]?.streaming}
407
  Stop
408
  {:else}
409
  Cancel
@@ -425,7 +353,7 @@
425
  {:else}
426
  Run <span
427
  class="inline-flex gap-0.5 rounded-sm border border-white/20 bg-white/10 px-0.5 text-xs text-white/70"
428
- >⌘<span class="translate-y-px">↵</span></span
429
  >
430
  {/if}
431
  </button>
@@ -438,7 +366,7 @@
438
  class="flex flex-1 flex-col gap-6 overflow-y-hidden rounded-xl border border-gray-200/80 bg-white bg-linear-to-b from-white via-white p-3 shadow-xs dark:border-white/5 dark:bg-gray-900 dark:from-gray-800/40 dark:via-gray-800/40"
439
  >
440
  <div class="flex flex-col gap-2">
441
- <ModelSelector {models} bind:conversation={session.conversations[0]} />
442
  <div class="flex items-center gap-2 self-end px-2 text-xs whitespace-nowrap">
443
  <button
444
  class="flex items-center gap-0.5 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-300"
@@ -448,22 +376,21 @@
448
  Compare
449
  </button>
450
  <a
451
- href="https://huggingface.co/{session.conversations[0].model.id}"
 
452
  target="_blank"
453
  class="flex items-center gap-0.5 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-300"
454
  >
455
- <svg xmlns="http://www.w3.org/2000/svg" width="1em" height="1em" viewBox="0 0 32 32"
456
- ><path fill="currentColor" d="M10 6v2h12.59L6 24.59L7.41 26L24 9.41V22h2V6H10z" /></svg
457
- >
458
  Model page
459
  </a>
460
  </div>
461
  </div>
462
 
463
- <GenerationConfig bind:conversation={session.conversations[0]} />
464
- {#if hfToken}
465
  <button
466
- on:click={resetToken}
467
  class="mt-auto flex items-center gap-1 self-end text-sm text-gray-500 underline decoration-gray-300 hover:text-gray-800 dark:text-gray-400 dark:decoration-gray-600 dark:hover:text-gray-200"
468
  ><svg xmlns="http://www.w3.org/2000/svg" class="text-xs" width="1em" height="1em" viewBox="0 0 32 32"
469
  ><path
@@ -517,8 +444,7 @@
517
 
518
  {#if selectCompareModelOpen}
519
  <ModelSelectorModal
520
- {models}
521
- conversation={session.conversations[0]}
522
  on:modelSelected={e => addCompareModel(e.detail)}
523
  on:close={() => (selectCompareModelOpen = false)}
524
  />
 
1
  <script lang="ts">
2
+ import type { Conversation, ConversationMessage, ModelWithTokenizer } from "$lib/types";
3
 
 
 
4
  import {
 
 
5
  handleNonStreamingResponse,
6
  handleStreamingResponse,
7
  isSystemPromptSupported,
8
  } from "./inferencePlaygroundUtils";
9
 
10
+ import { models } from "$lib/stores/models";
11
+ import { session } from "$lib/stores/session";
12
+ import { token } from "$lib/stores/token";
13
+ import { isMac } from "$lib/utils/platform";
14
+ import { HfInference } from "@huggingface/inference";
15
+ import { onDestroy } from "svelte";
16
  import IconCode from "../Icons/IconCode.svelte";
17
  import IconCompare from "../Icons/IconCompare.svelte";
18
  import IconDelete from "../Icons/IconDelete.svelte";
 
20
  import IconThrashcan from "../Icons/IconThrashcan.svelte";
21
  import PlaygroundConversation from "./InferencePlaygroundConversation.svelte";
22
  import PlaygroundConversationHeader from "./InferencePlaygroundConversationHeader.svelte";
23
+ import GenerationConfig from "./InferencePlaygroundGenerationConfig.svelte";
24
  import HFTokenModal from "./InferencePlaygroundHFTokenModal.svelte";
25
  import ModelSelector from "./InferencePlaygroundModelSelector.svelte";
26
  import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
27
+ import IconExternal from "../Icons/IconExternal.svelte";
 
28
 
29
  const startMessageUser: ConversationMessage = { role: "user", content: "" };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
30
 
 
31
  let viewCode = false;
32
  let viewSettings = false;
 
33
  let loading = false;
34
  let abortControllers: AbortController[] = [];
35
  let waitForNonStreaming = true;
 
36
  let selectCompareModelOpen = false;
37
 
38
  interface GenerationStatistics {
39
  latency: number;
40
  generatedTokensCount: number;
41
  }
42
+ let generationStats = $session.conversations.map(_ => ({ latency: 0, generatedTokensCount: 0 })) as
43
  | [GenerationStatistics]
44
  | [GenerationStatistics, GenerationStatistics];
45
 
46
+ $: systemPromptSupported = $session.conversations.some(conversation => isSystemPromptSupported(conversation.model));
47
+ $: compareActive = $session.conversations.length === 2;
 
 
48
 
49
  function addMessage(conversationIdx: number) {
50
+ const conversation = $session.conversations[conversationIdx];
51
+ if (!conversation) return;
52
+ const msgs = conversation.messages.slice();
53
  conversation.messages = [
54
+ ...msgs,
55
  {
56
+ role: msgs.at(-1)?.role === "user" ? "assistant" : "user",
57
  content: "",
58
  },
59
  ];
60
+ $session = $session;
61
  }
62
 
63
  function deleteMessage(conversationIdx: number, idx: number) {
64
+ $session.conversations[conversationIdx]?.messages.splice(idx, 1)[0];
65
+ $session = $session;
66
  }
67
 
68
  function reset() {
69
+ $session.conversations.map(conversation => {
70
  conversation.systemMessage.content = "";
71
  conversation.messages = [{ ...startMessageUser }];
72
  });
73
+ // session = session;
74
  }
75
 
76
  function abort() {
 
84
  waitForNonStreaming = false;
85
  }
86
 
 
 
 
 
 
 
87
  async function runInference(conversation: Conversation, conversationIdx: number) {
88
  const startTime = performance.now();
89
+ const hf = new HfInference($token.value);
90
 
91
  if (conversation.streaming) {
92
  let addStreamingMessage = true;
 
104
  conversation.messages = [...conversation.messages, streamingMessage];
105
  addStreamingMessage = false;
106
  }
107
+ $session = $session;
108
+ const c = generationStats[conversationIdx];
109
+ if (c) c.generatedTokensCount += 1;
110
  }
111
  },
112
  abortController
 
120
  // check if the user did not abort the request
121
  if (waitForNonStreaming) {
122
  conversation.messages = [...conversation.messages, newMessage];
123
+ const c = generationStats[conversationIdx];
124
+ if (c) c.generatedTokensCount += newTokensCount;
125
  }
126
  }
127
 
128
  const endTime = performance.now();
129
+ const c = generationStats[conversationIdx];
130
+ if (c) c.latency = Math.round(endTime - startTime);
131
  }
132
 
133
  async function submit() {
134
+ if (!$token.value) {
135
+ $token.showModal = true;
136
  return;
137
  }
138
 
139
+ for (const [idx, conversation] of $session.conversations.entries()) {
140
  if (conversation.messages.at(-1)?.role === "assistant") {
141
  let prefix = "";
142
+ if ($session.conversations.length === 2) {
143
  prefix = `Error on ${idx === 0 ? "left" : "right"} conversation. `;
144
  }
145
  return alert(`${prefix}Messages must alternate between user/assistant roles.`);
 
150
  loading = true;
151
 
152
  try {
153
+ const promises = $session.conversations.map((conversation, idx) => runInference(conversation, idx));
154
  await Promise.all(promises);
155
  } catch (error) {
156
+ for (const conversation of $session.conversations) {
157
  if (conversation.messages.at(-1)?.role === "assistant" && !conversation.messages.at(-1)?.content?.trim()) {
158
  conversation.messages.pop();
159
  conversation.messages = [...conversation.messages];
160
  }
161
+ $session = $session;
162
  }
163
  if (error instanceof Error) {
164
  if (error.message.includes("token seems invalid")) {
165
+ token.reset();
 
 
166
  }
167
  if (error.name !== "AbortError") {
168
  alert("error: " + error.message);
 
188
  const submittedHfToken = (formData.get("hf-token") as string).trim() ?? "";
189
  const RE_HF_TOKEN = /\bhf_[a-zA-Z0-9]{34}\b/;
190
  if (RE_HF_TOKEN.test(submittedHfToken)) {
191
+ token.setValue(submittedHfToken);
 
 
 
192
  submit();
 
193
  } else {
194
  alert("Please provide a valid HF token.");
195
  }
196
  }
197
 
198
+ function addCompareModel(modelId: ModelWithTokenizer["id"]) {
199
+ const model = $models.find(m => m.id === modelId);
200
+ if (!model || $session.conversations.length === 2) {
201
  return;
202
  }
203
+ const newConversation = { ...JSON.parse(JSON.stringify($session.conversations[0])), model };
204
+ $session.conversations = [...$session.conversations, newConversation];
205
  generationStats = [generationStats[0], { latency: 0, generatedTokensCount: 0 }];
 
 
 
 
 
 
 
 
 
206
  }
207
 
208
  function removeCompareModal(conversationIdx: number) {
209
+ $session.conversations.splice(conversationIdx, 1)[0];
210
+ $session = $session;
211
  generationStats.splice(conversationIdx, 1)[0];
212
  generationStats = generationStats;
 
 
 
 
 
 
 
 
 
 
 
 
 
213
  }
214
 
 
 
 
 
 
 
 
215
  onDestroy(() => {
216
  for (const abortController of abortControllers) {
217
  abortController.abort();
 
219
  });
220
  </script>
221
 
222
+ {#if $token.showModal}
223
+ <HFTokenModal
224
+ bind:storeLocallyHfToken={$token.writeToLocalStorage}
225
+ on:close={() => ($token.showModal = false)}
226
+ on:submit={handleTokenSubmit}
227
+ />
228
  {/if}
229
 
230
  <!-- svelte-ignore a11y-no-static-element-interactions -->
 
246
  placeholder={systemPromptSupported
247
  ? "Enter a custom prompt"
248
  : "System prompt is not supported with the chosen model."}
249
+ value={systemPromptSupported ? $session.conversations[0].systemMessage.content : ""}
250
  on:input={e => {
251
+ for (const conversation of $session.conversations) {
252
  conversation.systemMessage.content = e.currentTarget.value;
253
  }
254
+ $session = $session;
255
  }}
256
  class="absolute inset-x-0 bottom-0 h-full resize-none bg-transparent px-3 pt-10 text-sm outline-hidden"
257
  ></textarea>
 
261
  <div
262
  class="flex h-[calc(100dvh-5rem-120px)] divide-x divide-gray-200 overflow-x-auto overflow-y-hidden *:w-full max-sm:w-dvw md:h-[calc(100dvh-5rem)] md:pt-3 dark:divide-gray-800"
263
  >
264
+ {#each $session.conversations as conversation, conversationIdx}
265
  <div class="max-sm:min-w-full">
266
  {#if compareActive}
267
  <PlaygroundConversationHeader
 
268
  {conversationIdx}
269
  bind:conversation
270
  on:close={() => removeCompareModal(conversationIdx)}
 
274
  {loading}
275
  {conversation}
276
  {viewCode}
 
277
  {compareActive}
278
  on:addMessage={() => addMessage(conversationIdx)}
279
  on:deleteMessage={e => deleteMessage(conversationIdx, e.detail)}
 
331
  {#if loading}
332
  <div class="flex flex-none items-center gap-[3px]">
333
  <span class="mr-2">
334
+ {#if $session.conversations[0].streaming || $session.conversations[1]?.streaming}
335
  Stop
336
  {:else}
337
  Cancel
 
353
  {:else}
354
  Run <span
355
  class="inline-flex gap-0.5 rounded-sm border border-white/20 bg-white/10 px-0.5 text-xs text-white/70"
356
+ >{isMac() ? "⌘" : "Ctrl"}<span class="translate-y-px">↵</span></span
357
  >
358
  {/if}
359
  </button>
 
366
  class="flex flex-1 flex-col gap-6 overflow-y-hidden rounded-xl border border-gray-200/80 bg-white bg-linear-to-b from-white via-white p-3 shadow-xs dark:border-white/5 dark:bg-gray-900 dark:from-gray-800/40 dark:via-gray-800/40"
367
  >
368
  <div class="flex flex-col gap-2">
369
+ <ModelSelector bind:conversation={$session.conversations[0]} />
370
  <div class="flex items-center gap-2 self-end px-2 text-xs whitespace-nowrap">
371
  <button
372
  class="flex items-center gap-0.5 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-300"
 
376
  Compare
377
  </button>
378
  <a
379
+ href="https://huggingface.co/{$session.conversations[0].model.id}?inference_provider={$session
380
+ .conversations[0].provider}"
381
  target="_blank"
382
  class="flex items-center gap-0.5 text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-300"
383
  >
384
+ <IconExternal />
 
 
385
  Model page
386
  </a>
387
  </div>
388
  </div>
389
 
390
+ <GenerationConfig bind:conversation={$session.conversations[0]} />
391
+ {#if $token.value}
392
  <button
393
+ on:click={token.reset}
394
  class="mt-auto flex items-center gap-1 self-end text-sm text-gray-500 underline decoration-gray-300 hover:text-gray-800 dark:text-gray-400 dark:decoration-gray-600 dark:hover:text-gray-200"
395
  ><svg xmlns="http://www.w3.org/2000/svg" class="text-xs" width="1em" height="1em" viewBox="0 0 32 32"
396
  ><path
 
444
 
445
  {#if selectCompareModelOpen}
446
  <ModelSelectorModal
447
+ conversation={$session.conversations[0]}
 
448
  on:modelSelected={e => addCompareModel(e.detail)}
449
  on:close={() => (selectCompareModelOpen = false)}
450
  />
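
Note on the `$session = $session` pattern in the system-prompt handler above: Svelte store subscribers only re-run on assignment, so after mutating conversation objects in place the store is assigned back to itself. A minimal sketch of the same idea outside a component, assuming the `session` store exposes `set` like a plain writable:

import { get } from "svelte/store";
import { session } from "$lib/stores/session";

// Mutate in place, then set the store to itself so subscribers re-render.
// (Equivalent to `$session = $session` in component markup.)
export function setSystemPromptForAll(content: string) {
  const current = get(session);
  for (const conversation of current.conversations) {
    conversation.systemMessage.content = content;
  }
  session.set(current); // assumption: the custom store re-exports writable's set
}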
src/lib/components/InferencePlayground/InferencePlaygroundCodeSnippets.svelte CHANGED
@@ -1,442 +1,141 @@
1
  <script lang="ts">
2
- import type { Conversation } from "./types";
3
 
4
- import { createEventDispatcher, onDestroy } from "svelte";
5
  import hljs from "highlight.js/lib/core";
 
6
  import javascript from "highlight.js/lib/languages/javascript";
7
  import python from "highlight.js/lib/languages/python";
8
- import http from "highlight.js/lib/languages/http";
9
 
 
 
 
10
  import IconCopyCode from "../Icons/IconCopyCode.svelte";
11
- import { isSystemPromptSupported } from "./inferencePlaygroundUtils";
 
 
 
 
 
12
 
13
  hljs.registerLanguage("javascript", javascript);
14
  hljs.registerLanguage("python", python);
15
  hljs.registerLanguage("http", http);
16
 
17
  export let conversation: Conversation;
18
- export let hfToken: string;
19
 
20
  const dispatch = createEventDispatcher<{ closeCode: void }>();
21
 
22
- const lanuages = ["javascript", "python", "http"];
23
- type Language = (typeof lanuages)[number];
24
- const labelsByLanguage: Record<Language, string> = {
25
  javascript: "JavaScript",
26
  python: "Python",
27
- http: "Curl",
28
- };
29
-
30
- interface Snippet {
31
- label: string;
32
- code: string;
33
- language?: Language;
34
- needsToken?: boolean;
35
- }
36
-
37
- interface ClientSnippet {
38
- name: string;
39
- snippets: Snippet[];
40
- }
41
-
42
- interface MessagesJoiner {
43
- sep: string;
44
- start: string;
45
- end: string;
46
- }
47
 
48
- let selectedLanguage: Language = "javascript";
49
- let timeout: ReturnType<typeof setTimeout>;
50
  let showToken = false;
51
 
52
  $: tokenStr = getTokenStr(showToken);
53
 
54
- $: clientSnippetsByLang = {
55
- javascript: [
56
- { name: "@huggingface/inference", snippets: getJavascriptSnippetsHfClient(conversation, tokenStr) },
57
- { name: "openai", snippets: getJavascriptSnippetsOAIClient(conversation, tokenStr) },
58
- ],
59
- python: [
60
- { name: "huggingface_hub", snippets: getPythonSnippetsHfClient(conversation, tokenStr) },
61
- { name: "openai", snippets: getPythonSnippetsOAIClient(conversation, tokenStr) },
62
- ],
63
- http: [{ name: "curl", snippets: getHttpSnippets(conversation, tokenStr) }],
64
- } as Record<Language, ClientSnippet[]>;
65
-
66
- const selectedClientIdxByLang: Record<Language, number> = Object.fromEntries(lanuages.map(lang => [lang, 0]));
67
-
68
- function getTokenStr(showToken: boolean) {
69
- if (hfToken && showToken) {
70
- return hfToken;
71
- }
72
- return "YOUR_HF_TOKEN";
73
- }
74
-
75
- function getMessages() {
76
- const placeholder = [{ role: "user", content: "Tell me a story" }];
77
-
78
- let messages = [...conversation.messages];
79
- if (messages.length === 1 && messages[0].role === "user" && !messages[0].content) {
80
- messages = placeholder;
81
- }
82
-
83
- const { model, systemMessage } = conversation;
84
- if (isSystemPromptSupported(model) && systemMessage.content?.length) {
85
- messages.unshift(systemMessage);
86
- }
87
-
88
- const res = messages.map(({ role, content }) => ({
89
- role,
90
- content: JSON.stringify(content).slice(1, -1),
91
- }));
92
- messages = res;
93
-
94
- return res;
95
- }
96
-
97
- function highlight(code: string, language: Language) {
98
- return hljs.highlight(code, { language }).value;
99
- }
100
-
101
- function getJavascriptSnippetsHfClient(conversation: Conversation, tokenStr: string) {
102
- const formattedMessages = ({ sep, start, end }: MessagesJoiner) =>
103
- start +
104
- getMessages()
105
- .map(({ role, content }) => `{ role: "${role}", content: "${content}" }`)
106
- .join(sep) +
107
- end;
108
-
109
- const formattedConfig = ({ sep, start, end }: MessagesJoiner) =>
110
- start +
111
- Object.entries(conversation.config)
112
- .map(([key, val]) => `${key}: ${val}`)
113
- .join(sep) +
114
- end;
115
-
116
- const snippets: Snippet[] = [];
117
- snippets.push({
118
- label: "Install @huggingface/inference",
119
- language: "http",
120
- code: `npm install --save @huggingface/inference`,
121
- });
122
- if (conversation.streaming) {
123
- snippets.push({
124
- label: "Streaming API",
125
- needsToken: true,
126
- code: `import { HfInference } from "@huggingface/inference"
127
-
128
- const client = new HfInference("${tokenStr}")
129
-
130
- let out = "";
131
-
132
- const stream = client.chatCompletionStream({
133
- model: "${conversation.model.id}",
134
- messages: ${formattedMessages({ sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" })},
135
- ${formattedConfig({ sep: ",\n\t", start: "", end: "" })}
136
- });
137
-
138
- for await (const chunk of stream) {
139
- if (chunk.choices && chunk.choices.length > 0) {
140
- const newContent = chunk.choices[0].delta.content;
141
- out += newContent;
142
- console.log(newContent);
143
- }
144
- }`,
145
- });
146
- } else {
147
- // non-streaming
148
- snippets.push({
149
- label: "Non-Streaming API",
150
- needsToken: true,
151
- code: `import { HfInference } from '@huggingface/inference'
152
-
153
- const client = new HfInference("${tokenStr}")
154
-
155
- const chatCompletion = await client.chatCompletion({
156
- model: "${conversation.model.id}",
157
- messages: ${formattedMessages({ sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" })},
158
- ${formattedConfig({ sep: ",\n\t", start: "", end: "" })}
159
- });
160
-
161
- console.log(chatCompletion.choices[0].message);`,
162
- });
163
- }
164
-
165
- return snippets;
166
- }
167
-
168
- function getJavascriptSnippetsOAIClient(conversation: Conversation, tokenStr: string) {
169
- const formattedMessages = ({ sep, start, end }: MessagesJoiner) =>
170
- start +
171
- getMessages()
172
- .map(({ role, content }) => `{ role: "${role}", content: "${content}" }`)
173
- .join(sep) +
174
- end;
175
-
176
- const formattedConfig = ({ sep, start, end }: MessagesJoiner) =>
177
- start +
178
- Object.entries(conversation.config)
179
- .map(([key, val]) => `${key}: ${val}`)
180
- .join(sep) +
181
- end;
182
-
183
- const snippets: Snippet[] = [];
184
- snippets.push({
185
- label: "Install openai",
186
- language: "http",
187
- code: `npm install --save openai`,
188
  });
189
- if (conversation.streaming) {
190
- snippets.push({
191
- label: "Streaming API",
192
- needsToken: true,
193
- code: `import { OpenAI } from "openai"
194
-
195
- const client = new OpenAI({
196
- baseURL: "https://api-inference.huggingface.co/v1/",
197
- apiKey: "${tokenStr}"
198
- })
199
-
200
- let out = "";
201
-
202
- const stream = await client.chat.completions.create({
203
- model: "${conversation.model.id}",
204
- messages: ${formattedMessages({ sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" })},
205
- ${formattedConfig({ sep: ",\n\t", start: "", end: "" })},
206
- stream: true,
207
- });
208
-
209
- for await (const chunk of stream) {
210
- if (chunk.choices && chunk.choices.length > 0) {
211
- const newContent = chunk.choices[0].delta.content;
212
- out += newContent;
213
- console.log(newContent);
214
  }
215
- }`,
216
- });
217
- } else {
218
- // non-streaming
219
- snippets.push({
220
- label: "Non-Streaming API",
221
- needsToken: true,
222
- code: `import { OpenAI } from "openai"
223
 
224
- const client = new OpenAI({
225
- baseURL: "https://api-inference.huggingface.co/v1/",
226
- apiKey: "${tokenStr}"
227
- })
228
-
229
- const chatCompletion = await client.chat.completions.create({
230
- model: "${conversation.model.id}",
231
- messages: ${formattedMessages({ sep: ",\n\t\t", start: "[\n\t\t", end: "\n\t]" })},
232
- ${formattedConfig({ sep: ",\n\t", start: "", end: "" })}
233
- });
234
-
235
- console.log(chatCompletion.choices[0].message);`,
236
- });
 
237
  }
 
238
 
239
- return snippets;
240
- }
241
-
242
- function getPythonSnippetsHfClient(conversation: Conversation, tokenStr: string) {
243
- const formattedMessages = ({ sep, start, end }: MessagesJoiner) =>
244
- start +
245
- getMessages()
246
- .map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
247
- .join(sep) +
248
- end;
249
-
250
- const formattedConfig = ({ sep, start, end, connector }: MessagesJoiner & { connector: string }) =>
251
- start +
252
- Object.entries(conversation.config)
253
- .map(([key, val]) => `${key}${connector}${val}`)
254
- .join(sep) +
255
- end;
256
-
257
- const snippets: Snippet[] = [];
258
- snippets.push({
259
- label: "Install the latest huggingface_hub",
260
- language: "http",
261
- code: `pip install huggingface_hub --upgrade`,
262
- });
263
- if (conversation.streaming) {
264
- snippets.push({
265
- label: "Streaming API",
266
- needsToken: true,
267
- code: `from huggingface_hub import InferenceClient
268
-
269
- client = InferenceClient(api_key="${tokenStr}")
270
-
271
- messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
272
-
273
- stream = client.chat.completions.create(
274
- model="${conversation.model.id}",
275
- messages=messages,
276
- ${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })},
277
- stream=True
278
- )
279
-
280
- for chunk in stream:
281
- print(chunk.choices[0].delta.content)`,
282
- });
283
- } else {
284
- // non-streaming
285
- snippets.push({
286
- label: "Non-Streaming API",
287
- needsToken: true,
288
- code: `from huggingface_hub import InferenceClient
289
-
290
- client = InferenceClient(api_key="${tokenStr}")
291
-
292
- messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
293
-
294
- completion = client.chat.completions.create(
295
- model="${conversation.model.id}",
296
- messages=messages,
297
- ${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })}
298
- )
299
-
300
- print(completion.choices[0].message)`,
301
- });
302
  }
303
-
304
- return snippets;
305
  }
306
 
307
- function getPythonSnippetsOAIClient(conversation: Conversation, tokenStr: string) {
308
- const formattedMessages = ({ sep, start, end }: MessagesJoiner) =>
309
- start +
310
- getMessages()
311
- .map(({ role, content }) => `{ "role": "${role}", "content": "${content}" }`)
312
- .join(sep) +
313
- end;
314
-
315
- const formattedConfig = ({ sep, start, end, connector }: MessagesJoiner & { connector: string }) =>
316
- start +
317
- Object.entries(conversation.config)
318
- .map(([key, val]) => `${key}${connector}${val}`)
319
- .join(sep) +
320
- end;
321
-
322
- const snippets: Snippet[] = [];
323
- snippets.push({
324
- label: "Install the latest openai",
325
- language: "http",
326
- code: `pip install openai --upgrade`,
327
- });
328
- if (conversation.streaming) {
329
- snippets.push({
330
- label: "Streaming API",
331
- needsToken: true,
332
- code: `from openai import OpenAI
333
-
334
- client = OpenAI(
335
- base_url="https://api-inference.huggingface.co/v1/",
336
- api_key="${tokenStr}"
337
- )
338
-
339
- messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
340
-
341
- stream = client.chat.completions.create(
342
- model="${conversation.model.id}",
343
- messages=messages,
344
- ${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })},
345
- stream=True
346
- )
347
-
348
- for chunk in stream:
349
- print(chunk.choices[0].delta.content)`,
350
- });
351
- } else {
352
- // non-streaming
353
- snippets.push({
354
- label: "Non-Streaming API",
355
- needsToken: true,
356
- code: `from openai import OpenAI
357
-
358
- client = OpenAI(
359
- base_url="https://api-inference.huggingface.co/v1/",
360
- api_key="${tokenStr}"
361
- )
362
-
363
- messages = ${formattedMessages({ sep: ",\n\t", start: `[\n\t`, end: `\n]` })}
364
-
365
- completion = client.chat.completions.create(
366
- model="${conversation.model.id}",
367
- messages=messages,
368
- ${formattedConfig({ sep: ",\n\t", start: "", end: "", connector: "=" })}
369
- )
370
-
371
- print(completion.choices[0].message)`,
372
- });
373
- }
374
-
375
- return snippets;
376
  }
377
 
378
- function getHttpSnippets(conversation: Conversation, tokenStr: string) {
379
- if (tokenStr === "YOUR_HF_TOKEN") {
380
- tokenStr = "{YOUR_HF_TOKEN}";
381
- }
382
- const formattedMessages = ({ sep, start, end }: MessagesJoiner) =>
383
- start +
384
- getMessages()
385
- .map(({ role, content }) => {
386
- // escape single quotes since single quotes is used to define http post body inside curl requests
387
- content = content?.replace(/'/g, "'\\''");
388
- return `{ "role": "${role}", "content": "${content}" }`;
389
- })
390
- .join(sep) +
391
- end;
392
-
393
- const formattedConfig = ({ sep, start, end }: MessagesJoiner) =>
394
- start +
395
- Object.entries(conversation.config)
396
- .map(([key, val]) => `"${key}": ${val}`)
397
- .join(sep) +
398
- end;
399
-
400
- const snippets: Snippet[] = [];
401
 
402
- if (conversation.streaming) {
403
- snippets.push({
404
- label: "Streaming API",
405
- needsToken: true,
406
- code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \\
407
- --header "Authorization: Bearer ${tokenStr}" \\
408
- --header 'Content-Type: application/json' \\
409
- --data '{
410
- "model": "${conversation.model.id}",
411
- "messages": ${formattedMessages({ sep: ",\n ", start: `[\n `, end: `\n]` })},
412
- ${formattedConfig({ sep: ",\n ", start: "", end: "" })},
413
- "stream": true
414
- }'`,
415
- });
416
- } else {
417
- // non-streaming
418
- snippets.push({
419
- label: "Non-Streaming API",
420
- needsToken: true,
421
- code: `curl 'https://api-inference.huggingface.co/models/${conversation.model.id}/v1/chat/completions' \\
422
- --header "Authorization: Bearer ${tokenStr}" \\
423
- --header 'Content-Type: application/json' \\
424
- --data '{
425
- "model": "${conversation.model.id}",
426
- "messages": ${formattedMessages({ sep: ",\n ", start: `[\n `, end: `\n]` })},
427
- ${formattedConfig({ sep: ",\n ", start: "", end: "" })}
428
- }'`,
429
- });
430
  }
431
 
432
- return snippets;
433
- }
434
-
435
- onDestroy(() => {
436
- if (timeout) {
437
  clearTimeout(timeout);
 
 
 
438
  }
439
- });
 
 
 
 
 
 
 
 
 
440
  </script>
441
 
442
  <div class="px-2 pt-2">
@@ -444,11 +143,11 @@ print(completion.choices[0].message)`,
444
  class="border-b border-gray-200 text-center text-sm font-medium text-gray-500 dark:border-gray-700 dark:text-gray-400"
445
  >
446
  <ul class="-mb-px flex flex-wrap">
447
- {#each Object.entries(labelsByLanguage) as [language, label]}
448
  <li>
449
  <button
450
- on:click={() => (selectedLanguage = language)}
451
- class="inline-block rounded-t-lg border-b-2 p-4 {language === selectedLanguage
452
  ? 'border-black text-black dark:border-blue-500 dark:text-blue-500'
453
  : 'border-transparent hover:border-gray-300 hover:text-gray-600 dark:hover:text-gray-300'}"
454
  aria-current="page">{label}</button
@@ -468,55 +167,72 @@ print(completion.choices[0].message)`,
468
  </ul>
469
  </div>
470
 
471
- {#if clientSnippetsByLang[selectedLanguage].length > 1}
472
  <div class="flex gap-x-2 px-2 pt-6">
473
- {#each clientSnippetsByLang[selectedLanguage] as { name }, idx}
 
474
  <button
475
- class="rounded-md px-1.5 py-0.5 leading-tight {idx === selectedClientIdxByLang[selectedLanguage]
476
- ? 'bg-black text-gray-100 dark:bg-gray-600 dark:text-white'
477
- : 'text-gray-500 hover:text-gray-600 dark:hover:text-gray-400'}"
478
- on:click={() => (selectedClientIdxByLang[selectedLanguage] = idx)}>{name}</button
 
479
  >
480
  {/each}
481
  </div>
482
  {/if}
483
 
484
- {#each clientSnippetsByLang[selectedLanguage] as { snippets }, idx}
485
- {#if idx === selectedClientIdxByLang[selectedLanguage]}
486
- {#each snippets as { label, code, language, needsToken }}
487
- <div class="flex items-center justify-between px-2 pt-6 pb-4">
488
- <h2 class="font-semibold">{label}</h2>
489
- <div class="flex items-center gap-x-4">
490
- {#if needsToken && hfToken}
491
- <label class="flex items-center gap-x-1.5 text-sm select-none">
492
- <input type="checkbox" bind:checked={showToken} />
493
- <p class="leading-none">With token</p>
494
- </label>
495
- {/if}
496
- <button
497
- class="flex items-center gap-x-2 rounded-md border bg-white px-1.5 py-0.5 text-sm shadow-xs transition dark:border-gray-800 dark:bg-gray-800"
498
- on:click={e => {
499
- const el = e.currentTarget;
500
- el.classList.add("text-green-500");
501
- navigator.clipboard.writeText(code);
502
- if (timeout) {
503
- clearTimeout(timeout);
504
- }
505
- timeout = setTimeout(() => {
506
- el.classList.remove("text-green-500");
507
- }, 400);
508
- }}
509
- >
510
- <IconCopyCode classNames="text-xs" /> Copy code
511
- </button>
512
- </div>
513
- </div>
514
- <pre
515
- class="overflow-x-auto rounded-lg border border-gray-200/80 bg-white px-4 py-6 text-sm shadow-xs dark:border-gray-800 dark:bg-gray-800/50">{@html highlight(
516
- code,
517
- language ?? selectedLanguage
518
- )}</pre>
519
- {/each}
520
  {/if}
521
- {/each}
 
 
 
 
 
 
522
  </div>
 
1
  <script lang="ts">
2
+ import type { Conversation } from "$lib/types";
3
 
 
4
  import hljs from "highlight.js/lib/core";
5
+ import http from "highlight.js/lib/languages/http";
6
  import javascript from "highlight.js/lib/languages/javascript";
7
  import python from "highlight.js/lib/languages/python";
8
+ import { createEventDispatcher } from "svelte";
9
 
10
+ import { token } from "$lib/stores/token";
11
+ import { entries, fromEntries, keys } from "$lib/utils/object";
12
+ import type { InferenceProvider } from "@huggingface/inference";
13
  import IconCopyCode from "../Icons/IconCopyCode.svelte";
14
+ import IconExternal from "../Icons/IconExternal.svelte";
15
+ import {
16
+ getInferenceSnippet,
17
+ type GetInferenceSnippetReturn,
18
+ type InferenceSnippetLanguage,
19
+ } from "./inferencePlaygroundUtils";
20
 
21
  hljs.registerLanguage("javascript", javascript);
22
  hljs.registerLanguage("python", python);
23
  hljs.registerLanguage("http", http);
24
 
25
  export let conversation: Conversation;
 
26
 
27
  const dispatch = createEventDispatcher<{ closeCode: void }>();
28
 
29
+ const labelsByLanguage = {
 
 
30
  javascript: "JavaScript",
31
  python: "Python",
32
+ http: "cURL",
33
+ } as const satisfies Record<string, string>;
34
+ type Language = keyof typeof labelsByLanguage;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
35
 
36
+ let lang: Language = "javascript";
 
37
  let showToken = false;
38
 
39
  $: tokenStr = getTokenStr(showToken);
40
 
41
+ type GetSnippetArgs = {
42
+ tokenStr: string;
43
+ conversation: Conversation;
44
+ lang: InferenceSnippetLanguage;
45
+ };
46
+ function getSnippet({ tokenStr, conversation, lang }: GetSnippetArgs) {
47
+ return getInferenceSnippet(conversation.model, conversation.provider as InferenceProvider, lang, tokenStr, {
48
+ messages: conversation.messages,
49
+ streaming: conversation.streaming,
50
+ max_tokens: conversation.config.max_tokens,
51
+ temperature: conversation.config.temperature,
52
+ top_p: conversation.config.top_p,
 
 
53
  });
 
54
  }
 
 
55
 
56
+ $: snippetsByLang = {
57
+ javascript: getSnippet({ lang: "js", tokenStr, conversation }),
58
+ python: getSnippet({ lang: "python", tokenStr, conversation }),
59
+ http: getSnippet({ lang: "curl", tokenStr, conversation }),
60
+ } as Record<Language, GetInferenceSnippetReturn>;
61
+
62
+ // { javascript: 0, python: 0, http: 0 } at first
63
+ const selectedSnippetIdxByLang: Record<Language, number> = fromEntries(
64
+ keys(labelsByLanguage).map(lang => {
65
+ return [lang, 0];
66
+ })
67
+ );
68
+ $: selectedSnippet = snippetsByLang[lang][selectedSnippetIdxByLang[lang]];
69
+
70
+ type InstallInstructions = {
71
+ title: string;
72
+ content: string;
73
+ docs: string;
74
+ };
75
+ $: installInstructions = (function getInstallInstructions(): InstallInstructions | undefined {
76
+ if (lang === "javascript") {
77
+ const isHugging = selectedSnippet?.client.includes("hugging");
78
+ const toInstall = isHugging ? "@huggingface/inference" : "openai";
79
+ const docs = isHugging
80
+ ? "https://huggingface.co/docs/huggingface.js/inference/README"
81
+ : "https://platform.openai.com/docs/libraries";
82
+ return {
83
+ title: `Install ${toInstall}`,
84
+ content: `npm install --save ${toInstall}`,
85
+ docs,
86
+ };
87
+ } else if (lang === "python") {
88
+ const isHugging = selectedSnippet?.client.includes("hugging");
89
+ const toInstall = isHugging ? "huggingface_hub" : "openai";
90
+ const docs = isHugging
91
+ ? "https://huggingface.co/docs/huggingface_hub/guides/inference"
92
+ : "https://platform.openai.com/docs/libraries";
93
+ return {
94
+ title: `Install the latest ${toInstall}`,
95
+ content: `pip install --upgrade ${toInstall}`,
96
+ docs,
97
+ };
98
  }
99
+ })();
100
 
101
+ function getTokenStr(showToken: boolean) {
102
+ if ($token.value && showToken) {
103
+ return $token.value;
 
 
104
  }
105
+ return "YOUR_HF_TOKEN";
 
106
  }
107
 
108
+ function highlight(code?: string, language?: InferenceSnippetLanguage) {
109
+ if (!code || !language) return "";
110
+ return hljs.highlight(code, { language: language === "curl" ? "http" : language }).value;
 
 
111
  }
112
 
113
+ function copy(el: HTMLElement, _content?: string) {
114
+ let timeout: ReturnType<typeof setTimeout>;
115
+ let content = _content ?? "";
 
 
116
 
117
+ function update(_content?: string) {
118
+ content = _content ?? "";
 
 
119
  }
120
 
121
+ function onClick() {
122
+ el.classList.add("text-green-500");
123
+ navigator.clipboard.writeText(content);
 
 
124
  clearTimeout(timeout);
125
+ timeout = setTimeout(() => {
126
+ el.classList.remove("text-green-500");
127
+ }, 400);
128
  }
129
+ el.addEventListener("click", onClick);
130
+
131
+ return {
132
+ update,
133
+ destroy() {
134
+ clearTimeout(timeout);
135
+ el.removeEventListener("click", onClick);
136
+ },
137
+ };
138
+ }
139
  </script>
140
 
141
  <div class="px-2 pt-2">
 
143
  class="border-b border-gray-200 text-center text-sm font-medium text-gray-500 dark:border-gray-700 dark:text-gray-400"
144
  >
145
  <ul class="-mb-px flex flex-wrap">
146
+ {#each entries(labelsByLanguage) as [language, label]}
147
  <li>
148
  <button
149
+ on:click={() => (lang = language)}
150
+ class="inline-block rounded-t-lg border-b-2 p-4 {lang === language
151
  ? 'border-black text-black dark:border-blue-500 dark:text-blue-500'
152
  : 'border-transparent hover:border-gray-300 hover:text-gray-600 dark:hover:text-gray-300'}"
153
  aria-current="page">{label}</button
 
167
  </ul>
168
  </div>
169
 
170
+ {#if (snippetsByLang[lang]?.length ?? 0) > 1}
171
  <div class="flex gap-x-2 px-2 pt-6">
172
+ {#each snippetsByLang[lang] ?? [] as { client }, idx}
173
+ {@const isActive = idx === selectedSnippetIdxByLang[lang]}
174
  <button
175
+ class="rounded-lg border px-1.5 py-0.5 text-sm leading-tight
176
+ {isActive
177
+ ? 'bg-black text-gray-100 dark:border-gray-500 dark:bg-gray-700 dark:text-white'
178
+ : 'text-gray-500 hover:text-gray-600 dark:border-gray-600 dark:hover:text-gray-400'}"
179
+ on:click={() => (selectedSnippetIdxByLang[lang] = idx)}>{client}</button
180
  >
181
  {/each}
182
  </div>
183
  {/if}
184
 
185
+ {#if installInstructions}
186
+ <div class="flex items-center justify-between px-2 pt-6 pb-4">
187
+ <h2 class="flex items-baseline gap-2 font-semibold">
188
+ {installInstructions.title}
189
+ <a
190
+ href={installInstructions.docs}
191
+ target="_blank"
192
+ class="relative -bottom-[1px] flex items-center gap-1 text-sm font-normal text-gray-500 hover:text-gray-700 dark:text-gray-400 dark:hover:text-gray-300"
193
+ >
194
+ <IconExternal classNames="size-3" />
195
+ Docs
196
+ </a>
197
+ </h2>
198
+ <div class="flex items-center gap-x-4">
199
+ <button
200
+ class="flex items-center gap-x-2 rounded-md border bg-white px-1.5 py-0.5 text-sm shadow-xs transition dark:border-gray-800 dark:bg-gray-800"
201
+ use:copy={installInstructions.content}
202
+ >
203
+ <IconCopyCode classNames="text-xs" /> Copy code
204
+ </button>
205
+ </div>
206
+ </div>
207
+ <pre
208
+ class="overflow-x-auto rounded-lg border border-gray-200/80 bg-white px-4 py-6 text-sm shadow-xs dark:border-gray-800 dark:bg-gray-800/50">{@html highlight(
209
+ installInstructions.content,
210
+ selectedSnippet?.language
211
+ )}</pre>
212
+ {/if}
213
+
214
+ <div class="flex items-center justify-between px-2 pt-6 pb-4">
215
+ {#if conversation.streaming}
216
+ <h2 class="font-semibold">Streaming API</h2>
217
+ {:else}
218
+ <h2 class="font-semibold">Non-Streaming API</h2>
 
 
219
  {/if}
220
+ <div class="flex items-center gap-x-4">
221
+ <label class="flex items-center gap-x-1.5 text-sm select-none">
222
+ <input type="checkbox" bind:checked={showToken} />
223
+ <p class="leading-none">With token</p>
224
+ </label>
225
+ <button
226
+ class="flex items-center gap-x-2 rounded-md border bg-white px-1.5 py-0.5 text-sm shadow-xs transition dark:border-gray-800 dark:bg-gray-800"
227
+ use:copy={selectedSnippet?.content}
228
+ >
229
+ <IconCopyCode classNames="text-xs" /> Copy code
230
+ </button>
231
+ </div>
232
+ </div>
233
+ <pre
234
+ class="overflow-x-auto rounded-lg border border-gray-200/80 bg-white px-4 py-6 text-sm shadow-xs dark:border-gray-800 dark:bg-gray-800/50">{@html highlight(
235
+ selectedSnippet?.content,
236
+ selectedSnippet?.language
237
+ )}</pre>
238
  </div>
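
The `copy` helper introduced above is a Svelte action: `use:copy={content}` wires clipboard logic to the element, `update` re-binds the text whenever the snippet changes, and `destroy` removes the listener and the pending highlight timeout. The same contract, typed explicitly with Svelte's `Action` (the `copyText` name is illustrative, not part of this PR):

import type { Action } from "svelte/action";

const copyText: Action<HTMLElement, string | undefined> = (el, text) => {
  let current = text ?? "";
  const onClick = () => navigator.clipboard.writeText(current);
  el.addEventListener("click", onClick);
  return {
    // Called when the `use:copyText={...}` parameter changes.
    update(next) {
      current = next ?? "";
    },
    destroy() {
      el.removeEventListener("click", onClick);
    },
  };
};
// Markup usage: <button use:copyText={selectedSnippet?.content}>Copy code</button>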
src/lib/components/InferencePlayground/InferencePlaygroundConversation.svelte CHANGED
@@ -1,5 +1,5 @@
1
  <script lang="ts">
2
- import type { Conversation } from "$lib/components/InferencePlayground/types";
3
 
4
  import { createEventDispatcher, tick } from "svelte";
5
 
@@ -10,7 +10,6 @@
10
  export let conversation: Conversation;
11
  export let loading: boolean;
12
  export let viewCode: boolean;
13
- export let hfToken: string;
14
  export let compareActive: boolean;
15
 
16
  let shouldScrollToBottom = true;
@@ -101,6 +100,6 @@
101
  </div>
102
  </button>
103
  {:else}
104
- <CodeSnippets {conversation} {hfToken} on:closeCode />
105
  {/if}
106
  </div>
 
1
  <script lang="ts">
2
+ import type { Conversation } from "$lib/types";
3
 
4
  import { createEventDispatcher, tick } from "svelte";
5
 
 
10
  export let conversation: Conversation;
11
  export let loading: boolean;
12
  export let viewCode: boolean;
 
13
  export let compareActive: boolean;
14
 
15
  let shouldScrollToBottom = true;
 
100
  </div>
101
  </button>
102
  {:else}
103
+ <CodeSnippets {conversation} on:closeCode />
104
  {/if}
105
  </div>
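
The `hfToken` prop can be dropped here because the token now lives in the global store added in src/lib/stores/token.ts, so components read it directly instead of receiving it through props. A sketch, assuming the store value carries a `value` field as the other diffs in this commit suggest:

import { get } from "svelte/store";
import { token } from "$lib/stores/token";

// `$token.value` inside components; get(token).value in plain modules.
const hfToken = get(token).value;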
src/lib/components/InferencePlayground/InferencePlaygroundConversationHeader.svelte CHANGED
@@ -1,16 +1,15 @@
1
  <script lang="ts">
2
- import type { Conversation, ModelEntryWithTokenizer } from "$lib/components/InferencePlayground/types";
3
 
4
  import { createEventDispatcher } from "svelte";
5
 
6
- import { page } from "$app/stores";
 
7
  import IconCog from "../Icons/IconCog.svelte";
8
  import GenerationConfig from "./InferencePlaygroundGenerationConfig.svelte";
9
  import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
10
- import Avatar from "../Avatar.svelte";
11
- import { goto } from "$app/navigation";
12
 
13
- export let models: ModelEntryWithTokenizer[];
14
  export let conversation: Conversation;
15
  export let conversationIdx: number;
16
 
@@ -18,35 +17,20 @@
18
 
19
  let modelSelectorOpen = false;
20
 
21
- function changeModel(newModelId: ModelEntryWithTokenizer["id"]) {
22
- const model = models.find(m => m.id === newModelId);
23
  if (!model) {
24
  return;
25
  }
26
  conversation.model = model;
27
-
28
- const url = new URL($page.url);
29
- const queryParamValue = url.searchParams.get("modelId");
30
- if (queryParamValue) {
31
- const modelIds = queryParamValue.split(",") as [string, string];
32
- modelIds[conversationIdx] = newModelId;
33
-
34
- const newQueryParamValue = modelIds.join(",");
35
- url.searchParams.set("modelId", newQueryParamValue);
36
-
37
- const parentOrigin = "https://huggingface.co";
38
- window.parent.postMessage({ queryString: `modelId=${newQueryParamValue}` }, parentOrigin);
39
-
40
- goto(url.toString(), { replaceState: true });
41
- }
42
  }
43
 
44
- $: [nameSpace] = conversation.model.id.split("/");
45
  </script>
46
 
47
  {#if modelSelectorOpen}
48
  <ModelSelectorModal
49
- {models}
50
  {conversation}
51
  on:modelSelected={e => changeModel(e.detail)}
52
  on:close={() => (modelSelectorOpen = false)}
@@ -78,3 +62,14 @@
78
  />
79
  </button>
80
  </div>
 
1
  <script lang="ts">
2
+ import type { Conversation, ModelWithTokenizer } from "$lib/types";
3
 
4
  import { createEventDispatcher } from "svelte";
5
 
6
+ import { models } from "$lib/stores/models";
7
+ import Avatar from "../Avatar.svelte";
8
  import IconCog from "../Icons/IconCog.svelte";
9
  import GenerationConfig from "./InferencePlaygroundGenerationConfig.svelte";
10
  import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
11
+ import InferencePlaygroundProviderSelect from "./InferencePlaygroundProviderSelect.svelte";
 
12
 
 
13
  export let conversation: Conversation;
14
  export let conversationIdx: number;
15
 
 
17
 
18
  let modelSelectorOpen = false;
19
 
20
+ function changeModel(newModelId: ModelWithTokenizer["id"]) {
21
+ const model = $models.find(m => m.id === newModelId);
22
  if (!model) {
23
  return;
24
  }
25
  conversation.model = model;
26
+ conversation.provider = undefined;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
27
  }
28
 
29
+ $: nameSpace = conversation.model.id.split("/")[0] ?? "";
30
  </script>
31
 
32
  {#if modelSelectorOpen}
33
  <ModelSelectorModal
 
34
  {conversation}
35
  on:modelSelected={e => changeModel(e.detail)}
36
  on:close={() => (modelSelectorOpen = false)}
 
62
  />
63
  </button>
64
  </div>
65
+
66
+ <div
67
+ class="{conversationIdx === 0
68
+ ? 'mr-4 max-sm:ml-4'
69
+ : 'mx-4'} mt-2 h-11 text-sm leading-none whitespace-nowrap max-sm:mt-4"
70
+ >
71
+ <InferencePlaygroundProviderSelect
72
+ bind:conversation
73
+ class="rounded-lg border border-gray-200/80 bg-white dark:border-white/5 dark:bg-gray-800/70 dark:hover:bg-gray-800"
74
+ />
75
+ </div>
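
Resetting `conversation.provider` to `undefined` on model change is deliberate: provider availability is per model (`model.inferenceProviderMapping`), and the new provider select re-picks a valid entry reactively. The invariant in isolation, mirroring the `reset` helper in the new InferencePlaygroundProviderSelect.svelte below:

import type { Conversation } from "$lib/types";

// Drop the provider if the newly selected model does not offer it.
function ensureValidProvider(conversation: Conversation): void {
  const providers = conversation.model.inferenceProviderMapping;
  if (!providers.some(p => p.provider === conversation.provider)) {
    conversation.provider = undefined;
  }
}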
src/lib/components/InferencePlayground/InferencePlaygroundGenerationConfig.svelte CHANGED
@@ -1,70 +1,12 @@
1
- <script context="module" lang="ts">
2
- export const defaultSystemMessage: { [key: string]: string } = {
3
- "Qwen/QwQ-32B-Preview":
4
- "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.",
5
- } as const;
6
- </script>
7
-
8
  <script lang="ts">
9
- import type { Conversation } from "$lib/components/InferencePlayground/types";
10
 
11
  import { GENERATION_CONFIG_KEYS, GENERATION_CONFIG_SETTINGS } from "./generationConfigSettings";
 
12
 
13
  export let conversation: Conversation;
14
  export let classNames = "";
15
 
16
- const customMaxTokens: { [key: string]: number } = {
17
- "01-ai/Yi-1.5-34B-Chat": 2048,
18
- "HuggingFaceM4/idefics-9b-instruct": 2048,
19
- "deepseek-ai/DeepSeek-Coder-V2-Instruct": 16384,
20
- "bigcode/starcoder": 8192,
21
- "bigcode/starcoderplus": 8192,
22
- "HuggingFaceH4/starcoderbase-finetuned-oasst1": 8192,
23
- "google/gemma-7b": 8192,
24
- "google/gemma-1.1-7b-it": 8192,
25
- "google/gemma-2b": 8192,
26
- "google/gemma-1.1-2b-it": 8192,
27
- "google/gemma-2-27b-it": 8192,
28
- "google/gemma-2-9b-it": 4096,
29
- "google/gemma-2-2b-it": 8192,
30
- "tiiuae/falcon-7b": 8192,
31
- "tiiuae/falcon-7b-instruct": 8192,
32
- "timdettmers/guanaco-33b-merged": 2048,
33
- "mistralai/Mixtral-8x7B-Instruct-v0.1": 32768,
34
- "Qwen/Qwen2.5-72B-Instruct": 32768,
35
- "Qwen/Qwen2.5-Coder-32B-Instruct": 32768,
36
- "meta-llama/Meta-Llama-3-70B-Instruct": 8192,
37
- "CohereForAI/c4ai-command-r-plus-08-2024": 32768,
38
- "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": 32768,
39
- "meta-llama/Llama-2-70b-chat-hf": 8192,
40
- "HuggingFaceH4/zephyr-7b-alpha": 17432,
41
- "HuggingFaceH4/zephyr-7b-beta": 32768,
42
- "mistralai/Mistral-7B-Instruct-v0.1": 32768,
43
- "mistralai/Mistral-7B-Instruct-v0.2": 32768,
44
- "mistralai/Mistral-7B-Instruct-v0.3": 32768,
45
- "mistralai/Mistral-Nemo-Instruct-2407": 32768,
46
- "meta-llama/Meta-Llama-3-8B-Instruct": 8192,
47
- "mistralai/Mistral-7B-v0.1": 32768,
48
- "bigcode/starcoder2-3b": 16384,
49
- "bigcode/starcoder2-15b": 16384,
50
- "HuggingFaceH4/starchat2-15b-v0.1": 16384,
51
- "codellama/CodeLlama-7b-hf": 8192,
52
- "codellama/CodeLlama-13b-hf": 8192,
53
- "codellama/CodeLlama-34b-Instruct-hf": 8192,
54
- "meta-llama/Llama-2-7b-chat-hf": 8192,
55
- "meta-llama/Llama-2-13b-chat-hf": 8192,
56
- "OpenAssistant/oasst-sft-6-llama-30b": 2048,
57
- "TheBloke/vicuna-7B-v1.5-GPTQ": 2048,
58
- "HuggingFaceH4/starchat-beta": 8192,
59
- "bigcode/octocoder": 8192,
60
- "vwxyzjn/starcoderbase-triviaqa": 8192,
61
- "lvwerra/starcoderbase-gsm8k": 8192,
62
- "NousResearch/Hermes-3-Llama-3.1-8B": 16384,
63
- "microsoft/Phi-3.5-mini-instruct": 32768,
64
- "meta-llama/Llama-3.1-70B-Instruct": 32768,
65
- "meta-llama/Llama-3.1-8B-Instruct": 8192,
66
- } as const;
67
-
68
  $: modelMaxLength = customMaxTokens[conversation.model.id] ?? conversation.model.tokenizerConfig.model_max_length;
69
  $: maxTokens = Math.min(modelMaxLength ?? GENERATION_CONFIG_SETTINGS["max_tokens"].max, 64_000);
70
  </script>
 
 
1
  <script lang="ts">
2
+ import type { Conversation } from "$lib/types";
3
 
4
  import { GENERATION_CONFIG_KEYS, GENERATION_CONFIG_SETTINGS } from "./generationConfigSettings";
5
+ import { customMaxTokens } from "./inferencePlaygroundUtils";
6
 
7
  export let conversation: Conversation;
8
  export let classNames = "";
9
 
 
 
 
 
 
 
 
10
  $: modelMaxLength = customMaxTokens[conversation.model.id] ?? conversation.model.tokenizerConfig.model_max_length;
11
  $: maxTokens = Math.min(modelMaxLength ?? GENERATION_CONFIG_SETTINGS["max_tokens"].max, 64_000);
12
  </script>
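
The two reactive statements that survive this refactor clamp the `max_tokens` slider: the per-model override from `customMaxTokens` wins over the tokenizer's `model_max_length`, and everything is capped at 64k. As a plain function (only `customMaxTokens` comes from this PR; the rest is illustrative):

import { customMaxTokens } from "./inferencePlaygroundUtils";

function maxTokensFor(modelId: string, tokenizerMaxLength?: number, settingsMax = 64_000): number {
  const modelMaxLength = customMaxTokens[modelId] ?? tokenizerMaxLength;
  return Math.min(modelMaxLength ?? settingsMax, 64_000);
}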
src/lib/components/InferencePlayground/InferencePlaygroundMessage.svelte CHANGED
@@ -1,6 +1,6 @@
1
  <script lang="ts">
2
  import { createEventDispatcher } from "svelte";
3
- import type { ConversationMessage } from "./types";
4
 
5
  export let message: ConversationMessage;
6
  export let loading: boolean = false;
@@ -22,7 +22,7 @@
22
  {autofocus}
23
  bind:value={message.content}
24
  placeholder="Enter {message.role} message"
25
- class="resize-none overflow-hidden rounded-sm bg-transparent px-2 py-2.5 ring-gray-100 group-hover/message:ring-3 hover:resize-y hover:bg-white focus:resize-y focus:bg-white focus:ring-3 @2xl:px-3 dark:ring-gray-600 dark:hover:bg-gray-900 dark:focus:bg-gray-900"
26
  rows="1"
27
  tabindex="2"
28
  on:input={() => {
 
1
  <script lang="ts">
2
  import { createEventDispatcher } from "svelte";
3
+ import type { ConversationMessage } from "$lib/types";
4
 
5
  export let message: ConversationMessage;
6
  export let loading: boolean = false;
 
22
  {autofocus}
23
  bind:value={message.content}
24
  placeholder="Enter {message.role} message"
25
+ class="resize-none overflow-hidden rounded-sm bg-transparent px-2 py-2.5 ring-gray-100 outline-none group-hover/message:ring-3 hover:resize-y hover:bg-white focus:resize-y focus:bg-white focus:ring-3 @2xl:px-3 dark:ring-gray-600 dark:hover:bg-gray-900 dark:focus:bg-gray-900"
26
  rows="1"
27
  tabindex="2"
28
  on:input={() => {
src/lib/components/InferencePlayground/InferencePlaygroundModelSelector.svelte CHANGED
@@ -1,54 +1,40 @@
1
  <script lang="ts">
2
- import type { Conversation, ModelEntryWithTokenizer } from "./types";
3
-
4
- import { goto } from "$app/navigation";
5
- import { page } from "$app/stores";
6
 
 
 
7
  import IconCaret from "../Icons/IconCaret.svelte";
8
  import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
9
- import Avatar from "../Avatar.svelte";
10
- import { defaultSystemMessage } from "./InferencePlaygroundGenerationConfig.svelte";
11
 
12
- export let models: ModelEntryWithTokenizer[] = [];
13
  export let conversation: Conversation;
14
 
15
  let showModelPickerModal = false;
16
 
17
- function changeModel(modelId: ModelEntryWithTokenizer["id"]) {
18
- const model = models.find(m => m.id === modelId);
 
19
  if (!model) {
20
  return;
21
  }
22
  conversation.model = model;
23
  conversation.systemMessage = { role: "system", content: defaultSystemMessage?.[modelId] ?? "" };
24
-
25
- const url = new URL($page.url);
26
- url.searchParams.set("modelId", model.id);
27
-
28
- const parentOrigin = "https://huggingface.co";
29
- window.parent.postMessage({ queryString: `modelId=${model.id}` }, parentOrigin);
30
-
31
- goto(url.toString(), { replaceState: true });
32
  }
33
 
34
- $: [nameSpace, modelName] = conversation.model.id.split("/");
 
 
35
  </script>
36
 
37
- {#if showModelPickerModal}
38
- <ModelSelectorModal
39
- {models}
40
- {conversation}
41
- on:modelSelected={e => changeModel(e.detail)}
42
- on:close={() => (showModelPickerModal = false)}
43
- />
44
- {/if}
45
-
46
  <div class="flex flex-col gap-2">
47
- <label for="countries" class="flex items-baseline text-sm font-medium text-gray-900 dark:text-white"
48
- >Models<span class="ml-4 font-normal text-gray-400">{models.length}</span>
49
  </label>
50
 
51
  <button
 
52
  class="relative flex items-center justify-between gap-6 overflow-hidden rounded-lg border bg-gray-100/80 px-3 py-1.5 leading-tight whitespace-nowrap shadow-sm hover:brightness-95 dark:border-gray-700 dark:bg-gray-800 dark:hover:brightness-110"
53
  on:click={() => (showModelPickerModal = true)}
54
  >
@@ -62,3 +48,13 @@
62
  <IconCaret classNames="text-xl bg-gray-100 dark:bg-gray-600 rounded-sm size-4 flex-none absolute right-2" />
63
  </button>
64
  </div>
 
1
  <script lang="ts">
2
+ import type { Conversation, ModelWithTokenizer } from "$lib/types";
 
 
 
3
 
4
+ import { models } from "$lib/stores/models";
5
+ import Avatar from "../Avatar.svelte";
6
  import IconCaret from "../Icons/IconCaret.svelte";
7
  import ModelSelectorModal from "./InferencePlaygroundModelSelectorModal.svelte";
8
+ import ProviderSelect from "./InferencePlaygroundProviderSelect.svelte";
9
+ import { defaultSystemMessage } from "./inferencePlaygroundUtils";
10
 
 
11
  export let conversation: Conversation;
12
 
13
  let showModelPickerModal = false;
14
 
15
+ // Model
16
+ function changeModel(modelId: ModelWithTokenizer["id"]) {
17
+ const model = $models.find(m => m.id === modelId);
18
  if (!model) {
19
  return;
20
  }
21
  conversation.model = model;
22
  conversation.systemMessage = { role: "system", content: defaultSystemMessage?.[modelId] ?? "" };
23
+ conversation.provider = undefined;
 
 
 
 
 
 
 
24
  }
25
 
26
+ $: nameSpace = conversation.model.id.split("/")[0] ?? "";
27
+ $: modelName = conversation.model.id.split("/")[1] ?? "";
28
+ const id = crypto.randomUUID();
29
  </script>
30
 
 
 
 
 
 
 
 
 
 
31
  <div class="flex flex-col gap-2">
32
+ <label for={id} class="flex items-baseline gap-2 text-sm font-medium text-gray-900 dark:text-white">
33
+ Models<span class="text-xs font-normal text-gray-400">{$models.length}</span>
34
  </label>
35
 
36
  <button
37
+ {id}
38
  class="relative flex items-center justify-between gap-6 overflow-hidden rounded-lg border bg-gray-100/80 px-3 py-1.5 leading-tight whitespace-nowrap shadow-sm hover:brightness-95 dark:border-gray-700 dark:bg-gray-800 dark:hover:brightness-110"
39
  on:click={() => (showModelPickerModal = true)}
40
  >
 
48
  <IconCaret classNames="text-xl bg-gray-100 dark:bg-gray-600 rounded-sm size-4 flex-none absolute right-2" />
49
  </button>
50
  </div>
51
+
52
+ {#if showModelPickerModal}
53
+ <ModelSelectorModal
54
+ {conversation}
55
+ on:modelSelected={e => changeModel(e.detail)}
56
+ on:close={() => (showModelPickerModal = false)}
57
+ />
58
+ {/if}
59
+
60
+ <ProviderSelect bind:conversation />
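
The generated `id` replaces the stale `for="countries"` association: `crypto.randomUUID()` gives each selector instance a unique value, so the label stays correctly linked even when two selectors render side by side in compare mode.

// One id per component instance; `for={id}` on the <label>, `{id}` on the button.
const id = crypto.randomUUID();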
src/lib/components/InferencePlayground/InferencePlaygroundModelSelectorModal.svelte CHANGED
@@ -1,30 +1,36 @@
1
  <script lang="ts">
2
- import type { Conversation, ModelEntryWithTokenizer } from "./types";
3
 
4
- import { createEventDispatcher, tick } from "svelte";
5
 
6
- import { FEATURED_MODELS_IDS } from "./inferencePlaygroundUtils";
7
  import IconSearch from "../Icons/IconSearch.svelte";
8
  import IconStar from "../Icons/IconStar.svelte";
 
 
9
 
10
- export let models: ModelEntryWithTokenizer[];
11
  export let conversation: Conversation;
12
 
13
  let backdropEl: HTMLDivElement;
14
  let highlightIdx = 0;
15
  let ignoreCursorHighlight = false;
16
  let containerEl: HTMLDivElement;
 
17
 
18
  const dispatch = createEventDispatcher<{ modelSelected: string; close: void }>();
19
 
20
- let featuredModels = models.filter(m => FEATURED_MODELS_IDS.includes(m.id));
21
- let otherModels = models.filter(m => !FEATURED_MODELS_IDS.includes(m.id));
22
 
23
- if (featuredModels.findIndex(model => model.id === conversation.model.id) !== -1) {
24
- highlightIdx = featuredModels.findIndex(model => model.id === conversation.model.id);
25
- } else {
26
- highlightIdx = featuredModels.length + otherModels.findIndex(model => model.id === conversation.model.id);
27
- }
 
 
 
 
 
28
 
29
  function handleKeydown(event: KeyboardEvent) {
30
  const { key } = event;
@@ -79,20 +85,6 @@
79
  dispatch("close");
80
  }
81
  }
82
-
83
- function filterModels(query: string) {
84
- featuredModels = models.filter(m =>
85
- query
86
- ? FEATURED_MODELS_IDS.includes(m.id) && m.id.toLocaleLowerCase().includes(query.toLocaleLowerCase().trim())
87
- : FEATURED_MODELS_IDS.includes(m.id)
88
- );
89
-
90
- otherModels = models.filter(m =>
91
- query
92
- ? !FEATURED_MODELS_IDS.includes(m.id) && m.id.toLocaleLowerCase().includes(query.toLocaleLowerCase().trim())
93
- : !FEATURED_MODELS_IDS.includes(m.id)
94
- );
95
- }
96
  </script>
97
 
98
  <svelte:window on:keydown={handleKeydown} on:mousemove={() => (ignoreCursorHighlight = false)} />
@@ -115,7 +107,7 @@
115
  autofocus
116
  class="flex h-10 w-full rounded-md bg-transparent py-3 text-sm placeholder-gray-400 outline-hidden"
117
  placeholder="Search models ..."
118
- on:input={e => filterModels(e.currentTarget.value)}
119
  />
120
  </div>
121
  <div class="max-h-[300px] overflow-x-hidden overflow-y-auto">
 
1
  <script lang="ts">
2
+ import type { Conversation } from "$lib/types";
3
 
4
+ import { createEventDispatcher, onMount, tick } from "svelte";
5
 
6
+ import { models } from "$lib/stores/models";
7
  import IconSearch from "../Icons/IconSearch.svelte";
8
  import IconStar from "../Icons/IconStar.svelte";
9
+ import { getTrending } from "$lib/utils/model";
10
+ import fuzzysearch from "$lib/utils/search";
11
 
 
12
  export let conversation: Conversation;
13
 
14
  let backdropEl: HTMLDivElement;
15
  let highlightIdx = 0;
16
  let ignoreCursorHighlight = false;
17
  let containerEl: HTMLDivElement;
18
+ let query = "";
19
 
20
  const dispatch = createEventDispatcher<{ modelSelected: string; close: void }>();
21
 
22
+ $: trendingModels = getTrending($models);
 
23
 
24
+ $: featuredModels = fuzzysearch({ needle: query, haystack: trendingModels, property: "id" });
25
+ $: otherModels = fuzzysearch({ needle: query, haystack: $models, property: "id" });
26
+
27
+ onMount(() => {
28
+ if (featuredModels.findIndex(model => model.id === conversation.model.id) !== -1) {
29
+ highlightIdx = featuredModels.findIndex(model => model.id === conversation.model.id);
30
+ } else {
31
+ highlightIdx = featuredModels.length + otherModels.findIndex(model => model.id === conversation.model.id);
32
+ }
33
+ });
34
 
35
  function handleKeydown(event: KeyboardEvent) {
36
  const { key } = event;
 
85
  dispatch("close");
86
  }
87
  }
 
 
 
 
88
  </script>
89
 
90
  <svelte:window on:keydown={handleKeydown} on:mousemove={() => (ignoreCursorHighlight = false)} />
 
107
  autofocus
108
  class="flex h-10 w-full rounded-md bg-transparent py-3 text-sm placeholder-gray-400 outline-hidden"
109
  placeholder="Search models ..."
110
+ bind:value={query}
111
  />
112
  </div>
113
  <div class="max-h-[300px] overflow-x-hidden overflow-y-auto">
src/lib/components/InferencePlayground/InferencePlaygroundProviderSelect.svelte ADDED
@@ -0,0 +1,102 @@
1
+ <script lang="ts">
2
+ import type { Conversation } from "$lib/types";
3
+
4
+ import { randomPick } from "$lib/utils/array";
5
+ import { cn } from "$lib/utils/cn";
6
+ import { createSelect, createSync } from "@melt-ui/svelte";
7
+ import IconCaret from "../Icons/IconCaret.svelte";
8
+ import IconProvider from "../Icons/IconProvider.svelte";
9
+
10
+ export let conversation: Conversation;
11
+ let classes: string | undefined = undefined;
12
+ export { classes as class };
13
+
14
+ function reset(providers: typeof conversation.model.inferenceProviderMapping) {
15
+ const validProvider = providers.find(p => p.provider === conversation.provider);
16
+ if (validProvider) return;
17
+ conversation.provider = randomPick(providers)?.provider;
18
+ }
19
+
20
+ $: providers = conversation.model.inferenceProviderMapping;
21
+ $: reset(providers);
22
+
23
+ const {
24
+ elements: { trigger, menu, option },
25
+ states: { selected },
26
+ } = createSelect<string, false>();
27
+ const sync = createSync({ selected });
28
+ $: sync.selected(
29
+ conversation.provider ? { value: conversation.provider } : undefined,
30
+ p => (conversation.provider = p?.value)
31
+ );
32
+
33
+ const nameMap: Record<string, string> = {
34
+ "sambanova": "SambaNova",
35
+ "fal": "fal",
36
+ "cerebras": "Cerebras",
37
+ "replicate": "Replicate",
38
+ "black-forest-labs": "Black Forest Labs",
39
+ "fireworks-ai": "Fireworks",
40
+ "together": "Together AI",
41
+ "nebius": "Nebius AI Studio",
42
+ "hyperbolic": "Hyperbolic",
43
+ "novita": "Novita",
44
+ "cohere": "Nohere",
45
+ "hf-inference": "HF Inference API",
46
+ };
47
+ const UPPERCASE_WORDS = ["hf", "ai"];
48
+
49
+ function formatName(provider: string) {
50
+ if (provider in nameMap) return nameMap[provider];
51
+
52
+ const words = provider
53
+ .toLowerCase()
54
+ .split("-")
55
+ .map(word => {
56
+ if (UPPERCASE_WORDS.includes(word)) {
57
+ return word.toUpperCase();
58
+ } else {
59
+ return word.charAt(0).toUpperCase() + word.slice(1).toLowerCase();
60
+ }
61
+ });
62
+
63
+ return words.join(" ");
64
+ }
65
+ </script>
66
+
67
+ <div class="flex flex-col gap-2">
68
+ <!--
69
+ <label class="flex items-baseline gap-2 text-sm font-medium text-gray-900 dark:text-white">
70
+ Providers<span class="text-xs font-normal text-gray-400"></span>
71
+ </label>
72
+ -->
73
+
74
+ <button
75
+ {...$trigger}
76
+ use:trigger
77
+ class={cn(
78
+ "relative flex items-center justify-between gap-6 overflow-hidden rounded-lg border bg-gray-100/80 px-3 py-1.5 leading-tight whitespace-nowrap shadow-sm",
79
+ "hover:brightness-95 dark:border-gray-700 dark:bg-gray-800 dark:hover:brightness-110",
80
+ classes
81
+ )}
82
+ >
83
+ <div class="flex items-center gap-1 text-sm text-gray-500 dark:text-gray-300">
84
+ <IconProvider provider={conversation.provider} />
85
+ {formatName(conversation.provider ?? "") ?? "loading"}
86
+ </div>
87
+ <IconCaret classNames="text-xl bg-gray-100 dark:bg-gray-600 rounded-sm size-4 flex-none absolute right-2" />
88
+ </button>
89
+
90
+ <div {...$menu} use:menu class="rounded-lg border bg-gray-100/80 dark:border-gray-700 dark:bg-gray-800">
91
+ {#each conversation.model.inferenceProviderMapping as { provider } (provider)}
92
+ <button {...$option({ value: provider })} use:option class="group block w-full p-1 text-sm dark:text-white">
93
+ <div
94
+ class="flex items-center gap-2 rounded-md px-2 py-1.5 group-data-[highlighted]:bg-gray-200 dark:group-data-[highlighted]:bg-gray-700"
95
+ >
96
+ <IconProvider {provider} />
97
+ {formatName(provider)}
98
+ </div>
99
+ </button>
100
+ {/each}
101
+ </div>
102
+ </div>
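
The component is headless via melt-ui: `createSelect` returns `trigger`/`menu`/`option` element builders, and `createSync` bridges melt's internal `selected` store to `conversation.provider` in both directions. The bridge reduced to its core (in the component this runs under a `$:` statement so it re-fires on every provider change):

import { createSelect, createSync } from "@melt-ui/svelte";

const {
  elements: { trigger, menu, option },
  states: { selected },
} = createSelect<string, false>(); // single select over provider ids

const sync = createSync({ selected });

let provider: string | undefined;
// Push the app value into melt, and write melt's picks back out.
sync.selected(provider ? { value: provider } : undefined, p => (provider = p?.value));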
src/lib/components/InferencePlayground/inferencePlaygroundUtils.ts CHANGED
@@ -1,11 +1,8 @@
 
 
1
  import { type ChatCompletionOutputMessage } from "@huggingface/tasks";
2
- import type { Conversation, ModelEntryWithTokenizer } from "./types";
3
 
4
- import { HfInference } from "@huggingface/inference";
5
-
6
- export function createHfInference(token: string): HfInference {
7
- return new HfInference(token);
8
- }
9
 
10
  export async function handleStreamingResponse(
11
  hf: HfInference,
@@ -23,6 +20,7 @@ export async function handleStreamingResponse(
23
  {
24
  model: model.id,
25
  messages,
 
26
  ...conversation.config,
27
  },
28
  { signal: abortController.signal }
@@ -47,25 +45,118 @@ export async function handleNonStreamingResponse(
47
  const response = await hf.chatCompletion({
48
  model: model.id,
49
  messages,
 
50
  ...conversation.config,
51
  });
52
 
53
  if (response.choices && response.choices.length > 0) {
54
- const { message } = response.choices[0];
 
55
  const { completion_tokens } = response.usage;
56
  return { message, completion_tokens };
57
  }
58
  throw new Error("No response from the model");
59
  }
60
 
61
- export function isSystemPromptSupported(model: ModelEntryWithTokenizer) {
62
- return model.tokenizerConfig?.chat_template?.includes("system");
63
  }
64
 
65
- export const FEATURED_MODELS_IDS = [
66
- "meta-llama/Llama-3.3-70B-Instruct",
67
- "meta-llama/Llama-3.1-8B-Instruct",
68
- "meta-llama/Llama-3.2-3B-Instruct",
69
- "Qwen/Qwen2.5-72B-Instruct",
70
- "Qwen/QwQ-32B-Preview",
71
- ];
 
 
1
+ import type { Conversation, ModelWithTokenizer } from "$lib/types";
2
+ import type { InferenceSnippet } from "@huggingface/tasks";
3
  import { type ChatCompletionOutputMessage } from "@huggingface/tasks";
 
4
 
5
+ import { HfInference, snippets, type InferenceProvider } from "@huggingface/inference";
 
 
 
 
6
 
7
  export async function handleStreamingResponse(
8
  hf: HfInference,
 
20
  {
21
  model: model.id,
22
  messages,
23
+ provider: conversation.provider,
24
  ...conversation.config,
25
  },
26
  { signal: abortController.signal }
 
45
  const response = await hf.chatCompletion({
46
  model: model.id,
47
  messages,
48
+ provider: conversation.provider,
49
  ...conversation.config,
50
  });
51
 
52
  if (response.choices && response.choices.length > 0) {
53
+ // eslint-disable-next-line @typescript-eslint/no-non-null-assertion
54
+ const { message } = response.choices[0]!;
55
  const { completion_tokens } = response.usage;
56
  return { message, completion_tokens };
57
  }
58
  throw new Error("No response from the model");
59
  }
60
 
61
+ export function isSystemPromptSupported(model: ModelWithTokenizer) {
62
+ return model?.tokenizerConfig?.chat_template?.includes("system");
63
  }
64
 
65
+ export const defaultSystemMessage: { [key: string]: string } = {
66
+ "Qwen/QwQ-32B-Preview":
67
+ "You are a helpful and harmless assistant. You are Qwen developed by Alibaba. You should think step-by-step.",
68
+ } as const;
69
+
70
+ export const customMaxTokens: { [key: string]: number } = {
71
+ "01-ai/Yi-1.5-34B-Chat": 2048,
72
+ "HuggingFaceM4/idefics-9b-instruct": 2048,
73
+ "deepseek-ai/DeepSeek-Coder-V2-Instruct": 16384,
74
+ "bigcode/starcoder": 8192,
75
+ "bigcode/starcoderplus": 8192,
76
+ "HuggingFaceH4/starcoderbase-finetuned-oasst1": 8192,
77
+ "google/gemma-7b": 8192,
78
+ "google/gemma-1.1-7b-it": 8192,
79
+ "google/gemma-2b": 8192,
80
+ "google/gemma-1.1-2b-it": 8192,
81
+ "google/gemma-2-27b-it": 8192,
82
+ "google/gemma-2-9b-it": 4096,
83
+ "google/gemma-2-2b-it": 8192,
84
+ "tiiuae/falcon-7b": 8192,
85
+ "tiiuae/falcon-7b-instruct": 8192,
86
+ "timdettmers/guanaco-33b-merged": 2048,
87
+ "mistralai/Mixtral-8x7B-Instruct-v0.1": 32768,
88
+ "Qwen/Qwen2.5-72B-Instruct": 32768,
89
+ "Qwen/Qwen2.5-Coder-32B-Instruct": 32768,
90
+ "meta-llama/Meta-Llama-3-70B-Instruct": 8192,
91
+ "CohereForAI/c4ai-command-r-plus-08-2024": 32768,
92
+ "NousResearch/Nous-Hermes-2-Mixtral-8x7B-DPO": 32768,
93
+ "meta-llama/Llama-2-70b-chat-hf": 8192,
94
+ "HuggingFaceH4/zephyr-7b-alpha": 17432,
95
+ "HuggingFaceH4/zephyr-7b-beta": 32768,
96
+ "mistralai/Mistral-7B-Instruct-v0.1": 32768,
97
+ "mistralai/Mistral-7B-Instruct-v0.2": 32768,
98
+ "mistralai/Mistral-7B-Instruct-v0.3": 32768,
99
+ "mistralai/Mistral-Nemo-Instruct-2407": 32768,
100
+ "meta-llama/Meta-Llama-3-8B-Instruct": 8192,
101
+ "mistralai/Mistral-7B-v0.1": 32768,
102
+ "bigcode/starcoder2-3b": 16384,
103
+ "bigcode/starcoder2-15b": 16384,
104
+ "HuggingFaceH4/starchat2-15b-v0.1": 16384,
105
+ "codellama/CodeLlama-7b-hf": 8192,
106
+ "codellama/CodeLlama-13b-hf": 8192,
107
+ "codellama/CodeLlama-34b-Instruct-hf": 8192,
108
+ "meta-llama/Llama-2-7b-chat-hf": 8192,
109
+ "meta-llama/Llama-2-13b-chat-hf": 8192,
110
+ "OpenAssistant/oasst-sft-6-llama-30b": 2048,
111
+ "TheBloke/vicuna-7B-v1.5-GPTQ": 2048,
112
+ "HuggingFaceH4/starchat-beta": 8192,
113
+ "bigcode/octocoder": 8192,
114
+ "vwxyzjn/starcoderbase-triviaqa": 8192,
115
+ "lvwerra/starcoderbase-gsm8k": 8192,
116
+ "NousResearch/Hermes-3-Llama-3.1-8B": 16384,
117
+ "microsoft/Phi-3.5-mini-instruct": 32768,
118
+ "meta-llama/Llama-3.1-70B-Instruct": 32768,
119
+ "meta-llama/Llama-3.1-8B-Instruct": 8192,
120
+ } as const;
121
+
122
+ // Order of the elements in InferenceModal.svelte is determined by this const
123
+ export const inferenceSnippetLanguages = ["python", "js", "curl"] as const;
124
+
125
+ export type InferenceSnippetLanguage = (typeof inferenceSnippetLanguages)[number];
126
+
127
+ const GET_SNIPPET_FN = {
128
+ curl: snippets.curl.getCurlInferenceSnippet,
129
+ js: snippets.js.getJsInferenceSnippet,
130
+ python: snippets.python.getPythonInferenceSnippet,
131
+ } as const;
132
+
133
+ export type GetInferenceSnippetReturn = (InferenceSnippet & { language: InferenceSnippetLanguage })[];
134
+
135
+ export function getInferenceSnippet(
136
+ model: ModelWithTokenizer,
137
+ provider: InferenceProvider,
138
+ language: InferenceSnippetLanguage,
139
+ accessToken: string,
140
+ opts?: Record<string, unknown>
141
+ ): GetInferenceSnippetReturn {
142
+ const providerId = model.inferenceProviderMapping.find(p => p.provider === provider)?.providerId;
143
+ const snippetsByClient = GET_SNIPPET_FN[language](
144
+ { ...model, inference: "" },
145
+ accessToken,
146
+ provider,
147
+ providerId,
148
+ opts
149
+ );
150
+ return snippetsByClient.map(snippetByClient => ({ ...snippetByClient, language }));
151
+ }
152
+
153
+ /**
154
+ * - If language is defined, the function checks whether an inference snippet is available for that specific language
155
+ */
156
+ export function hasInferenceSnippet(
157
+ model: ModelWithTokenizer,
158
+ provider: InferenceProvider,
159
+ language: InferenceSnippetLanguage
160
+ ): boolean {
161
+ return getInferenceSnippet(model, provider, language, "").length > 0;
162
+ }
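
`getInferenceSnippet` resolves the provider-specific `providerId` from `model.inferenceProviderMapping` before delegating to the snippet generators shipped with `@huggingface/inference`, and `hasInferenceSnippet` probes availability with an empty token. A hedged usage sketch (the provider and options are illustrative; `model` comes from the app's stores):

import { getInferenceSnippet, hasInferenceSnippet } from "./inferencePlaygroundUtils";

declare const model: import("$lib/types").ModelWithTokenizer;

if (hasInferenceSnippet(model, "together", "python")) {
  const snippets = getInferenceSnippet(model, "together", "python", "YOUR_HF_TOKEN", {
    messages: [{ role: "user", content: "Tell me a story" }],
    streaming: true,
  });
  console.log(snippets[0]?.content); // ready-to-paste code, one entry per client library
}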
src/lib/components/InferencePlayground/types.ts DELETED
@@ -1,26 +0,0 @@
- import type { GenerationConfig } from "$lib/components/InferencePlayground/generationConfigSettings";
- import type { ModelEntry } from "@huggingface/hub";
- import type { ChatCompletionInputMessage } from "@huggingface/tasks";
-
- export type ConversationMessage = Omit<ChatCompletionInputMessage, "content"> & { content?: string };
-
- export type Conversation = {
-   model: ModelEntryWithTokenizer;
-   config: GenerationConfig;
-   messages: ConversationMessage[];
-   systemMessage: ConversationMessage;
-   streaming: boolean;
- };
-
- export type Session = {
-   conversations: [Conversation] | [Conversation, Conversation];
- };
-
- interface TokenizerConfig {
-   chat_template?: string;
-   model_max_length?: number;
- }
-
- export interface ModelEntryWithTokenizer extends ModelEntry {
-   tokenizerConfig: TokenizerConfig;
- }
src/lib/index.ts DELETED
@@ -1 +0,0 @@
- // place files you want to import through the `$lib` alias in this folder.
src/lib/stores/models.ts ADDED
@@ -0,0 +1,8 @@
+ import { page } from "$app/stores";
+ import type { ModelWithTokenizer } from "$lib/types";
+ import { readable } from "svelte/store";
+
+ export const models = readable<ModelWithTokenizer[]>(undefined, set => {
+   const unsub = page.subscribe($p => set($p.data.models));
+   return unsub;
+ });
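The store simply mirrors `page.data.models`. A minimal sketch of consuming it imperatively, assuming the server load below has populated page data:

import { get } from "svelte/store";
import { models } from "$lib/stores/models";

const first = get(models)?.[0]; // undefined until +page.server.ts has provided data
console.log(first?.id);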
src/lib/stores/session.ts ADDED
@@ -0,0 +1,103 @@
+ import { browser } from "$app/environment";
+ import { goto } from "$app/navigation";
+ import { defaultGenerationConfig } from "$lib/components/InferencePlayground/generationConfigSettings";
+ import { defaultSystemMessage } from "$lib/components/InferencePlayground/inferencePlaygroundUtils";
+ import { PipelineTag, type Conversation, type ConversationMessage, type Session } from "$lib/types";
+
+ import { models } from "$lib/stores/models";
+ import { get, writable } from "svelte/store";
+ import { getTrending } from "$lib/utils/model";
+
+ function createSessionStore() {
+   const store = writable<Session>(undefined, (set, update) => {
+     const searchParams = new URLSearchParams(browser ? window.location.search : undefined);
+
+     const modelIdsFromSearchParam = searchParams.getAll("modelId");
+     const modelsFromSearchParam = modelIdsFromSearchParam?.map(id => get(models).find(model => model.id === id));
+
+     const providersFromSearchParam = searchParams.getAll("provider");
+
+     const startMessageUser: ConversationMessage = { role: "user", content: "" };
+     const systemMessage: ConversationMessage = {
+       role: "system",
+       content: modelIdsFromSearchParam?.[0] ? (defaultSystemMessage?.[modelIdsFromSearchParam[0]] ?? "") : "",
+     };
+
+     const $models = get(models);
+     const featured = getTrending($models);
+
+     set({
+       conversations: [
+         {
+           model: featured[0] ??
+             $models[0] ?? {
+               _id: "",
+               inferenceProviderMapping: [],
+               pipeline_tag: PipelineTag.TextGeneration,
+               trendingScore: 0,
+               tags: ["text-generation"],
+               id: "",
+               tokenizerConfig: {},
+               config: {
+                 architectures: [] as string[],
+                 model_type: "",
+                 tokenizer_config: {},
+               },
+             },
+           config: { ...defaultGenerationConfig },
+           messages: [{ ...startMessageUser }],
+           systemMessage,
+           streaming: true,
+         },
+       ],
+     });
+
+     if (modelsFromSearchParam?.length) {
+       const conversations = modelsFromSearchParam.map((model, i) => {
+         return {
+           model,
+           config: { ...defaultGenerationConfig },
+           messages: [{ ...startMessageUser }],
+           systemMessage,
+           streaming: true,
+           provider: providersFromSearchParam?.[i],
+         };
+       }) as [Conversation] | [Conversation, Conversation];
+       update(s => ({ ...s, conversations }));
+     }
+   });
+
+   const update: typeof store.update = cb => {
+     const prevQuery = window.location.search;
+     const query = new URLSearchParams(window.location.search);
+     query.delete("modelId");
+     query.delete("provider");
+
+     store.update($s => {
+       const s = cb($s);
+
+       const modelIds = s.conversations.map(c => c.model.id);
+       modelIds.forEach(m => query.append("modelId", m));
+
+       const providers = s.conversations.map(c => c.provider ?? "hf-inference");
+       providers.forEach(p => query.append("provider", p));
+
+       const newQuery = query.toString();
+       // slice to remove the ? prefix
+       if (newQuery !== prevQuery.slice(1)) {
+         console.log(prevQuery, newQuery);
+         goto(`?${query}`, { replaceState: true });
+       }
+
+       return s;
+     });
+   };
+
+   const set: typeof store.set = (...args) => {
+     update(_ => args[0]);
+   };
+
+   return { ...store, set, update };
+ }
+
+ export const session = createSessionStore();
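A sketch of how a consumer might switch the first conversation's provider; the store's custom `update` then mirrors the change into the `modelId`/`provider` search params via `goto(..., { replaceState: true })`. The tuple cast is only there to satisfy the `Session` type and is an assumption about intended usage, not code from this diff:

import { session } from "$lib/stores/session";

session.update(s => {
  const [first, ...rest] = s.conversations;
  const conversations = [{ ...first, provider: "together" }, ...rest] as typeof s.conversations;
  return { ...s, conversations }; // URL becomes ?modelId=…&provider=together
});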
src/lib/stores/token.ts ADDED
@@ -0,0 +1,34 @@
+ import { browser } from "$app/environment";
+ import { writable } from "svelte/store";
+
+ const key = "hf_token";
+
+ function createTokenStore() {
+   const store = writable({ value: "", writeToLocalStorage: true, showModal: false });
+
+   function setValue(token: string) {
+     store.update(s => {
+       if (s.writeToLocalStorage) localStorage.setItem(key, JSON.stringify(token));
+       return { ...s, value: token, showModal: !token.length };
+     });
+   }
+
+   if (browser) {
+     const storedHfToken = localStorage.getItem(key);
+     if (storedHfToken !== null) {
+       setValue(JSON.parse(storedHfToken));
+     }
+   }
+
+   return {
+     ...store,
+     setValue,
+     reset() {
+       setValue("");
+       localStorage.removeItem(key);
+       store.update(s => ({ ...s, showModal: true }));
+     },
+   };
+ }
+
+ export const token = createTokenStore();
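A usage sketch ("hf_xxx" is a placeholder, not a real token); both calls are browser-only since they touch localStorage:

import { token } from "$lib/stores/token";

token.setValue("hf_xxx"); // persists under the "hf_token" key and hides the modal
token.reset();            // clears the stored value and re-opens the modal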
src/lib/types.ts ADDED
@@ -0,0 +1,158 @@
+ import type { GenerationConfig } from "$lib/components/InferencePlayground/generationConfigSettings";
+ import type { ChatCompletionInputMessage } from "@huggingface/tasks";
+
+ export type ConversationMessage = Omit<ChatCompletionInputMessage, "content"> & { content?: string };
+
+ export type Conversation = {
+   model: ModelWithTokenizer;
+   config: GenerationConfig;
+   messages: ConversationMessage[];
+   systemMessage: ConversationMessage;
+   streaming: boolean;
+   provider?: string;
+ };
+
+ export type Session = {
+   conversations: [Conversation] | [Conversation, Conversation];
+ };
+
+ interface TokenizerConfig {
+   chat_template?: string;
+   model_max_length?: number;
+ }
+
+ export type ModelWithTokenizer = Model & {
+   tokenizerConfig: TokenizerConfig;
+ };
+
+ export type Model = {
+   _id: string;
+   id: string;
+   inferenceProviderMapping: InferenceProviderMapping[];
+   trendingScore: number;
+   config: Config;
+   tags: string[];
+   pipeline_tag: PipelineTag;
+   library_name?: LibraryName;
+ };
+
+ export type Config = {
+   architectures: string[];
+   model_type: string;
+   tokenizer_config: TokenizerConfig;
+   auto_map?: AutoMap;
+   quantization_config?: QuantizationConfig;
+ };
+
+ export type AutoMap = {
+   AutoConfig: string;
+   AutoModel?: string;
+   AutoModelForCausalLM: string;
+   AutoModelForSequenceClassification?: string;
+   AutoModelForTokenClassification?: string;
+   AutoModelForQuestionAnswering?: string;
+ };
+
+ export type QuantizationConfig = {
+   quant_method: string;
+   bits?: number;
+ };
+
+ // export type TokenizerConfig = {
+ //   bos_token?: Token | BosTokenEnum | null;
+ //   chat_template: ChatTemplateElement[] | string;
+ //   eos_token: Token | EOSTokenEnum;
+ //   pad_token?: Token | null | string;
+ //   unk_token?: Token | UnkTokenEnum | null;
+ //   use_default_system_prompt?: boolean;
+ // };
+
+ export type Token = {
+   __type: Type;
+   content: Content;
+   lstrip: boolean;
+   normalized: boolean;
+   rstrip: boolean;
+   single_word: boolean;
+ };
+
+ export enum Type {
+   AddedToken = "AddedToken",
+ }
+
+ export enum Content {
+   BeginOfSentence = "<|begin▁of▁sentence|>",
+   ContentS = "</s>",
+   EndOfSentence = "<|end▁of▁sentence|>",
+   S = "<s>",
+   Unk = "<unk>",
+ }
+
+ export enum BosTokenEnum {
+   BeginOfText = "<|begin_of_text|>",
+   Bos = "<bos>",
+   BosToken = "<BOS_TOKEN>",
+   Endoftext = "<|endoftext|>",
+   IMStart = "<|im_start|>",
+   S = "<s>",
+   Startoftext = "<|startoftext|>",
+ }
+
+ export type ChatTemplateElement = {
+   name: string;
+   template: string;
+ };
+
+ export enum EOSTokenEnum {
+   EOS = "<eos>",
+   EndOfText = "<|end_of_text|>",
+   EndOfTurnToken = "<|END_OF_TURN_TOKEN|>",
+   Endoftext = "<|endoftext|>",
+   EotID = "<|eot_id|>",
+   IMEnd = "<|im_end|>",
+   S = "</s>",
+ }
+
+ export enum UnkTokenEnum {
+   Endoftext = "<|endoftext|>",
+   Unk = "<unk>",
+ }
+
+ export type InferenceProviderMapping = {
+   provider: Provider;
+   providerId: string;
+   status: Status;
+   task: Task;
+ };
+
+ export enum Provider {
+   Cerebras = "cerebras",
+   FalAI = "fal-ai",
+   FireworksAI = "fireworks-ai",
+   HFInference = "hf-inference",
+   Hyperbolic = "hyperbolic",
+   Nebius = "nebius",
+   Novita = "novita",
+   Replicate = "replicate",
+   Sambanova = "sambanova",
+   Together = "together",
+ }
+
+ export enum Status {
+   Live = "live",
+   Staging = "staging",
+ }
+
+ export enum Task {
+   Conversational = "conversational",
+ }
+
+ export enum LibraryName {
+   Mlx = "mlx",
+   Transformers = "transformers",
+   Vllm = "vllm",
+ }
+
+ export enum PipelineTag {
+   TextGeneration = "text-generation",
+ }
src/lib/utils/array.ts ADDED
@@ -0,0 +1,7 @@
+ export function last<T>(arr: T[]): T | undefined {
+   return arr[arr.length - 1];
+ }
+
+ export function randomPick<T>(arr: T[]): T | undefined {
+   return arr[Math.floor(Math.random() * arr.length)];
+ }
src/lib/utils/cn.ts ADDED
@@ -0,0 +1,6 @@
+ import { clsx, type ClassValue } from "clsx";
+ import { twMerge } from "tailwind-merge";
+
+ export function cn(...inputs: ClassValue[]) {
+   return twMerge(clsx(inputs));
+ }
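A small illustration of why both libraries are combined: clsx resolves conditionals, tailwind-merge drops the earlier of two conflicting utilities. `isLarge` is a hypothetical flag:

import { cn } from "$lib/utils/cn";

const isLarge = true; // hypothetical flag
cn("p-2 text-sm", isLarge && "p-4");
// isLarge === true  => "text-sm p-4" (p-4 wins over p-2)
// isLarge === false => "p-2 text-sm"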
src/lib/utils/effect.ts ADDED
@@ -0,0 +1,51 @@
+ import type { Stores, StoresValues } from "svelte/store";
+ import { derived } from "svelte/store";
+ import { safeOnDestroy } from "./lifecycle";
+ import { noop } from "./noop";
+
+ type EffectOptions = {
+   /**
+    * Whether to skip the first run
+    * @default undefined
+    */
+   skipFirstRun?: boolean;
+ };
+
+ /**
+  * A utility function that creates an effect from a set of stores and a function.
+  * The effect is automatically cleaned up when the component is destroyed.
+  *
+  * @template S - The type of the stores object
+  * @param stores - The stores object to derive from
+  * @param fn - The function to run when the stores change
+  * @param opts {@link EffectOptions}
+  * @returns A function that can be used to unsubscribe the effect
+  */
+ export function effect<S extends Stores>(
+   stores: S,
+   fn: (values: StoresValues<S>) => (() => void) | void,
+   opts: EffectOptions = {}
+ ): () => void {
+   const { skipFirstRun } = opts;
+   let isFirstRun = true;
+   let cb: (() => void) | void = undefined;
+
+   // Create a derived store that contains the stores object and an onUnsubscribe function
+   const destroy = derived(stores, stores => {
+     cb?.();
+     if (isFirstRun && skipFirstRun) {
+       isFirstRun = false;
+     } else {
+       cb = fn(stores);
+     }
+   }).subscribe(noop);
+
+   const unsub = () => {
+     destroy();
+     cb?.();
+   };
+
+   // Automatically unsubscribe the effect when the component is destroyed
+   safeOnDestroy(unsub);
+   return unsub;
+ }
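A sketch of the cleanup semantics, using two hypothetical writable stores — the function returned from the callback runs before each re-run and again on teardown:

import { writable } from "svelte/store";
import { effect } from "$lib/utils/effect";

const count = writable(0);
const name = writable("a");

const stop = effect([count, name], ([$count, $name]) => {
  const id = setTimeout(() => console.log($count, $name), 100);
  return () => clearTimeout(id); // runs before the next invocation and on unsubscribe
});

count.set(1); // previous timeout is cleared, effect re-runs
stop();       // final cleanup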
src/lib/utils/lifecycle.ts ADDED
@@ -0,0 +1,17 @@
+ import { onDestroy, onMount } from "svelte";
+
+ export const safeOnMount = (fn: (...args: unknown[]) => unknown) => {
+   try {
+     onMount(fn);
+   } catch {
+     return fn;
+   }
+ };
+
+ export const safeOnDestroy = (fn: (...args: unknown[]) => unknown) => {
+   try {
+     onDestroy(fn);
+   } catch {
+     return fn;
+   }
+ };
src/lib/utils/model.ts ADDED
@@ -0,0 +1,5 @@
+ import type { ModelWithTokenizer } from "$lib/types";
+
+ export function getTrending(models: ModelWithTokenizer[], limit = 5) {
+   return models.toSorted((a, b) => b.trendingScore - a.trendingScore).slice(0, limit);
+ }
src/lib/utils/noop.ts ADDED
@@ -0,0 +1,6 @@
+ /**
+  * A no operation function (does nothing)
+  */
+ export function noop() {
+   // do nothing
+ }
src/lib/utils/object.ts ADDED
@@ -0,0 +1,14 @@
+ // typed Object.keys
+ export function keys<T extends object>(o: T): (keyof T)[] {
+   return Object.keys(o) as (keyof T)[];
+ }
+
+ // typed Object.entries
+ export function entries<T extends object>(o: T): [keyof T, T[keyof T]][] {
+   return Object.entries(o) as [keyof T, T[keyof T]][];
+ }
+
+ // typed Object.fromEntries
+ export function fromEntries<T extends object>(entries: [keyof T, T[keyof T]][]): T {
+   return Object.fromEntries(entries) as T;
+ }
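The payoff is in the inferred types, which plain Object.keys/entries widen to string — a quick sketch:

import { entries, keys } from "$lib/utils/object";

const config = { temperature: 0.7, max_tokens: 512 };
const ks = keys(config);    // ("temperature" | "max_tokens")[] instead of string[]
const es = entries(config); // ["temperature" | "max_tokens", number][]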
src/lib/utils/platform.ts ADDED
@@ -0,0 +1,3 @@
+ export function isMac() {
+   return navigator.platform.toUpperCase().indexOf("MAC") >= 0;
+ }
src/lib/utils/search.ts ADDED
@@ -0,0 +1,64 @@
+ /**
+  * Generic fuzzy search function that searches through arrays and returns matching items
+  *
+  * @param options Configuration object for the fuzzy search
+  * @returns Array of items that match the search criteria
+  */
+ export default function fuzzysearch<T>(options: {
+   needle: string;
+   haystack: T[];
+   property: keyof T | ((item: T) => string);
+ }): T[] {
+   const { needle, haystack, property } = options;
+
+   if (!Array.isArray(haystack)) {
+     throw new Error("Haystack must be an array");
+   }
+
+   if (!property) {
+     throw new Error("Property selector is required");
+   }
+
+   // Convert needle to lowercase for case-insensitive matching
+   const lowerNeedle = needle.toLowerCase();
+
+   // Filter the haystack to find matching items
+   return haystack.filter(item => {
+     // Extract the string value from the item based on the property selector
+     const value = typeof property === "function" ? property(item) : String(item[property]);
+
+     // Convert to lowercase for case-insensitive matching
+     const lowerValue = value.toLowerCase();
+
+     // Perform the fuzzy search
+     return fuzzyMatchString(lowerNeedle, lowerValue);
+   });
+ }
+
+ /**
+  * Internal helper function that performs the actual fuzzy string matching
+  */
+ function fuzzyMatchString(needle: string, haystack: string): boolean {
+   const hlen = haystack.length;
+   const nlen = needle.length;
+
+   if (nlen > hlen) {
+     return false;
+   }
+
+   if (nlen === hlen) {
+     return needle === haystack;
+   }
+
+   outer: for (let i = 0, j = 0; i < nlen; i++) {
+     const nch = needle.charCodeAt(i);
+     while (j < hlen) {
+       if (haystack.charCodeAt(j++) === nch) {
+         continue outer;
+       }
+     }
+     return false;
+   }
+
+   return true;
+ }
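A sketch of the matching semantics: the needle must appear as an in-order (not necessarily contiguous) subsequence of the selected property, case-insensitively:

import fuzzysearch from "$lib/utils/search";

const hits = fuzzysearch({
  needle: "llama3",
  haystack: [{ id: "meta-llama/Llama-3.1-8B-Instruct" }, { id: "bigcode/starcoder2-3b" }],
  property: "id",
});
// => only the meta-llama entry: "l", "l", "a", "m", "a", "3" occur in order in its id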
src/lib/utils/store.ts ADDED
@@ -0,0 +1,9 @@
+ import { browser } from "$app/environment";
+ import { page } from "$app/stores";
+ import { readable, type Writable } from "svelte/store";
+
+ export function partialSet<T extends Record<string, unknown>>(store: Writable<T>, partial: Partial<T>) {
+   store.update(s => ({ ...s, ...partial }));
+ }
+
+ export const safePage = browser ? page : readable(undefined);
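partialSet in a sketch — patch a subset of keys without spreading at every call site (`ui` is a hypothetical store):

import { writable } from "svelte/store";
import { partialSet } from "$lib/utils/store";

const ui = writable({ sidebarOpen: false, theme: "dark" });
partialSet(ui, { sidebarOpen: true }); // theme is preserved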
src/routes/+page.server.ts CHANGED
@@ -1,11 +1,10 @@
- import type { ModelEntryWithTokenizer } from "$lib/components/InferencePlayground/types";
- import type { ModelEntry } from "@huggingface/hub";
- import type { PageServerLoad } from "./$types";
  import { env } from "$env/dynamic/private";
+ import type { Model, ModelWithTokenizer } from "$lib/types";
+ import type { PageServerLoad } from "./$types";

  export const load: PageServerLoad = async ({ fetch }) => {
    const apiUrl =
-     "https://huggingface.co/api/models?pipeline_tag=text-generation&inference_provider=hf-inference&filter=conversational";
+     "https://huggingface.co/api/models?pipeline_tag=text-generation&filter=conversational&inference_provider=all&limit=100&expand[]=inferenceProviderMapping&expand[]=config&expand[]=library_name&expand[]=pipeline_tag&expand[]=tags&expand[]=mask_token&expand[]=trendingScore";
    const HF_TOKEN = env.HF_TOKEN;

    const res = await fetch(apiUrl, {
@@ -17,7 +16,7 @@ export const load: PageServerLoad = async ({ fetch }) => {
      console.error(`Error fetching warm models`, res.status, res.statusText);
      return { models: [] };
    }
-   const compatibleModels: ModelEntry[] = await res.json();
+   const compatibleModels: Model[] = await res.json();
    compatibleModels.sort((a, b) => a.id.toLowerCase().localeCompare(b.id.toLowerCase()));

    const promises = compatibleModels.map(async model => {
@@ -32,10 +31,10 @@ export const load: PageServerLoad = async ({ fetch }) => {
      return null; // Ignore failed requests by returning null
    }
    const tokenizerConfig = await res.json();
-   return { ...model, tokenizerConfig } satisfies ModelEntryWithTokenizer;
+   return { ...model, tokenizerConfig } satisfies ModelWithTokenizer;
  });

- const models: ModelEntryWithTokenizer[] = (await Promise.all(promises)).filter(model => model !== null);
+ const models: ModelWithTokenizer[] = (await Promise.all(promises)).filter(model => model !== null);

  return { models };
  };
src/routes/+page.svelte CHANGED
@@ -1,6 +1,5 @@
  <script lang="ts">
-   export let data;
    import InferencePlayground from "$lib/components/InferencePlayground/InferencePlayground.svelte";
  </script>

- <InferencePlayground models={data.models} />
+ <InferencePlayground />
tsconfig.json CHANGED
@@ -9,7 +9,8 @@
    "skipLibCheck": true,
    "sourceMap": true,
    "strict": true,
-   "target": "ES2018"
+   "target": "ES2018",
+   "noUncheckedIndexedAccess": true
  },
  "exclude": ["vite.config.ts"]
  // Path aliases are handled by https://kit.svelte.dev/docs/configuration#alias