parquet-converter committed
Commit 12167dd · 1 Parent(s): 4efd843

Update parquet files (step 74 of 476)
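Note: the converter's own script is not included in this commit. Purely as an illustrative sketch (assuming pandas and pyarrow are available; the file names are hypothetical and not taken from this repository), one parquet-update step typically amounts to rewriting a data shard as a Parquet file:

import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq

# Load one shard of the source dataset (hypothetical file name).
df = pd.read_json("train.jsonl", lines=True)

# Convert to an Arrow table and write it back out as Parquet.
table = pa.Table.from_pandas(df, preserve_index=False)
pq.write_table(table, "train.parquet", compression="snappy")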

This view is limited to 50 files because the commit contains too many changes; see the raw diff for the complete change set.
Files changed (50)
  1. spaces/1gistliPinn/ChatGPT4/Examples/Cocteau Twins Singles Collection 10CD Box Set 1991 FLAC Fixed.md +0 -6
  2. spaces/1gistliPinn/ChatGPT4/Examples/Download AutoCAD Map 3D 2005 Crack TOP.md +0 -6
  3. spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/base_model.py +0 -58
  4. spaces/7hao/bingo/src/components/chat-panel.tsx +0 -153
  5. spaces/7hao/bingo/src/components/tailwind-indicator.tsx +0 -14
  6. spaces/801artistry/RVC801/tools/infer/infer-pm-index256.py +0 -202
  7. spaces/AIFILMS/image-to-sound-fx/share_btn.py +0 -96
  8. spaces/AISuperheroes/09SL-AI-Image-Music-Video-AIUIUX/app.py +0 -45
  9. spaces/AIZero2Hero4Health/3-ChatbotBlenderbot-GR/app.py +0 -134
  10. spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco.py +0 -324
  11. spaces/Adapter/CoAdapter/ldm/modules/distributions/__init__.py +0 -0
  12. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/clickoutside/Factory.js +0 -11
  13. spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/Factory.d.ts +0 -5
  14. spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/long_audio_transcribe.py +0 -71
  15. spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_transforms.py +0 -196
  16. spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/longcode/jpge.cpp +0 -1049
  17. spaces/Anar0140/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/README.md +0 -12
  18. spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_img2img.py +0 -989
  19. spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py +0 -13
  20. spaces/Andy1621/uniformer_image_detection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py +0 -3
  21. spaces/Andy1621/uniformer_light/kinetics_class_index.py +0 -402
  22. spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/chat.py +0 -724
  23. spaces/AnonymousForSubmission/Graphic_Score_and_Audio/generate_ssrl.py +0 -104
  24. spaces/Arthur678/vits-uma-genshin-honkai/text/cleaners.py +0 -475
  25. spaces/Artrajz/vits-simple-api/bert_vits2/text/symbols.py +0 -198
  26. spaces/Awesimo/jojogan/op/fused_act_cpu.py +0 -41
  27. spaces/BIASLab/sars-cov-2-classification-fcgr/src/model_loader.py +0 -39
  28. spaces/BraydenMoore/MARCI-NFL-Betting/Source/Models/__init__.py +0 -0
  29. spaces/CVPR/LIVE/thrust/thrust/detail/get_iterator_value.h +0 -53
  30. spaces/CVPR/LIVE/thrust/thrust/iterator/detail/device_system_tag.h +0 -40
  31. spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/gather.h +0 -22
  32. spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/for_each.h +0 -60
  33. spaces/CVPR/drawings-to-human/frontend/src/lib/utils.ts +0 -3
  34. spaces/CVPR/lama-example/saicinpainting/evaluation/masks/countless/README.md +0 -25
  35. spaces/Caoyunkang/Segment-Any-Anomaly/app.py +0 -112
  36. spaces/Carlos056/Cara/README.md +0 -10
  37. spaces/Chakri1997/ChatGPT-prompt-generator/README.md +0 -14
  38. spaces/ChallengeHub/Chinese-LangChain/app.py +0 -226
  39. spaces/CikeyQI/meme-api/docs/update_doc.py +0 -145
  40. spaces/Coweed/GoodTrip/README.md +0 -10
  41. spaces/Cropinky/esrgan/realesrgan/data/realesrgan_dataset.py +0 -192
  42. spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/focal_loss.py +0 -61
  43. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImagePath.py +0 -19
  44. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/timeTools.py +0 -88
  45. spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-03d58ab8.css +0 -1
  46. spaces/Davidsamuel101/PPTGenerator/src/app.py +0 -18
  47. spaces/DemoLou/moe-tts/text/__init__.py +0 -32
  48. spaces/DragGan/DragGan-Inversion/training/training_loop.py +0 -499
  49. spaces/DragGan/DragGan/stylegan_human/training_scripts/sg2/train.py +0 -560
  50. spaces/ECCV2022/bytetrack/yolox/tracking_utils/evaluation.py +0 -113
spaces/1gistliPinn/ChatGPT4/Examples/Cocteau Twins Singles Collection 10CD Box Set 1991 FLAC Fixed.md DELETED
@@ -1,6 +0,0 @@
- <h2>Cocteau Twins Singles Collection 10CD Box Set 1991 FLAC</h2><br /><p><b><b>Download File</b> &#9881;&#9881;&#9881; <a href="https://imgfil.com/2uxXmw">https://imgfil.com/2uxXmw</a></b></p><br /><br />
-
- Hotel Transylvania 3: license Download Full Movie Torrent Mavis is surprised ... 6 Pro Lisans Kod License Key Sisteminizde parçalanan dosyaları birleştirmekle görevlidir. what does iDRAC stand for. ... 50 and above 14G PowerEdge Servers: iDRAC 9 with Firmware version 3. ... Ableton Live Suite 10. 4d29de3e1b<br />
- <br />
- <br />
- <p></p>
spaces/1gistliPinn/ChatGPT4/Examples/Download AutoCAD Map 3D 2005 Crack TOP.md DELETED
@@ -1,6 +0,0 @@
- <h2>download AutoCAD Map 3D 2005 crack</h2><br /><p><b><b>Download</b> ---> <a href="https://imgfil.com/2uxX5G">https://imgfil.com/2uxX5G</a></b></p><br /><br />
-
- AutoCAD Map 3D forum. Welcome to the Autodesk Forum on AutoCAD Map 3D. Share your knowledge, ask questions, and explore popular AutoCAD Map 3D topics. The Frequently Asked Questions section provides answers to frequently asked questions about AutoCAD Map 3D and other Autodesk products. Tutorials are provided in the Learn section 8a78ff9644<br />
- <br />
- <br />
- <p></p>
spaces/232labs/VToonify/vtoonify/model/stylegan/lpips/base_model.py DELETED
@@ -1,58 +0,0 @@
- import os
- import numpy as np
- import torch
- from torch.autograd import Variable
- from pdb import set_trace as st
- from IPython import embed
-
- class BaseModel():
-     def __init__(self):
-         pass;
-
-     def name(self):
-         return 'BaseModel'
-
-     def initialize(self, use_gpu=True, gpu_ids=[0]):
-         self.use_gpu = use_gpu
-         self.gpu_ids = gpu_ids
-
-     def forward(self):
-         pass
-
-     def get_image_paths(self):
-         pass
-
-     def optimize_parameters(self):
-         pass
-
-     def get_current_visuals(self):
-         return self.input
-
-     def get_current_errors(self):
-         return {}
-
-     def save(self, label):
-         pass
-
-     # helper saving function that can be used by subclasses
-     def save_network(self, network, path, network_label, epoch_label):
-         save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
-         save_path = os.path.join(path, save_filename)
-         torch.save(network.state_dict(), save_path)
-
-     # helper loading function that can be used by subclasses
-     def load_network(self, network, network_label, epoch_label):
-         save_filename = '%s_net_%s.pth' % (epoch_label, network_label)
-         save_path = os.path.join(self.save_dir, save_filename)
-         print('Loading network from %s'%save_path)
-         network.load_state_dict(torch.load(save_path))
-
-     def update_learning_rate():
-         pass
-
-     def get_image_paths(self):
-         return self.image_paths
-
-     def save_done(self, flag=False):
-         np.save(os.path.join(self.save_dir, 'done_flag'),flag)
-         np.savetxt(os.path.join(self.save_dir, 'done_flag'),[flag,],fmt='%i')
spaces/7hao/bingo/src/components/chat-panel.tsx DELETED
@@ -1,153 +0,0 @@
1
- 'use client'
2
-
3
- import * as React from 'react'
4
- import Image from 'next/image'
5
- import Textarea from 'react-textarea-autosize'
6
- import { useAtomValue } from 'jotai'
7
- import { useEnterSubmit } from '@/lib/hooks/use-enter-submit'
8
- import { cn } from '@/lib/utils'
9
-
10
- import BrushIcon from '@/assets/images/brush.svg'
11
- import ChatIcon from '@/assets/images/chat.svg'
12
- import VisualSearchIcon from '@/assets/images/visual-search.svg'
13
- import SendIcon from '@/assets/images/send.svg'
14
- import PinIcon from '@/assets/images/pin.svg'
15
- import PinFillIcon from '@/assets/images/pin-fill.svg'
16
-
17
- import { useBing } from '@/lib/hooks/use-bing'
18
- import { voiceListenAtom } from '@/state'
19
- import Voice from './voice'
20
- import { ChatImage } from './chat-image'
21
- import { ChatAttachments } from './chat-attachments'
22
-
23
- export interface ChatPanelProps
24
- extends Pick<
25
- ReturnType<typeof useBing>,
26
- | 'generating'
27
- | 'input'
28
- | 'setInput'
29
- | 'sendMessage'
30
- | 'resetConversation'
31
- | 'isSpeaking'
32
- | 'attachmentList'
33
- | 'uploadImage'
34
- | 'setAttachmentList'
35
- > {
36
- id?: string
37
- className?: string
38
- }
39
-
40
- export function ChatPanel({
41
- isSpeaking,
42
- generating,
43
- input,
44
- setInput,
45
- className,
46
- sendMessage,
47
- resetConversation,
48
- attachmentList,
49
- uploadImage,
50
- setAttachmentList
51
- }: ChatPanelProps) {
52
- const inputRef = React.useRef<HTMLTextAreaElement>(null)
53
- const {formRef, onKeyDown} = useEnterSubmit()
54
- const [focused, setFocused] = React.useState(false)
55
- const [active, setActive] = React.useState(false)
56
- const [pin, setPin] = React.useState(false)
57
- const [tid, setTid] = React.useState<any>()
58
- const voiceListening = useAtomValue(voiceListenAtom)
59
-
60
- const setBlur = React.useCallback(() => {
61
- clearTimeout(tid)
62
- setActive(false)
63
- const _tid = setTimeout(() => setFocused(false), 2000);
64
- setTid(_tid)
65
- }, [tid])
66
-
67
- const setFocus = React.useCallback(() => {
68
- setFocused(true)
69
- setActive(true)
70
- clearTimeout(tid)
71
- inputRef.current?.focus()
72
- }, [tid])
73
-
74
- React.useEffect(() => {
75
- if (input) {
76
- setFocus()
77
- }
78
- }, [input])
79
-
80
- return (
81
- <form
82
- className={cn('chat-panel', className)}
83
- onSubmit={async e => {
84
- e.preventDefault()
85
- if (generating) {
86
- return;
87
- }
88
- if (!input?.trim()) {
89
- return
90
- }
91
- setInput('')
92
- setPin(false)
93
- await sendMessage(input)
94
- }}
95
- ref={formRef}
96
- >
97
- <div className="action-bar pb-4">
98
- <div className={cn('action-root', { focus: active || pin })} speech-state="hidden" visual-search="" drop-target="">
99
- <div className="fade bottom">
100
- <div className="background"></div>
101
- </div>
102
- <div className={cn('outside-left-container', { collapsed: focused })}>
103
- <div className="button-compose-wrapper">
104
- <button className="body-2 button-compose" type="button" aria-label="新主题" onClick={resetConversation}>
105
- <div className="button-compose-content">
106
- <Image className="pl-2" alt="brush" src={BrushIcon} width={40} />
107
- <div className="button-compose-text">新主题</div>
108
- </div>
109
- </button>
110
- </div>
111
- </div>
112
- <div
113
- className={cn('main-container', { active: active || pin })}
114
- style={{ minHeight: pin ? '360px' : undefined }}
115
- onClick={setFocus}
116
- onBlur={setBlur}
117
- >
118
- <div className="main-bar">
119
- <Image alt="chat" src={ChatIcon} width={20} color="blue" />
120
- <Textarea
121
- ref={inputRef}
122
- tabIndex={0}
123
- onKeyDown={onKeyDown}
124
- rows={1}
125
- value={input}
126
- onChange={e => setInput(e.target.value.slice(0, 4000))}
127
- placeholder={voiceListening ? '持续对话中...对话完成说“发送”即可' : 'Shift + Enter 换行'}
128
- spellCheck={false}
129
- className="message-input min-h-[24px] -mx-1 w-full text-base resize-none bg-transparent focus-within:outline-none"
130
- />
131
- <ChatImage uploadImage={uploadImage}>
132
- <Image alt="visual-search" src={VisualSearchIcon} width={24} />
133
- </ChatImage>
134
- <Voice setInput={setInput} sendMessage={sendMessage} isSpeaking={isSpeaking} input={input} />
135
- <button type="submit">
136
- <Image alt="send" src={SendIcon} width={20} style={{ marginTop: '2px' }} />
137
- </button>
138
- </div>
139
- <ChatAttachments attachmentList={attachmentList} setAttachmentList={setAttachmentList} uploadImage={uploadImage} />
140
- <div className="body-1 bottom-bar">
141
- <div className="letter-counter"><span>{input.length}</span>/4000</div>
142
- <button onClick={() => {
143
- setPin(!pin)
144
- }} className="pr-2">
145
- <Image alt="pin" src={pin ? PinFillIcon : PinIcon} width={20} />
146
- </button>
147
- </div>
148
- </div>
149
- </div>
150
- </div>
151
- </form>
152
- )
153
- }
spaces/7hao/bingo/src/components/tailwind-indicator.tsx DELETED
@@ -1,14 +0,0 @@
- export function TailwindIndicator() {
-   if (process.env.NODE_ENV === 'production') return null
-
-   return (
-     <div className="fixed bottom-1 left-1 z-50 flex h-6 w-6 items-center justify-center rounded-full bg-gray-800 p-3 font-mono text-xs text-white">
-       <div className="block sm:hidden">xs</div>
-       <div className="hidden sm:block md:hidden">sm</div>
-       <div className="hidden md:block lg:hidden">md</div>
-       <div className="hidden lg:block xl:hidden">lg</div>
-       <div className="hidden xl:block 2xl:hidden">xl</div>
-       <div className="hidden 2xl:block">2xl</div>
-     </div>
-   )
- }
spaces/801artistry/RVC801/tools/infer/infer-pm-index256.py DELETED
@@ -1,202 +0,0 @@
- """
-
- 对源特征进行检索
- """
- import os
- import logging
-
- logger = logging.getLogger(__name__)
-
- import parselmouth
- import torch
-
- os.environ["CUDA_VISIBLE_DEVICES"] = "0"
- # import torchcrepe
- from time import time as ttime
-
- # import pyworld
- import librosa
- import numpy as np
- import soundfile as sf
- import torch.nn.functional as F
- from fairseq import checkpoint_utils
-
- # from models import SynthesizerTrn256#hifigan_nonsf
- # from lib.infer_pack.models import SynthesizerTrn256NSF as SynthesizerTrn256#hifigan_nsf
- from infer.lib.infer_pack.models import (
-     SynthesizerTrnMs256NSFsid as SynthesizerTrn256,
- ) # hifigan_nsf
- from scipy.io import wavfile
-
- # from lib.infer_pack.models import SynthesizerTrnMs256NSFsid_sim as SynthesizerTrn256#hifigan_nsf
- # from models import SynthesizerTrn256NSFsim as SynthesizerTrn256#hifigan_nsf
- # from models import SynthesizerTrn256NSFsimFlow as SynthesizerTrn256#hifigan_nsf
-
-
- device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
- model_path = r"E:\codes\py39\vits_vc_gpu_train\assets\hubert\hubert_base.pt" #
- logger.info("Load model(s) from {}".format(model_path))
- models, saved_cfg, task = checkpoint_utils.load_model_ensemble_and_task(
-     [model_path],
-     suffix="",
- )
- model = models[0]
- model = model.to(device)
- model = model.half()
- model.eval()
-
- # net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],183,256,is_half=True)#hifigan#512#256
- # net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],109,256,is_half=True)#hifigan#512#256
- net_g = SynthesizerTrn256(
-     1025,
-     32,
-     192,
-     192,
-     768,
-     2,
-     6,
-     3,
-     0,
-     "1",
-     [3, 7, 11],
-     [[1, 3, 5], [1, 3, 5], [1, 3, 5]],
-     [10, 10, 2, 2],
-     512,
-     [16, 16, 4, 4],
-     183,
-     256,
-     is_half=True,
- ) # hifigan#512#256#no_dropout
- # net_g = SynthesizerTrn256(1025,32,192,192,768,2,3,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2,2],512,[16,16,4,4],0)#ts3
- # net_g = SynthesizerTrn256(1025,32,192,192,768,2,6,3,0.1,"1", [3,7,11],[[1,3,5], [1,3,5], [1,3,5]],[10,10,2],512,[16,16,4],0)#hifigan-ps-sr
- #
- # net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [5,5], 512, [15,15], 0)#ms
- # net_g = SynthesizerTrn(1025, 32, 192, 192, 768, 2, 6, 3, 0.1, "1", [3, 7, 11], [[1, 3, 5], [1, 3, 5], [1, 3, 5]], [10,10], 512, [16,16], 0)#idwt2
-
- # weights=torch.load("infer/ft-mi_1k-noD.pt")
- # weights=torch.load("infer/ft-mi-freeze-vocoder-flow-enc_q_1k.pt")
- # weights=torch.load("infer/ft-mi-freeze-vocoder_true_1k.pt")
- # weights=torch.load("infer/ft-mi-sim1k.pt")
- weights = torch.load("infer/ft-mi-no_opt-no_dropout.pt")
- logger.debug(net_g.load_state_dict(weights, strict=True))
-
- net_g.eval().to(device)
- net_g.half()
-
-
- def get_f0(x, p_len, f0_up_key=0):
-     time_step = 160 / 16000 * 1000
-     f0_min = 50
-     f0_max = 1100
-     f0_mel_min = 1127 * np.log(1 + f0_min / 700)
-     f0_mel_max = 1127 * np.log(1 + f0_max / 700)
-
-     f0 = (
-         parselmouth.Sound(x, 16000)
-         .to_pitch_ac(
-             time_step=time_step / 1000,
-             voicing_threshold=0.6,
-             pitch_floor=f0_min,
-             pitch_ceiling=f0_max,
-         )
-         .selected_array["frequency"]
-     )
-
-     pad_size = (p_len - len(f0) + 1) // 2
-     if pad_size > 0 or p_len - len(f0) - pad_size > 0:
-         f0 = np.pad(f0, [[pad_size, p_len - len(f0) - pad_size]], mode="constant")
-     f0 *= pow(2, f0_up_key / 12)
-     f0bak = f0.copy()
-
-     f0_mel = 1127 * np.log(1 + f0 / 700)
-     f0_mel[f0_mel > 0] = (f0_mel[f0_mel > 0] - f0_mel_min) * 254 / (
-         f0_mel_max - f0_mel_min
-     ) + 1
-     f0_mel[f0_mel <= 1] = 1
-     f0_mel[f0_mel > 255] = 255
-     # f0_mel[f0_mel > 188] = 188
-     f0_coarse = np.rint(f0_mel).astype(np.int32)
-     return f0_coarse, f0bak
-
-
- import faiss
-
- index = faiss.read_index("infer/added_IVF512_Flat_mi_baseline_src_feat.index")
- big_npy = np.load("infer/big_src_feature_mi.npy")
- ta0 = ta1 = ta2 = 0
- for idx, name in enumerate(
-     [
-         "冬之花clip1.wav",
-     ]
- ): ##
-     wav_path = "todo-songs/%s" % name #
-     f0_up_key = -2 #
-     audio, sampling_rate = sf.read(wav_path)
-     if len(audio.shape) > 1:
-         audio = librosa.to_mono(audio.transpose(1, 0))
-     if sampling_rate != 16000:
-         audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000)
-
-     feats = torch.from_numpy(audio).float()
-     if feats.dim() == 2: # double channels
-         feats = feats.mean(-1)
-     assert feats.dim() == 1, feats.dim()
-     feats = feats.view(1, -1)
-     padding_mask = torch.BoolTensor(feats.shape).fill_(False)
-     inputs = {
-         "source": feats.half().to(device),
-         "padding_mask": padding_mask.to(device),
-         "output_layer": 9, # layer 9
-     }
-     if torch.cuda.is_available():
-         torch.cuda.synchronize()
-     t0 = ttime()
-     with torch.no_grad():
-         logits = model.extract_features(**inputs)
-         feats = model.final_proj(logits[0])
-
-     ####索引优化
-     npy = feats[0].cpu().numpy().astype("float32")
-     D, I = index.search(npy, 1)
-     feats = (
-         torch.from_numpy(big_npy[I.squeeze()].astype("float16")).unsqueeze(0).to(device)
-     )
-
-     feats = F.interpolate(feats.permute(0, 2, 1), scale_factor=2).permute(0, 2, 1)
-     if torch.cuda.is_available():
-         torch.cuda.synchronize()
-     t1 = ttime()
-     # p_len = min(feats.shape[1],10000,pitch.shape[0])#太大了爆显存
-     p_len = min(feats.shape[1], 10000) #
-     pitch, pitchf = get_f0(audio, p_len, f0_up_key)
-     p_len = min(feats.shape[1], 10000, pitch.shape[0]) # 太大了爆显存
-     if torch.cuda.is_available():
-         torch.cuda.synchronize()
-     t2 = ttime()
-     feats = feats[:, :p_len, :]
-     pitch = pitch[:p_len]
-     pitchf = pitchf[:p_len]
-     p_len = torch.LongTensor([p_len]).to(device)
-     pitch = torch.LongTensor(pitch).unsqueeze(0).to(device)
-     sid = torch.LongTensor([0]).to(device)
-     pitchf = torch.FloatTensor(pitchf).unsqueeze(0).to(device)
-     with torch.no_grad():
-         audio = (
-             net_g.infer(feats, p_len, pitch, pitchf, sid)[0][0, 0]
-             .data.cpu()
-             .float()
-             .numpy()
-         ) # nsf
-     if torch.cuda.is_available():
-         torch.cuda.synchronize()
-     t3 = ttime()
-     ta0 += t1 - t0
-     ta1 += t2 - t1
-     ta2 += t3 - t2
-     # wavfile.write("ft-mi_1k-index256-noD-%s.wav"%name, 40000, audio)##
-     # wavfile.write("ft-mi-freeze-vocoder-flow-enc_q_1k-%s.wav"%name, 40000, audio)##
-     # wavfile.write("ft-mi-sim1k-%s.wav"%name, 40000, audio)##
-     wavfile.write("ft-mi-no_opt-no_dropout-%s.wav" % name, 40000, audio) ##
-
-
- logger.debug("%.2fs %.2fs %.2fs", ta0, ta1, ta2) #
spaces/AIFILMS/image-to-sound-fx/share_btn.py DELETED
@@ -1,96 +0,0 @@
1
- community_icon_html = """<svg id="share-btn-share-icon" xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 32 32">
2
- <path d="M20.6081 3C21.7684 3 22.8053 3.49196 23.5284 4.38415C23.9756 4.93678 24.4428 5.82749 24.4808 7.16133C24.9674 7.01707 25.4353 6.93643 25.8725 6.93643C26.9833 6.93643 27.9865 7.37587 28.696 8.17411C29.6075 9.19872 30.0124 10.4579 29.8361 11.7177C29.7523 12.3177 29.5581 12.8555 29.2678 13.3534C29.8798 13.8646 30.3306 14.5763 30.5485 15.4322C30.719 16.1032 30.8939 17.5006 29.9808 18.9403C30.0389 19.0342 30.0934 19.1319 30.1442 19.2318C30.6932 20.3074 30.7283 21.5229 30.2439 22.6548C29.5093 24.3704 27.6841 25.7219 24.1397 27.1727C21.9347 28.0753 19.9174 28.6523 19.8994 28.6575C16.9842 29.4379 14.3477 29.8345 12.0653 29.8345C7.87017 29.8345 4.8668 28.508 3.13831 25.8921C0.356375 21.6797 0.754104 17.8269 4.35369 14.1131C6.34591 12.058 7.67023 9.02782 7.94613 8.36275C8.50224 6.39343 9.97271 4.20438 12.4172 4.20438H12.4179C12.6236 4.20438 12.8314 4.2214 13.0364 4.25468C14.107 4.42854 15.0428 5.06476 15.7115 6.02205C16.4331 5.09583 17.134 4.359 17.7682 3.94323C18.7242 3.31737 19.6794 3 20.6081 3ZM20.6081 5.95917C20.2427 5.95917 19.7963 6.1197 19.3039 6.44225C17.7754 7.44319 14.8258 12.6772 13.7458 14.7131C13.3839 15.3952 12.7655 15.6837 12.2086 15.6837C11.1036 15.6837 10.2408 14.5497 12.1076 13.1085C14.9146 10.9402 13.9299 7.39584 12.5898 7.1776C12.5311 7.16799 12.4731 7.16355 12.4172 7.16355C11.1989 7.16355 10.6615 9.33114 10.6615 9.33114C10.6615 9.33114 9.0863 13.4148 6.38031 16.206C3.67434 18.998 3.5346 21.2388 5.50675 24.2246C6.85185 26.2606 9.42666 26.8753 12.0653 26.8753C14.8021 26.8753 17.6077 26.2139 19.1799 25.793C19.2574 25.7723 28.8193 22.984 27.6081 20.6107C27.4046 20.212 27.0693 20.0522 26.6471 20.0522C24.9416 20.0522 21.8393 22.6726 20.5057 22.6726C20.2076 22.6726 19.9976 22.5416 19.9116 22.222C19.3433 20.1173 28.552 19.2325 27.7758 16.1839C27.639 15.6445 27.2677 15.4256 26.746 15.4263C24.4923 15.4263 19.4358 19.5181 18.3759 19.5181C18.2949 19.5181 18.2368 19.4937 18.2053 19.4419C17.6743 18.557 17.9653 17.9394 21.7082 15.6009C25.4511 13.2617 28.0783 11.8545 26.5841 10.1752C26.4121 9.98141 26.1684 9.8956 25.8725 9.8956C23.6001 9.89634 18.2311 14.9403 18.2311 14.9403C18.2311 14.9403 16.7821 16.496 15.9057 16.496C15.7043 16.496 15.533 16.4139 15.4169 16.2112C14.7956 15.1296 21.1879 10.1286 21.5484 8.06535C21.7928 6.66715 21.3771 5.95917 20.6081 5.95917Z" fill="#FF9D00"></path>
3
- <path d="M5.50686 24.2246C3.53472 21.2387 3.67446 18.9979 6.38043 16.206C9.08641 13.4147 10.6615 9.33111 10.6615 9.33111C10.6615 9.33111 11.2499 6.95933 12.59 7.17757C13.93 7.39581 14.9139 10.9401 12.1069 13.1084C9.29997 15.276 12.6659 16.7489 13.7459 14.713C14.8258 12.6772 17.7747 7.44316 19.304 6.44221C20.8326 5.44128 21.9089 6.00204 21.5484 8.06532C21.188 10.1286 14.795 15.1295 15.4171 16.2118C16.0391 17.2934 18.2312 14.9402 18.2312 14.9402C18.2312 14.9402 25.0907 8.49588 26.5842 10.1752C28.0776 11.8545 25.4512 13.2616 21.7082 15.6008C17.9646 17.9393 17.6744 18.557 18.2054 19.4418C18.7372 20.3266 26.9998 13.1351 27.7759 16.1838C28.5513 19.2324 19.3434 20.1173 19.9117 22.2219C20.48 24.3274 26.3979 18.2382 27.6082 20.6107C28.8193 22.9839 19.2574 25.7722 19.18 25.7929C16.0914 26.62 8.24723 28.3726 5.50686 24.2246Z" fill="#FFD21E"></path>
4
- </svg>"""
5
-
6
- loading_icon_html = """<svg id="share-btn-loading-icon" style="display:none;" class="animate-spin"
7
- style="color: #ffffff;
8
- "
9
- xmlns="http://www.w3.org/2000/svg" xmlns:xlink="http://www.w3.org/1999/xlink" aria-hidden="true" fill="none" focusable="false" role="img" width="1em" height="1em" preserveAspectRatio="xMidYMid meet" viewBox="0 0 24 24"><circle style="opacity: 0.25;" cx="12" cy="12" r="10" stroke="white" stroke-width="4"></circle><path style="opacity: 0.75;" fill="white" d="M4 12a8 8 0 018-8V0C5.373 0 0 5.373 0 12h4zm2 5.291A7.962 7.962 0 014 12H0c0 3.042 1.135 5.824 3 7.938l3-2.647z"></path></svg>"""
10
-
11
- share_js = """async () => {
12
- async function uploadFile(file){
13
- const UPLOAD_URL = 'https://huggingface.co/uploads';
14
- const response = await fetch(UPLOAD_URL, {
15
- method: 'POST',
16
- headers: {
17
- 'Content-Type': file.type,
18
- 'X-Requested-With': 'XMLHttpRequest',
19
- },
20
- body: file, /// <- File inherits from Blob
21
- });
22
- const url = await response.text();
23
- return url;
24
- }
25
- async function getInputImgFile(imgEl){
26
- const res = await fetch(imgEl.src);
27
- const blob = await res.blob();
28
- const imgId = Date.now() % 200;
29
- const isPng = imgEl.src.startsWith(`data:image/png`);
30
- if(isPng){
31
- const fileName = `img-to-sfx-${{imgId}}.png`;
32
- return new File([blob], fileName, { type: 'image/png' });
33
- }else{
34
- const fileName = `img-to-sfx-${{imgId}}.jpg`;
35
- return new File([blob], fileName, { type: 'image/jpeg' });
36
- }
37
- }
38
- async function getOutputSoundFile(audioEL){
39
- const res = await fetch(audioEL.src);
40
- const blob = await res.blob();
41
- const audioId = Date.now() % 200;
42
- const fileName = `img-to-sfx-${{audioId}}.wav`;
43
- const musicBlob = new File([blob], fileName, { type: 'audio/wav' });
44
- console.log(musicBlob);
45
- return musicBlob;
46
- }
47
-
48
- async function audioToBase64(audioFile) {
49
- return new Promise((resolve, reject) => {
50
- let reader = new FileReader();
51
- reader.readAsDataURL(audioFile);
52
- reader.onload = () => resolve(reader.result);
53
- reader.onerror = error => reject(error);
54
-
55
- });
56
- }
57
- const gradioEl = document.querySelector("gradio-app").shadowRoot || document.querySelector('body > gradio-app');
58
- const captionTxt = gradioEl.querySelector('#text-caption textarea').value;
59
- const inputImgEl = gradioEl.querySelector('#input-img img');
60
- const outputSound = gradioEl.querySelector('#sound-output audio');
61
- const outputSound_src = gradioEl.querySelector('#sound-output audio').src;
62
- const outputSound_name = outputSound_src.split('/').pop();
63
- const shareBtnEl = gradioEl.querySelector('#share-btn');
64
- const shareIconEl = gradioEl.querySelector('#share-btn-share-icon');
65
- const loadingIconEl = gradioEl.querySelector('#share-btn-loading-icon');
66
- if(!outputSound){
67
- return;
68
- };
69
- shareBtnEl.style.pointerEvents = 'none';
70
- shareIconEl.style.display = 'none';
71
- loadingIconEl.style.removeProperty('display');
72
- const inputFile = await getInputImgFile(inputImgEl);
73
- const urlInputImg = await uploadFile(inputFile);
74
- const soundFile = await getOutputSoundFile(outputSound);
75
- const dataOutputSound = await uploadFile(soundFile);
76
-
77
- const descriptionMd = `
78
- #### Image input:
79
- <img src='${urlInputImg}' style='max-height: 350px;'>
80
-
81
- #### Sound Effect:
82
- <audio controls>
83
- <source src="${dataOutputSound}" type="audio/wav">
84
- Your browser does not support the audio element.
85
- </audio>
86
- `;
87
- const params = new URLSearchParams({
88
- title: captionTxt,
89
- description: descriptionMd,
90
- });
91
- const paramsStr = params.toString();
92
- window.open(`https://huggingface.co/spaces/fffiloni/image-to-sound-fx/discussions/new?${paramsStr}`, '_blank');
93
- shareBtnEl.style.removeProperty('pointer-events');
94
- shareIconEl.style.removeProperty('display');
95
- loadingIconEl.style.display = 'none';
96
- }"""
spaces/AISuperheroes/09SL-AI-Image-Music-Video-AIUIUX/app.py DELETED
@@ -1,45 +0,0 @@
- import streamlit as st
- import gradio as gr
- import IPython
- import streamlit as st
- import streamlit.components.v1 as components
- from IPython.display import IFrame
-
- src='' # URL parameter to change the iframe url
- def SetIframeURL(option_selected):
-     if (option_selected=='Collager'):
-         src='https://www.artbreeder.com/'
-     if (option_selected=='Midjourney'):
-         src='https://www.midjourney.com/'
-     if (option_selected=='DreamStudio'):
-         src='https://beta.dreamstudio.ai/'
-     if (option_selected=='NightCafe'):
-         src='https://creator.nightcafe.studio/'
-     if (option_selected=='RunwayML'):
-         src='https://app.runwayml.com/'
-     if (option_selected=='ArtFromTextandImages'):
-         src='https://huggingface.co/spaces/awacke1/Art-from-Text-and-Images'
-     if (option_selected=='Boomy'):
-         src='https://boomy.com/'
-
-     width = st.sidebar.slider("Width", 200, 1500, 800, 100)
-     height = st.sidebar.slider("Height", 200, 1500, 900, 100)
-     st.components.v1.iframe(src, width, height, scrolling=True)
-
- try:
-     options = ['Midjourney', 'RunwayML', 'Boomy']
-     query_params = st.experimental_get_query_params()
-     query_option = query_params['option'][0] #throws an exception when visiting http://host:port
-     option_selected = st.sidebar.selectbox('Pick option', options, index=options.index(query_option))
-     if option_selected:
-         st.experimental_set_query_params(option=option_selected)
-         SetIframeURL(option_selected)
- except:
-     options = ['Midjourney', 'RunwayML', 'Boomy']
-     st.experimental_set_query_params(option=options[1]) # defaults to 1
-     query_params = st.experimental_get_query_params()
-     query_option = query_params['option'][0]
-     option_selected = st.sidebar.selectbox('Pick option', options, index=options.index(query_option))
-     if option_selected:
-         st.experimental_set_query_params(option=option_selected)
-         SetIframeURL(option_selected)
spaces/AIZero2Hero4Health/3-ChatbotBlenderbot-GR/app.py DELETED
@@ -1,134 +0,0 @@
- from transformers import BlenderbotTokenizer, BlenderbotForConditionalGeneration
- import torch
- import gradio as gr
-
-
- # PersistDataset -----
- import os
- import csv
- import gradio as gr
- from gradio import inputs, outputs
- import huggingface_hub
- from huggingface_hub import Repository, hf_hub_download, upload_file
- from datetime import datetime
-
-
- # -------------------------------------------- For Memory - you will need to set up a dataset and HF_TOKEN ---------
- #DATASET_REPO_URL = "https://huggingface.co/datasets/awacke1/ChatbotMemory.csv"
- #DATASET_REPO_ID = "awacke1/ChatbotMemory.csv"
- #DATA_FILENAME = "ChatbotMemory.csv"
- #DATA_FILE = os.path.join("data", DATA_FILENAME)
- #HF_TOKEN = os.environ.get("HF_TOKEN")
-
- #SCRIPT = """
- #<script>
- #if (!window.hasBeenRun) {
- # window.hasBeenRun = true;
- # console.log("should only happen once");
- # document.querySelector("button.submit").click();
- #}
- #</script>
- #"""
-
- #try:
- # hf_hub_download(
- # repo_id=DATASET_REPO_ID,
- # filename=DATA_FILENAME,
- # cache_dir=DATA_DIRNAME,
- # force_filename=DATA_FILENAME
- # )
- #except:
- # print("file not found")
- #repo = Repository(
- # local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN
- #)
-
- #def store_message(name: str, message: str):
- # if name and message:
- # with open(DATA_FILE, "a") as csvfile:
- # writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"])
- # writer.writerow(
- # {"name": name.strip(), "message": message.strip(), "time": str(datetime.now())}
- # )
- # uncomment line below to begin saving. If creating your own copy you will need to add a access token called "HF_TOKEN" to your profile, then create a secret for your repo with the access code naming it "HF_TOKEN" For the CSV as well you can copy the header and first few lines to your own then update the paths above which should work to save to your own repository for datasets.
- # commit_url = repo.push_to_hub()
- # return ""
-
- #iface = gr.Interface(
- # store_message,
- # [
- # inputs.Textbox(placeholder="Your name"),
- # inputs.Textbox(placeholder="Your message", lines=2),
- # ],
- # "html",
- # css="""
- # .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; }
- # """,
- # title="Reading/writing to a HuggingFace dataset repo from Spaces",
- # description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.",
- # article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})",
- #)
- # --------------------------------------------------- For Memory
-
- mname = "facebook/blenderbot-400M-distill"
- model = BlenderbotForConditionalGeneration.from_pretrained(mname)
- tokenizer = BlenderbotTokenizer.from_pretrained(mname)
-
- def take_last_tokens(inputs, note_history, history):
-     """Filter the last 128 tokens"""
-     if inputs['input_ids'].shape[1] > 128:
-         inputs['input_ids'] = torch.tensor([inputs['input_ids'][0][-128:].tolist()])
-         inputs['attention_mask'] = torch.tensor([inputs['attention_mask'][0][-128:].tolist()])
-         note_history = ['</s> <s>'.join(note_history[0].split('</s> <s>')[2:])]
-         history = history[1:]
-     return inputs, note_history, history
-
- def add_note_to_history(note, note_history):
-     """Add a note to the historical information"""
-     note_history.append(note)
-     note_history = '</s> <s>'.join(note_history)
-     return [note_history]
-
- title = "State of the Art Chatbot with Memory Dataset"
- description = """Chatbot With Memory"""
-
- def chat(message, history):
-     history = history or []
-     if history:
-         history_useful = ['</s> <s>'.join([str(a[0])+'</s> <s>'+str(a[1]) for a in history])]
-     else:
-         history_useful = []
-     history_useful = add_note_to_history(message, history_useful)
-     inputs = tokenizer(history_useful, return_tensors="pt")
-     inputs, history_useful, history = take_last_tokens(inputs, history_useful, history)
-     reply_ids = model.generate(**inputs)
-     response = tokenizer.batch_decode(reply_ids, skip_special_tokens=True)[0]
-     history_useful = add_note_to_history(response, history_useful)
-     list_history = history_useful[0].split('</s> <s>')
-     history.append((list_history[-2], list_history[-1]))
-     # store_message(message, response) # Save to dataset -- uncomment with code above, create a dataset to store and add your HF_TOKEN from profile to this repo to use.
-     return history, history
-
- gr.Interface(
-     fn=chat,
-     theme="huggingface",
-     css=".footer {display:none !important}",
-     inputs=["text", "state"],
-     outputs=["chatbot", "state"],
-     title=title,
-     allow_flagging="never",
-     description=f"Gradio chatbot backed by memory in a dataset repository.",
-     # article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL})"
- ).launch(debug=True)
-
- #demo = gr.Blocks()
- #with demo:
- # audio_file = gr.inputs.Audio(source="microphone", type="filepath")
- # text = gr.Textbox(label="Speech to Text")
- # TTSchoice = gr.inputs.Radio( label="Pick a Text to Speech Model", choices=MODEL_NAMES, )
- # audio = gr.Audio(label="Output", interactive=False)
- # b1 = gr.Button("Recognize Speech")
- # b5 = gr.Button("Read It Back Aloud")
- # b1.click(speech_to_text, inputs=audio_file, outputs=text)
- # b5.click(tts, inputs=[text,TTSchoice], outputs=audio)
- #demo.launch(share=True)
spaces/ATang0729/Forecast4Muses/Model/Model6/Model6_0_ClothesDetection/mmyolo/configs/yolov7/yolov7_l_syncbn_fast_8x16b-300e_coco.py DELETED
@@ -1,324 +0,0 @@
1
- _base_ = ['../_base_/default_runtime.py', '../_base_/det_p5_tta.py']
2
-
3
- # ========================Frequently modified parameters======================
4
- # -----data related-----
5
- data_root = 'data/coco/' # Root path of data
6
- # Path of train annotation file
7
- train_ann_file = 'annotations/instances_train2017.json'
8
- train_data_prefix = 'train2017/' # Prefix of train image path
9
- # Path of val annotation file
10
- val_ann_file = 'annotations/instances_val2017.json'
11
- val_data_prefix = 'val2017/' # Prefix of val image path
12
-
13
- num_classes = 80 # Number of classes for classification
14
- # Batch size of a single GPU during training
15
- train_batch_size_per_gpu = 16
16
- # Worker to pre-fetch data for each single GPU during training
17
- train_num_workers = 8
18
- # persistent_workers must be False if num_workers is 0
19
- persistent_workers = True
20
-
21
- # -----model related-----
22
- # Basic size of multi-scale prior box
23
- anchors = [
24
- [(12, 16), (19, 36), (40, 28)], # P3/8
25
- [(36, 75), (76, 55), (72, 146)], # P4/16
26
- [(142, 110), (192, 243), (459, 401)] # P5/32
27
- ]
28
- # -----train val related-----
29
- # Base learning rate for optim_wrapper. Corresponding to 8xb16=128 bs
30
- base_lr = 0.01
31
- max_epochs = 300 # Maximum training epochs
32
-
33
- num_epoch_stage2 = 30 # The last 30 epochs switch evaluation interval
34
- val_interval_stage2 = 1 # Evaluation interval
35
-
36
- model_test_cfg = dict(
37
- # The config of multi-label for multi-class prediction.
38
- multi_label=True,
39
- # The number of boxes before NMS.
40
- nms_pre=30000,
41
- score_thr=0.001, # Threshold to filter out boxes.
42
- nms=dict(type='nms', iou_threshold=0.65), # NMS type and threshold
43
- max_per_img=300) # Max number of detections of each image
44
-
45
- # ========================Possible modified parameters========================
46
- # -----data related-----
47
- img_scale = (640, 640) # width, height
48
- # Dataset type, this will be used to define the dataset
49
- dataset_type = 'YOLOv5CocoDataset'
50
- # Batch size of a single GPU during validation
51
- val_batch_size_per_gpu = 1
52
- # Worker to pre-fetch data for each single GPU during validation
53
- val_num_workers = 2
54
-
55
- # Config of batch shapes. Only on val.
56
- # It means not used if batch_shapes_cfg is None.
57
- batch_shapes_cfg = dict(
58
- type='BatchShapePolicy',
59
- batch_size=val_batch_size_per_gpu,
60
- img_size=img_scale[0],
61
- # The image scale of padding should be divided by pad_size_divisor
62
- size_divisor=32,
63
- # Additional paddings for pixel scale
64
- extra_pad_ratio=0.5)
65
-
66
- # -----model related-----
67
- strides = [8, 16, 32] # Strides of multi-scale prior box
68
- num_det_layers = 3 # The number of model output scales
69
- norm_cfg = dict(type='BN', momentum=0.03, eps=0.001)
70
-
71
- # Data augmentation
72
- max_translate_ratio = 0.2 # YOLOv5RandomAffine
73
- scaling_ratio_range = (0.1, 2.0) # YOLOv5RandomAffine
74
- mixup_prob = 0.15 # YOLOv5MixUp
75
- randchoice_mosaic_prob = [0.8, 0.2]
76
- mixup_alpha = 8.0 # YOLOv5MixUp
77
- mixup_beta = 8.0 # YOLOv5MixUp
78
-
79
- # -----train val related-----
80
- loss_cls_weight = 0.3
81
- loss_bbox_weight = 0.05
82
- loss_obj_weight = 0.7
83
- # BatchYOLOv7Assigner params
84
- simota_candidate_topk = 10
85
- simota_iou_weight = 3.0
86
- simota_cls_weight = 1.0
87
- prior_match_thr = 4. # Priori box matching threshold
88
- obj_level_weights = [4., 1.,
89
- 0.4] # The obj loss weights of the three output layers
90
-
91
- lr_factor = 0.1 # Learning rate scaling factor
92
- weight_decay = 0.0005
93
- save_epoch_intervals = 1 # Save model checkpoint and validation intervals
94
- max_keep_ckpts = 3 # The maximum checkpoints to keep.
95
-
96
- # Single-scale training is recommended to
97
- # be turned on, which can speed up training.
98
- env_cfg = dict(cudnn_benchmark=True)
99
-
100
- # ===============================Unmodified in most cases====================
101
- model = dict(
102
- type='YOLODetector',
103
- data_preprocessor=dict(
104
- type='YOLOv5DetDataPreprocessor',
105
- mean=[0., 0., 0.],
106
- std=[255., 255., 255.],
107
- bgr_to_rgb=True),
108
- backbone=dict(
109
- type='YOLOv7Backbone',
110
- arch='L',
111
- norm_cfg=norm_cfg,
112
- act_cfg=dict(type='SiLU', inplace=True)),
113
- neck=dict(
114
- type='YOLOv7PAFPN',
115
- block_cfg=dict(
116
- type='ELANBlock',
117
- middle_ratio=0.5,
118
- block_ratio=0.25,
119
- num_blocks=4,
120
- num_convs_in_block=1),
121
- upsample_feats_cat_first=False,
122
- in_channels=[512, 1024, 1024],
123
- # The real output channel will be multiplied by 2
124
- out_channels=[128, 256, 512],
125
- norm_cfg=norm_cfg,
126
- act_cfg=dict(type='SiLU', inplace=True)),
127
- bbox_head=dict(
128
- type='YOLOv7Head',
129
- head_module=dict(
130
- type='YOLOv7HeadModule',
131
- num_classes=num_classes,
132
- in_channels=[256, 512, 1024],
133
- featmap_strides=strides,
134
- num_base_priors=3),
135
- prior_generator=dict(
136
- type='mmdet.YOLOAnchorGenerator',
137
- base_sizes=anchors,
138
- strides=strides),
139
- # scaled based on number of detection layers
140
- loss_cls=dict(
141
- type='mmdet.CrossEntropyLoss',
142
- use_sigmoid=True,
143
- reduction='mean',
144
- loss_weight=loss_cls_weight *
145
- (num_classes / 80 * 3 / num_det_layers)),
146
- loss_bbox=dict(
147
- type='IoULoss',
148
- iou_mode='ciou',
149
- bbox_format='xywh',
150
- reduction='mean',
151
- loss_weight=loss_bbox_weight * (3 / num_det_layers),
152
- return_iou=True),
153
- loss_obj=dict(
154
- type='mmdet.CrossEntropyLoss',
155
- use_sigmoid=True,
156
- reduction='mean',
157
- loss_weight=loss_obj_weight *
158
- ((img_scale[0] / 640)**2 * 3 / num_det_layers)),
159
- prior_match_thr=prior_match_thr,
160
- obj_level_weights=obj_level_weights,
161
- # BatchYOLOv7Assigner params
162
- simota_candidate_topk=simota_candidate_topk,
163
- simota_iou_weight=simota_iou_weight,
164
- simota_cls_weight=simota_cls_weight),
165
- test_cfg=model_test_cfg)
166
-
167
- pre_transform = [
168
- dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
169
- dict(type='LoadAnnotations', with_bbox=True)
170
- ]
171
-
172
- mosiac4_pipeline = [
173
- dict(
174
- type='Mosaic',
175
- img_scale=img_scale,
176
- pad_val=114.0,
177
- pre_transform=pre_transform),
178
- dict(
179
- type='YOLOv5RandomAffine',
180
- max_rotate_degree=0.0,
181
- max_shear_degree=0.0,
182
- max_translate_ratio=max_translate_ratio, # note
183
- scaling_ratio_range=scaling_ratio_range, # note
184
- # img_scale is (width, height)
185
- border=(-img_scale[0] // 2, -img_scale[1] // 2),
186
- border_val=(114, 114, 114)),
187
- ]
188
-
189
- mosiac9_pipeline = [
190
- dict(
191
- type='Mosaic9',
192
- img_scale=img_scale,
193
- pad_val=114.0,
194
- pre_transform=pre_transform),
195
- dict(
196
- type='YOLOv5RandomAffine',
197
- max_rotate_degree=0.0,
198
- max_shear_degree=0.0,
199
- max_translate_ratio=max_translate_ratio, # note
200
- scaling_ratio_range=scaling_ratio_range, # note
201
- # img_scale is (width, height)
202
- border=(-img_scale[0] // 2, -img_scale[1] // 2),
203
- border_val=(114, 114, 114)),
204
- ]
205
-
206
- randchoice_mosaic_pipeline = dict(
207
- type='RandomChoice',
208
- transforms=[mosiac4_pipeline, mosiac9_pipeline],
209
- prob=randchoice_mosaic_prob)
210
-
211
- train_pipeline = [
212
- *pre_transform,
213
- randchoice_mosaic_pipeline,
214
- dict(
215
- type='YOLOv5MixUp',
216
- alpha=mixup_alpha, # note
217
- beta=mixup_beta, # note
218
- prob=mixup_prob,
219
- pre_transform=[*pre_transform, randchoice_mosaic_pipeline]),
220
- dict(type='YOLOv5HSVRandomAug'),
221
- dict(type='mmdet.RandomFlip', prob=0.5),
222
- dict(
223
- type='mmdet.PackDetInputs',
224
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape', 'flip',
225
- 'flip_direction'))
226
- ]
227
-
228
- train_dataloader = dict(
229
- batch_size=train_batch_size_per_gpu,
230
- num_workers=train_num_workers,
231
- persistent_workers=persistent_workers,
232
- pin_memory=True,
233
- sampler=dict(type='DefaultSampler', shuffle=True),
234
- collate_fn=dict(type='yolov5_collate'), # FASTER
235
- dataset=dict(
236
- type=dataset_type,
237
- data_root=data_root,
238
- ann_file=train_ann_file,
239
- data_prefix=dict(img=train_data_prefix),
240
- filter_cfg=dict(filter_empty_gt=False, min_size=32),
241
- pipeline=train_pipeline))
242
-
243
- test_pipeline = [
244
- dict(type='LoadImageFromFile', file_client_args=_base_.file_client_args),
245
- dict(type='YOLOv5KeepRatioResize', scale=img_scale),
246
- dict(
247
- type='LetterResize',
248
- scale=img_scale,
249
- allow_scale_up=False,
250
- pad_val=dict(img=114)),
251
- dict(type='LoadAnnotations', with_bbox=True, _scope_='mmdet'),
252
- dict(
253
- type='mmdet.PackDetInputs',
254
- meta_keys=('img_id', 'img_path', 'ori_shape', 'img_shape',
255
- 'scale_factor', 'pad_param'))
256
- ]
257
-
258
- val_dataloader = dict(
259
- batch_size=val_batch_size_per_gpu,
260
- num_workers=val_num_workers,
261
- persistent_workers=persistent_workers,
262
- pin_memory=True,
263
- drop_last=False,
264
- sampler=dict(type='DefaultSampler', shuffle=False),
265
- dataset=dict(
266
- type=dataset_type,
267
- data_root=data_root,
268
- test_mode=True,
269
- data_prefix=dict(img=val_data_prefix),
270
- ann_file=val_ann_file,
271
- pipeline=test_pipeline,
272
- batch_shapes_cfg=batch_shapes_cfg))
273
-
274
- test_dataloader = val_dataloader
275
-
276
- param_scheduler = None
277
- optim_wrapper = dict(
278
- type='OptimWrapper',
279
- optimizer=dict(
280
- type='SGD',
281
- lr=base_lr,
282
- momentum=0.937,
283
- weight_decay=weight_decay,
284
- nesterov=True,
285
- batch_size_per_gpu=train_batch_size_per_gpu),
286
- constructor='YOLOv7OptimWrapperConstructor')
287
-
288
- default_hooks = dict(
289
- param_scheduler=dict(
290
- type='YOLOv5ParamSchedulerHook',
291
- scheduler_type='cosine',
292
- lr_factor=lr_factor, # note
293
- max_epochs=max_epochs),
294
- checkpoint=dict(
295
- type='CheckpointHook',
296
- save_param_scheduler=False,
297
- interval=save_epoch_intervals,
298
- save_best='auto',
299
- max_keep_ckpts=max_keep_ckpts))
300
-
301
- custom_hooks = [
302
- dict(
303
- type='EMAHook',
304
- ema_type='ExpMomentumEMA',
305
- momentum=0.0001,
306
- update_buffers=True,
307
- strict_load=False,
308
- priority=49)
309
- ]
310
-
311
- val_evaluator = dict(
312
- type='mmdet.CocoMetric',
313
- proposal_nums=(100, 1, 10), # Can be accelerated
314
- ann_file=data_root + val_ann_file,
315
- metric='bbox')
316
- test_evaluator = val_evaluator
317
-
318
- train_cfg = dict(
319
- type='EpochBasedTrainLoop',
320
- max_epochs=max_epochs,
321
- val_interval=save_epoch_intervals,
322
- dynamic_intervals=[(max_epochs - num_epoch_stage2, val_interval_stage2)])
323
- val_cfg = dict(type='ValLoop')
324
- test_cfg = dict(type='TestLoop')
spaces/Adapter/CoAdapter/ldm/modules/distributions/__init__.py DELETED
File without changes
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/clickoutside/Factory.js DELETED
@@ -1,11 +0,0 @@
- import ClickOutside from './ClickOutside.js';
- import ObjectFactory from '../ObjectFactory.js';
- import SetValue from '../../../plugins/utils/object/SetValue.js';
-
- ObjectFactory.register('clickOutside', function (gameObject, config) {
-     return new ClickOutside(gameObject, config);
- });
-
- SetValue(window, 'RexPlugins.UI.ClickOutside', ClickOutside);
-
- export default ClickOutside;
spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/ui/confirmdialog/Factory.d.ts DELETED
@@ -1,5 +0,0 @@
- import ConfirmDialog from './ConfirmDialog';
-
- export default function (
-     config?: ConfirmDialog.IConfig
- ): ConfirmDialog;
spaces/Al-Chan/Vits_League_of_Legends_Yuumi_TTS/long_audio_transcribe.py DELETED
@@ -1,71 +0,0 @@
- from moviepy.editor import AudioFileClip
- import whisper
- import os
- import torchaudio
- import librosa
- import torch
- import argparse
- parent_dir = "./denoised_audio/"
- filelist = list(os.walk(parent_dir))[0][2]
- if __name__ == "__main__":
-     parser = argparse.ArgumentParser()
-     parser.add_argument("--languages", default="CJE")
-     parser.add_argument("--whisper_size", default="medium")
-     args = parser.parse_args()
-     if args.languages == "CJE":
-         lang2token = {
-             'zh': "[ZH]",
-             'ja': "[JA]",
-             "en": "[EN]",
-         }
-     elif args.languages == "CJ":
-         lang2token = {
-             'zh': "[ZH]",
-             'ja': "[JA]",
-         }
-     elif args.languages == "C":
-         lang2token = {
-             'zh': "[ZH]",
-         }
-     assert(torch.cuda.is_available()), "Please enable GPU in order to run Whisper!"
-     model = whisper.load_model(args.whisper_size)
-     speaker_annos = []
-     for file in filelist:
-         print(f"transcribing {parent_dir + file}...\n")
-         options = dict(beam_size=5, best_of=5)
-         transcribe_options = dict(task="transcribe", **options)
-         result = model.transcribe(parent_dir + file, **transcribe_options)
-         segments = result["segments"]
-         # result = model.transcribe(parent_dir + file)
-         lang = result['language']
-         if result['language'] not in list(lang2token.keys()):
-             print(f"{lang} not supported, ignoring...\n")
-             continue
-         # segment audio based on segment results
-         character_name = file.rstrip(".wav").split("_")[0]
-         code = file.rstrip(".wav").split("_")[1]
-         if not os.path.exists("./segmented_character_voice/" + character_name):
-             os.mkdir("./segmented_character_voice/" + character_name)
-         wav, sr = torchaudio.load(parent_dir + file, frame_offset=0, num_frames=-1, normalize=True,
-                                   channels_first=True)
-
-         for i, seg in enumerate(result['segments']):
-             start_time = seg['start']
-             end_time = seg['end']
-             text = seg['text']
-             text = lang2token[lang] + text.replace("\n", "") + lang2token[lang]
-             text = text + "\n"
-             wav_seg = wav[:, int(start_time*sr):int(end_time*sr)]
-             wav_seg_name = f"{character_name}_{code}_{i}.wav"
-             savepth = "./segmented_character_voice/" + character_name + "/" + wav_seg_name
-             speaker_annos.append(savepth + "|" + character_name + "|" + text)
-             print(f"Transcribed segment: {speaker_annos[-1]}")
-             # trimmed_wav_seg = librosa.effects.trim(wav_seg.squeeze().numpy())
-             # trimmed_wav_seg = torch.tensor(trimmed_wav_seg[0]).unsqueeze(0)
-             torchaudio.save(savepth, wav_seg, 22050, channels_first=True)
-     if len(speaker_annos) == 0:
-         print("Warning: no long audios & videos found, this IS expected if you have only uploaded short audios")
-         print("this IS NOT expected if you have uploaded any long audios, videos or video links. Please check your file structure or make sure your audio/video language is supported.")
-     with open("long_character_anno.txt", 'w', encoding='utf-8') as f:
-         for line in speaker_annos:
-             f.write(line)
spaces/Alycer/VITS-Umamusume-voice-synthesizer/ONNXVITS_transforms.py DELETED
@@ -1,196 +0,0 @@
1
- import torch
2
- from torch.nn import functional as F
3
-
4
- import numpy as np
5
-
6
-
7
- DEFAULT_MIN_BIN_WIDTH = 1e-3
8
- DEFAULT_MIN_BIN_HEIGHT = 1e-3
9
- DEFAULT_MIN_DERIVATIVE = 1e-3
10
-
11
-
12
- def piecewise_rational_quadratic_transform(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails=None,
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
-
- if tails is None:
- spline_fn = rational_quadratic_spline
- spline_kwargs = {}
- else:
- spline_fn = unconstrained_rational_quadratic_spline
- spline_kwargs = {
- 'tails': tails,
- 'tail_bound': tail_bound
- }
-
- outputs, logabsdet = spline_fn(
- inputs=inputs,
- unnormalized_widths=unnormalized_widths,
- unnormalized_heights=unnormalized_heights,
- unnormalized_derivatives=unnormalized_derivatives,
- inverse=inverse,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative,
- **spline_kwargs
- )
- return outputs, logabsdet
-
-
- def searchsorted(bin_locations, inputs, eps=1e-6):
- bin_locations[..., -1] += eps
- return torch.sum(
- inputs[..., None] >= bin_locations,
- dim=-1
- ) - 1
-
-
- def unconstrained_rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- tails='linear',
- tail_bound=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound)
- outside_interval_mask = ~inside_interval_mask
-
- outputs = torch.zeros_like(inputs)
- logabsdet = torch.zeros_like(inputs)
-
- if tails == 'linear':
- #unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1))
- unnormalized_derivatives_ = torch.zeros((1, 1, unnormalized_derivatives.size(2), unnormalized_derivatives.size(3)+2))
- unnormalized_derivatives_[...,1:-1] = unnormalized_derivatives
- unnormalized_derivatives = unnormalized_derivatives_
- constant = np.log(np.exp(1 - min_derivative) - 1)
- unnormalized_derivatives[..., 0] = constant
- unnormalized_derivatives[..., -1] = constant
-
- outputs[outside_interval_mask] = inputs[outside_interval_mask]
- logabsdet[outside_interval_mask] = 0
- else:
- raise RuntimeError('{} tails are not implemented.'.format(tails))
-
- outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline(
- inputs=inputs[inside_interval_mask],
- unnormalized_widths=unnormalized_widths[inside_interval_mask, :],
- unnormalized_heights=unnormalized_heights[inside_interval_mask, :],
- unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :],
- inverse=inverse,
- left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound,
- min_bin_width=min_bin_width,
- min_bin_height=min_bin_height,
- min_derivative=min_derivative
- )
-
- return outputs, logabsdet
-
- def rational_quadratic_spline(inputs,
- unnormalized_widths,
- unnormalized_heights,
- unnormalized_derivatives,
- inverse=False,
- left=0., right=1., bottom=0., top=1.,
- min_bin_width=DEFAULT_MIN_BIN_WIDTH,
- min_bin_height=DEFAULT_MIN_BIN_HEIGHT,
- min_derivative=DEFAULT_MIN_DERIVATIVE):
- if torch.min(inputs) < left or torch.max(inputs) > right:
- raise ValueError('Input to a transform is not within its domain')
-
- num_bins = unnormalized_widths.shape[-1]
-
- if min_bin_width * num_bins > 1.0:
- raise ValueError('Minimal bin width too large for the number of bins')
- if min_bin_height * num_bins > 1.0:
- raise ValueError('Minimal bin height too large for the number of bins')
-
- widths = F.softmax(unnormalized_widths, dim=-1)
- widths = min_bin_width + (1 - min_bin_width * num_bins) * widths
- cumwidths = torch.cumsum(widths, dim=-1)
- cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0)
- cumwidths = (right - left) * cumwidths + left
- cumwidths[..., 0] = left
- cumwidths[..., -1] = right
- widths = cumwidths[..., 1:] - cumwidths[..., :-1]
-
- derivatives = min_derivative + F.softplus(unnormalized_derivatives)
-
- heights = F.softmax(unnormalized_heights, dim=-1)
- heights = min_bin_height + (1 - min_bin_height * num_bins) * heights
- cumheights = torch.cumsum(heights, dim=-1)
- cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0)
- cumheights = (top - bottom) * cumheights + bottom
- cumheights[..., 0] = bottom
- cumheights[..., -1] = top
- heights = cumheights[..., 1:] - cumheights[..., :-1]
-
- if inverse:
- bin_idx = searchsorted(cumheights, inputs)[..., None]
- else:
- bin_idx = searchsorted(cumwidths, inputs)[..., None]
-
- input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0]
- input_bin_widths = widths.gather(-1, bin_idx)[..., 0]
-
- input_cumheights = cumheights.gather(-1, bin_idx)[..., 0]
- delta = heights / widths
- input_delta = delta.gather(-1, bin_idx)[..., 0]
-
- input_derivatives = derivatives.gather(-1, bin_idx)[..., 0]
- input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0]
-
- input_heights = heights.gather(-1, bin_idx)[..., 0]
-
- if inverse:
- a = (((inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta)
- + input_heights * (input_delta - input_derivatives)))
- b = (input_heights * input_derivatives
- - (inputs - input_cumheights) * (input_derivatives
- + input_derivatives_plus_one
- - 2 * input_delta))
- c = - input_delta * (inputs - input_cumheights)
-
- discriminant = b.pow(2) - 4 * a * c
- assert (discriminant >= 0).all()
-
- root = (2 * c) / (-b - torch.sqrt(discriminant))
- outputs = root * input_bin_widths + input_cumwidths
-
- theta_one_minus_theta = root * (1 - root)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - root).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, -logabsdet
- else:
- theta = (inputs - input_cumwidths) / input_bin_widths
- theta_one_minus_theta = theta * (1 - theta)
-
- numerator = input_heights * (input_delta * theta.pow(2)
- + input_derivatives * theta_one_minus_theta)
- denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta)
- * theta_one_minus_theta)
- outputs = input_cumheights + numerator / denominator
-
- derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2)
- + 2 * input_delta * theta_one_minus_theta
- + input_derivatives * (1 - theta).pow(2))
- logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator)
-
- return outputs, logabsdet
 
spaces/Ameaou/academic-chatgpt3.1/crazy_functions/test_project/cpp/longcode/jpge.cpp DELETED
@@ -1,1049 +0,0 @@
1
- // jpge.cpp - C++ class for JPEG compression.
2
- // Public domain, Rich Geldreich <[email protected]>
3
- // v1.01, Dec. 18, 2010 - Initial release
4
- // v1.02, Apr. 6, 2011 - Removed 2x2 ordered dither in H2V1 chroma subsampling method load_block_16_8_8(). (The rounding factor was 2, when it should have been 1. Either way, it wasn't helping.)
5
- // v1.03, Apr. 16, 2011 - Added support for optimized Huffman code tables, optimized dynamic memory allocation down to only 1 alloc.
6
- // Also from Alex Evans: Added RGBA support, linear memory allocator (no longer needed in v1.03).
7
- // v1.04, May. 19, 2012: Forgot to set m_pFile ptr to NULL in cfile_stream::close(). Thanks to Owen Kaluza for reporting this bug.
8
- // Code tweaks to fix VS2008 static code analysis warnings (all looked harmless).
9
- // Code review revealed method load_block_16_8_8() (used for the non-default H2V1 sampling mode to downsample chroma) somehow didn't get the rounding factor fix from v1.02.
10
-
11
- #include "jpge.h"
12
-
13
- #include <stdlib.h>
14
- #include <string.h>
15
- #if PLATFORM_WINDOWS
16
- #include <malloc.h>
17
- #endif
18
-
19
- #define JPGE_MAX(a,b) (((a)>(b))?(a):(b))
20
- #define JPGE_MIN(a,b) (((a)<(b))?(a):(b))
21
-
22
- namespace jpge {
23
-
24
- static inline void *jpge_malloc(size_t nSize) { return FMemory::Malloc(nSize); }
25
- static inline void jpge_free(void *p) { FMemory::Free(p); }
26
-
27
- // Various JPEG enums and tables.
28
- enum { M_SOF0 = 0xC0, M_DHT = 0xC4, M_SOI = 0xD8, M_EOI = 0xD9, M_SOS = 0xDA, M_DQT = 0xDB, M_APP0 = 0xE0 };
29
- enum { DC_LUM_CODES = 12, AC_LUM_CODES = 256, DC_CHROMA_CODES = 12, AC_CHROMA_CODES = 256, MAX_HUFF_SYMBOLS = 257, MAX_HUFF_CODESIZE = 32 };
30
-
31
- static uint8 s_zag[64] = { 0,1,8,16,9,2,3,10,17,24,32,25,18,11,4,5,12,19,26,33,40,48,41,34,27,20,13,6,7,14,21,28,35,42,49,56,57,50,43,36,29,22,15,23,30,37,44,51,58,59,52,45,38,31,39,46,53,60,61,54,47,55,62,63 };
32
- static int16 s_std_lum_quant[64] = { 16,11,12,14,12,10,16,14,13,14,18,17,16,19,24,40,26,24,22,22,24,49,35,37,29,40,58,51,61,60,57,51,56,55,64,72,92,78,64,68,87,69,55,56,80,109,81,87,95,98,103,104,103,62,77,113,121,112,100,120,92,101,103,99 };
33
- static int16 s_std_croma_quant[64] = { 17,18,18,24,21,24,47,26,26,47,99,66,56,66,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99,99 };
34
- static uint8 s_dc_lum_bits[17] = { 0,0,1,5,1,1,1,1,1,1,0,0,0,0,0,0,0 };
35
- static uint8 s_dc_lum_val[DC_LUM_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
36
- static uint8 s_ac_lum_bits[17] = { 0,0,2,1,3,3,2,4,3,5,5,4,4,0,0,1,0x7d };
37
- static uint8 s_ac_lum_val[AC_LUM_CODES] =
38
- {
39
- 0x01,0x02,0x03,0x00,0x04,0x11,0x05,0x12,0x21,0x31,0x41,0x06,0x13,0x51,0x61,0x07,0x22,0x71,0x14,0x32,0x81,0x91,0xa1,0x08,0x23,0x42,0xb1,0xc1,0x15,0x52,0xd1,0xf0,
40
- 0x24,0x33,0x62,0x72,0x82,0x09,0x0a,0x16,0x17,0x18,0x19,0x1a,0x25,0x26,0x27,0x28,0x29,0x2a,0x34,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,0x49,
41
- 0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x83,0x84,0x85,0x86,0x87,0x88,0x89,
42
- 0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,0xc4,0xc5,
43
- 0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe1,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf1,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
44
- 0xf9,0xfa
45
- };
46
- static uint8 s_dc_chroma_bits[17] = { 0,0,3,1,1,1,1,1,1,1,1,1,0,0,0,0,0 };
47
- static uint8 s_dc_chroma_val[DC_CHROMA_CODES] = { 0,1,2,3,4,5,6,7,8,9,10,11 };
48
- static uint8 s_ac_chroma_bits[17] = { 0,0,2,1,2,4,4,3,4,7,5,4,4,0,1,2,0x77 };
49
- static uint8 s_ac_chroma_val[AC_CHROMA_CODES] =
50
- {
51
- 0x00,0x01,0x02,0x03,0x11,0x04,0x05,0x21,0x31,0x06,0x12,0x41,0x51,0x07,0x61,0x71,0x13,0x22,0x32,0x81,0x08,0x14,0x42,0x91,0xa1,0xb1,0xc1,0x09,0x23,0x33,0x52,0xf0,
52
- 0x15,0x62,0x72,0xd1,0x0a,0x16,0x24,0x34,0xe1,0x25,0xf1,0x17,0x18,0x19,0x1a,0x26,0x27,0x28,0x29,0x2a,0x35,0x36,0x37,0x38,0x39,0x3a,0x43,0x44,0x45,0x46,0x47,0x48,
53
- 0x49,0x4a,0x53,0x54,0x55,0x56,0x57,0x58,0x59,0x5a,0x63,0x64,0x65,0x66,0x67,0x68,0x69,0x6a,0x73,0x74,0x75,0x76,0x77,0x78,0x79,0x7a,0x82,0x83,0x84,0x85,0x86,0x87,
54
- 0x88,0x89,0x8a,0x92,0x93,0x94,0x95,0x96,0x97,0x98,0x99,0x9a,0xa2,0xa3,0xa4,0xa5,0xa6,0xa7,0xa8,0xa9,0xaa,0xb2,0xb3,0xb4,0xb5,0xb6,0xb7,0xb8,0xb9,0xba,0xc2,0xc3,
55
- 0xc4,0xc5,0xc6,0xc7,0xc8,0xc9,0xca,0xd2,0xd3,0xd4,0xd5,0xd6,0xd7,0xd8,0xd9,0xda,0xe2,0xe3,0xe4,0xe5,0xe6,0xe7,0xe8,0xe9,0xea,0xf2,0xf3,0xf4,0xf5,0xf6,0xf7,0xf8,
56
- 0xf9,0xfa
57
- };
58
-
59
- // Low-level helper functions.
60
- template <class T> inline void clear_obj(T &obj) { memset(&obj, 0, sizeof(obj)); }
61
-
62
- const int YR = 19595, YG = 38470, YB = 7471, CB_R = -11059, CB_G = -21709, CB_B = 32768, CR_R = 32768, CR_G = -27439, CR_B = -5329;
63
- static inline uint8 clamp(int i) { if (static_cast<uint>(i) > 255U) { if (i < 0) i = 0; else if (i > 255) i = 255; } return static_cast<uint8>(i); }
64
-
65
- static void RGB_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
66
- {
67
- for ( ; num_pixels; pDst += 3, pSrc += 3, num_pixels--)
68
- {
69
- const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
70
- pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
71
- pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
72
- pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
73
- }
74
- }
75
-
76
- static void RGB_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
77
- {
78
- for ( ; num_pixels; pDst++, pSrc += 3, num_pixels--)
79
- pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
80
- }
81
-
82
- static void RGBA_to_YCC(uint8* pDst, const uint8 *pSrc, int num_pixels)
83
- {
84
- for ( ; num_pixels; pDst += 3, pSrc += 4, num_pixels--)
85
- {
86
- const int r = pSrc[0], g = pSrc[1], b = pSrc[2];
87
- pDst[0] = static_cast<uint8>((r * YR + g * YG + b * YB + 32768) >> 16);
88
- pDst[1] = clamp(128 + ((r * CB_R + g * CB_G + b * CB_B + 32768) >> 16));
89
- pDst[2] = clamp(128 + ((r * CR_R + g * CR_G + b * CR_B + 32768) >> 16));
90
- }
91
- }
92
-
93
- static void RGBA_to_Y(uint8* pDst, const uint8 *pSrc, int num_pixels)
94
- {
95
- for ( ; num_pixels; pDst++, pSrc += 4, num_pixels--)
96
- pDst[0] = static_cast<uint8>((pSrc[0] * YR + pSrc[1] * YG + pSrc[2] * YB + 32768) >> 16);
97
- }
98
-
99
- static void Y_to_YCC(uint8* pDst, const uint8* pSrc, int num_pixels)
100
- {
101
- for( ; num_pixels; pDst += 3, pSrc++, num_pixels--) { pDst[0] = pSrc[0]; pDst[1] = 128; pDst[2] = 128; }
102
- }
103
-
104
- // Forward DCT - DCT derived from jfdctint.
105
- #define CONST_BITS 13
106
- #define ROW_BITS 2
107
- #define DCT_DESCALE(x, n) (((x) + (((int32)1) << ((n) - 1))) >> (n))
108
- #define DCT_MUL(var, c) (static_cast<int16>(var) * static_cast<int32>(c))
109
- #define DCT1D(s0, s1, s2, s3, s4, s5, s6, s7) \
110
- int32 t0 = s0 + s7, t7 = s0 - s7, t1 = s1 + s6, t6 = s1 - s6, t2 = s2 + s5, t5 = s2 - s5, t3 = s3 + s4, t4 = s3 - s4; \
111
- int32 t10 = t0 + t3, t13 = t0 - t3, t11 = t1 + t2, t12 = t1 - t2; \
112
- int32 u1 = DCT_MUL(t12 + t13, 4433); \
113
- s2 = u1 + DCT_MUL(t13, 6270); \
114
- s6 = u1 + DCT_MUL(t12, -15137); \
115
- u1 = t4 + t7; \
116
- int32 u2 = t5 + t6, u3 = t4 + t6, u4 = t5 + t7; \
117
- int32 z5 = DCT_MUL(u3 + u4, 9633); \
118
- t4 = DCT_MUL(t4, 2446); t5 = DCT_MUL(t5, 16819); \
119
- t6 = DCT_MUL(t6, 25172); t7 = DCT_MUL(t7, 12299); \
120
- u1 = DCT_MUL(u1, -7373); u2 = DCT_MUL(u2, -20995); \
121
- u3 = DCT_MUL(u3, -16069); u4 = DCT_MUL(u4, -3196); \
122
- u3 += z5; u4 += z5; \
123
- s0 = t10 + t11; s1 = t7 + u1 + u4; s3 = t6 + u2 + u3; s4 = t10 - t11; s5 = t5 + u2 + u4; s7 = t4 + u1 + u3;
124
-
125
- static void DCT2D(int32 *p)
126
- {
127
- int32 c, *q = p;
128
- for (c = 7; c >= 0; c--, q += 8)
129
- {
130
- int32 s0 = q[0], s1 = q[1], s2 = q[2], s3 = q[3], s4 = q[4], s5 = q[5], s6 = q[6], s7 = q[7];
131
- DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
132
- q[0] = s0 << ROW_BITS; q[1] = DCT_DESCALE(s1, CONST_BITS-ROW_BITS); q[2] = DCT_DESCALE(s2, CONST_BITS-ROW_BITS); q[3] = DCT_DESCALE(s3, CONST_BITS-ROW_BITS);
133
- q[4] = s4 << ROW_BITS; q[5] = DCT_DESCALE(s5, CONST_BITS-ROW_BITS); q[6] = DCT_DESCALE(s6, CONST_BITS-ROW_BITS); q[7] = DCT_DESCALE(s7, CONST_BITS-ROW_BITS);
134
- }
135
- for (q = p, c = 7; c >= 0; c--, q++)
136
- {
137
- int32 s0 = q[0*8], s1 = q[1*8], s2 = q[2*8], s3 = q[3*8], s4 = q[4*8], s5 = q[5*8], s6 = q[6*8], s7 = q[7*8];
138
- DCT1D(s0, s1, s2, s3, s4, s5, s6, s7);
139
- q[0*8] = DCT_DESCALE(s0, ROW_BITS+3); q[1*8] = DCT_DESCALE(s1, CONST_BITS+ROW_BITS+3); q[2*8] = DCT_DESCALE(s2, CONST_BITS+ROW_BITS+3); q[3*8] = DCT_DESCALE(s3, CONST_BITS+ROW_BITS+3);
140
- q[4*8] = DCT_DESCALE(s4, ROW_BITS+3); q[5*8] = DCT_DESCALE(s5, CONST_BITS+ROW_BITS+3); q[6*8] = DCT_DESCALE(s6, CONST_BITS+ROW_BITS+3); q[7*8] = DCT_DESCALE(s7, CONST_BITS+ROW_BITS+3);
141
- }
142
- }
143
-
144
- struct sym_freq { uint m_key, m_sym_index; };
145
-
146
- // Radix sorts sym_freq[] array by 32-bit key m_key. Returns ptr to sorted values.
147
- static inline sym_freq* radix_sort_syms(uint num_syms, sym_freq* pSyms0, sym_freq* pSyms1)
148
- {
149
- const uint cMaxPasses = 4;
150
- uint32 hist[256 * cMaxPasses]; clear_obj(hist);
151
- for (uint i = 0; i < num_syms; i++) { uint freq = pSyms0[i].m_key; hist[freq & 0xFF]++; hist[256 + ((freq >> 8) & 0xFF)]++; hist[256*2 + ((freq >> 16) & 0xFF)]++; hist[256*3 + ((freq >> 24) & 0xFF)]++; }
152
- sym_freq* pCur_syms = pSyms0, *pNew_syms = pSyms1;
153
- uint total_passes = cMaxPasses; while ((total_passes > 1) && (num_syms == hist[(total_passes - 1) * 256])) total_passes--;
154
- for (uint pass_shift = 0, pass = 0; pass < total_passes; pass++, pass_shift += 8)
155
- {
156
- const uint32* pHist = &hist[pass << 8];
157
- uint offsets[256], cur_ofs = 0;
158
- for (uint i = 0; i < 256; i++) { offsets[i] = cur_ofs; cur_ofs += pHist[i]; }
159
- for (uint i = 0; i < num_syms; i++)
160
- pNew_syms[offsets[(pCur_syms[i].m_key >> pass_shift) & 0xFF]++] = pCur_syms[i];
161
- sym_freq* t = pCur_syms; pCur_syms = pNew_syms; pNew_syms = t;
162
- }
163
- return pCur_syms;
164
- }
165
-
166
- // calculate_minimum_redundancy() originally written by: Alistair Moffat, [email protected], Jyrki Katajainen, [email protected], November 1996.
167
- static void calculate_minimum_redundancy(sym_freq *A, int n)
168
- {
169
- int root, leaf, next, avbl, used, dpth;
170
- if (n==0) return; else if (n==1) { A[0].m_key = 1; return; }
171
- A[0].m_key += A[1].m_key; root = 0; leaf = 2;
172
- for (next=1; next < n-1; next++)
173
- {
174
- if (leaf>=n || A[root].m_key<A[leaf].m_key) { A[next].m_key = A[root].m_key; A[root++].m_key = next; } else A[next].m_key = A[leaf++].m_key;
175
- if (leaf>=n || (root<next && A[root].m_key<A[leaf].m_key)) { A[next].m_key += A[root].m_key; A[root++].m_key = next; } else A[next].m_key += A[leaf++].m_key;
176
- }
177
- A[n-2].m_key = 0;
178
- for (next=n-3; next>=0; next--) A[next].m_key = A[A[next].m_key].m_key+1;
179
- avbl = 1; used = dpth = 0; root = n-2; next = n-1;
180
- while (avbl>0)
181
- {
182
- while (root>=0 && (int)A[root].m_key==dpth) { used++; root--; }
183
- while (avbl>used) { A[next--].m_key = dpth; avbl--; }
184
- avbl = 2*used; dpth++; used = 0;
185
- }
186
- }
187
-
188
- // Limits canonical Huffman code table's max code size to max_code_size.
189
- static void huffman_enforce_max_code_size(int *pNum_codes, int code_list_len, int max_code_size)
190
- {
191
- if (code_list_len <= 1) return;
192
-
193
- for (int i = max_code_size + 1; i <= MAX_HUFF_CODESIZE; i++) pNum_codes[max_code_size] += pNum_codes[i];
194
-
195
- uint32 total = 0;
196
- for (int i = max_code_size; i > 0; i--)
197
- total += (((uint32)pNum_codes[i]) << (max_code_size - i));
198
-
199
- while (total != (1UL << max_code_size))
200
- {
201
- pNum_codes[max_code_size]--;
202
- for (int i = max_code_size - 1; i > 0; i--)
203
- {
204
- if (pNum_codes[i]) { pNum_codes[i]--; pNum_codes[i + 1] += 2; break; }
205
- }
206
- total--;
207
- }
208
- }
209
-
210
- // Generates an optimized Huffman table.
211
- void jpeg_encoder::optimize_huffman_table(int table_num, int table_len)
212
- {
213
- sym_freq syms0[MAX_HUFF_SYMBOLS], syms1[MAX_HUFF_SYMBOLS];
214
- syms0[0].m_key = 1; syms0[0].m_sym_index = 0; // dummy symbol, assures that no valid code contains all 1's
215
- int num_used_syms = 1;
216
- const uint32 *pSym_count = &m_huff_count[table_num][0];
217
- for (int i = 0; i < table_len; i++)
218
- if (pSym_count[i]) { syms0[num_used_syms].m_key = pSym_count[i]; syms0[num_used_syms++].m_sym_index = i + 1; }
219
- sym_freq* pSyms = radix_sort_syms(num_used_syms, syms0, syms1);
220
- calculate_minimum_redundancy(pSyms, num_used_syms);
221
-
222
- // Count the # of symbols of each code size.
223
- int num_codes[1 + MAX_HUFF_CODESIZE]; clear_obj(num_codes);
224
- for (int i = 0; i < num_used_syms; i++)
225
- num_codes[pSyms[i].m_key]++;
226
-
227
- const uint JPGE_CODE_SIZE_LIMIT = 16; // the maximum possible size of a JPEG Huffman code (valid range is [9,16] - 9 vs. 8 because of the dummy symbol)
228
- huffman_enforce_max_code_size(num_codes, num_used_syms, JPGE_CODE_SIZE_LIMIT);
229
-
230
- // Compute m_huff_bits array, which contains the # of symbols per code size.
231
- clear_obj(m_huff_bits[table_num]);
232
- for (int i = 1; i <= (int)JPGE_CODE_SIZE_LIMIT; i++)
233
- m_huff_bits[table_num][i] = static_cast<uint8>(num_codes[i]);
234
-
235
- // Remove the dummy symbol added above, which must be in largest bucket.
236
- for (int i = JPGE_CODE_SIZE_LIMIT; i >= 1; i--)
237
- {
238
- if (m_huff_bits[table_num][i]) { m_huff_bits[table_num][i]--; break; }
239
- }
240
-
241
- // Compute the m_huff_val array, which contains the symbol indices sorted by code size (smallest to largest).
242
- for (int i = num_used_syms - 1; i >= 1; i--)
243
- m_huff_val[table_num][num_used_syms - 1 - i] = static_cast<uint8>(pSyms[i].m_sym_index - 1);
244
- }
245
-
246
- // JPEG marker generation.
247
- void jpeg_encoder::emit_byte(uint8 i)
248
- {
249
- m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_obj(i);
250
- }
251
-
252
- void jpeg_encoder::emit_word(uint i)
253
- {
254
- emit_byte(uint8(i >> 8)); emit_byte(uint8(i & 0xFF));
255
- }
256
-
257
- void jpeg_encoder::emit_marker(int marker)
258
- {
259
- emit_byte(uint8(0xFF)); emit_byte(uint8(marker));
260
- }
261
-
262
- // Emit JFIF marker
263
- void jpeg_encoder::emit_jfif_app0()
264
- {
265
- emit_marker(M_APP0);
266
- emit_word(2 + 4 + 1 + 2 + 1 + 2 + 2 + 1 + 1);
267
- emit_byte(0x4A); emit_byte(0x46); emit_byte(0x49); emit_byte(0x46); /* Identifier: ASCII "JFIF" */
268
- emit_byte(0);
269
- emit_byte(1); /* Major version */
270
- emit_byte(1); /* Minor version */
271
- emit_byte(0); /* Density unit */
272
- emit_word(1);
273
- emit_word(1);
274
- emit_byte(0); /* No thumbnail image */
275
- emit_byte(0);
276
- }
277
-
278
- // Emit quantization tables
279
- void jpeg_encoder::emit_dqt()
280
- {
281
- for (int i = 0; i < ((m_num_components == 3) ? 2 : 1); i++)
282
- {
283
- emit_marker(M_DQT);
284
- emit_word(64 + 1 + 2);
285
- emit_byte(static_cast<uint8>(i));
286
- for (int j = 0; j < 64; j++)
287
- emit_byte(static_cast<uint8>(m_quantization_tables[i][j]));
288
- }
289
- }
290
-
291
- // Emit start of frame marker
292
- void jpeg_encoder::emit_sof()
293
- {
294
- emit_marker(M_SOF0); /* baseline */
295
- emit_word(3 * m_num_components + 2 + 5 + 1);
296
- emit_byte(8); /* precision */
297
- emit_word(m_image_y);
298
- emit_word(m_image_x);
299
- emit_byte(m_num_components);
300
- for (int i = 0; i < m_num_components; i++)
301
- {
302
- emit_byte(static_cast<uint8>(i + 1)); /* component ID */
303
- emit_byte((m_comp_h_samp[i] << 4) + m_comp_v_samp[i]); /* h and v sampling */
304
- emit_byte(i > 0); /* quant. table num */
305
- }
306
- }
307
-
308
- // Emit Huffman table.
309
- void jpeg_encoder::emit_dht(uint8 *bits, uint8 *val, int index, bool ac_flag)
310
- {
311
- emit_marker(M_DHT);
312
-
313
- int length = 0;
314
- for (int i = 1; i <= 16; i++)
315
- length += bits[i];
316
-
317
- emit_word(length + 2 + 1 + 16);
318
- emit_byte(static_cast<uint8>(index + (ac_flag << 4)));
319
-
320
- for (int i = 1; i <= 16; i++)
321
- emit_byte(bits[i]);
322
-
323
- for (int i = 0; i < length; i++)
324
- emit_byte(val[i]);
325
- }
326
-
327
- // Emit all Huffman tables.
328
- void jpeg_encoder::emit_dhts()
329
- {
330
- emit_dht(m_huff_bits[0+0], m_huff_val[0+0], 0, false);
331
- emit_dht(m_huff_bits[2+0], m_huff_val[2+0], 0, true);
332
- if (m_num_components == 3)
333
- {
334
- emit_dht(m_huff_bits[0+1], m_huff_val[0+1], 1, false);
335
- emit_dht(m_huff_bits[2+1], m_huff_val[2+1], 1, true);
336
- }
337
- }
338
-
339
- // emit start of scan
340
- void jpeg_encoder::emit_sos()
341
- {
342
- emit_marker(M_SOS);
343
- emit_word(2 * m_num_components + 2 + 1 + 3);
344
- emit_byte(m_num_components);
345
- for (int i = 0; i < m_num_components; i++)
346
- {
347
- emit_byte(static_cast<uint8>(i + 1));
348
- if (i == 0)
349
- emit_byte((0 << 4) + 0);
350
- else
351
- emit_byte((1 << 4) + 1);
352
- }
353
- emit_byte(0); /* spectral selection */
354
- emit_byte(63);
355
- emit_byte(0);
356
- }
357
-
358
- // Emit all markers at beginning of image file.
359
- void jpeg_encoder::emit_markers()
360
- {
361
- emit_marker(M_SOI);
362
- emit_jfif_app0();
363
- emit_dqt();
364
- emit_sof();
365
- emit_dhts();
366
- emit_sos();
367
- }
368
-
369
- // Compute the actual canonical Huffman codes/code sizes given the JPEG huff bits and val arrays.
370
- void jpeg_encoder::compute_huffman_table(uint *codes, uint8 *code_sizes, uint8 *bits, uint8 *val)
371
- {
372
- int i, l, last_p, si;
373
- uint8 huff_size[257];
374
- uint huff_code[257];
375
- uint code;
376
-
377
- int p = 0;
378
- for (l = 1; l <= 16; l++)
379
- for (i = 1; i <= bits[l]; i++)
380
- huff_size[p++] = (char)l;
381
-
382
- huff_size[p] = 0; last_p = p; // write sentinel
383
-
384
- code = 0; si = huff_size[0]; p = 0;
385
-
386
- while (huff_size[p])
387
- {
388
- while (huff_size[p] == si)
389
- huff_code[p++] = code++;
390
- code <<= 1;
391
- si++;
392
- }
393
-
394
- memset(codes, 0, sizeof(codes[0])*256);
395
- memset(code_sizes, 0, sizeof(code_sizes[0])*256);
396
- for (p = 0; p < last_p; p++)
397
- {
398
- codes[val[p]] = huff_code[p];
399
- code_sizes[val[p]] = huff_size[p];
400
- }
401
- }
402
-
403
- // Quantization table generation.
404
- void jpeg_encoder::compute_quant_table(int32 *pDst, int16 *pSrc)
405
- {
406
- int32 q;
407
- if (m_params.m_quality < 50)
408
- q = 5000 / m_params.m_quality;
409
- else
410
- q = 200 - m_params.m_quality * 2;
411
- for (int i = 0; i < 64; i++)
412
- {
413
- int32 j = *pSrc++; j = (j * q + 50L) / 100L;
414
- *pDst++ = JPGE_MIN(JPGE_MAX(j, 1), 255);
415
- }
416
- }
417
-
418
- // Higher-level methods.
419
- void jpeg_encoder::first_pass_init()
420
- {
421
- m_bit_buffer = 0; m_bits_in = 0;
422
- memset(m_last_dc_val, 0, 3 * sizeof(m_last_dc_val[0]));
423
- m_mcu_y_ofs = 0;
424
- m_pass_num = 1;
425
- }
426
-
427
- bool jpeg_encoder::second_pass_init()
428
- {
429
- compute_huffman_table(&m_huff_codes[0+0][0], &m_huff_code_sizes[0+0][0], m_huff_bits[0+0], m_huff_val[0+0]);
430
- compute_huffman_table(&m_huff_codes[2+0][0], &m_huff_code_sizes[2+0][0], m_huff_bits[2+0], m_huff_val[2+0]);
431
- if (m_num_components > 1)
432
- {
433
- compute_huffman_table(&m_huff_codes[0+1][0], &m_huff_code_sizes[0+1][0], m_huff_bits[0+1], m_huff_val[0+1]);
434
- compute_huffman_table(&m_huff_codes[2+1][0], &m_huff_code_sizes[2+1][0], m_huff_bits[2+1], m_huff_val[2+1]);
435
- }
436
- first_pass_init();
437
- emit_markers();
438
- m_pass_num = 2;
439
- return true;
440
- }
441
-
442
- bool jpeg_encoder::jpg_open(int p_x_res, int p_y_res, int src_channels)
443
- {
444
- m_num_components = 3;
445
- switch (m_params.m_subsampling)
446
- {
447
- case Y_ONLY:
448
- {
449
- m_num_components = 1;
450
- m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
451
- m_mcu_x = 8; m_mcu_y = 8;
452
- break;
453
- }
454
- case H1V1:
455
- {
456
- m_comp_h_samp[0] = 1; m_comp_v_samp[0] = 1;
457
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
458
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
459
- m_mcu_x = 8; m_mcu_y = 8;
460
- break;
461
- }
462
- case H2V1:
463
- {
464
- m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 1;
465
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
466
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
467
- m_mcu_x = 16; m_mcu_y = 8;
468
- break;
469
- }
470
- case H2V2:
471
- {
472
- m_comp_h_samp[0] = 2; m_comp_v_samp[0] = 2;
473
- m_comp_h_samp[1] = 1; m_comp_v_samp[1] = 1;
474
- m_comp_h_samp[2] = 1; m_comp_v_samp[2] = 1;
475
- m_mcu_x = 16; m_mcu_y = 16;
476
- }
477
- }
478
-
479
- m_image_x = p_x_res; m_image_y = p_y_res;
480
- m_image_bpp = src_channels;
481
- m_image_bpl = m_image_x * src_channels;
482
- m_image_x_mcu = (m_image_x + m_mcu_x - 1) & (~(m_mcu_x - 1));
483
- m_image_y_mcu = (m_image_y + m_mcu_y - 1) & (~(m_mcu_y - 1));
484
- m_image_bpl_xlt = m_image_x * m_num_components;
485
- m_image_bpl_mcu = m_image_x_mcu * m_num_components;
486
- m_mcus_per_row = m_image_x_mcu / m_mcu_x;
487
-
488
- if ((m_mcu_lines[0] = static_cast<uint8*>(jpge_malloc(m_image_bpl_mcu * m_mcu_y))) == NULL) return false;
489
- for (int i = 1; i < m_mcu_y; i++)
490
- m_mcu_lines[i] = m_mcu_lines[i-1] + m_image_bpl_mcu;
491
-
492
- compute_quant_table(m_quantization_tables[0], s_std_lum_quant);
493
- compute_quant_table(m_quantization_tables[1], m_params.m_no_chroma_discrim_flag ? s_std_lum_quant : s_std_croma_quant);
494
-
495
- m_out_buf_left = JPGE_OUT_BUF_SIZE;
496
- m_pOut_buf = m_out_buf;
497
-
498
- if (m_params.m_two_pass_flag)
499
- {
500
- clear_obj(m_huff_count);
501
- first_pass_init();
502
- }
503
- else
504
- {
505
- memcpy(m_huff_bits[0+0], s_dc_lum_bits, 17); memcpy(m_huff_val [0+0], s_dc_lum_val, DC_LUM_CODES);
506
- memcpy(m_huff_bits[2+0], s_ac_lum_bits, 17); memcpy(m_huff_val [2+0], s_ac_lum_val, AC_LUM_CODES);
507
- memcpy(m_huff_bits[0+1], s_dc_chroma_bits, 17); memcpy(m_huff_val [0+1], s_dc_chroma_val, DC_CHROMA_CODES);
508
- memcpy(m_huff_bits[2+1], s_ac_chroma_bits, 17); memcpy(m_huff_val [2+1], s_ac_chroma_val, AC_CHROMA_CODES);
509
- if (!second_pass_init()) return false; // in effect, skip over the first pass
510
- }
511
- return m_all_stream_writes_succeeded;
512
- }
513
-
514
- void jpeg_encoder::load_block_8_8_grey(int x)
515
- {
516
- uint8 *pSrc;
517
- sample_array_t *pDst = m_sample_array;
518
- x <<= 3;
519
- for (int i = 0; i < 8; i++, pDst += 8)
520
- {
521
- pSrc = m_mcu_lines[i] + x;
522
- pDst[0] = pSrc[0] - 128; pDst[1] = pSrc[1] - 128; pDst[2] = pSrc[2] - 128; pDst[3] = pSrc[3] - 128;
523
- pDst[4] = pSrc[4] - 128; pDst[5] = pSrc[5] - 128; pDst[6] = pSrc[6] - 128; pDst[7] = pSrc[7] - 128;
524
- }
525
- }
526
-
527
- void jpeg_encoder::load_block_8_8(int x, int y, int c)
528
- {
529
- uint8 *pSrc;
530
- sample_array_t *pDst = m_sample_array;
531
- x = (x * (8 * 3)) + c;
532
- y <<= 3;
533
- for (int i = 0; i < 8; i++, pDst += 8)
534
- {
535
- pSrc = m_mcu_lines[y + i] + x;
536
- pDst[0] = pSrc[0 * 3] - 128; pDst[1] = pSrc[1 * 3] - 128; pDst[2] = pSrc[2 * 3] - 128; pDst[3] = pSrc[3 * 3] - 128;
537
- pDst[4] = pSrc[4 * 3] - 128; pDst[5] = pSrc[5 * 3] - 128; pDst[6] = pSrc[6 * 3] - 128; pDst[7] = pSrc[7 * 3] - 128;
538
- }
539
- }
540
-
541
- void jpeg_encoder::load_block_16_8(int x, int c)
542
- {
543
- uint8 *pSrc1, *pSrc2;
544
- sample_array_t *pDst = m_sample_array;
545
- x = (x * (16 * 3)) + c;
546
- int a = 0, b = 2;
547
- for (int i = 0; i < 16; i += 2, pDst += 8)
548
- {
549
- pSrc1 = m_mcu_lines[i + 0] + x;
550
- pSrc2 = m_mcu_lines[i + 1] + x;
551
- pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3] + pSrc2[ 0 * 3] + pSrc2[ 1 * 3] + a) >> 2) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3] + pSrc2[ 2 * 3] + pSrc2[ 3 * 3] + b) >> 2) - 128;
552
- pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3] + pSrc2[ 4 * 3] + pSrc2[ 5 * 3] + a) >> 2) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3] + pSrc2[ 6 * 3] + pSrc2[ 7 * 3] + b) >> 2) - 128;
553
- pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3] + pSrc2[ 8 * 3] + pSrc2[ 9 * 3] + a) >> 2) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3] + pSrc2[10 * 3] + pSrc2[11 * 3] + b) >> 2) - 128;
554
- pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3] + pSrc2[12 * 3] + pSrc2[13 * 3] + a) >> 2) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3] + pSrc2[14 * 3] + pSrc2[15 * 3] + b) >> 2) - 128;
555
- int temp = a; a = b; b = temp;
556
- }
557
- }
558
-
559
- void jpeg_encoder::load_block_16_8_8(int x, int c)
560
- {
561
- uint8 *pSrc1;
562
- sample_array_t *pDst = m_sample_array;
563
- x = (x * (16 * 3)) + c;
564
- for (int i = 0; i < 8; i++, pDst += 8)
565
- {
566
- pSrc1 = m_mcu_lines[i + 0] + x;
567
- pDst[0] = ((pSrc1[ 0 * 3] + pSrc1[ 1 * 3]) >> 1) - 128; pDst[1] = ((pSrc1[ 2 * 3] + pSrc1[ 3 * 3]) >> 1) - 128;
568
- pDst[2] = ((pSrc1[ 4 * 3] + pSrc1[ 5 * 3]) >> 1) - 128; pDst[3] = ((pSrc1[ 6 * 3] + pSrc1[ 7 * 3]) >> 1) - 128;
569
- pDst[4] = ((pSrc1[ 8 * 3] + pSrc1[ 9 * 3]) >> 1) - 128; pDst[5] = ((pSrc1[10 * 3] + pSrc1[11 * 3]) >> 1) - 128;
570
- pDst[6] = ((pSrc1[12 * 3] + pSrc1[13 * 3]) >> 1) - 128; pDst[7] = ((pSrc1[14 * 3] + pSrc1[15 * 3]) >> 1) - 128;
571
- }
572
- }
573
-
574
- void jpeg_encoder::load_quantized_coefficients(int component_num)
575
- {
576
- int32 *q = m_quantization_tables[component_num > 0];
577
- int16 *pDst = m_coefficient_array;
578
- for (int i = 0; i < 64; i++)
579
- {
580
- sample_array_t j = m_sample_array[s_zag[i]];
581
- if (j < 0)
582
- {
583
- if ((j = -j + (*q >> 1)) < *q)
584
- *pDst++ = 0;
585
- else
586
- *pDst++ = static_cast<int16>(-(j / *q));
587
- }
588
- else
589
- {
590
- if ((j = j + (*q >> 1)) < *q)
591
- *pDst++ = 0;
592
- else
593
- *pDst++ = static_cast<int16>((j / *q));
594
- }
595
- q++;
596
- }
597
- }
598
-
599
- void jpeg_encoder::flush_output_buffer()
600
- {
601
- if (m_out_buf_left != JPGE_OUT_BUF_SIZE)
602
- m_all_stream_writes_succeeded = m_all_stream_writes_succeeded && m_pStream->put_buf(m_out_buf, JPGE_OUT_BUF_SIZE - m_out_buf_left);
603
- m_pOut_buf = m_out_buf;
604
- m_out_buf_left = JPGE_OUT_BUF_SIZE;
605
- }
606
-
607
- void jpeg_encoder::put_bits(uint bits, uint len)
608
- {
609
- m_bit_buffer |= ((uint32)bits << (24 - (m_bits_in += len)));
610
- while (m_bits_in >= 8)
611
- {
612
- uint8 c;
613
- #define JPGE_PUT_BYTE(c) { *m_pOut_buf++ = (c); if (--m_out_buf_left == 0) flush_output_buffer(); }
614
- JPGE_PUT_BYTE(c = (uint8)((m_bit_buffer >> 16) & 0xFF));
615
- if (c == 0xFF) JPGE_PUT_BYTE(0);
616
- m_bit_buffer <<= 8;
617
- m_bits_in -= 8;
618
- }
619
- }
620
-
621
- void jpeg_encoder::code_coefficients_pass_one(int component_num)
622
- {
623
- if (component_num >= 3) return; // just to shut up static analysis
624
- int i, run_len, nbits, temp1;
625
- int16 *src = m_coefficient_array;
626
- uint32 *dc_count = component_num ? m_huff_count[0 + 1] : m_huff_count[0 + 0], *ac_count = component_num ? m_huff_count[2 + 1] : m_huff_count[2 + 0];
627
-
628
- temp1 = src[0] - m_last_dc_val[component_num];
629
- m_last_dc_val[component_num] = src[0];
630
- if (temp1 < 0) temp1 = -temp1;
631
-
632
- nbits = 0;
633
- while (temp1)
634
- {
635
- nbits++; temp1 >>= 1;
636
- }
637
-
638
- dc_count[nbits]++;
639
- for (run_len = 0, i = 1; i < 64; i++)
640
- {
641
- if ((temp1 = m_coefficient_array[i]) == 0)
642
- run_len++;
643
- else
644
- {
645
- while (run_len >= 16)
646
- {
647
- ac_count[0xF0]++;
648
- run_len -= 16;
649
- }
650
- if (temp1 < 0) temp1 = -temp1;
651
- nbits = 1;
652
- while (temp1 >>= 1) nbits++;
653
- ac_count[(run_len << 4) + nbits]++;
654
- run_len = 0;
655
- }
656
- }
657
- if (run_len) ac_count[0]++;
658
- }
659
-
660
- void jpeg_encoder::code_coefficients_pass_two(int component_num)
661
- {
662
- int i, j, run_len, nbits, temp1, temp2;
663
- int16 *pSrc = m_coefficient_array;
664
- uint *codes[2];
665
- uint8 *code_sizes[2];
666
-
667
- if (component_num == 0)
668
- {
669
- codes[0] = m_huff_codes[0 + 0]; codes[1] = m_huff_codes[2 + 0];
670
- code_sizes[0] = m_huff_code_sizes[0 + 0]; code_sizes[1] = m_huff_code_sizes[2 + 0];
671
- }
672
- else
673
- {
674
- codes[0] = m_huff_codes[0 + 1]; codes[1] = m_huff_codes[2 + 1];
675
- code_sizes[0] = m_huff_code_sizes[0 + 1]; code_sizes[1] = m_huff_code_sizes[2 + 1];
676
- }
677
-
678
- temp1 = temp2 = pSrc[0] - m_last_dc_val[component_num];
679
- m_last_dc_val[component_num] = pSrc[0];
680
-
681
- if (temp1 < 0)
682
- {
683
- temp1 = -temp1; temp2--;
684
- }
685
-
686
- nbits = 0;
687
- while (temp1)
688
- {
689
- nbits++; temp1 >>= 1;
690
- }
691
-
692
- put_bits(codes[0][nbits], code_sizes[0][nbits]);
693
- if (nbits) put_bits(temp2 & ((1 << nbits) - 1), nbits);
694
-
695
- for (run_len = 0, i = 1; i < 64; i++)
696
- {
697
- if ((temp1 = m_coefficient_array[i]) == 0)
698
- run_len++;
699
- else
700
- {
701
- while (run_len >= 16)
702
- {
703
- put_bits(codes[1][0xF0], code_sizes[1][0xF0]);
704
- run_len -= 16;
705
- }
706
- if ((temp2 = temp1) < 0)
707
- {
708
- temp1 = -temp1;
709
- temp2--;
710
- }
711
- nbits = 1;
712
- while (temp1 >>= 1)
713
- nbits++;
714
- j = (run_len << 4) + nbits;
715
- put_bits(codes[1][j], code_sizes[1][j]);
716
- put_bits(temp2 & ((1 << nbits) - 1), nbits);
717
- run_len = 0;
718
- }
719
- }
720
- if (run_len)
721
- put_bits(codes[1][0], code_sizes[1][0]);
722
- }
723
-
724
- void jpeg_encoder::code_block(int component_num)
725
- {
726
- DCT2D(m_sample_array);
727
- load_quantized_coefficients(component_num);
728
- if (m_pass_num == 1)
729
- code_coefficients_pass_one(component_num);
730
- else
731
- code_coefficients_pass_two(component_num);
732
- }
733
-
734
- void jpeg_encoder::process_mcu_row()
735
- {
736
- if (m_num_components == 1)
737
- {
738
- for (int i = 0; i < m_mcus_per_row; i++)
739
- {
740
- load_block_8_8_grey(i); code_block(0);
741
- }
742
- }
743
- else if ((m_comp_h_samp[0] == 1) && (m_comp_v_samp[0] == 1))
744
- {
745
- for (int i = 0; i < m_mcus_per_row; i++)
746
- {
747
- load_block_8_8(i, 0, 0); code_block(0); load_block_8_8(i, 0, 1); code_block(1); load_block_8_8(i, 0, 2); code_block(2);
748
- }
749
- }
750
- else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 1))
751
- {
752
- for (int i = 0; i < m_mcus_per_row; i++)
753
- {
754
- load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
755
- load_block_16_8_8(i, 1); code_block(1); load_block_16_8_8(i, 2); code_block(2);
756
- }
757
- }
758
- else if ((m_comp_h_samp[0] == 2) && (m_comp_v_samp[0] == 2))
759
- {
760
- for (int i = 0; i < m_mcus_per_row; i++)
761
- {
762
- load_block_8_8(i * 2 + 0, 0, 0); code_block(0); load_block_8_8(i * 2 + 1, 0, 0); code_block(0);
763
- load_block_8_8(i * 2 + 0, 1, 0); code_block(0); load_block_8_8(i * 2 + 1, 1, 0); code_block(0);
764
- load_block_16_8(i, 1); code_block(1); load_block_16_8(i, 2); code_block(2);
765
- }
766
- }
767
- }
768
-
769
- bool jpeg_encoder::terminate_pass_one()
770
- {
771
- optimize_huffman_table(0+0, DC_LUM_CODES); optimize_huffman_table(2+0, AC_LUM_CODES);
772
- if (m_num_components > 1)
773
- {
774
- optimize_huffman_table(0+1, DC_CHROMA_CODES); optimize_huffman_table(2+1, AC_CHROMA_CODES);
775
- }
776
- return second_pass_init();
777
- }
778
-
779
- bool jpeg_encoder::terminate_pass_two()
780
- {
781
- put_bits(0x7F, 7);
782
- flush_output_buffer();
783
- emit_marker(M_EOI);
784
- m_pass_num++; // purposely bump up m_pass_num, for debugging
785
- return true;
786
- }
787
-
788
- bool jpeg_encoder::process_end_of_image()
789
- {
790
- if (m_mcu_y_ofs)
791
- {
792
- if (m_mcu_y_ofs < 16) // check here just to shut up static analysis
793
- {
794
- for (int i = m_mcu_y_ofs; i < m_mcu_y; i++)
795
- memcpy(m_mcu_lines[i], m_mcu_lines[m_mcu_y_ofs - 1], m_image_bpl_mcu);
796
- }
797
-
798
- process_mcu_row();
799
- }
800
-
801
- if (m_pass_num == 1)
802
- return terminate_pass_one();
803
- else
804
- return terminate_pass_two();
805
- }
806
-
807
- void jpeg_encoder::load_mcu(const void *pSrc)
808
- {
809
- const uint8* Psrc = reinterpret_cast<const uint8*>(pSrc);
810
-
811
- uint8* pDst = m_mcu_lines[m_mcu_y_ofs]; // OK to write up to m_image_bpl_xlt bytes to pDst
812
-
813
- if (m_num_components == 1)
814
- {
815
- if (m_image_bpp == 4)
816
- RGBA_to_Y(pDst, Psrc, m_image_x);
817
- else if (m_image_bpp == 3)
818
- RGB_to_Y(pDst, Psrc, m_image_x);
819
- else
820
- memcpy(pDst, Psrc, m_image_x);
821
- }
822
- else
823
- {
824
- if (m_image_bpp == 4)
825
- RGBA_to_YCC(pDst, Psrc, m_image_x);
826
- else if (m_image_bpp == 3)
827
- RGB_to_YCC(pDst, Psrc, m_image_x);
828
- else
829
- Y_to_YCC(pDst, Psrc, m_image_x);
830
- }
831
-
832
- // Possibly duplicate pixels at end of scanline if not a multiple of 8 or 16
833
- if (m_num_components == 1)
834
- memset(m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt, pDst[m_image_bpl_xlt - 1], m_image_x_mcu - m_image_x);
835
- else
836
- {
837
- const uint8 y = pDst[m_image_bpl_xlt - 3 + 0], cb = pDst[m_image_bpl_xlt - 3 + 1], cr = pDst[m_image_bpl_xlt - 3 + 2];
838
- uint8 *q = m_mcu_lines[m_mcu_y_ofs] + m_image_bpl_xlt;
839
- for (int i = m_image_x; i < m_image_x_mcu; i++)
840
- {
841
- *q++ = y; *q++ = cb; *q++ = cr;
842
- }
843
- }
844
-
845
- if (++m_mcu_y_ofs == m_mcu_y)
846
- {
847
- process_mcu_row();
848
- m_mcu_y_ofs = 0;
849
- }
850
- }
851
-
852
- void jpeg_encoder::clear()
853
- {
854
- m_mcu_lines[0] = NULL;
855
- m_pass_num = 0;
856
- m_all_stream_writes_succeeded = true;
857
- }
858
-
859
- jpeg_encoder::jpeg_encoder()
860
- {
861
- clear();
862
- }
863
-
864
- jpeg_encoder::~jpeg_encoder()
865
- {
866
- deinit();
867
- }
868
-
869
- bool jpeg_encoder::init(output_stream *pStream, int64_t width, int64_t height, int64_t src_channels, const params &comp_params)
870
- {
871
- deinit();
872
- if (((!pStream) || (width < 1) || (height < 1)) || ((src_channels != 1) && (src_channels != 3) && (src_channels != 4)) || (!comp_params.check_valid())) return false;
873
- m_pStream = pStream;
874
- m_params = comp_params;
875
- return jpg_open(width, height, src_channels);
876
- }
877
-
878
- void jpeg_encoder::deinit()
879
- {
880
- jpge_free(m_mcu_lines[0]);
881
- clear();
882
- }
883
-
884
- bool jpeg_encoder::process_scanline(const void* pScanline)
885
- {
886
- if ((m_pass_num < 1) || (m_pass_num > 2)) return false;
887
- if (m_all_stream_writes_succeeded)
888
- {
889
- if (!pScanline)
890
- {
891
- if (!process_end_of_image()) return false;
892
- }
893
- else
894
- {
895
- load_mcu(pScanline);
896
- }
897
- }
898
- return m_all_stream_writes_succeeded;
899
- }
900
-
901
- // Higher level wrappers/examples (optional).
902
- #include <stdio.h>
903
-
904
- class cfile_stream : public output_stream
905
- {
906
- cfile_stream(const cfile_stream &);
907
- cfile_stream &operator= (const cfile_stream &);
908
-
909
- FILE* m_pFile;
910
- bool m_bStatus;
911
-
912
- public:
913
- cfile_stream() : m_pFile(NULL), m_bStatus(false) { }
914
-
915
- virtual ~cfile_stream()
916
- {
917
- close();
918
- }
919
-
920
- bool open(const char *pFilename)
921
- {
922
- close();
923
- #if defined(_MSC_VER)
924
- if (fopen_s(&m_pFile, pFilename, "wb") != 0)
925
- {
926
- return false;
927
- }
928
- #else
929
- m_pFile = fopen(pFilename, "wb");
930
- #endif
931
- m_bStatus = (m_pFile != NULL);
932
- return m_bStatus;
933
- }
934
-
935
- bool close()
936
- {
937
- if (m_pFile)
938
- {
939
- if (fclose(m_pFile) == EOF)
940
- {
941
- m_bStatus = false;
942
- }
943
- m_pFile = NULL;
944
- }
945
- return m_bStatus;
946
- }
947
-
948
- virtual bool put_buf(const void* pBuf, int64_t len)
949
- {
950
- m_bStatus = m_bStatus && (fwrite(pBuf, len, 1, m_pFile) == 1);
951
- return m_bStatus;
952
- }
953
-
954
- uint get_size() const
955
- {
956
- return m_pFile ? ftell(m_pFile) : 0;
957
- }
958
- };
959
-
960
- // Writes JPEG image to file.
961
- bool compress_image_to_jpeg_file(const char *pFilename, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
962
- {
963
- cfile_stream dst_stream;
964
- if (!dst_stream.open(pFilename))
965
- return false;
966
-
967
- jpge::jpeg_encoder dst_image;
968
- if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
969
- return false;
970
-
971
- for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
972
- {
973
- for (int64_t i = 0; i < height; i++)
974
- {
975
- // i, width, and num_channels are all 64bit
976
- const uint8* pBuf = pImage_data + i * width * num_channels;
977
- if (!dst_image.process_scanline(pBuf))
978
- return false;
979
- }
980
- if (!dst_image.process_scanline(NULL))
981
- return false;
982
- }
983
-
984
- dst_image.deinit();
985
-
986
- return dst_stream.close();
987
- }
988
-
989
- class memory_stream : public output_stream
990
- {
991
- memory_stream(const memory_stream &);
992
- memory_stream &operator= (const memory_stream &);
993
-
994
- uint8 *m_pBuf;
995
- uint64_t m_buf_size, m_buf_ofs;
996
-
997
- public:
998
- memory_stream(void *pBuf, uint64_t buf_size) : m_pBuf(static_cast<uint8*>(pBuf)), m_buf_size(buf_size), m_buf_ofs(0) { }
999
-
1000
- virtual ~memory_stream() { }
1001
-
1002
- virtual bool put_buf(const void* pBuf, int64_t len)
1003
- {
1004
- uint64_t buf_remaining = m_buf_size - m_buf_ofs;
1005
- if ((uint64_t)len > buf_remaining)
1006
- return false;
1007
- memcpy(m_pBuf + m_buf_ofs, pBuf, len);
1008
- m_buf_ofs += len;
1009
- return true;
1010
- }
1011
-
1012
- uint64_t get_size() const
1013
- {
1014
- return m_buf_ofs;
1015
- }
1016
- };
1017
-
1018
- bool compress_image_to_jpeg_file_in_memory(void *pDstBuf, int64_t &buf_size, int64_t width, int64_t height, int64_t num_channels, const uint8 *pImage_data, const params &comp_params)
1019
- {
1020
- if ((!pDstBuf) || (!buf_size))
1021
- return false;
1022
-
1023
- memory_stream dst_stream(pDstBuf, buf_size);
1024
-
1025
- buf_size = 0;
1026
-
1027
- jpge::jpeg_encoder dst_image;
1028
- if (!dst_image.init(&dst_stream, width, height, num_channels, comp_params))
1029
- return false;
1030
-
1031
- for (uint pass_index = 0; pass_index < dst_image.get_total_passes(); pass_index++)
1032
- {
1033
- for (int64_t i = 0; i < height; i++)
1034
- {
1035
- const uint8* pScanline = pImage_data + i * width * num_channels;
1036
- if (!dst_image.process_scanline(pScanline))
1037
- return false;
1038
- }
1039
- if (!dst_image.process_scanline(NULL))
1040
- return false;
1041
- }
1042
-
1043
- dst_image.deinit();
1044
-
1045
- buf_size = dst_stream.get_size();
1046
- return true;
1047
- }
1048
-
1049
- } // namespace jpge
 
spaces/Anar0140/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5/README.md DELETED
@@ -1,12 +0,0 @@
- ---
- title: 6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5
- emoji: 🦀
- colorFrom: gray
- colorTo: gray
- sdk: static
- pinned: false
- license: mit
- duplicated_from: AI-ZTH-03-23/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5
- ---
-
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/examples/community/stable_diffusion_controlnet_img2img.py DELETED
@@ -1,989 +0,0 @@
1
- # Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
2
-
3
- import inspect
4
- from typing import Any, Callable, Dict, List, Optional, Tuple, Union
5
-
6
- import numpy as np
7
- import PIL.Image
8
- import torch
9
- from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
10
-
11
- from diffusers import AutoencoderKL, ControlNetModel, DiffusionPipeline, UNet2DConditionModel, logging
12
- from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput, StableDiffusionSafetyChecker
13
- from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_controlnet import MultiControlNetModel
14
- from diffusers.schedulers import KarrasDiffusionSchedulers
15
- from diffusers.utils import (
16
- PIL_INTERPOLATION,
17
- is_accelerate_available,
18
- is_accelerate_version,
19
- randn_tensor,
20
- replace_example_docstring,
21
- )
22
-
23
-
24
- logger = logging.get_logger(__name__) # pylint: disable=invalid-name
25
-
26
- EXAMPLE_DOC_STRING = """
27
- Examples:
28
- ```py
29
- >>> import numpy as np
30
- >>> import torch
31
- >>> from PIL import Image
32
- >>> from diffusers import ControlNetModel, UniPCMultistepScheduler
33
- >>> from diffusers.utils import load_image
34
-
35
- >>> input_image = load_image("https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/input_image_vermeer.png")
36
-
37
- >>> controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
38
-
39
- >>> pipe_controlnet = StableDiffusionControlNetImg2ImgPipeline.from_pretrained(
40
- "runwayml/stable-diffusion-v1-5",
41
- controlnet=controlnet,
42
- safety_checker=None,
43
- torch_dtype=torch.float16
44
- )
45
-
46
- >>> pipe_controlnet.scheduler = UniPCMultistepScheduler.from_config(pipe_controlnet.scheduler.config)
47
- >>> pipe_controlnet.enable_xformers_memory_efficient_attention()
48
- >>> pipe_controlnet.enable_model_cpu_offload()
49
-
50
- # using image with edges for our canny controlnet
51
- >>> control_image = load_image(
52
- "https://hf.co/datasets/huggingface/documentation-images/resolve/main/diffusers/vermeer_canny_edged.png")
53
-
54
-
55
- >>> result_img = pipe_controlnet(controlnet_conditioning_image=control_image,
56
- image=input_image,
57
- prompt="an android robot, cyberpank, digitl art masterpiece",
58
- num_inference_steps=20).images[0]
59
-
60
- >>> result_img.show()
61
- ```
62
- """
63
-
64
-
65
- def prepare_image(image):
66
- if isinstance(image, torch.Tensor):
67
- # Batch single image
68
- if image.ndim == 3:
69
- image = image.unsqueeze(0)
70
-
71
- image = image.to(dtype=torch.float32)
72
- else:
73
- # preprocess image
74
- if isinstance(image, (PIL.Image.Image, np.ndarray)):
75
- image = [image]
76
-
77
- if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
78
- image = [np.array(i.convert("RGB"))[None, :] for i in image]
79
- image = np.concatenate(image, axis=0)
80
- elif isinstance(image, list) and isinstance(image[0], np.ndarray):
81
- image = np.concatenate([i[None, :] for i in image], axis=0)
82
-
83
- image = image.transpose(0, 3, 1, 2)
84
- image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
85
-
86
- return image
87
-
88
-
89
- def prepare_controlnet_conditioning_image(
90
- controlnet_conditioning_image,
91
- width,
92
- height,
93
- batch_size,
94
- num_images_per_prompt,
95
- device,
96
- dtype,
97
- do_classifier_free_guidance,
98
- ):
99
- if not isinstance(controlnet_conditioning_image, torch.Tensor):
100
- if isinstance(controlnet_conditioning_image, PIL.Image.Image):
101
- controlnet_conditioning_image = [controlnet_conditioning_image]
102
-
103
- if isinstance(controlnet_conditioning_image[0], PIL.Image.Image):
104
- controlnet_conditioning_image = [
105
- np.array(i.resize((width, height), resample=PIL_INTERPOLATION["lanczos"]))[None, :]
106
- for i in controlnet_conditioning_image
107
- ]
108
- controlnet_conditioning_image = np.concatenate(controlnet_conditioning_image, axis=0)
109
- controlnet_conditioning_image = np.array(controlnet_conditioning_image).astype(np.float32) / 255.0
110
- controlnet_conditioning_image = controlnet_conditioning_image.transpose(0, 3, 1, 2)
111
- controlnet_conditioning_image = torch.from_numpy(controlnet_conditioning_image)
112
- elif isinstance(controlnet_conditioning_image[0], torch.Tensor):
113
- controlnet_conditioning_image = torch.cat(controlnet_conditioning_image, dim=0)
114
-
115
- image_batch_size = controlnet_conditioning_image.shape[0]
116
-
117
- if image_batch_size == 1:
118
- repeat_by = batch_size
119
- else:
120
- # image batch size is the same as prompt batch size
121
- repeat_by = num_images_per_prompt
122
-
123
- controlnet_conditioning_image = controlnet_conditioning_image.repeat_interleave(repeat_by, dim=0)
124
-
125
- controlnet_conditioning_image = controlnet_conditioning_image.to(device=device, dtype=dtype)
126
-
127
- if do_classifier_free_guidance:
128
- controlnet_conditioning_image = torch.cat([controlnet_conditioning_image] * 2)
129
-
130
- return controlnet_conditioning_image
131
-
132
-
133
- class StableDiffusionControlNetImg2ImgPipeline(DiffusionPipeline):
134
- """
135
- Inspired by: https://github.com/haofanwang/ControlNet-for-Diffusers/
136
- """
137
-
138
- _optional_components = ["safety_checker", "feature_extractor"]
139
-
140
- def __init__(
141
- self,
142
- vae: AutoencoderKL,
143
- text_encoder: CLIPTextModel,
144
- tokenizer: CLIPTokenizer,
145
- unet: UNet2DConditionModel,
146
- controlnet: Union[ControlNetModel, List[ControlNetModel], Tuple[ControlNetModel], MultiControlNetModel],
147
- scheduler: KarrasDiffusionSchedulers,
148
- safety_checker: StableDiffusionSafetyChecker,
149
- feature_extractor: CLIPImageProcessor,
150
- requires_safety_checker: bool = True,
151
- ):
152
- super().__init__()
153
-
154
- if safety_checker is None and requires_safety_checker:
155
- logger.warning(
156
- f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
157
- " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
158
- " results in services or applications open to the public. Both the diffusers team and Hugging Face"
159
- " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
160
- " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
161
- " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
162
- )
163
-
164
- if safety_checker is not None and feature_extractor is None:
165
- raise ValueError(
166
- "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
167
- " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
168
- )
169
-
170
- if isinstance(controlnet, (list, tuple)):
171
- controlnet = MultiControlNetModel(controlnet)
172
-
173
- self.register_modules(
174
- vae=vae,
175
- text_encoder=text_encoder,
176
- tokenizer=tokenizer,
177
- unet=unet,
178
- controlnet=controlnet,
179
- scheduler=scheduler,
180
- safety_checker=safety_checker,
181
- feature_extractor=feature_extractor,
182
- )
183
- self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1)
184
- self.register_to_config(requires_safety_checker=requires_safety_checker)
185
-
186
- def enable_vae_slicing(self):
187
- r"""
188
- Enable sliced VAE decoding.
189
-
190
- When this option is enabled, the VAE will split the input tensor in slices to compute decoding in several
191
- steps. This is useful to save some memory and allow larger batch sizes.
192
- """
193
- self.vae.enable_slicing()
194
-
195
- def disable_vae_slicing(self):
196
- r"""
197
- Disable sliced VAE decoding. If `enable_vae_slicing` was previously invoked, this method will go back to
198
- computing decoding in one step.
199
- """
200
- self.vae.disable_slicing()
201
-
202
- def enable_sequential_cpu_offload(self, gpu_id=0):
203
- r"""
204
- Offloads all models to CPU using accelerate, significantly reducing memory usage. When called, unet,
205
- text_encoder, vae, controlnet, and safety checker have their state dicts saved to CPU and then are moved to a
206
- `torch.device('meta')` and loaded to GPU only when their specific submodule has its `forward` method called.
207
- Note that offloading happens on a submodule basis. Memory savings are higher than with
208
- `enable_model_cpu_offload`, but performance is lower.
209
- """
210
- if is_accelerate_available():
211
- from accelerate import cpu_offload
212
- else:
213
- raise ImportError("Please install accelerate via `pip install accelerate`")
214
-
215
- device = torch.device(f"cuda:{gpu_id}")
216
-
217
- for cpu_offloaded_model in [self.unet, self.text_encoder, self.vae, self.controlnet]:
218
- cpu_offload(cpu_offloaded_model, device)
219
-
220
- if self.safety_checker is not None:
221
- cpu_offload(self.safety_checker, execution_device=device, offload_buffers=True)
222
-
223
- def enable_model_cpu_offload(self, gpu_id=0):
224
- r"""
225
- Offloads all models to CPU using accelerate, reducing memory usage with a low impact on performance. Compared
226
- to `enable_sequential_cpu_offload`, this method moves one whole model at a time to the GPU when its `forward`
227
- method is called, and the model remains in GPU until the next model runs. Memory savings are lower than with
228
- `enable_sequential_cpu_offload`, but performance is much better due to the iterative execution of the `unet`.
229
- """
230
- if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
231
- from accelerate import cpu_offload_with_hook
232
- else:
233
- raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
234
-
235
- device = torch.device(f"cuda:{gpu_id}")
236
-
237
- hook = None
238
- for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
239
- _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
240
-
241
- if self.safety_checker is not None:
242
- # the safety checker can offload the vae again
243
- _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
244
-
245
- # control net hook has be manually offloaded as it alternates with unet
246
- cpu_offload_with_hook(self.controlnet, device)
247
-
248
- # We'll offload the last model manually.
249
- self.final_offload_hook = hook
250
-
251
- @property
252
- def _execution_device(self):
253
- r"""
254
- Returns the device on which the pipeline's models will be executed. After calling
255
- `pipeline.enable_sequential_cpu_offload()` the execution device can only be inferred from Accelerate's module
256
- hooks.
257
- """
258
- if not hasattr(self.unet, "_hf_hook"):
259
- return self.device
260
- for module in self.unet.modules():
261
- if (
262
- hasattr(module, "_hf_hook")
263
- and hasattr(module._hf_hook, "execution_device")
264
- and module._hf_hook.execution_device is not None
265
- ):
266
- return torch.device(module._hf_hook.execution_device)
267
- return self.device
268
-
269
- def _encode_prompt(
270
- self,
271
- prompt,
272
- device,
273
- num_images_per_prompt,
274
- do_classifier_free_guidance,
275
- negative_prompt=None,
276
- prompt_embeds: Optional[torch.FloatTensor] = None,
277
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
278
- ):
279
- r"""
280
- Encodes the prompt into text encoder hidden states.
281
-
282
- Args:
283
- prompt (`str` or `List[str]`, *optional*):
284
- prompt to be encoded
285
- device: (`torch.device`):
286
- torch device
287
- num_images_per_prompt (`int`):
288
- number of images that should be generated per prompt
289
- do_classifier_free_guidance (`bool`):
290
- whether to use classifier free guidance or not
291
- negative_prompt (`str` or `List[str]`, *optional*):
292
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
293
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
294
- prompt_embeds (`torch.FloatTensor`, *optional*):
295
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
296
- provided, text embeddings will be generated from `prompt` input argument.
297
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
298
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
299
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
300
- argument.
301
- """
302
- if prompt is not None and isinstance(prompt, str):
303
- batch_size = 1
304
- elif prompt is not None and isinstance(prompt, list):
305
- batch_size = len(prompt)
306
- else:
307
- batch_size = prompt_embeds.shape[0]
308
-
309
- if prompt_embeds is None:
310
- text_inputs = self.tokenizer(
311
- prompt,
312
- padding="max_length",
313
- max_length=self.tokenizer.model_max_length,
314
- truncation=True,
315
- return_tensors="pt",
316
- )
317
- text_input_ids = text_inputs.input_ids
318
- untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
319
-
320
- if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
321
- text_input_ids, untruncated_ids
322
- ):
323
- removed_text = self.tokenizer.batch_decode(
324
- untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
325
- )
326
- logger.warning(
327
- "The following part of your input was truncated because CLIP can only handle sequences up to"
328
- f" {self.tokenizer.model_max_length} tokens: {removed_text}"
329
- )
330
-
331
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
332
- attention_mask = text_inputs.attention_mask.to(device)
333
- else:
334
- attention_mask = None
335
-
336
- prompt_embeds = self.text_encoder(
337
- text_input_ids.to(device),
338
- attention_mask=attention_mask,
339
- )
340
- prompt_embeds = prompt_embeds[0]
341
-
342
- prompt_embeds = prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
343
-
344
- bs_embed, seq_len, _ = prompt_embeds.shape
345
- # duplicate text embeddings for each generation per prompt, using mps friendly method
346
- prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
347
- prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
348
-
349
- # get unconditional embeddings for classifier free guidance
350
- if do_classifier_free_guidance and negative_prompt_embeds is None:
351
- uncond_tokens: List[str]
352
- if negative_prompt is None:
353
- uncond_tokens = [""] * batch_size
354
- elif type(prompt) is not type(negative_prompt):
355
- raise TypeError(
356
- f"`negative_prompt` should be the same type as `prompt`, but got {type(negative_prompt)} !="
357
- f" {type(prompt)}."
358
- )
359
- elif isinstance(negative_prompt, str):
360
- uncond_tokens = [negative_prompt]
361
- elif batch_size != len(negative_prompt):
362
- raise ValueError(
363
- f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
364
- f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
365
- " the batch size of `prompt`."
366
- )
367
- else:
368
- uncond_tokens = negative_prompt
369
-
370
- max_length = prompt_embeds.shape[1]
371
- uncond_input = self.tokenizer(
372
- uncond_tokens,
373
- padding="max_length",
374
- max_length=max_length,
375
- truncation=True,
376
- return_tensors="pt",
377
- )
378
-
379
- if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
380
- attention_mask = uncond_input.attention_mask.to(device)
381
- else:
382
- attention_mask = None
383
-
384
- negative_prompt_embeds = self.text_encoder(
385
- uncond_input.input_ids.to(device),
386
- attention_mask=attention_mask,
387
- )
388
- negative_prompt_embeds = negative_prompt_embeds[0]
389
-
390
- if do_classifier_free_guidance:
391
- # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
392
- seq_len = negative_prompt_embeds.shape[1]
393
-
394
- negative_prompt_embeds = negative_prompt_embeds.to(dtype=self.text_encoder.dtype, device=device)
395
-
396
- negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
397
- negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
398
-
399
- # For classifier free guidance, we need to do two forward passes.
400
- # Here we concatenate the unconditional and text embeddings into a single batch
401
- # to avoid doing two forward passes
402
- prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
403
-
404
- return prompt_embeds
405
-
406
- def run_safety_checker(self, image, device, dtype):
407
- if self.safety_checker is not None:
408
- safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pt").to(device)
409
- image, has_nsfw_concept = self.safety_checker(
410
- images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
411
- )
412
- else:
413
- has_nsfw_concept = None
414
- return image, has_nsfw_concept
415
-
416
- def decode_latents(self, latents):
417
- latents = 1 / self.vae.config.scaling_factor * latents
418
- image = self.vae.decode(latents).sample
419
- image = (image / 2 + 0.5).clamp(0, 1)
420
- # we always cast to float32 as this does not cause significant overhead and is compatible with bfloat16
421
- image = image.cpu().permute(0, 2, 3, 1).float().numpy()
422
- return image
423
-
424
- def prepare_extra_step_kwargs(self, generator, eta):
425
- # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
426
- # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
427
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502
428
- # and should be between [0, 1]
429
-
430
- accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
431
- extra_step_kwargs = {}
432
- if accepts_eta:
433
- extra_step_kwargs["eta"] = eta
434
-
435
- # check if the scheduler accepts generator
436
- accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
437
- if accepts_generator:
438
- extra_step_kwargs["generator"] = generator
439
- return extra_step_kwargs
440
-
441
- def check_controlnet_conditioning_image(self, image, prompt, prompt_embeds):
442
- image_is_pil = isinstance(image, PIL.Image.Image)
443
- image_is_tensor = isinstance(image, torch.Tensor)
444
- image_is_pil_list = isinstance(image, list) and isinstance(image[0], PIL.Image.Image)
445
- image_is_tensor_list = isinstance(image, list) and isinstance(image[0], torch.Tensor)
446
-
447
- if not image_is_pil and not image_is_tensor and not image_is_pil_list and not image_is_tensor_list:
448
- raise TypeError(
449
- "image must be passed and be one of PIL image, torch tensor, list of PIL images, or list of torch tensors"
450
- )
451
-
452
- if image_is_pil:
453
- image_batch_size = 1
454
- elif image_is_tensor:
455
- image_batch_size = image.shape[0]
456
- elif image_is_pil_list:
457
- image_batch_size = len(image)
458
- elif image_is_tensor_list:
459
- image_batch_size = len(image)
460
- else:
461
- raise ValueError("controlnet condition image is not valid")
462
-
463
- if prompt is not None and isinstance(prompt, str):
464
- prompt_batch_size = 1
465
- elif prompt is not None and isinstance(prompt, list):
466
- prompt_batch_size = len(prompt)
467
- elif prompt_embeds is not None:
468
- prompt_batch_size = prompt_embeds.shape[0]
469
- else:
470
- raise ValueError("prompt or prompt_embeds are not valid")
471
-
472
- if image_batch_size != 1 and image_batch_size != prompt_batch_size:
473
- raise ValueError(
474
- f"If image batch size is not 1, image batch size must be same as prompt batch size. image batch size: {image_batch_size}, prompt batch size: {prompt_batch_size}"
475
- )
476
-
477
- def check_inputs(
478
- self,
479
- prompt,
480
- image,
481
- controlnet_conditioning_image,
482
- height,
483
- width,
484
- callback_steps,
485
- negative_prompt=None,
486
- prompt_embeds=None,
487
- negative_prompt_embeds=None,
488
- strength=None,
489
- controlnet_guidance_start=None,
490
- controlnet_guidance_end=None,
491
- controlnet_conditioning_scale=None,
492
- ):
493
- if height % 8 != 0 or width % 8 != 0:
494
- raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
495
-
496
- if (callback_steps is None) or (
497
- callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
498
- ):
499
- raise ValueError(
500
- f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
501
- f" {type(callback_steps)}."
502
- )
503
-
504
- if prompt is not None and prompt_embeds is not None:
505
- raise ValueError(
506
- f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
507
- " only forward one of the two."
508
- )
509
- elif prompt is None and prompt_embeds is None:
510
- raise ValueError(
511
- "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
512
- )
513
- elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
514
- raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
515
-
516
- if negative_prompt is not None and negative_prompt_embeds is not None:
517
- raise ValueError(
518
- f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
519
- f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
520
- )
521
-
522
- if prompt_embeds is not None and negative_prompt_embeds is not None:
523
- if prompt_embeds.shape != negative_prompt_embeds.shape:
524
- raise ValueError(
525
- "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
526
- f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
527
- f" {negative_prompt_embeds.shape}."
528
- )
529
-
530
- # check controlnet condition image
531
-
532
- if isinstance(self.controlnet, ControlNetModel):
533
- self.check_controlnet_conditioning_image(controlnet_conditioning_image, prompt, prompt_embeds)
534
- elif isinstance(self.controlnet, MultiControlNetModel):
535
- if not isinstance(controlnet_conditioning_image, list):
536
- raise TypeError("For multiple controlnets: `image` must be type `list`")
537
-
538
- if len(controlnet_conditioning_image) != len(self.controlnet.nets):
539
- raise ValueError(
540
- "For multiple controlnets: `image` must have the same length as the number of controlnets."
541
- )
542
-
543
- for image_ in controlnet_conditioning_image:
544
- self.check_controlnet_conditioning_image(image_, prompt, prompt_embeds)
545
- else:
546
- assert False
547
-
548
- # Check `controlnet_conditioning_scale`
549
-
550
- if isinstance(self.controlnet, ControlNetModel):
551
- if not isinstance(controlnet_conditioning_scale, float):
552
- raise TypeError("For single controlnet: `controlnet_conditioning_scale` must be type `float`.")
553
- elif isinstance(self.controlnet, MultiControlNetModel):
554
- if isinstance(controlnet_conditioning_scale, list) and len(controlnet_conditioning_scale) != len(
555
- self.controlnet.nets
556
- ):
557
- raise ValueError(
558
- "For multiple controlnets: When `controlnet_conditioning_scale` is specified as `list`, it must have"
559
- " the same length as the number of controlnets"
560
- )
561
- else:
562
- assert False
563
-
564
- if isinstance(image, torch.Tensor):
565
- if image.ndim != 3 and image.ndim != 4:
566
- raise ValueError("`image` must have 3 or 4 dimensions")
567
-
568
- if image.ndim == 3:
569
- image_batch_size = 1
570
- image_channels, image_height, image_width = image.shape
571
- elif image.ndim == 4:
572
- image_batch_size, image_channels, image_height, image_width = image.shape
573
- else:
574
- assert False
575
-
576
- if image_channels != 3:
577
- raise ValueError("`image` must have 3 channels")
578
-
579
- if image.min() < -1 or image.max() > 1:
580
- raise ValueError("`image` should be in range [-1, 1]")
581
-
582
- if self.vae.config.latent_channels != self.unet.config.in_channels:
583
- raise ValueError(
584
- f"The config of `pipeline.unet` expects {self.unet.config.in_channels} input channels but received"
585
- f" {self.vae.config.latent_channels} latent channels from `pipeline.vae`."
586
- f" Please verify the configs of `pipeline.unet` and `pipeline.vae`."
587
- )
588
-
589
- if strength < 0 or strength > 1:
590
- raise ValueError(f"The value of `strength` should be in [0.0, 1.0] but is {strength}")
591
-
592
- if controlnet_guidance_start < 0 or controlnet_guidance_start > 1:
593
- raise ValueError(
594
- f"The value of `controlnet_guidance_start` should be in [0.0, 1.0] but is {controlnet_guidance_start}"
595
- )
596
-
597
- if controlnet_guidance_end < 0 or controlnet_guidance_end > 1:
598
- raise ValueError(
599
- f"The value of `controlnet_guidance_end` should be in [0.0, 1.0] but is {controlnet_guidance_end}"
600
- )
601
-
602
- if controlnet_guidance_start > controlnet_guidance_end:
603
- raise ValueError(
604
- "The value of `controlnet_guidance_start` should be less than `controlnet_guidance_end`, but got"
605
- f" `controlnet_guidance_start` {controlnet_guidance_start} > `controlnet_guidance_end` {controlnet_guidance_end}"
606
- )
607
-
608
- def get_timesteps(self, num_inference_steps, strength, device):
609
- # get the original timestep using init_timestep
610
- init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
611
-
612
- t_start = max(num_inference_steps - init_timestep, 0)
613
- timesteps = self.scheduler.timesteps[t_start:]
614
-
615
- return timesteps, num_inference_steps - t_start
616
-
617
- def prepare_latents(self, image, timestep, batch_size, num_images_per_prompt, dtype, device, generator=None):
618
- if not isinstance(image, (torch.Tensor, PIL.Image.Image, list)):
619
- raise ValueError(
620
- f"`image` has to be of type `torch.Tensor`, `PIL.Image.Image` or list but is {type(image)}"
621
- )
622
-
623
- image = image.to(device=device, dtype=dtype)
624
-
625
- batch_size = batch_size * num_images_per_prompt
626
- if isinstance(generator, list) and len(generator) != batch_size:
627
- raise ValueError(
628
- f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
629
- f" size of {batch_size}. Make sure the batch size matches the length of the generators."
630
- )
631
-
632
- if isinstance(generator, list):
633
- init_latents = [
634
- self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
635
- ]
636
- init_latents = torch.cat(init_latents, dim=0)
637
- else:
638
- init_latents = self.vae.encode(image).latent_dist.sample(generator)
639
-
640
- init_latents = self.vae.config.scaling_factor * init_latents
641
-
642
- if batch_size > init_latents.shape[0] and batch_size % init_latents.shape[0] == 0:
643
- raise ValueError(
644
- f"Cannot duplicate `image` of batch size {init_latents.shape[0]} to {batch_size} text prompts."
645
- )
646
- else:
647
- init_latents = torch.cat([init_latents], dim=0)
648
-
649
- shape = init_latents.shape
650
- noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
651
-
652
- # get latents
653
- init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
654
- latents = init_latents
655
-
656
- return latents
657
-
658
- def _default_height_width(self, height, width, image):
659
- if isinstance(image, list):
660
- image = image[0]
661
-
662
- if height is None:
663
- if isinstance(image, PIL.Image.Image):
664
- height = image.height
665
- elif isinstance(image, torch.Tensor):
666
- height = image.shape[2]  # NCHW layout: dim 2 is height
667
-
668
- height = (height // 8) * 8 # round down to nearest multiple of 8
669
-
670
- if width is None:
671
- if isinstance(image, PIL.Image.Image):
672
- width = image.width
673
- elif isinstance(image, torch.Tensor):
674
- width = image.shape[3]  # NCHW layout: dim 3 is width
675
-
676
- width = (width // 8) * 8 # round down to nearest multiple of 8
677
-
678
- return height, width
679
-
680
- @torch.no_grad()
681
- @replace_example_docstring(EXAMPLE_DOC_STRING)
682
- def __call__(
683
- self,
684
- prompt: Union[str, List[str]] = None,
685
- image: Union[torch.Tensor, PIL.Image.Image] = None,
686
- controlnet_conditioning_image: Union[
687
- torch.FloatTensor, PIL.Image.Image, List[torch.FloatTensor], List[PIL.Image.Image]
688
- ] = None,
689
- strength: float = 0.8,
690
- height: Optional[int] = None,
691
- width: Optional[int] = None,
692
- num_inference_steps: int = 50,
693
- guidance_scale: float = 7.5,
694
- negative_prompt: Optional[Union[str, List[str]]] = None,
695
- num_images_per_prompt: Optional[int] = 1,
696
- eta: float = 0.0,
697
- generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
698
- latents: Optional[torch.FloatTensor] = None,
699
- prompt_embeds: Optional[torch.FloatTensor] = None,
700
- negative_prompt_embeds: Optional[torch.FloatTensor] = None,
701
- output_type: Optional[str] = "pil",
702
- return_dict: bool = True,
703
- callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
704
- callback_steps: int = 1,
705
- cross_attention_kwargs: Optional[Dict[str, Any]] = None,
706
- controlnet_conditioning_scale: Union[float, List[float]] = 1.0,
707
- controlnet_guidance_start: float = 0.0,
708
- controlnet_guidance_end: float = 1.0,
709
- ):
710
- r"""
711
- Function invoked when calling the pipeline for generation.
712
-
713
- Args:
714
- prompt (`str` or `List[str]`, *optional*):
715
- The prompt or prompts to guide the image generation. If not defined, one has to pass `prompt_embeds`
716
- instead.
717
- image (`torch.Tensor` or `PIL.Image.Image`):
718
- `Image`, or tensor representing an image batch to be used as the starting point for the image-to-image
719
- process. It is noised according to `strength` and then denoised following `prompt`.
720
- controlnet_conditioning_image (`torch.FloatTensor`, `PIL.Image.Image`, `List[torch.FloatTensor]` or `List[PIL.Image.Image]`):
721
- The ControlNet input condition. ControlNet uses this input condition to generate guidance for the UNet. If
722
- the type is specified as `torch.FloatTensor`, it is passed to ControlNet as is. `PIL.Image.Image` can
723
- also be accepted as an image. The control image is automatically resized to fit the output image.
724
- strength (`float`, *optional*):
725
- Conceptually, indicates how much to transform the reference `image`. Must be between 0 and 1. `image`
726
- will be used as a starting point, adding more noise to it the larger the `strength`. The number of
727
- denoising steps depends on the amount of noise initially added. When `strength` is 1, added noise will
728
- be maximum and the denoising process will run for the full number of iterations specified in
729
- `num_inference_steps`. A value of 1, therefore, essentially ignores `image`.
730
- height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
731
- The height in pixels of the generated image.
732
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor):
733
- The width in pixels of the generated image.
734
- num_inference_steps (`int`, *optional*, defaults to 50):
735
- The number of denoising steps. More denoising steps usually lead to a higher quality image at the
736
- expense of slower inference.
737
- guidance_scale (`float`, *optional*, defaults to 7.5):
738
- Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598).
739
- `guidance_scale` is defined as `w` of equation 2. of [Imagen
740
- Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale >
741
- 1`. Higher guidance scale encourages the model to generate images that are closely linked to the text `prompt`,
742
- usually at the expense of lower image quality.
743
- negative_prompt (`str` or `List[str]`, *optional*):
744
- The prompt or prompts not to guide the image generation. If not defined, one has to pass
745
- `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is less than `1`).
746
- num_images_per_prompt (`int`, *optional*, defaults to 1):
747
- The number of images to generate per prompt.
748
- eta (`float`, *optional*, defaults to 0.0):
749
- Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to
750
- [`schedulers.DDIMScheduler`], will be ignored for others.
751
- generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
752
- One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html)
753
- to make generation deterministic.
754
- latents (`torch.FloatTensor`, *optional*):
755
- Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image
756
- generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
757
- tensor will be generated by sampling using the supplied random `generator`.
758
- prompt_embeds (`torch.FloatTensor`, *optional*):
759
- Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
760
- provided, text embeddings will be generated from `prompt` input argument.
761
- negative_prompt_embeds (`torch.FloatTensor`, *optional*):
762
- Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
763
- weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
764
- argument.
765
- output_type (`str`, *optional*, defaults to `"pil"`):
766
- The output format of the generated image. Choose between
767
- [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`.
768
- return_dict (`bool`, *optional*, defaults to `True`):
769
- Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
770
- plain tuple.
771
- callback (`Callable`, *optional*):
772
- A function that will be called every `callback_steps` steps during inference. The function will be
773
- called with the following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
774
- callback_steps (`int`, *optional*, defaults to 1):
775
- The frequency at which the `callback` function will be called. If not specified, the callback will be
776
- called at every step.
777
- cross_attention_kwargs (`dict`, *optional*):
778
- A kwargs dictionary that if specified is passed along to the `AttentionProcessor` as defined under
779
- `self.processor` in
780
- [diffusers.cross_attention](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/cross_attention.py).
781
- controlnet_conditioning_scale (`float`, *optional*, defaults to 1.0):
782
- The outputs of the controlnet are multiplied by `controlnet_conditioning_scale` before they are added
783
- to the residual in the original unet.
784
- controlnet_guidance_start ('float', *optional*, defaults to 0.0):
785
- The percentage of total steps the controlnet starts applying. Must be between 0 and 1.
786
- controlnet_guidance_end ('float', *optional*, defaults to 1.0):
787
- The percentage of total steps the controlnet ends applying. Must be between 0 and 1. Must be greater
788
- than `controlnet_guidance_start`.
789
-
790
- Examples:
791
-
792
- Returns:
793
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
794
- [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple`.
795
- When returning a tuple, the first element is a list with the generated images, and the second element is a
796
- list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work"
797
- (nsfw) content, according to the `safety_checker`.
798
- """
799
- # 0. Default height and width to unet
800
- height, width = self._default_height_width(height, width, controlnet_conditioning_image)
801
-
802
- # 1. Check inputs. Raise error if not correct
803
- self.check_inputs(
804
- prompt,
805
- image,
806
- controlnet_conditioning_image,
807
- height,
808
- width,
809
- callback_steps,
810
- negative_prompt,
811
- prompt_embeds,
812
- negative_prompt_embeds,
813
- strength,
814
- controlnet_guidance_start,
815
- controlnet_guidance_end,
816
- controlnet_conditioning_scale,
817
- )
818
-
819
- # 2. Define call parameters
820
- if prompt is not None and isinstance(prompt, str):
821
- batch_size = 1
822
- elif prompt is not None and isinstance(prompt, list):
823
- batch_size = len(prompt)
824
- else:
825
- batch_size = prompt_embeds.shape[0]
826
-
827
- device = self._execution_device
828
- # here `guidance_scale` is defined analogously to the guidance weight `w` of equation (2)
829
- # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1`
830
- # corresponds to doing no classifier free guidance.
831
- do_classifier_free_guidance = guidance_scale > 1.0
832
-
833
- if isinstance(self.controlnet, MultiControlNetModel) and isinstance(controlnet_conditioning_scale, float):
834
- controlnet_conditioning_scale = [controlnet_conditioning_scale] * len(self.controlnet.nets)
835
-
836
- # 3. Encode input prompt
837
- prompt_embeds = self._encode_prompt(
838
- prompt,
839
- device,
840
- num_images_per_prompt,
841
- do_classifier_free_guidance,
842
- negative_prompt,
843
- prompt_embeds=prompt_embeds,
844
- negative_prompt_embeds=negative_prompt_embeds,
845
- )
846
-
847
- # 4. Prepare image, and controlnet_conditioning_image
848
- image = prepare_image(image)
849
-
850
- # condition image(s)
851
- if isinstance(self.controlnet, ControlNetModel):
852
- controlnet_conditioning_image = prepare_controlnet_conditioning_image(
853
- controlnet_conditioning_image=controlnet_conditioning_image,
854
- width=width,
855
- height=height,
856
- batch_size=batch_size * num_images_per_prompt,
857
- num_images_per_prompt=num_images_per_prompt,
858
- device=device,
859
- dtype=self.controlnet.dtype,
860
- do_classifier_free_guidance=do_classifier_free_guidance,
861
- )
862
- elif isinstance(self.controlnet, MultiControlNetModel):
863
- controlnet_conditioning_images = []
864
-
865
- for image_ in controlnet_conditioning_image:
866
- image_ = prepare_controlnet_conditioning_image(
867
- controlnet_conditioning_image=image_,
868
- width=width,
869
- height=height,
870
- batch_size=batch_size * num_images_per_prompt,
871
- num_images_per_prompt=num_images_per_prompt,
872
- device=device,
873
- dtype=self.controlnet.dtype,
874
- do_classifier_free_guidance=do_classifier_free_guidance,
875
- )
876
-
877
- controlnet_conditioning_images.append(image_)
878
-
879
- controlnet_conditioning_image = controlnet_conditioning_images
880
- else:
881
- assert False
882
-
883
- # 5. Prepare timesteps
884
- self.scheduler.set_timesteps(num_inference_steps, device=device)
885
- timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, strength, device)
886
- latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
887
-
888
- # 6. Prepare latent variables
889
- latents = self.prepare_latents(
890
- image,
891
- latent_timestep,
892
- batch_size,
893
- num_images_per_prompt,
894
- prompt_embeds.dtype,
895
- device,
896
- generator,
897
- )
898
-
899
- # 7. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
900
- extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
901
-
902
- # 8. Denoising loop
903
- num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
904
- with self.progress_bar(total=num_inference_steps) as progress_bar:
905
- for i, t in enumerate(timesteps):
906
- # expand the latents if we are doing classifier free guidance
907
- latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
908
-
909
- latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
910
-
911
- # compute the percentage of total steps we are at
912
- current_sampling_percent = i / len(timesteps)
913
-
914
- if (
915
- current_sampling_percent < controlnet_guidance_start
916
- or current_sampling_percent > controlnet_guidance_end
917
- ):
918
- # do not apply the controlnet
919
- down_block_res_samples = None
920
- mid_block_res_sample = None
921
- else:
922
- # apply the controlnet
923
- down_block_res_samples, mid_block_res_sample = self.controlnet(
924
- latent_model_input,
925
- t,
926
- encoder_hidden_states=prompt_embeds,
927
- controlnet_cond=controlnet_conditioning_image,
928
- conditioning_scale=controlnet_conditioning_scale,
929
- return_dict=False,
930
- )
931
-
932
- # predict the noise residual
933
- noise_pred = self.unet(
934
- latent_model_input,
935
- t,
936
- encoder_hidden_states=prompt_embeds,
937
- cross_attention_kwargs=cross_attention_kwargs,
938
- down_block_additional_residuals=down_block_res_samples,
939
- mid_block_additional_residual=mid_block_res_sample,
940
- ).sample
941
-
942
- # perform guidance
943
- if do_classifier_free_guidance:
944
- noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
945
- noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
946
-
947
- # compute the previous noisy sample x_t -> x_t-1
948
- latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
949
-
950
- # call the callback, if provided
951
- if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
952
- progress_bar.update()
953
- if callback is not None and i % callback_steps == 0:
954
- callback(i, t, latents)
955
-
956
- # If we do sequential model offloading, let's offload unet and controlnet
957
- # manually for max memory savings
958
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
959
- self.unet.to("cpu")
960
- self.controlnet.to("cpu")
961
- torch.cuda.empty_cache()
962
-
963
- if output_type == "latent":
964
- image = latents
965
- has_nsfw_concept = None
966
- elif output_type == "pil":
967
- # 8. Post-processing
968
- image = self.decode_latents(latents)
969
-
970
- # 9. Run safety checker
971
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
972
-
973
- # 10. Convert to PIL
974
- image = self.numpy_to_pil(image)
975
- else:
976
- # 8. Post-processing
977
- image = self.decode_latents(latents)
978
-
979
- # 9. Run safety checker
980
- image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
981
-
982
- # Offload last model to CPU
983
- if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
984
- self.final_offload_hook.offload()
985
-
986
- if not return_dict:
987
- return (image, has_nsfw_concept)
988
-
989
- return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
 
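Note: the following is a minimal, illustrative usage sketch of the community pipeline removed above, assuming diffusers' `custom_pipeline` loading and accelerate are available. The model IDs, image URLs, prompt, and settings are placeholder assumptions, not taken from this repository.

import torch
from diffusers import ControlNetModel, DiffusionPipeline
from diffusers.utils import load_image

# Illustrative sketch: model IDs, URLs, and generation settings are assumptions.
controlnet = ControlNetModel.from_pretrained("lllyasviel/sd-controlnet-canny", torch_dtype=torch.float16)
pipe = DiffusionPipeline.from_pretrained(
    "runwayml/stable-diffusion-v1-5",
    controlnet=controlnet,
    custom_pipeline="stable_diffusion_controlnet_img2img",
    torch_dtype=torch.float16,
)
pipe.enable_model_cpu_offload()  # accelerate-based offload defined in the deleted pipeline above

init_image = load_image("https://example.com/input.png")         # starting image (assumed URL)
canny_image = load_image("https://example.com/input_canny.png")  # ControlNet condition (assumed URL)

result = pipe(
    prompt="a watercolor painting of a harbor at dusk",
    image=init_image,
    controlnet_conditioning_image=canny_image,
    strength=0.7,
    num_inference_steps=30,
    controlnet_conditioning_scale=1.0,
).images[0]
result.save("out.png")

Either `enable_model_cpu_offload()` or `enable_sequential_cpu_offload()` from the pipeline above can be used: per its docstrings, the former keeps one whole model at a time on the GPU for better speed, while the latter offloads per submodule for larger memory savings at lower performance.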
 
spaces/Andy1621/uniformer_image_detection/configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py DELETED
@@ -1,13 +0,0 @@
1
- _base_ = './cascade_mask_rcnn_r50_fpn_20e_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://resnext101_64x4d',
4
- backbone=dict(
5
- type='ResNeXt',
6
- depth=101,
7
- groups=64,
8
- base_width=4,
9
- num_stages=4,
10
- out_indices=(0, 1, 2, 3),
11
- frozen_stages=1,
12
- norm_cfg=dict(type='BN', requires_grad=True),
13
- style='pytorch'))
 
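Note: a minimal sketch of how an MMDetection config like the one removed above is typically consumed for inference; the checkpoint path and test image below are placeholder assumptions.

from mmdet.apis import init_detector, inference_detector

config_file = "configs/cascade_rcnn/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.py"
checkpoint_file = "checkpoints/cascade_mask_rcnn_x101_64x4d_fpn_20e_coco.pth"  # assumed local path

model = init_detector(config_file, checkpoint_file, device="cuda:0")
result = inference_detector(model, "demo/demo.jpg")  # assumed sample image path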
 
spaces/Andy1621/uniformer_image_detection/configs/gn+ws/faster_rcnn_r101_fpn_gn_ws-all_1x_coco.py DELETED
@@ -1,3 +0,0 @@
1
- _base_ = './faster_rcnn_r50_fpn_gn_ws-all_1x_coco.py'
2
- model = dict(
3
- pretrained='open-mmlab://jhu/resnet101_gn_ws', backbone=dict(depth=101))
 
 
spaces/Andy1621/uniformer_light/kinetics_class_index.py DELETED
@@ -1,402 +0,0 @@
1
- kinetics_classnames = {
2
- "0": "riding a bike",
3
- "1": "marching",
4
- "2": "dodgeball",
5
- "3": "playing cymbals",
6
- "4": "checking tires",
7
- "5": "roller skating",
8
- "6": "tasting beer",
9
- "7": "clapping",
10
- "8": "drawing",
11
- "9": "juggling fire",
12
- "10": "bobsledding",
13
- "11": "petting animal (not cat)",
14
- "12": "spray painting",
15
- "13": "training dog",
16
- "14": "eating watermelon",
17
- "15": "building cabinet",
18
- "16": "applauding",
19
- "17": "playing harp",
20
- "18": "balloon blowing",
21
- "19": "sled dog racing",
22
- "20": "wrestling",
23
- "21": "pole vault",
24
- "22": "hurling (sport)",
25
- "23": "riding scooter",
26
- "24": "shearing sheep",
27
- "25": "sweeping floor",
28
- "26": "eating carrots",
29
- "27": "skateboarding",
30
- "28": "dunking basketball",
31
- "29": "disc golfing",
32
- "30": "eating spaghetti",
33
- "31": "playing flute",
34
- "32": "riding mechanical bull",
35
- "33": "making sushi",
36
- "34": "trapezing",
37
- "35": "picking fruit",
38
- "36": "stretching leg",
39
- "37": "playing ukulele",
40
- "38": "tying tie",
41
- "39": "skydiving",
42
- "40": "playing cello",
43
- "41": "jumping into pool",
44
- "42": "shooting goal (soccer)",
45
- "43": "trimming trees",
46
- "44": "bookbinding",
47
- "45": "ski jumping",
48
- "46": "walking the dog",
49
- "47": "riding unicycle",
50
- "48": "shaving head",
51
- "49": "hopscotch",
52
- "50": "playing piano",
53
- "51": "parasailing",
54
- "52": "bartending",
55
- "53": "kicking field goal",
56
- "54": "finger snapping",
57
- "55": "dining",
58
- "56": "yawning",
59
- "57": "peeling potatoes",
60
- "58": "canoeing or kayaking",
61
- "59": "front raises",
62
- "60": "laughing",
63
- "61": "dancing macarena",
64
- "62": "digging",
65
- "63": "reading newspaper",
66
- "64": "hitting baseball",
67
- "65": "clay pottery making",
68
- "66": "exercising with an exercise ball",
69
- "67": "playing saxophone",
70
- "68": "shooting basketball",
71
- "69": "washing hair",
72
- "70": "lunge",
73
- "71": "brushing hair",
74
- "72": "curling hair",
75
- "73": "kitesurfing",
76
- "74": "tapping guitar",
77
- "75": "bending back",
78
- "76": "skipping rope",
79
- "77": "situp",
80
- "78": "folding paper",
81
- "79": "cracking neck",
82
- "80": "assembling computer",
83
- "81": "cleaning gutters",
84
- "82": "blowing out candles",
85
- "83": "shaking hands",
86
- "84": "dancing gangnam style",
87
- "85": "windsurfing",
88
- "86": "tap dancing",
89
- "87": "skiing (not slalom or crosscountry)",
90
- "88": "bandaging",
91
- "89": "push up",
92
- "90": "doing nails",
93
- "91": "punching person (boxing)",
94
- "92": "bouncing on trampoline",
95
- "93": "scrambling eggs",
96
- "94": "singing",
97
- "95": "cleaning floor",
98
- "96": "krumping",
99
- "97": "drumming fingers",
100
- "98": "snowmobiling",
101
- "99": "gymnastics tumbling",
102
- "100": "headbanging",
103
- "101": "catching or throwing frisbee",
104
- "102": "riding elephant",
105
- "103": "bee keeping",
106
- "104": "feeding birds",
107
- "105": "snatch weight lifting",
108
- "106": "mowing lawn",
109
- "107": "fixing hair",
110
- "108": "playing trumpet",
111
- "109": "flying kite",
112
- "110": "crossing river",
113
- "111": "swinging legs",
114
- "112": "sanding floor",
115
- "113": "belly dancing",
116
- "114": "sneezing",
117
- "115": "clean and jerk",
118
- "116": "side kick",
119
- "117": "filling eyebrows",
120
- "118": "shuffling cards",
121
- "119": "recording music",
122
- "120": "cartwheeling",
123
- "121": "feeding fish",
124
- "122": "folding clothes",
125
- "123": "water skiing",
126
- "124": "tobogganing",
127
- "125": "blowing leaves",
128
- "126": "smoking",
129
- "127": "unboxing",
130
- "128": "tai chi",
131
- "129": "waxing legs",
132
- "130": "riding camel",
133
- "131": "slapping",
134
- "132": "tossing salad",
135
- "133": "capoeira",
136
- "134": "playing cards",
137
- "135": "playing organ",
138
- "136": "playing violin",
139
- "137": "playing drums",
140
- "138": "tapping pen",
141
- "139": "vault",
142
- "140": "shoveling snow",
143
- "141": "playing tennis",
144
- "142": "getting a tattoo",
145
- "143": "making a sandwich",
146
- "144": "making tea",
147
- "145": "grinding meat",
148
- "146": "squat",
149
- "147": "eating doughnuts",
150
- "148": "ice fishing",
151
- "149": "snowkiting",
152
- "150": "kicking soccer ball",
153
- "151": "playing controller",
154
- "152": "giving or receiving award",
155
- "153": "welding",
156
- "154": "throwing discus",
157
- "155": "throwing axe",
158
- "156": "ripping paper",
159
- "157": "swimming butterfly stroke",
160
- "158": "air drumming",
161
- "159": "blowing nose",
162
- "160": "hockey stop",
163
- "161": "taking a shower",
164
- "162": "bench pressing",
165
- "163": "planting trees",
166
- "164": "pumping fist",
167
- "165": "climbing tree",
168
- "166": "tickling",
169
- "167": "high kick",
170
- "168": "waiting in line",
171
- "169": "slacklining",
172
- "170": "tango dancing",
173
- "171": "hurdling",
174
- "172": "carrying baby",
175
- "173": "celebrating",
176
- "174": "sharpening knives",
177
- "175": "passing American football (in game)",
178
- "176": "headbutting",
179
- "177": "playing recorder",
180
- "178": "brush painting",
181
- "179": "garbage collecting",
182
- "180": "robot dancing",
183
- "181": "shredding paper",
184
- "182": "pumping gas",
185
- "183": "rock climbing",
186
- "184": "hula hooping",
187
- "185": "braiding hair",
188
- "186": "opening present",
189
- "187": "texting",
190
- "188": "decorating the christmas tree",
191
- "189": "answering questions",
192
- "190": "playing keyboard",
193
- "191": "writing",
194
- "192": "bungee jumping",
195
- "193": "sniffing",
196
- "194": "eating burger",
197
- "195": "playing accordion",
198
- "196": "making pizza",
199
- "197": "playing volleyball",
200
- "198": "tasting food",
201
- "199": "pushing cart",
202
- "200": "spinning poi",
203
- "201": "cleaning windows",
204
- "202": "arm wrestling",
205
- "203": "changing oil",
206
- "204": "swimming breast stroke",
207
- "205": "tossing coin",
208
- "206": "deadlifting",
209
- "207": "hoverboarding",
210
- "208": "cutting watermelon",
211
- "209": "cheerleading",
212
- "210": "snorkeling",
213
- "211": "washing hands",
214
- "212": "eating cake",
215
- "213": "pull ups",
216
- "214": "surfing water",
217
- "215": "eating hotdog",
218
- "216": "holding snake",
219
- "217": "playing harmonica",
220
- "218": "ironing",
221
- "219": "cutting nails",
222
- "220": "golf chipping",
223
- "221": "shot put",
224
- "222": "hugging",
225
- "223": "playing clarinet",
226
- "224": "faceplanting",
227
- "225": "trimming or shaving beard",
228
- "226": "drinking shots",
229
- "227": "riding mountain bike",
230
- "228": "tying bow tie",
231
- "229": "swinging on something",
232
- "230": "skiing crosscountry",
233
- "231": "unloading truck",
234
- "232": "cleaning pool",
235
- "233": "jogging",
236
- "234": "ice climbing",
237
- "235": "mopping floor",
238
- "236": "making bed",
239
- "237": "diving cliff",
240
- "238": "washing dishes",
241
- "239": "grooming dog",
242
- "240": "weaving basket",
243
- "241": "frying vegetables",
244
- "242": "stomping grapes",
245
- "243": "moving furniture",
246
- "244": "cooking sausages",
247
- "245": "doing laundry",
248
- "246": "dying hair",
249
- "247": "knitting",
250
- "248": "reading book",
251
- "249": "baby waking up",
252
- "250": "punching bag",
253
- "251": "surfing crowd",
254
- "252": "cooking chicken",
255
- "253": "pushing car",
256
- "254": "springboard diving",
257
- "255": "swing dancing",
258
- "256": "massaging legs",
259
- "257": "beatboxing",
260
- "258": "breading or breadcrumbing",
261
- "259": "somersaulting",
262
- "260": "brushing teeth",
263
- "261": "stretching arm",
264
- "262": "juggling balls",
265
- "263": "massaging person's head",
266
- "264": "eating ice cream",
267
- "265": "extinguishing fire",
268
- "266": "hammer throw",
269
- "267": "whistling",
270
- "268": "crawling baby",
271
- "269": "using remote controller (not gaming)",
272
- "270": "playing cricket",
273
- "271": "opening bottle",
274
- "272": "playing xylophone",
275
- "273": "motorcycling",
276
- "274": "driving car",
277
- "275": "exercising arm",
278
- "276": "passing American football (not in game)",
279
- "277": "playing kickball",
280
- "278": "sticking tongue out",
281
- "279": "flipping pancake",
282
- "280": "catching fish",
283
- "281": "eating chips",
284
- "282": "shaking head",
285
- "283": "sword fighting",
286
- "284": "playing poker",
287
- "285": "cooking on campfire",
288
- "286": "doing aerobics",
289
- "287": "paragliding",
290
- "288": "using segway",
291
- "289": "folding napkins",
292
- "290": "playing bagpipes",
293
- "291": "gargling",
294
- "292": "skiing slalom",
295
- "293": "strumming guitar",
296
- "294": "javelin throw",
297
- "295": "waxing back",
298
- "296": "riding or walking with horse",
299
- "297": "plastering",
300
- "298": "long jump",
301
- "299": "parkour",
302
- "300": "wrapping present",
303
- "301": "egg hunting",
304
- "302": "archery",
305
- "303": "cleaning toilet",
306
- "304": "swimming backstroke",
307
- "305": "snowboarding",
308
- "306": "catching or throwing baseball",
309
- "307": "massaging back",
310
- "308": "blowing glass",
311
- "309": "playing guitar",
312
- "310": "playing chess",
313
- "311": "golf driving",
314
- "312": "presenting weather forecast",
315
- "313": "rock scissors paper",
316
- "314": "high jump",
317
- "315": "baking cookies",
318
- "316": "using computer",
319
- "317": "washing feet",
320
- "318": "arranging flowers",
321
- "319": "playing bass guitar",
322
- "320": "spraying",
323
- "321": "cutting pineapple",
324
- "322": "waxing chest",
325
- "323": "auctioning",
326
- "324": "jetskiing",
327
- "325": "drinking",
328
- "326": "busking",
329
- "327": "playing monopoly",
330
- "328": "salsa dancing",
331
- "329": "waxing eyebrows",
332
- "330": "watering plants",
333
- "331": "zumba",
334
- "332": "chopping wood",
335
- "333": "pushing wheelchair",
336
- "334": "carving pumpkin",
337
- "335": "building shed",
338
- "336": "making jewelry",
339
- "337": "catching or throwing softball",
340
- "338": "bending metal",
341
- "339": "ice skating",
342
- "340": "dancing charleston",
343
- "341": "abseiling",
344
- "342": "climbing a rope",
345
- "343": "crying",
346
- "344": "cleaning shoes",
347
- "345": "dancing ballet",
348
- "346": "driving tractor",
349
- "347": "triple jump",
350
- "348": "throwing ball",
351
- "349": "getting a haircut",
352
- "350": "running on treadmill",
353
- "351": "climbing ladder",
354
- "352": "blasting sand",
355
- "353": "playing trombone",
356
- "354": "drop kicking",
357
- "355": "country line dancing",
358
- "356": "changing wheel",
359
- "357": "feeding goats",
360
- "358": "tying knot (not on a tie)",
361
- "359": "setting table",
362
- "360": "shaving legs",
363
- "361": "kissing",
364
- "362": "riding mule",
365
- "363": "counting money",
366
- "364": "laying bricks",
367
- "365": "barbequing",
368
- "366": "news anchoring",
369
- "367": "smoking hookah",
370
- "368": "cooking egg",
371
- "369": "peeling apples",
372
- "370": "yoga",
373
- "371": "sharpening pencil",
374
- "372": "dribbling basketball",
375
- "373": "petting cat",
376
- "374": "playing ice hockey",
377
- "375": "milking cow",
378
- "376": "shining shoes",
379
- "377": "juggling soccer ball",
380
- "378": "scuba diving",
381
- "379": "playing squash or racquetball",
382
- "380": "drinking beer",
383
- "381": "sign language interpreting",
384
- "382": "playing basketball",
385
- "383": "breakdancing",
386
- "384": "testifying",
387
- "385": "making snowman",
388
- "386": "golf putting",
389
- "387": "playing didgeridoo",
390
- "388": "biking through snow",
391
- "389": "sailing",
392
- "390": "jumpstyle dancing",
393
- "391": "water sliding",
394
- "392": "grooming horse",
395
- "393": "massaging feet",
396
- "394": "playing paintball",
397
- "395": "making a cake",
398
- "396": "bowling",
399
- "397": "contact juggling",
400
- "398": "applying cream",
401
- "399": "playing badminton"
402
- }
 
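Note: a minimal sketch of how the removed class-index mapping is typically used to decode predictions; the `logits` tensor below is a stand-in assumption for a real video classifier output.

import torch
from kinetics_class_index import kinetics_classnames

logits = torch.randn(1, 400)  # stand-in for model output over the 400 Kinetics-400 classes
top5 = logits.softmax(dim=-1).topk(5).indices[0].tolist()
print([kinetics_classnames[str(i)] for i in top5])  # keys of the mapping are string indices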
 
spaces/AnishKumbhar/ChatBot/text-generation-webui-main/modules/chat.py DELETED
@@ -1,724 +0,0 @@
1
- import base64
2
- import copy
3
- import functools
4
- import html
5
- import json
6
- import re
7
- from datetime import datetime
8
- from pathlib import Path
9
-
10
- import gradio as gr
11
- import yaml
12
- from PIL import Image
13
-
14
- import modules.shared as shared
15
- from modules.extensions import apply_extensions
16
- from modules.html_generator import chat_html_wrapper, make_thumbnail
17
- from modules.logging_colors import logger
18
- from modules.text_generation import (
19
- generate_reply,
20
- get_encoded_length,
21
- get_max_prompt_length
22
- )
23
- from modules.utils import (
24
- delete_file,
25
- get_available_characters,
26
- replace_all,
27
- save_file
28
- )
29
-
30
-
31
- def str_presenter(dumper, data):
32
- """
33
- Copied from https://github.com/yaml/pyyaml/issues/240
34
- Makes pyyaml output prettier multiline strings.
35
- """
36
-
37
- if data.count('\n') > 0:
38
- return dumper.represent_scalar('tag:yaml.org,2002:str', data, style='|')
39
-
40
- return dumper.represent_scalar('tag:yaml.org,2002:str', data)
41
-
42
-
43
- yaml.add_representer(str, str_presenter)
44
- yaml.representer.SafeRepresenter.add_representer(str, str_presenter)
45
-
46
-
47
- def get_turn_substrings(state, instruct=False):
48
- if instruct:
49
- if 'turn_template' not in state or state['turn_template'] == '':
50
- template = '<|user|>\n<|user-message|>\n<|bot|>\n<|bot-message|>\n'
51
- else:
52
- template = state['turn_template'].replace(r'\n', '\n')
53
- else:
54
- template = '<|user|>: <|user-message|>\n<|bot|>: <|bot-message|>\n'
55
-
56
- replacements = {
57
- '<|user|>': state['name1_instruct' if instruct else 'name1'].strip(),
58
- '<|bot|>': state['name2_instruct' if instruct else 'name2'].strip(),
59
- }
60
-
61
- output = {
62
- 'user_turn': template.split('<|bot|>')[0],
63
- 'bot_turn': '<|bot|>' + template.split('<|bot|>')[1],
64
- 'user_turn_stripped': template.split('<|bot|>')[0].split('<|user-message|>')[0],
65
- 'bot_turn_stripped': '<|bot|>' + template.split('<|bot|>')[1].split('<|bot-message|>')[0],
66
- }
67
-
68
- for k in output:
69
- output[k] = replace_all(output[k], replacements)
70
-
71
- return output
72
-
73
-
74
- def generate_chat_prompt(user_input, state, **kwargs):
75
- impersonate = kwargs.get('impersonate', False)
76
- _continue = kwargs.get('_continue', False)
77
- also_return_rows = kwargs.get('also_return_rows', False)
78
- history = kwargs.get('history', state['history'])['internal']
79
- is_instruct = state['mode'] == 'instruct'
80
-
81
- # Find the maximum prompt size
82
- max_length = get_max_prompt_length(state)
83
- all_substrings = {
84
- 'chat': get_turn_substrings(state, instruct=False),
85
- 'instruct': get_turn_substrings(state, instruct=True)
86
- }
87
-
88
- substrings = all_substrings['instruct' if is_instruct else 'chat']
89
-
90
- # Create the template for "chat-instruct" mode
91
- if state['mode'] == 'chat-instruct':
92
- wrapper = ''
93
- command = state['chat-instruct_command'].replace('<|character|>', state['name2'] if not impersonate else state['name1'])
94
- wrapper += state['context_instruct']
95
- wrapper += all_substrings['instruct']['user_turn'].replace('<|user-message|>', command)
96
- wrapper += all_substrings['instruct']['bot_turn_stripped']
97
- if impersonate:
98
- wrapper += substrings['user_turn_stripped'].rstrip(' ')
99
- elif _continue:
100
- wrapper += apply_extensions('bot_prefix', substrings['bot_turn_stripped'], state)
101
- wrapper += history[-1][1]
102
- else:
103
- wrapper += apply_extensions('bot_prefix', substrings['bot_turn_stripped'].rstrip(' '), state)
104
- else:
105
- wrapper = '<|prompt|>'
106
-
107
- if is_instruct:
108
- context = state['context_instruct']
109
- else:
110
- context = replace_character_names(
111
- f"{state['context'].strip()}\n",
112
- state['name1'],
113
- state['name2']
114
- )
115
-
116
- # Build the prompt
117
- rows = [context]
118
- min_rows = 3
119
- i = len(history) - 1
120
- while i >= 0 and get_encoded_length(wrapper.replace('<|prompt|>', ''.join(rows))) < max_length:
121
- if _continue and i == len(history) - 1:
122
- if state['mode'] != 'chat-instruct':
123
- rows.insert(1, substrings['bot_turn_stripped'] + history[i][1].strip())
124
- else:
125
- rows.insert(1, substrings['bot_turn'].replace('<|bot-message|>', history[i][1].strip()))
126
-
127
- string = history[i][0]
128
- if string not in ['', '<|BEGIN-VISIBLE-CHAT|>']:
129
- rows.insert(1, replace_all(substrings['user_turn'], {'<|user-message|>': string.strip(), '<|round|>': str(i)}))
130
-
131
- i -= 1
132
-
133
- if impersonate:
134
- if state['mode'] == 'chat-instruct':
135
- min_rows = 1
136
- else:
137
- min_rows = 2
138
- rows.append(substrings['user_turn_stripped'].rstrip(' '))
139
- elif not _continue:
140
- # Add the user message
141
- if len(user_input) > 0:
142
- rows.append(replace_all(substrings['user_turn'], {'<|user-message|>': user_input.strip(), '<|round|>': str(len(history))}))
143
-
144
- # Add the character prefix
145
- if state['mode'] != 'chat-instruct':
146
- rows.append(apply_extensions('bot_prefix', substrings['bot_turn_stripped'].rstrip(' '), state))
147
-
148
- while len(rows) > min_rows and get_encoded_length(wrapper.replace('<|prompt|>', ''.join(rows))) >= max_length:
149
- rows.pop(1)
150
-
151
- prompt = wrapper.replace('<|prompt|>', ''.join(rows))
152
- if also_return_rows:
153
- return prompt, rows
154
- else:
155
- return prompt
156
-
157
-
158
- def get_stopping_strings(state):
159
- stopping_strings = []
160
- if state['mode'] in ['instruct', 'chat-instruct']:
161
- stopping_strings += [
162
- state['turn_template'].split('<|user-message|>')[1].split('<|bot|>')[0] + '<|bot|>',
163
- state['turn_template'].split('<|bot-message|>')[1] + '<|user|>'
164
- ]
165
-
166
- replacements = {
167
- '<|user|>': state['name1_instruct'],
168
- '<|bot|>': state['name2_instruct']
169
- }
170
-
171
- for i in range(len(stopping_strings)):
172
- stopping_strings[i] = replace_all(stopping_strings[i], replacements).rstrip(' ').replace(r'\n', '\n')
173
-
174
- if state['mode'] in ['chat', 'chat-instruct']:
175
- stopping_strings += [
176
- f"\n{state['name1']}:",
177
- f"\n{state['name2']}:"
178
- ]
179
-
180
- if 'stopping_strings' in state and isinstance(state['stopping_strings'], list):
181
- stopping_strings += state.pop('stopping_strings')
182
-
183
- return stopping_strings
184
-
185
-
186
- def chatbot_wrapper(text, state, regenerate=False, _continue=False, loading_message=True):
187
- history = state['history']
188
- output = copy.deepcopy(history)
189
- output = apply_extensions('history', output)
190
- state = apply_extensions('state', state)
191
- if shared.model_name == 'None' or shared.model is None:
192
- logger.error("No model is loaded! Select one in the Model tab.")
193
- yield output
194
- return
195
-
196
- just_started = True
197
- visible_text = None
198
- stopping_strings = get_stopping_strings(state)
199
- is_stream = state['stream']
200
-
201
- # Prepare the input
202
- if not any((regenerate, _continue)):
203
- visible_text = html.escape(text)
204
-
205
- # Apply extensions
206
- text, visible_text = apply_extensions('chat_input', text, visible_text, state)
207
- text = apply_extensions('input', text, state, is_chat=True)
208
-
209
- # *Is typing...*
210
- if loading_message:
211
- yield {'visible': output['visible'] + [[visible_text, shared.processing_message]], 'internal': output['internal']}
212
- else:
213
- text, visible_text = output['internal'][-1][0], output['visible'][-1][0]
214
- if regenerate:
215
- output['visible'].pop()
216
- output['internal'].pop()
217
-
218
- # *Is typing...*
219
- if loading_message:
220
- yield {'visible': output['visible'] + [[visible_text, shared.processing_message]], 'internal': output['internal']}
221
- elif _continue:
222
- last_reply = [output['internal'][-1][1], output['visible'][-1][1]]
223
- if loading_message:
224
- yield {'visible': output['visible'][:-1] + [[visible_text, last_reply[1] + '...']], 'internal': output['internal']}
225
-
226
- # Generate the prompt
227
- kwargs = {
228
- '_continue': _continue,
229
- 'history': output,
230
- }
231
- prompt = apply_extensions('custom_generate_chat_prompt', text, state, **kwargs)
232
- if prompt is None:
233
- prompt = generate_chat_prompt(text, state, **kwargs)
234
-
235
- # Generate
236
- reply = None
237
- for j, reply in enumerate(generate_reply(prompt, state, stopping_strings=stopping_strings, is_chat=True)):
238
-
239
- # Extract the reply
240
- visible_reply = re.sub("(<USER>|<user>|{{user}})", state['name1'], reply)
241
- visible_reply = html.escape(visible_reply)
242
-
243
- if shared.stop_everything:
244
- output['visible'][-1][1] = apply_extensions('output', output['visible'][-1][1], state, is_chat=True)
245
- yield output
246
- return
247
-
248
- if just_started:
249
- just_started = False
250
- if not _continue:
251
- output['internal'].append(['', ''])
252
- output['visible'].append(['', ''])
253
-
254
- if _continue:
255
- output['internal'][-1] = [text, last_reply[0] + reply]
256
- output['visible'][-1] = [visible_text, last_reply[1] + visible_reply]
257
- if is_stream:
258
- yield output
259
- elif not (j == 0 and visible_reply.strip() == ''):
260
- output['internal'][-1] = [text, reply.lstrip(' ')]
261
- output['visible'][-1] = [visible_text, visible_reply.lstrip(' ')]
262
- if is_stream:
263
- yield output
264
-
265
- output['visible'][-1][1] = apply_extensions('output', output['visible'][-1][1], state, is_chat=True)
266
- yield output
267
-
268
-
269
- def impersonate_wrapper(text, state):
270
-
271
- static_output = chat_html_wrapper(state['history'], state['name1'], state['name2'], state['mode'], state['chat_style'])
272
-
273
- if shared.model_name == 'None' or shared.model is None:
274
- logger.error("No model is loaded! Select one in the Model tab.")
275
- yield '', static_output
276
- return
277
-
278
- prompt = generate_chat_prompt('', state, impersonate=True)
279
- stopping_strings = get_stopping_strings(state)
280
-
281
- yield text + '...', static_output
282
- reply = None
283
- for reply in generate_reply(prompt + text, state, stopping_strings=stopping_strings, is_chat=True):
284
- yield (text + reply).lstrip(' '), static_output
285
- if shared.stop_everything:
286
- return
287
-
288
-
289
- def generate_chat_reply(text, state, regenerate=False, _continue=False, loading_message=True):
290
- history = state['history']
291
- if regenerate or _continue:
292
- text = ''
293
- if (len(history['visible']) == 1 and not history['visible'][0][0]) or len(history['internal']) == 0:
294
- yield history
295
- return
296
-
297
- for history in chatbot_wrapper(text, state, regenerate=regenerate, _continue=_continue, loading_message=loading_message):
298
- yield history
299
-
300
-
301
- def character_is_loaded(state, raise_exception=False):
302
- if state['mode'] in ['chat', 'chat-instruct'] and state['name2'] == '':
303
- logger.error('It looks like no character is loaded. Please load one under Parameters > Character.')
304
- if raise_exception:
305
- raise ValueError
306
-
307
- return False
308
- else:
309
- return True
310
-
311
-
312
- def generate_chat_reply_wrapper(text, state, regenerate=False, _continue=False):
313
- '''
314
- Same as above but returns HTML for the UI
315
- '''
316
-
317
- if not character_is_loaded(state):
318
- return
319
-
320
- if state['start_with'] != '' and not _continue:
321
- if regenerate:
322
- text, state['history'] = remove_last_message(state['history'])
323
- regenerate = False
324
-
325
- _continue = True
326
- send_dummy_message(text, state)
327
- send_dummy_reply(state['start_with'], state)
328
-
329
- for i, history in enumerate(generate_chat_reply(text, state, regenerate, _continue, loading_message=True)):
330
- yield chat_html_wrapper(history, state['name1'], state['name2'], state['mode'], state['chat_style']), history
331
-
332
-
333
- def remove_last_message(history):
334
- if len(history['visible']) > 0 and history['internal'][-1][0] != '<|BEGIN-VISIBLE-CHAT|>':
335
- last = history['visible'].pop()
336
- history['internal'].pop()
337
- else:
338
- last = ['', '']
339
-
340
- return html.unescape(last[0]), history
341
-
342
-
343
- def send_last_reply_to_input(history):
344
- if len(history['visible']) > 0:
345
- return html.unescape(history['visible'][-1][1])
346
- else:
347
- return ''
348
-
349
-
350
- def replace_last_reply(text, state):
351
- history = state['history']
352
-
353
- if len(text.strip()) == 0:
354
- return history
355
- elif len(history['visible']) > 0:
356
- history['visible'][-1][1] = html.escape(text)
357
- history['internal'][-1][1] = apply_extensions('input', text, state, is_chat=True)
358
-
359
- return history
360
-
361
-
362
- def send_dummy_message(text, state):
363
- history = state['history']
364
- history['visible'].append([html.escape(text), ''])
365
- history['internal'].append([apply_extensions('input', text, state, is_chat=True), ''])
366
- return history
367
-
368
-
369
- def send_dummy_reply(text, state):
370
- history = state['history']
371
- if len(history['visible']) > 0 and not history['visible'][-1][1] == '':
372
- history['visible'].append(['', ''])
373
- history['internal'].append(['', ''])
374
-
375
- history['visible'][-1][1] = html.escape(text)
376
- history['internal'][-1][1] = apply_extensions('input', text, state, is_chat=True)
377
- return history
378
-
379
-
380
- def redraw_html(history, name1, name2, mode, style, reset_cache=False):
381
- return chat_html_wrapper(history, name1, name2, mode, style, reset_cache=reset_cache)
382
-
383
-
384
- def start_new_chat(state):
385
- mode = state['mode']
386
- history = {'internal': [], 'visible': []}
387
-
388
- if mode != 'instruct':
389
- greeting = replace_character_names(state['greeting'], state['name1'], state['name2'])
390
- if greeting != '':
391
- history['internal'] += [['<|BEGIN-VISIBLE-CHAT|>', greeting]]
392
- history['visible'] += [['', apply_extensions('output', greeting, state, is_chat=True)]]
393
-
394
- unique_id = datetime.now().strftime('%Y%m%d-%H-%M-%S')
395
- save_history(history, unique_id, state['character_menu'], state['mode'])
396
-
397
- return history
398
-
399
-
400
- def get_history_file_path(unique_id, character, mode):
401
- if mode == 'instruct':
402
- p = Path(f'logs/instruct/{unique_id}.json')
403
- else:
404
- p = Path(f'logs/chat/{character}/{unique_id}.json')
405
-
406
- return p
407
-
408
-
409
- def save_history(history, unique_id, character, mode):
410
- if shared.args.multi_user:
411
- return
412
-
413
- p = get_history_file_path(unique_id, character, mode)
414
- if not p.parent.is_dir():
415
- p.parent.mkdir(parents=True)
416
-
417
- with open(p, 'w', encoding='utf-8') as f:
418
- f.write(json.dumps(history, indent=4))
419
-
420
-
421
- def rename_history(old_id, new_id, character, mode):
422
- if shared.args.multi_user:
423
- return
424
-
425
- old_p = get_history_file_path(old_id, character, mode)
426
- new_p = get_history_file_path(new_id, character, mode)
427
- if new_p.parent != old_p.parent:
428
- logger.error(f"The following path is not allowed: {new_p}.")
429
- elif new_p == old_p:
430
- logger.info("The provided path is identical to the old one.")
431
- else:
432
- logger.info(f"Renaming {old_p} to {new_p}")
433
- old_p.rename(new_p)
434
-
435
-
436
- def find_all_histories(state):
437
- if shared.args.multi_user:
438
- return ['']
439
-
440
- if state['mode'] == 'instruct':
441
- paths = Path('logs/instruct').glob('*.json')
442
- else:
443
- character = state['character_menu']
444
-
445
- # Handle obsolete filenames and paths
446
- old_p = Path(f'logs/{character}_persistent.json')
447
- new_p = Path(f'logs/persistent_{character}.json')
448
- if old_p.exists():
449
- logger.warning(f"Renaming {old_p} to {new_p}")
450
- old_p.rename(new_p)
451
- if new_p.exists():
452
- unique_id = datetime.now().strftime('%Y%m%d-%H-%M-%S')
453
- p = get_history_file_path(unique_id, character, state['mode'])
454
- logger.warning(f"Moving {new_p} to {p}")
455
- p.parent.mkdir(exist_ok=True)
456
- new_p.rename(p)
457
-
458
- paths = Path(f'logs/chat/{character}').glob('*.json')
459
-
460
- histories = sorted(paths, key=lambda x: x.stat().st_mtime, reverse=True)
461
- histories = [path.stem for path in histories]
462
-
463
- return histories
464
-
465
-
466
- def load_latest_history(state):
467
- '''
468
- Loads the latest history for the given character in chat or chat-instruct
469
- mode, or the latest instruct history for instruct mode.
470
- '''
471
-
472
- if shared.args.multi_user:
473
- return start_new_chat(state)
474
-
475
- histories = find_all_histories(state)
476
-
477
- if len(histories) > 0:
478
- unique_id = Path(histories[0]).stem
479
- history = load_history(unique_id, state['character_menu'], state['mode'])
480
- else:
481
- history = start_new_chat(state)
482
-
483
- return history
484
-
485
-
486
- def load_history(unique_id, character, mode):
487
- p = get_history_file_path(unique_id, character, mode)
488
-
489
- f = json.loads(open(p, 'rb').read())
490
- if 'internal' in f and 'visible' in f:
491
- history = f
492
- else:
493
- history = {
494
- 'internal': f['data'],
495
- 'visible': f['data_visible']
496
- }
497
-
498
- return history
499
-
500
-
501
- def load_history_json(file, history):
502
- try:
503
- file = file.decode('utf-8')
504
- f = json.loads(file)
505
- if 'internal' in f and 'visible' in f:
506
- history = f
507
- else:
508
- history = {
509
- 'internal': f['data'],
510
- 'visible': f['data_visible']
511
- }
512
-
513
- return history
514
- except:
515
- return history
516
-
517
-
518
- def delete_history(unique_id, character, mode):
519
- p = get_history_file_path(unique_id, character, mode)
520
- delete_file(p)
521
-
522
-
523
- def replace_character_names(text, name1, name2):
524
- text = text.replace('{{user}}', name1).replace('{{char}}', name2)
525
- return text.replace('<USER>', name1).replace('<BOT>', name2)
526
-
527
-
528
- def generate_pfp_cache(character):
529
- cache_folder = Path("cache")
530
- if not cache_folder.exists():
531
- cache_folder.mkdir()
532
-
533
- for path in [Path(f"characters/{character}.{extension}") for extension in ['png', 'jpg', 'jpeg']]:
534
- if path.exists():
535
- img = make_thumbnail(Image.open(path))
536
- img.save(Path('cache/pfp_character.png'), format='PNG')
537
- return img
538
-
539
- return None
540
-
541
-
542
- def load_character(character, name1, name2, instruct=False):
543
- context = greeting = turn_template = ""
544
- greeting_field = 'greeting'
545
- picture = None
546
-
547
- if instruct:
548
- name1 = name2 = ''
549
- folder = 'instruction-templates'
550
- else:
551
- folder = 'characters'
552
-
553
- filepath = None
554
- for extension in ["yml", "yaml", "json"]:
555
- filepath = Path(f'{folder}/{character}.{extension}')
556
- if filepath.exists():
557
- break
558
-
559
- if filepath is None or not filepath.exists():
560
- logger.error(f"Could not find the character \"{character}\" inside {folder}/. No character has been loaded.")
561
- raise ValueError
562
-
563
- file_contents = open(filepath, 'r', encoding='utf-8').read()
564
- data = json.loads(file_contents) if extension == "json" else yaml.safe_load(file_contents)
565
-
566
- if Path("cache/pfp_character.png").exists() and not instruct:
567
- Path("cache/pfp_character.png").unlink()
568
-
569
- picture = generate_pfp_cache(character)
570
-
571
- # Finding the bot's name
572
- for k in ['name', 'bot', '<|bot|>', 'char_name']:
573
- if k in data and data[k] != '':
574
- name2 = data[k]
575
- break
576
-
577
- # Find the user name (if any)
578
- for k in ['your_name', 'user', '<|user|>']:
579
- if k in data and data[k] != '':
580
- name1 = data[k]
581
- break
582
-
583
- if 'context' in data:
584
- context = data['context']
585
- if not instruct:
586
- context = context.strip() + '\n'
587
- elif "char_persona" in data:
588
- context = build_pygmalion_style_context(data)
589
- greeting_field = 'char_greeting'
590
-
591
- if greeting_field in data:
592
- greeting = data[greeting_field]
593
-
594
- if 'turn_template' in data:
595
- turn_template = data['turn_template']
596
-
597
- return name1, name2, picture, greeting, context, turn_template.replace("\n", r"\n")
598
-
599
-
600
- @functools.cache
601
- def load_character_memoized(character, name1, name2, instruct=False):
602
- return load_character(character, name1, name2, instruct=instruct)
603
-
604
-
605
- def upload_character(file, img, tavern=False):
606
- decoded_file = file if isinstance(file, str) else file.decode('utf-8')
607
- try:
608
- data = json.loads(decoded_file)
609
- except:
610
- data = yaml.safe_load(decoded_file)
611
-
612
- if 'char_name' in data:
613
- name = data['char_name']
614
- greeting = data['char_greeting']
615
- context = build_pygmalion_style_context(data)
616
- yaml_data = generate_character_yaml(name, greeting, context)
617
- else:
618
- name = data['name']
619
- yaml_data = generate_character_yaml(data['name'], data['greeting'], data['context'])
620
-
621
- outfile_name = name
622
- i = 1
623
- while Path(f'characters/{outfile_name}.yaml').exists():
624
- outfile_name = f'{name}_{i:03d}'
625
- i += 1
626
-
627
- with open(Path(f'characters/{outfile_name}.yaml'), 'w', encoding='utf-8') as f:
628
- f.write(yaml_data)
629
-
630
- if img is not None:
631
- img.save(Path(f'characters/{outfile_name}.png'))
632
-
633
- logger.info(f'New character saved to "characters/{outfile_name}.yaml".')
634
- return gr.update(value=outfile_name, choices=get_available_characters())
635
-
636
-
637
- def build_pygmalion_style_context(data):
638
- context = ""
639
- if 'char_persona' in data and data['char_persona'] != '':
640
- context += f"{data['char_name']}'s Persona: {data['char_persona']}\n"
641
-
642
- if 'world_scenario' in data and data['world_scenario'] != '':
643
- context += f"Scenario: {data['world_scenario']}\n"
644
-
645
- if 'example_dialogue' in data and data['example_dialogue'] != '':
646
- context += f"{data['example_dialogue'].strip()}\n"
647
-
648
- context = f"{context.strip()}\n"
649
- return context
650
-
651
-
652
- def upload_tavern_character(img, _json):
653
- _json = {'char_name': _json['name'], 'char_persona': _json['description'], 'char_greeting': _json['first_mes'], 'example_dialogue': _json['mes_example'], 'world_scenario': _json['scenario']}
654
- return upload_character(json.dumps(_json), img, tavern=True)
655
-
656
-
657
- def check_tavern_character(img):
658
- if "chara" not in img.info:
659
- return "Not a TavernAI card", None, None, gr.update(interactive=False)
660
-
661
- decoded_string = base64.b64decode(img.info['chara']).replace(b'\\r\\n', b'\\n')
662
- _json = json.loads(decoded_string)
663
- if "data" in _json:
664
- _json = _json["data"]
665
-
666
- return _json['name'], _json['description'], _json, gr.update(interactive=True)
667
-
668
-
669
- def upload_your_profile_picture(img):
670
- cache_folder = Path("cache")
671
- if not cache_folder.exists():
672
- cache_folder.mkdir()
673
-
674
- if img is None:
675
- if Path("cache/pfp_me.png").exists():
676
- Path("cache/pfp_me.png").unlink()
677
- else:
678
- img = make_thumbnail(img)
679
- img.save(Path('cache/pfp_me.png'))
680
- logger.info('Profile picture saved to "cache/pfp_me.png"')
681
-
682
-
683
- def generate_character_yaml(name, greeting, context):
684
- data = {
685
- 'name': name,
686
- 'greeting': greeting,
687
- 'context': context,
688
- }
689
-
690
- data = {k: v for k, v in data.items() if v} # Strip falsy
691
- return yaml.dump(data, sort_keys=False, width=float("inf"))
692
-
693
-
694
- def generate_instruction_template_yaml(user, bot, context, turn_template):
695
- data = {
696
- 'user': user,
697
- 'bot': bot,
698
- 'turn_template': turn_template,
699
- 'context': context,
700
- }
701
-
702
- data = {k: v for k, v in data.items() if v} # Strip falsy
703
- return yaml.dump(data, sort_keys=False, width=float("inf"))
704
-
705
-
706
- def save_character(name, greeting, context, picture, filename):
707
- if filename == "":
708
- logger.error("The filename is empty, so the character will not be saved.")
709
- return
710
-
711
- data = generate_character_yaml(name, greeting, context)
712
- filepath = Path(f'characters/{filename}.yaml')
713
- save_file(filepath, data)
714
- path_to_img = Path(f'characters/{filename}.png')
715
- if picture is not None:
716
- picture.save(path_to_img)
717
- logger.info(f'Saved {path_to_img}.')
718
-
719
-
720
- def delete_character(name, instruct=False):
721
- for extension in ["yml", "yaml", "json"]:
722
- delete_file(Path(f'characters/{name}.{extension}'))
723
-
724
- delete_file(Path(f'characters/{name}.png'))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/AnonymousForSubmission/Graphic_Score_and_Audio/generate_ssrl.py DELETED
@@ -1,104 +0,0 @@
1
- """
2
- Derived from https://github.com/ktatar/latent-timbre-synthesis by Tatar et. al., paper link: https://arxiv.org/pdf/2006.00408.pdf
3
-
4
- """
5
-
6
- from __future__ import absolute_import, division, print_function, unicode_literals
7
-
8
- import tensorflow as tf
9
- from tensorflow.keras import layers
10
- tf.keras.backend.clear_session()
11
-
12
- import random
13
- import numpy as np
14
-
15
- import os, sys, argparse, time
16
- from pathlib import Path
17
-
18
- import librosa
19
- import configparser
20
- import random
21
- import json
22
- import matplotlib.pyplot as plt
23
- import soundfile as sf
24
- import cv2
25
-
26
- parser = argparse.ArgumentParser()
27
- parser.add_argument('--config', type=str, default ='./configs.ini' , help='path to the config file')
28
- args = parser.parse_args()
29
-
30
- config_path = args.config
31
- config = configparser.ConfigParser(allow_no_value=True)
32
- try:
33
- config.read(config_path)
34
- except FileNotFoundError:
35
- print('Config File Not Found at {}'.format(config_path))
36
- sys.exit()
37
-
38
- #Configs
39
- sample_rate = config['audio'].getint('sample_rate')
40
- hop_length = config['audio'].getint('hop_length')
41
- bins_per_octave = config['audio'].getint('bins_per_octave')
42
- num_octaves = config['audio'].getint('num_octaves')
43
- n_bins = int(num_octaves * bins_per_octave)
44
- n_iter = config['audio'].getint('n_iter')
45
-
46
- batch_size = 32
47
-
48
- AUTOTUNE = tf.data.experimental.AUTOTUNE
49
-
50
- workdir2 = Path(os.getcwd())
51
-
52
- my_examples_folder_audio_recons = workdir2.joinpath('./SSRL_Media/Designed_Audio/')
53
-
54
- class Sampling(layers.Layer):
55
- """Uses (z_mean, z_log_var) to sample z, the vector encoding a digit."""
56
-
57
- def call(self, inputs):
58
- z_mean, z_log_var = inputs
59
- batch = tf.shape(z_mean)[0]
60
- dim = tf.shape(z_mean)[1]
61
- epsilon = tf.keras.backend.random_normal(shape=(batch, dim))
62
- return z_mean + tf.exp(0.5 * z_log_var) * epsilon
63
-
64
- def synthesize_audio(np_image):
65
- np_image = np.rot90(np_image, 3) # to adjust the dimensions
66
-
67
- # *********************** LOAD MODEL **********************
68
-
69
- with tf.keras.utils.CustomObjectScope({'Sampling': Sampling}):
70
- vae = tf.keras.models.load_model('./model.hp5')
71
- vae.summary()
72
-
73
- # Encoder
74
- encoder_audio = tf.keras.Model(inputs = vae.get_layer("encoder_input_audio").input, outputs = [vae.get_layer("z_mean_audio").output, vae.get_layer("z_log_var_audio").output], name='encoder_audio')
75
- encoder_audio.summary()
76
-
77
- encoder_visual_score = tf.keras.Model(inputs = vae.get_layer("encoder_input_visual_score").input, outputs = [vae.get_layer("z_mean_visual_score").output, vae.get_layer("z_log_var_visual_score").output], name='encoder_visual_score')
78
- encoder_visual_score.summary()
79
-
80
- # Decoder
81
- decoder_audio = tf.keras.Model(inputs = vae.get_layer('decoder_audio').input, outputs = vae.get_layer('decoder_audio').output, name='decoder_audio')
82
- decoder_audio.summary()
83
-
84
- decoder_visual_score = tf.keras.Model(inputs = vae.get_layer('decoder_visual_score').input, outputs = vae.get_layer('decoder_visual_score').output, name='decoder_visual_score')
85
- decoder_visual_score.summary()
86
-
87
- # Generate examples
88
- my_array = np_image / 255.0 # To scale pixel values
89
-
90
- user_input = tf.data.Dataset.from_tensor_slices(my_array).batch(batch_size).prefetch(AUTOTUNE)
91
-
92
- output_audio = tf.constant(0., dtype='float32', shape=(1,n_bins))
93
-
94
- for step, x_batch_train in enumerate(user_input):
95
-
96
- reconstructed_whole = vae([tf.random.uniform(shape=(1, 384)), x_batch_train]) # random uniform is for the other modality (audio)
97
- visual_score_z_mean_var = encoder_visual_score(x_batch_train, training = False)
98
- visual_score_z = Sampling()((visual_score_z_mean_var[0], visual_score_z_mean_var[1]))
99
- transferred_audio = decoder_audio(visual_score_z, training = False)
100
- output_audio = tf.concat([output_audio, transferred_audio], 0)
101
-
102
- output_np = np.transpose(output_audio.numpy())
103
- output_inv_32 = librosa.griffinlim_cqt(output_np[1:], sr=sample_rate, n_iter=n_iter, hop_length=hop_length, bins_per_octave=bins_per_octave, dtype=np.float32)
104
- sf.write(my_examples_folder_audio_recons.joinpath('generated_audio.wav'), output_inv_32, sample_rate)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Arthur678/vits-uma-genshin-honkai/text/cleaners.py DELETED
@@ -1,475 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
-
3
- '''
4
- Cleaners are transformations that run over the input text at both training and eval time.
5
-
6
- Cleaners can be selected by passing a comma-delimited list of cleaner names as the "cleaners"
7
- hyperparameter. Some cleaners are English-specific. You'll typically want to use:
8
- 1. "english_cleaners" for English text
9
- 2. "transliteration_cleaners" for non-English text that can be transliterated to ASCII using
10
- the Unidecode library (https://pypi.python.org/pypi/Unidecode)
11
- 3. "basic_cleaners" if you do not want to transliterate (in this case, you should also update
12
- the symbols in symbols.py to match your data).
13
- '''
14
-
15
- import re
16
- from unidecode import unidecode
17
- import pyopenjtalk
18
- from jamo import h2j, j2hcj
19
- from pypinyin import lazy_pinyin, BOPOMOFO
20
- import jieba, cn2an
21
-
22
-
23
- # This is a list of Korean classifiers preceded by pure Korean numerals.
24
- _korean_classifiers = '군데 권 개 그루 닢 대 두 마리 모 모금 뭇 발 발짝 방 번 벌 보루 살 수 술 시 쌈 움큼 정 짝 채 척 첩 축 켤레 톨 통'
25
-
26
- # Regular expression matching whitespace:
27
- _whitespace_re = re.compile(r'\s+')
28
-
29
- # Regular expression matching Japanese without punctuation marks:
30
- _japanese_characters = re.compile(r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
31
-
32
- # Regular expression matching non-Japanese characters or punctuation marks:
33
- _japanese_marks = re.compile(r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]')
34
-
35
- # List of (regular expression, replacement) pairs for abbreviations:
36
- _abbreviations = [(re.compile('\\b%s\\.' % x[0], re.IGNORECASE), x[1]) for x in [
37
- ('mrs', 'misess'),
38
- ('mr', 'mister'),
39
- ('dr', 'doctor'),
40
- ('st', 'saint'),
41
- ('co', 'company'),
42
- ('jr', 'junior'),
43
- ('maj', 'major'),
44
- ('gen', 'general'),
45
- ('drs', 'doctors'),
46
- ('rev', 'reverend'),
47
- ('lt', 'lieutenant'),
48
- ('hon', 'honorable'),
49
- ('sgt', 'sergeant'),
50
- ('capt', 'captain'),
51
- ('esq', 'esquire'),
52
- ('ltd', 'limited'),
53
- ('col', 'colonel'),
54
- ('ft', 'fort'),
55
- ]]
56
-
57
- # List of (hangul, hangul divided) pairs:
58
- _hangul_divided = [(re.compile('%s' % x[0]), x[1]) for x in [
59
- ('ㄳ', 'ㄱㅅ'),
60
- ('ㄵ', 'ㄴㅈ'),
61
- ('ㄶ', 'ㄴㅎ'),
62
- ('ㄺ', 'ㄹㄱ'),
63
- ('ㄻ', 'ㄹㅁ'),
64
- ('ㄼ', 'ㄹㅂ'),
65
- ('ㄽ', 'ㄹㅅ'),
66
- ('ㄾ', 'ㄹㅌ'),
67
- ('ㄿ', 'ㄹㅍ'),
68
- ('ㅀ', 'ㄹㅎ'),
69
- ('ㅄ', 'ㅂㅅ'),
70
- ('ㅘ', 'ㅗㅏ'),
71
- ('ㅙ', 'ㅗㅐ'),
72
- ('ㅚ', 'ㅗㅣ'),
73
- ('ㅝ', 'ㅜㅓ'),
74
- ('ㅞ', 'ㅜㅔ'),
75
- ('ㅟ', 'ㅜㅣ'),
76
- ('ㅢ', 'ㅡㅣ'),
77
- ('ㅑ', 'ㅣㅏ'),
78
- ('ㅒ', 'ㅣㅐ'),
79
- ('ㅕ', 'ㅣㅓ'),
80
- ('ㅖ', 'ㅣㅔ'),
81
- ('ㅛ', 'ㅣㅗ'),
82
- ('ㅠ', 'ㅣㅜ')
83
- ]]
84
-
85
- # List of (Latin alphabet, hangul) pairs:
86
- _latin_to_hangul = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
87
- ('a', '에이'),
88
- ('b', '비'),
89
- ('c', '시'),
90
- ('d', '디'),
91
- ('e', '이'),
92
- ('f', '에프'),
93
- ('g', '지'),
94
- ('h', '에이치'),
95
- ('i', '아이'),
96
- ('j', '제이'),
97
- ('k', '케이'),
98
- ('l', '엘'),
99
- ('m', '엠'),
100
- ('n', '엔'),
101
- ('o', '오'),
102
- ('p', '피'),
103
- ('q', '큐'),
104
- ('r', '아르'),
105
- ('s', '에스'),
106
- ('t', '티'),
107
- ('u', '유'),
108
- ('v', '브이'),
109
- ('w', '더블유'),
110
- ('x', '엑스'),
111
- ('y', '와이'),
112
- ('z', '제트')
113
- ]]
114
-
115
- # List of (Latin alphabet, bopomofo) pairs:
116
- _latin_to_bopomofo = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
117
- ('a', 'ㄟˉ'),
118
- ('b', 'ㄅㄧˋ'),
119
- ('c', 'ㄙㄧˉ'),
120
- ('d', 'ㄉㄧˋ'),
121
- ('e', 'ㄧˋ'),
122
- ('f', 'ㄝˊㄈㄨˋ'),
123
- ('g', 'ㄐㄧˋ'),
124
- ('h', 'ㄝˇㄑㄩˋ'),
125
- ('i', 'ㄞˋ'),
126
- ('j', 'ㄐㄟˋ'),
127
- ('k', 'ㄎㄟˋ'),
128
- ('l', 'ㄝˊㄛˋ'),
129
- ('m', 'ㄝˊㄇㄨˋ'),
130
- ('n', 'ㄣˉ'),
131
- ('o', 'ㄡˉ'),
132
- ('p', 'ㄆㄧˉ'),
133
- ('q', 'ㄎㄧㄡˉ'),
134
- ('r', 'ㄚˋ'),
135
- ('s', 'ㄝˊㄙˋ'),
136
- ('t', 'ㄊㄧˋ'),
137
- ('u', 'ㄧㄡˉ'),
138
- ('v', 'ㄨㄧˉ'),
139
- ('w', 'ㄉㄚˋㄅㄨˋㄌㄧㄡˋ'),
140
- ('x', 'ㄝˉㄎㄨˋㄙˋ'),
141
- ('y', 'ㄨㄞˋ'),
142
- ('z', 'ㄗㄟˋ')
143
- ]]
144
-
145
-
146
- # List of (bopomofo, romaji) pairs:
147
- _bopomofo_to_romaji = [(re.compile('%s' % x[0], re.IGNORECASE), x[1]) for x in [
148
- ('ㄅㄛ', 'p⁼wo'),
149
- ('ㄆㄛ', 'pʰwo'),
150
- ('ㄇㄛ', 'mwo'),
151
- ('ㄈㄛ', 'fwo'),
152
- ('ㄅ', 'p⁼'),
153
- ('ㄆ', 'pʰ'),
154
- ('ㄇ', 'm'),
155
- ('ㄈ', 'f'),
156
- ('ㄉ', 't⁼'),
157
- ('ㄊ', 'tʰ'),
158
- ('ㄋ', 'n'),
159
- ('ㄌ', 'l'),
160
- ('ㄍ', 'k⁼'),
161
- ('ㄎ', 'kʰ'),
162
- ('ㄏ', 'h'),
163
- ('ㄐ', 'ʧ⁼'),
164
- ('ㄑ', 'ʧʰ'),
165
- ('ㄒ', 'ʃ'),
166
- ('ㄓ', 'ʦ`⁼'),
167
- ('ㄔ', 'ʦ`ʰ'),
168
- ('ㄕ', 's`'),
169
- ('ㄖ', 'ɹ`'),
170
- ('ㄗ', 'ʦ⁼'),
171
- ('ㄘ', 'ʦʰ'),
172
- ('ㄙ', 's'),
173
- ('ㄚ', 'a'),
174
- ('ㄛ', 'o'),
175
- ('ㄜ', 'ə'),
176
- ('ㄝ', 'e'),
177
- ('ㄞ', 'ai'),
178
- ('ㄟ', 'ei'),
179
- ('ㄠ', 'au'),
180
- ('ㄡ', 'ou'),
181
- ('ㄧㄢ', 'yeNN'),
182
- ('ㄢ', 'aNN'),
183
- ('ㄧㄣ', 'iNN'),
184
- ('ㄣ', 'əNN'),
185
- ('ㄤ', 'aNg'),
186
- ('ㄧㄥ', 'iNg'),
187
- ('ㄨㄥ', 'uNg'),
188
- ('ㄩㄥ', 'yuNg'),
189
- ('ㄥ', 'əNg'),
190
- ('ㄦ', 'əɻ'),
191
- ('ㄧ', 'i'),
192
- ('ㄨ', 'u'),
193
- ('ㄩ', 'ɥ'),
194
- ('ˉ', '→'),
195
- ('ˊ', '↑'),
196
- ('ˇ', '↓↑'),
197
- ('ˋ', '↓'),
198
- ('˙', ''),
199
- (',', ','),
200
- ('。', '.'),
201
- ('!', '!'),
202
- ('?', '?'),
203
- ('—', '-')
204
- ]]
205
-
206
-
207
- def expand_abbreviations(text):
208
- for regex, replacement in _abbreviations:
209
- text = re.sub(regex, replacement, text)
210
- return text
211
-
212
-
213
- def lowercase(text):
214
- return text.lower()
215
-
216
-
217
- def collapse_whitespace(text):
218
- return re.sub(_whitespace_re, ' ', text)
219
-
220
-
221
- def convert_to_ascii(text):
222
- return unidecode(text)
223
-
224
-
225
- def japanese_to_romaji_with_accent(text):
226
- '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html'''
227
- sentences = re.split(_japanese_marks, text)
228
- marks = re.findall(_japanese_marks, text)
229
- text = ''
230
- for i, sentence in enumerate(sentences):
231
- if re.match(_japanese_characters, sentence):
232
- if text!='':
233
- text+=' '
234
- labels = pyopenjtalk.extract_fullcontext(sentence)
235
- for n, label in enumerate(labels):
236
- phoneme = re.search(r'\-([^\+]*)\+', label).group(1)
237
- if phoneme not in ['sil','pau']:
238
- text += phoneme.replace('ch','ʧ').replace('sh','ʃ').replace('cl','Q')
239
- else:
240
- continue
241
- n_moras = int(re.search(r'/F:(\d+)_', label).group(1))
242
- a1 = int(re.search(r"/A:(\-?[0-9]+)\+", label).group(1))
243
- a2 = int(re.search(r"\+(\d+)\+", label).group(1))
244
- a3 = int(re.search(r"\+(\d+)/", label).group(1))
245
- if re.search(r'\-([^\+]*)\+', labels[n + 1]).group(1) in ['sil','pau']:
246
- a2_next=-1
247
- else:
248
- a2_next = int(re.search(r"\+(\d+)\+", labels[n + 1]).group(1))
249
- # Accent phrase boundary
250
- if a3 == 1 and a2_next == 1:
251
- text += ' '
252
- # Falling
253
- elif a1 == 0 and a2_next == a2 + 1 and a2 != n_moras:
254
- text += '↓'
255
- # Rising
256
- elif a2 == 1 and a2_next == 2:
257
- text += '↑'
258
- if i<len(marks):
259
- text += unidecode(marks[i]).replace(' ','')
260
- return text
261
-
262
-
263
- def latin_to_hangul(text):
264
- for regex, replacement in _latin_to_hangul:
265
- text = re.sub(regex, replacement, text)
266
- return text
267
-
268
-
269
- def divide_hangul(text):
270
- for regex, replacement in _hangul_divided:
271
- text = re.sub(regex, replacement, text)
272
- return text
273
-
274
-
275
- def hangul_number(num, sino=True):
276
- '''Reference https://github.com/Kyubyong/g2pK'''
277
- num = re.sub(',', '', num)
278
-
279
- if num == '0':
280
- return '영'
281
- if not sino and num == '20':
282
- return '스무'
283
-
284
- digits = '123456789'
285
- names = '일이삼사오육칠팔구'
286
- digit2name = {d: n for d, n in zip(digits, names)}
287
-
288
- modifiers = '한 두 세 네 다섯 여섯 일곱 여덟 아홉'
289
- decimals = '열 스물 서른 마흔 쉰 예순 일흔 여든 아흔'
290
- digit2mod = {d: mod for d, mod in zip(digits, modifiers.split())}
291
- digit2dec = {d: dec for d, dec in zip(digits, decimals.split())}
292
-
293
- spelledout = []
294
- for i, digit in enumerate(num):
295
- i = len(num) - i - 1
296
- if sino:
297
- if i == 0:
298
- name = digit2name.get(digit, '')
299
- elif i == 1:
300
- name = digit2name.get(digit, '') + '십'
301
- name = name.replace('일십', '십')
302
- else:
303
- if i == 0:
304
- name = digit2mod.get(digit, '')
305
- elif i == 1:
306
- name = digit2dec.get(digit, '')
307
- if digit == '0':
308
- if i % 4 == 0:
309
- last_three = spelledout[-min(3, len(spelledout)):]
310
- if ''.join(last_three) == '':
311
- spelledout.append('')
312
- continue
313
- else:
314
- spelledout.append('')
315
- continue
316
- if i == 2:
317
- name = digit2name.get(digit, '') + '백'
318
- name = name.replace('일백', '백')
319
- elif i == 3:
320
- name = digit2name.get(digit, '') + '천'
321
- name = name.replace('일천', '천')
322
- elif i == 4:
323
- name = digit2name.get(digit, '') + '만'
324
- name = name.replace('일만', '만')
325
- elif i == 5:
326
- name = digit2name.get(digit, '') + '십'
327
- name = name.replace('일십', '십')
328
- elif i == 6:
329
- name = digit2name.get(digit, '') + '백'
330
- name = name.replace('일백', '백')
331
- elif i == 7:
332
- name = digit2name.get(digit, '') + '천'
333
- name = name.replace('일천', '천')
334
- elif i == 8:
335
- name = digit2name.get(digit, '') + '억'
336
- elif i == 9:
337
- name = digit2name.get(digit, '') + '십'
338
- elif i == 10:
339
- name = digit2name.get(digit, '') + '백'
340
- elif i == 11:
341
- name = digit2name.get(digit, '') + '천'
342
- elif i == 12:
343
- name = digit2name.get(digit, '') + '조'
344
- elif i == 13:
345
- name = digit2name.get(digit, '') + '십'
346
- elif i == 14:
347
- name = digit2name.get(digit, '') + '백'
348
- elif i == 15:
349
- name = digit2name.get(digit, '') + '천'
350
- spelledout.append(name)
351
- return ''.join(elem for elem in spelledout)
352
-
353
-
354
- def number_to_hangul(text):
355
- '''Reference https://github.com/Kyubyong/g2pK'''
356
- tokens = set(re.findall(r'(\d[\d,]*)([\uac00-\ud71f]+)', text))
357
- for token in tokens:
358
- num, classifier = token
359
- if classifier[:2] in _korean_classifiers or classifier[0] in _korean_classifiers:
360
- spelledout = hangul_number(num, sino=False)
361
- else:
362
- spelledout = hangul_number(num, sino=True)
363
- text = text.replace(f'{num}{classifier}', f'{spelledout}{classifier}')
364
- # digit by digit for remaining digits
365
- digits = '0123456789'
366
- names = '영일이삼사오육칠팔구'
367
- for d, n in zip(digits, names):
368
- text = text.replace(d, n)
369
- return text
370
-
371
-
372
- def number_to_chinese(text):
373
- numbers = re.findall(r'\d+(?:\.?\d+)?', text)
374
- for number in numbers:
375
- text = text.replace(number, cn2an.an2cn(number),1)
376
- return text
377
-
378
-
379
- def chinese_to_bopomofo(text):
380
- text=text.replace('、',',').replace(';',',').replace(':',',')
381
- words=jieba.lcut(text,cut_all=False)
382
- text=''
383
- for word in words:
384
- bopomofos=lazy_pinyin(word,BOPOMOFO)
385
- if not re.search('[\u4e00-\u9fff]',word):
386
- text+=word
387
- continue
388
- for i in range(len(bopomofos)):
389
- if re.match('[\u3105-\u3129]',bopomofos[i][-1]):
390
- bopomofos[i]+='ˉ'
391
- if text!='':
392
- text+=' '
393
- text+=''.join(bopomofos)
394
- return text
395
-
396
-
397
- def latin_to_bopomofo(text):
398
- for regex, replacement in _latin_to_bopomofo:
399
- text = re.sub(regex, replacement, text)
400
- return text
401
-
402
-
403
- def bopomofo_to_romaji(text):
404
- for regex, replacement in _bopomofo_to_romaji:
405
- text = re.sub(regex, replacement, text)
406
- return text
407
-
408
-
409
- def basic_cleaners(text):
410
- '''Basic pipeline that lowercases and collapses whitespace without transliteration.'''
411
- text = lowercase(text)
412
- text = collapse_whitespace(text)
413
- return text
414
-
415
-
416
- def transliteration_cleaners(text):
417
- '''Pipeline for non-English text that transliterates to ASCII.'''
418
- text = convert_to_ascii(text)
419
- text = lowercase(text)
420
- text = collapse_whitespace(text)
421
- return text
422
-
423
-
424
- def japanese_cleaners(text):
425
- text=japanese_to_romaji_with_accent(text)
426
- if re.match('[A-Za-z]',text[-1]):
427
- text += '.'
428
- return text
429
-
430
-
431
- def japanese_cleaners2(text):
432
- return japanese_cleaners(text).replace('ts','ʦ').replace('...','…')
433
-
434
-
435
- def korean_cleaners(text):
436
- '''Pipeline for Korean text'''
437
- text = latin_to_hangul(text)
438
- text = number_to_hangul(text)
439
- text = j2hcj(h2j(text))
440
- text = divide_hangul(text)
441
- if re.match('[\u3131-\u3163]',text[-1]):
442
- text += '.'
443
- return text
444
-
445
-
446
- def chinese_cleaners(text):
447
- '''Pipeline for Chinese text'''
448
- text=number_to_chinese(text)
449
- text=chinese_to_bopomofo(text)
450
- text=latin_to_bopomofo(text)
451
- if re.match('[ˉˊˇˋ˙]',text[-1]):
452
- text += '。'
453
- return text
454
-
455
-
456
- def zh_ja_mixture_cleaners(text):
457
- chinese_texts=re.findall(r'\[ZH\].*?\[ZH\]',text)
458
- japanese_texts=re.findall(r'\[JA\].*?\[JA\]',text)
459
- for chinese_text in chinese_texts:
460
- cleaned_text=number_to_chinese(chinese_text[4:-4])
461
- cleaned_text=chinese_to_bopomofo(cleaned_text)
462
- cleaned_text=latin_to_bopomofo(cleaned_text)
463
- cleaned_text=bopomofo_to_romaji(cleaned_text)
464
- cleaned_text=re.sub('i[aoe]',lambda x:'y'+x.group(0)[1:],cleaned_text)
465
- cleaned_text=re.sub('u[aoəe]',lambda x:'w'+x.group(0)[1:],cleaned_text)
466
- cleaned_text=re.sub('([ʦsɹ]`[⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ`'+x.group(2),cleaned_text).replace('ɻ','ɹ`')
467
- cleaned_text=re.sub('([ʦs][⁼ʰ]?)([→↓↑]+)',lambda x:x.group(1)+'ɹ'+x.group(2),cleaned_text)
468
- text = text.replace(chinese_text,cleaned_text+' ',1)
469
- for japanese_text in japanese_texts:
470
- cleaned_text=japanese_to_romaji_with_accent(japanese_text[4:-4]).replace('ts','ʦ').replace('u','ɯ').replace('...','…')
471
- text = text.replace(japanese_text,cleaned_text+' ',1)
472
- text=text[:-1]
473
- if re.match('[A-Za-zɯɹəɥ→↓↑]',text[-1]):
474
- text += '.'
475
- return text
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Artrajz/vits-simple-api/bert_vits2/text/symbols.py DELETED
@@ -1,198 +0,0 @@
1
- punctuation = ["!", "?", "…", ",", ".", "'", "-"]
2
- pu_symbols = punctuation + ["SP", "UNK"]
3
- pad = "_"
4
-
5
- # chinese
6
- zh_symbols = [
7
- "E",
8
- "En",
9
- "a",
10
- "ai",
11
- "an",
12
- "ang",
13
- "ao",
14
- "b",
15
- "c",
16
- "ch",
17
- "d",
18
- "e",
19
- "ei",
20
- "en",
21
- "eng",
22
- "er",
23
- "f",
24
- "g",
25
- "h",
26
- "i",
27
- "i0",
28
- "ia",
29
- "ian",
30
- "iang",
31
- "iao",
32
- "ie",
33
- "in",
34
- "ing",
35
- "iong",
36
- "ir",
37
- "iu",
38
- "j",
39
- "k",
40
- "l",
41
- "m",
42
- "n",
43
- "o",
44
- "ong",
45
- "ou",
46
- "p",
47
- "q",
48
- "r",
49
- "s",
50
- "sh",
51
- "t",
52
- "u",
53
- "ua",
54
- "uai",
55
- "uan",
56
- "uang",
57
- "ui",
58
- "un",
59
- "uo",
60
- "v",
61
- "van",
62
- "ve",
63
- "vn",
64
- "w",
65
- "x",
66
- "y",
67
- "z",
68
- "zh",
69
- "AA",
70
- "EE",
71
- "OO",
72
- ]
73
- num_zh_tones = 6
74
-
75
- # japanese
76
- ja_symbols_legacy = ['I', 'N', 'U', 'a', 'b', 'by', 'ch', 'cl', 'd', 'dy', 'e', 'f', 'g', 'gy', 'h', 'hy', 'i', 'j',
77
- 'k', 'ky',
78
- 'm', 'my', 'n', 'ny', 'o', 'p', 'py', 'r', 'ry', 's', 'sh', 't', 'ts', 'u', 'V', 'w', 'y', 'z']
79
- ja_symbols = [
80
- "N",
81
- "a",
82
- "a:",
83
- "b",
84
- "by",
85
- "ch",
86
- "d",
87
- "dy",
88
- "e",
89
- "e:",
90
- "f",
91
- "g",
92
- "gy",
93
- "h",
94
- "hy",
95
- "i",
96
- "i:",
97
- "j",
98
- "k",
99
- "ky",
100
- "m",
101
- "my",
102
- "n",
103
- "ny",
104
- "o",
105
- "o:",
106
- "p",
107
- "py",
108
- "q",
109
- "r",
110
- "ry",
111
- "s",
112
- "sh",
113
- "t",
114
- "ts",
115
- "ty",
116
- "u",
117
- "u:",
118
- "w",
119
- "y",
120
- "z",
121
- "zy",
122
- ]
123
- num_ja_tones = 1
124
-
125
- # English
126
- en_symbols = [
127
- "aa",
128
- "ae",
129
- "ah",
130
- "ao",
131
- "aw",
132
- "ay",
133
- "b",
134
- "ch",
135
- "d",
136
- "dh",
137
- "eh",
138
- "er",
139
- "ey",
140
- "f",
141
- "g",
142
- "hh",
143
- "ih",
144
- "iy",
145
- "jh",
146
- "k",
147
- "l",
148
- "m",
149
- "n",
150
- "ng",
151
- "ow",
152
- "oy",
153
- "p",
154
- "r",
155
- "s",
156
- "sh",
157
- "t",
158
- "th",
159
- "uh",
160
- "uw",
161
- "V",
162
- "w",
163
- "y",
164
- "z",
165
- "zh",
166
- ]
167
- num_en_tones = 4
168
-
169
- normal_symbols = sorted(set(zh_symbols + ja_symbols + en_symbols))
170
- symbols = [pad] + normal_symbols + pu_symbols
171
- sil_phonemes_ids = [symbols.index(i) for i in pu_symbols]
172
-
173
- # legacy
174
- normal_symbols_legacy = sorted(set(zh_symbols + ja_symbols_legacy + en_symbols))
175
- symbols_legacy = [pad] + normal_symbols_legacy + pu_symbols
176
- sil_phonemes_ids_legacy = [symbols_legacy.index(i) for i in pu_symbols]
177
-
178
- # combine all tones
179
- num_tones = num_zh_tones + num_ja_tones + num_en_tones
180
-
181
- # language maps
182
- language_id_map = {"zh": 0, "ja": 1, "en": 2}
183
- num_languages = len(language_id_map.keys())
184
-
185
- language_tone_start_map = {
186
- "zh": 0,
187
- "ja": num_zh_tones,
188
- "en": num_zh_tones + num_ja_tones,
189
- }
190
-
191
- if __name__ == "__main__":
192
- zh = set(zh_symbols)
193
- en = set(en_symbols)
194
- ja = set(ja_symbols)
195
- print(zh)
196
- print(en)
197
- print(ja)
198
- print(sorted(zh & en))
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Awesimo/jojogan/op/fused_act_cpu.py DELETED
@@ -1,41 +0,0 @@
1
- import os
2
-
3
- import torch
4
- from torch import nn
5
- from torch.autograd import Function
6
- from torch.nn import functional as F
7
-
8
-
9
- module_path = os.path.dirname(__file__)
10
-
11
-
12
- class FusedLeakyReLU(nn.Module):
13
- def __init__(self, channel, negative_slope=0.2, scale=2 ** 0.5):
14
- super().__init__()
15
-
16
- self.bias = nn.Parameter(torch.zeros(channel))
17
- self.negative_slope = negative_slope
18
- self.scale = scale
19
-
20
- def forward(self, input):
21
- return fused_leaky_relu(input, self.bias, self.negative_slope, self.scale)
22
-
23
- def fused_leaky_relu(input, bias=None, negative_slope=0.2, scale=2 ** 0.5):
24
- if input.device.type == "cpu":
25
- if bias is not None:
26
- rest_dim = [1] * (input.ndim - bias.ndim - 1)
27
- return (
28
- F.leaky_relu(
29
- input + bias.view(1, bias.shape[0], *rest_dim), negative_slope=0.2
30
- )
31
- * scale
32
- )
33
-
34
- else:
35
- return F.leaky_relu(input, negative_slope=0.2) * scale
36
-
37
- else:
38
- return FusedLeakyReLUFunction.apply(
39
- input.contiguous(), bias, negative_slope, scale
40
- )
41
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BIASLab/sars-cov-2-classification-fcgr/src/model_loader.py DELETED
@@ -1,39 +0,0 @@
1
- """Load model from /models"""
2
- import importlib
3
- import os
4
-
5
- from pathlib import Path
6
- from typing import Optional
7
-
8
- from tensorflow.python.eager.context import num_gpus
9
-
10
- OMMIT = {".ipynb_checkpoints","__pycache__","__init__","custom_layers","custom_losses"} # files to be ommited
11
- BASE_DIR = Path(__file__).resolve().parent # base directory unsupervised-dna
12
- BASE_MODELS = BASE_DIR.joinpath("models") # models directory
13
-
14
- class ModelLoader:
15
- "Load models for unsupervised learning using FCGR (grayscale images)"
16
-
17
- AVAILABLE_MODELS = [model[:-3] for model in os.listdir(BASE_MODELS) if all([ommit not in model for ommit in OMMIT])]
18
-
19
- def __call__(self, model_name: str, n_outputs: int, weights_path: Optional[Path]=None):
20
- "Get keras model"
21
-
22
- # Call class of model to load
23
- get_model = getattr(
24
- importlib.import_module(
25
- f"src.models.{model_name}"
26
- ),
27
- "get_model")
28
-
29
- # Load architecture
30
- model = get_model(n_outputs)
31
-
32
- # Load weights to the model from file
33
- if weights_path is not None:
34
- print(f"\n **load model weights_path** : {weights_path}")
35
- model.load_weights(weights_path)
36
-
37
- print("\n**Model created**")
38
-
39
- return model
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/BraydenMoore/MARCI-NFL-Betting/Source/Models/__init__.py DELETED
File without changes
spaces/CVPR/LIVE/thrust/thrust/detail/get_iterator_value.h DELETED
@@ -1,53 +0,0 @@
1
- #pragma once
2
- /*
3
- * Copyright 2008-2016 NVIDIA Corporation
4
- *
5
- * Licensed under the Apache License, Version 2.0 (the "License");
6
- * you may not use this file except in compliance with the License.
7
- * You may obtain a copy of the License at
8
- *
9
- * http://www.apache.org/licenses/LICENSE-2.0
10
- *
11
- * Unless required by applicable law or agreed to in writing, software
12
- * distributed under the License is distributed on an "AS IS" BASIS,
13
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
- * See the License for the specific language governing permissions and
15
- * limitations under the License.
16
- */
17
-
18
- #include <thrust/detail/config.h>
19
- #include <thrust/iterator/iterator_traits.h>
20
- #include <thrust/execution_policy.h>
21
- #include <thrust/detail/type_traits/pointer_traits.h>
22
- #include <thrust/system/detail/generic/memory.h> // for get_value()
23
-
24
- namespace thrust {
25
- namespace detail {
26
-
27
- // get_iterator_value specialization on iterators
28
- // --------------------------------------------------
29
- // it is okay to dereference iterator in the usual way
30
- template<typename DerivedPolicy, typename Iterator>
31
- __host__ __device__
32
- typename thrust::iterator_traits<Iterator>::value_type
33
- get_iterator_value(thrust::execution_policy<DerivedPolicy> &, Iterator it)
34
- {
35
- return *it;
36
- } // get_iterator_value(exec,Iterator);
37
-
38
- // get_iterator_value specialization on pointer
39
- // ----------------------------------------------
40
- // we can't just dereference a pointer in the usual way, because
41
- // it may point to a location in the device memory.
42
- // we use get_value(exec,pointer*) function
43
- // to perform a dereferencing consistent with the execution policy
44
- template<typename DerivedPolicy, typename Pointer>
45
- __host__ __device__
46
- typename thrust::detail::pointer_traits<Pointer*>::element_type
47
- get_iterator_value(thrust::execution_policy<DerivedPolicy> &exec, Pointer* ptr)
48
- {
49
- return get_value(derived_cast(exec),ptr);
50
- } // get_iterator_value(exec,Pointer*)
51
-
52
- } // namespace detail
53
- } // namespace thrust
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/iterator/detail/device_system_tag.h DELETED
@@ -1,40 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // #include the device system's execution_policy header
22
- #define __THRUST_DEVICE_SYSTEM_TAG_HEADER <__THRUST_DEVICE_SYSTEM_ROOT/detail/execution_policy.h>
23
- #include __THRUST_DEVICE_SYSTEM_TAG_HEADER
24
- #undef __THRUST_DEVICE_SYSTEM_TAG_HEADER
25
-
26
- namespace thrust
27
- {
28
-
29
- typedef thrust::system::__THRUST_DEVICE_SYSTEM_NAMESPACE::tag device_system_tag;
30
-
31
- } // end thrust
32
-
33
- // TODO remove this in 1.8.0
34
- namespace thrust
35
- {
36
-
37
- typedef THRUST_DEPRECATED device_system_tag device_space_tag;
38
-
39
- } // end thrust
40
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/detail/sequential/gather.h DELETED
@@ -1,22 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
- #pragma once
18
-
19
- #include <thrust/detail/config.h>
20
-
21
- // this system has no special gather functions
22
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/LIVE/thrust/thrust/system/omp/detail/for_each.h DELETED
@@ -1,60 +0,0 @@
1
- /*
2
- * Copyright 2008-2013 NVIDIA Corporation
3
- *
4
- * Licensed under the Apache License, Version 2.0 (the "License");
5
- * you may not use this file except in compliance with the License.
6
- * You may obtain a copy of the License at
7
- *
8
- * http://www.apache.org/licenses/LICENSE-2.0
9
- *
10
- * Unless required by applicable law or agreed to in writing, software
11
- * distributed under the License is distributed on an "AS IS" BASIS,
12
- * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
- * See the License for the specific language governing permissions and
14
- * limitations under the License.
15
- */
16
-
17
-
18
- /*! \file for_each.h
19
- * \brief Defines the interface for a function that executes a
20
- * function or functional for each value in a given range.
21
- */
22
-
23
- #pragma once
24
-
25
- #include <thrust/detail/config.h>
26
- #include <thrust/system/omp/detail/execution_policy.h>
27
-
28
- namespace thrust
29
- {
30
- namespace system
31
- {
32
- namespace omp
33
- {
34
- namespace detail
35
- {
36
-
37
- template<typename DerivedPolicy,
38
- typename RandomAccessIterator,
39
- typename UnaryFunction>
40
- RandomAccessIterator for_each(execution_policy<DerivedPolicy> &exec,
41
- RandomAccessIterator first,
42
- RandomAccessIterator last,
43
- UnaryFunction f);
44
-
45
- template<typename DerivedPolicy,
46
- typename RandomAccessIterator,
47
- typename Size,
48
- typename UnaryFunction>
49
- RandomAccessIterator for_each_n(execution_policy<DerivedPolicy> &exec,
50
- RandomAccessIterator first,
51
- Size n,
52
- UnaryFunction f);
53
-
54
- } // end namespace detail
55
- } // end namespace omp
56
- } // end namespace system
57
- } // end namespace thrust
58
-
59
- #include <thrust/system/omp/detail/for_each.inl>
60
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/CVPR/drawings-to-human/frontend/src/lib/utils.ts DELETED
@@ -1,3 +0,0 @@
1
- export function randomSeed() {
2
- return BigInt(13248873089935215612 & (((1 << 63) - 1) * Math.random()));
3
- }
 
 
 
 
spaces/CVPR/lama-example/saicinpainting/evaluation/masks/countless/README.md DELETED
@@ -1,25 +0,0 @@
1
- [![Build Status](https://travis-ci.org/william-silversmith/countless.svg?branch=master)](https://travis-ci.org/william-silversmith/countless)
2
-
3
- Python COUNTLESS Downsampling
4
- =============================
5
-
6
- To install:
7
-
8
- `pip install -r requirements.txt`
9
-
10
- To test:
11
-
12
- `python test.py`
13
-
14
- To benchmark countless2d:
15
-
16
- `python python/countless2d.py python/images/gray_segmentation.png`
17
-
18
- To benchmark countless3d:
19
-
20
- `python python/countless3d.py`
21
-
22
- Adjust N and the list of algorithms inside each script to modify the run parameters.
23
-
24
-
25
- Python3 is slightly faster than Python2.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Caoyunkang/Segment-Any-Anomaly/app.py DELETED
@@ -1,112 +0,0 @@
1
- import os
2
- import sys
3
-
4
- os.chdir('GroundingDINO/')
5
- os.system('pip install -e .')
6
- os.chdir('../SAM')
7
- os.system('pip install -e .')
8
- os.system('pip install opencv-python pycocotools matplotlib onnxruntime onnx ipykernel gradio loguru transformers timm addict yapf loguru tqdm scikit-image scikit-learn pandas tensorboard seaborn open_clip_torch einops')
9
- os.system('pip install torch==1.10.0 torchvision==0.11.1 -f https://download.pytorch.org/whl/cu113/torch_stable.html')
10
-
11
- os.chdir('..')
12
- os.mkdir('weights')
13
- os.chdir('./weights')
14
- os.system('wget https://dl.fbaipublicfiles.com/segment_anything/sam_vit_h_4b8939.pth')
15
- os.system('wget https://github.com/IDEA-Research/GroundingDINO/releases/download/v0.1.0-alpha/groundingdino_swint_ogc.pth')
16
- os.chdir('..')
17
-
18
- import sys
19
- sys.path.append('./GroundingDINO')
20
- sys.path.append('./SAM')
21
- sys.path.append('.')
22
- import matplotlib.pyplot as plt
23
- import SAA as SegmentAnyAnomaly
24
- from utils.training_utils import *
25
- import os
26
-
27
-
28
-
29
- dino_config_file = 'GroundingDINO/groundingdino/config/GroundingDINO_SwinT_OGC.py'
30
- dino_checkpoint = 'weights/groundingdino_swint_ogc.pth'
31
- sam_checkpoint = 'weights/sam_vit_h_4b8939.pth'
32
- box_threshold = 0.1
33
- text_threshold = 0.1
34
- eval_resolution = 1024
35
- device = f"cpu"
36
- root_dir = 'result'
37
-
38
- # get the model
39
- model = SegmentAnyAnomaly.Model(
40
- dino_config_file=dino_config_file,
41
- dino_checkpoint=dino_checkpoint,
42
- sam_checkpoint=sam_checkpoint,
43
- box_threshold=box_threshold,
44
- text_threshold=text_threshold,
45
- out_size=eval_resolution,
46
- device=device,
47
- )
48
-
49
- model = model.to(device)
50
-
51
- import cv2
52
- import numpy as np
53
- import gradio as gr
54
-
55
-
56
- def process_image(heatmap, image):
57
- heatmap = heatmap.astype(float)
58
- heatmap = (heatmap - heatmap.min()) / heatmap.max() * 255
59
- heatmap = heatmap.astype(np.uint8)
60
- heat_map = cv2.applyColorMap(heatmap, cv2.COLORMAP_JET)
61
- visz_map = cv2.addWeighted(heat_map, 0.5, image, 0.5, 0)
62
- visz_map = cv2.cvtColor(visz_map, cv2.COLOR_BGR2RGB)
63
-
64
- visz_map = visz_map.astype(float)
65
- visz_map = visz_map / visz_map.max()
66
- return visz_map
67
-
68
-
69
- def func(image, anomaly_description, object_name, object_number, mask_number, area_threashold):
70
- textual_prompts = [
71
- [anomaly_description, object_name]
72
- ] # detect prompts, filtered phrase
73
- property_text_prompts = f'the image of {object_name} have {object_number} dissimilar {object_name}, with a maximum of {mask_number} anomaly. The anomaly would not exceed {area_threashold} object area. '
74
-
75
- model.set_ensemble_text_prompts(textual_prompts, verbose=True)
76
- model.set_property_text_prompts(property_text_prompts, verbose=True)
77
-
78
- image = cv2.resize(image, (eval_resolution, eval_resolution))
79
- score, appendix = model(image)
80
- similarity_map = appendix['similarity_map']
81
-
82
- image_show = cv2.resize(image, (eval_resolution, eval_resolution))
83
- similarity_map = cv2.resize(similarity_map, (eval_resolution, eval_resolution))
84
- score = cv2.resize(score, (eval_resolution, eval_resolution))
85
-
86
- viz_score = process_image(score, image_show)
87
- viz_sim = process_image(similarity_map, image_show)
88
-
89
- return viz_score, viz_sim
90
-
91
-
92
- with gr.Blocks() as demo:
93
- with gr.Row():
94
- with gr.Column():
95
- image = gr.Image(label="Image")
96
- anomaly_description = gr.Textbox(label="Anomaly Description (e.g. color defect. hole. black defect. wick hole. spot. )")
97
- object_name = gr.Textbox(label="Object Name (e.g. candle)")
98
- object_number = gr.Textbox(label="Object Number (e.g. 4)")
99
- mask_number = gr.Textbox(label="Mask Number (e.g. 1)")
100
- area_threashold = gr.Textbox(label="Area Threshold (e.g. 0.3)")
101
- with gr.Column():
102
- anomaly_score = gr.Image(label="Anomaly Score")
103
- saliency_map = gr.Image(label="Saliency Map")
104
-
105
- greet_btn = gr.Button("Inference")
106
- greet_btn.click(fn=func,
107
- inputs=[image, anomaly_description, object_name, object_number, mask_number, area_threashold],
108
- outputs=[anomaly_score, saliency_map], api_name="Segment-Any-Anomaly")
109
-
110
- demo.launch()
111
-
112
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
spaces/Carlos056/Cara/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: Cara
3
- emoji: 🐨
4
- colorFrom: blue
5
- colorTo: gray
6
- sdk: static
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
 
 
 
 
 
 
 
 
 
 
spaces/Chakri1997/ChatGPT-prompt-generator/README.md DELETED
@@ -1,14 +0,0 @@
1
- ---
2
- title: ChatGPT Prompt Generator
3
- emoji: 👨🏻‍🎤
4
- colorFrom: purple
5
- colorTo: pink
6
- sdk: gradio
7
- sdk_version: 3.16.2
8
- app_file: app.py
9
- pinned: false
10
- license: apache-2.0
11
- duplicated_from: merve/ChatGPT-prompt-generator
12
- ---
13
-
14
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/ChallengeHub/Chinese-LangChain/app.py DELETED
@@ -1,226 +0,0 @@
1
- import os
2
- import shutil
3
-
4
- from app_modules.presets import *
5
- from clc.langchain_application import LangChainApplication
6
-
7
-
8
- # 修改成自己的配置!!!
9
- class LangChainCFG:
10
- llm_model_name = 'THUDM/chatglm-6b-int4-qe' # 本地模型文件 or huggingface远程仓库
11
- embedding_model_name = 'GanymedeNil/text2vec-large-chinese' # 检索模型文件 or huggingface远程仓库
12
- vector_store_path = './cache'
13
- docs_path = './docs'
14
- kg_vector_stores = {
15
- '中文维基百科': './cache/zh_wikipedia',
16
- '大规模金融研报': './cache/financial_research_reports',
17
- '初始化': './cache',
18
- } # 可以替换成自己的知识库,如果没有需要设置为None
19
- # kg_vector_stores=None
20
- patterns = ['模型问答', '知识库问答'] #
21
-
22
-
23
- config = LangChainCFG()
24
- application = LangChainApplication(config)
25
- application.source_service.init_source_vector()
26
-
27
-
28
- def get_file_list():
29
- if not os.path.exists("docs"):
30
- return []
31
- return [f for f in os.listdir("docs")]
32
-
33
-
34
- file_list = get_file_list()
35
-
36
-
37
- def upload_file(file):
38
- if not os.path.exists("docs"):
39
- os.mkdir("docs")
40
- filename = os.path.basename(file.name)
41
- shutil.move(file.name, "docs/" + filename)
42
- # file_list首位插入新上传的文件
43
- file_list.insert(0, filename)
44
- application.source_service.add_document("docs/" + filename)
45
- return gr.Dropdown.update(choices=file_list, value=filename)
46
-
47
-
48
- def set_knowledge(kg_name, history):
49
- try:
50
- application.source_service.load_vector_store(config.kg_vector_stores[kg_name])
51
- msg_status = f'{kg_name}知识库已成功加载'
52
- except Exception as e:
53
- print(e)
54
- msg_status = f'{kg_name}知识库未成功加载'
55
- return history + [[None, msg_status]]
56
-
57
-
58
- def clear_session():
59
- return '', None
60
-
61
-
62
- def predict(input,
63
- large_language_model,
64
- embedding_model,
65
- top_k,
66
- use_web,
67
- use_pattern,
68
- history=None):
69
- # print(large_language_model, embedding_model)
70
- print(input)
71
- if history == None:
72
- history = []
73
-
74
- if use_web == '使用':
75
- web_content = application.source_service.search_web(query=input)
76
- else:
77
- web_content = ''
78
- search_text = ''
79
- if use_pattern == '模型问答':
80
- result = application.get_llm_answer(query=input, web_content=web_content)
81
- history.append((input, result))
82
- search_text += web_content
83
- return '', history, history, search_text
84
-
85
- else:
86
- resp = application.get_knowledge_based_answer(
87
- query=input,
88
- history_len=1,
89
- temperature=0.1,
90
- top_p=0.9,
91
- top_k=top_k,
92
- web_content=web_content,
93
- chat_history=history
94
- )
95
- history.append((input, resp['result']))
96
- for idx, source in enumerate(resp['source_documents'][:4]):
97
- sep = f'----------【搜索结果{idx + 1}:】---------------\n'
98
- search_text += f'{sep}\n{source.page_content}\n\n'
99
- print(search_text)
100
- search_text += "----------【网络检索内容】-----------\n"
101
- search_text += web_content
102
- return '', history, history, search_text
103
-
104
-
105
- with open("assets/custom.css", "r", encoding="utf-8") as f:
106
- customCSS = f.read()
107
- with gr.Blocks(css=customCSS, theme=small_and_beautiful_theme) as demo:
108
- gr.Markdown("""<h1><center>Chinese-LangChain</center></h1>
109
- <center><font size=3>
110
- </center></font>
111
- """)
112
- state = gr.State()
113
-
114
- with gr.Row():
115
- with gr.Column(scale=1):
116
- embedding_model = gr.Dropdown([
117
- "text2vec-base"
118
- ],
119
- label="Embedding model",
120
- value="text2vec-base")
121
-
122
- large_language_model = gr.Dropdown(
123
- [
124
- "ChatGLM-6B-int4",
125
- ],
126
- label="large language model",
127
- value="ChatGLM-6B-int4")
128
-
129
- top_k = gr.Slider(1,
130
- 20,
131
- value=4,
132
- step=1,
133
- label="检索top-k文档",
134
- interactive=True)
135
-
136
- use_web = gr.Radio(["使用", "不使用"], label="web search",
137
- info="是否使用网络搜索,使用时确保网络通常",
138
- value="不使用"
139
- )
140
- use_pattern = gr.Radio(
141
- [
142
- '模型问答',
143
- '知识库问答',
144
- ],
145
- label="模式",
146
- value='模型问答',
147
- interactive=True)
148
-
149
- kg_name = gr.Radio(list(config.kg_vector_stores.keys()),
150
- label="知识库",
151
- value=None,
152
- info="使用知识库问答���请加载知识库",
153
- interactive=True)
154
- set_kg_btn = gr.Button("加载知识库")
155
-
156
- file = gr.File(label="将文件上传到知识库库,内容要尽量匹配",
157
- visible=True,
158
- file_types=['.txt', '.md', '.docx', '.pdf']
159
- )
160
-
161
- with gr.Column(scale=4):
162
- with gr.Row():
163
- chatbot = gr.Chatbot(label='Chinese-LangChain').style(height=400)
164
- with gr.Row():
165
- message = gr.Textbox(label='请输入问题')
166
- with gr.Row():
167
- clear_history = gr.Button("🧹 清除历史对话")
168
- send = gr.Button("🚀 发送")
169
- with gr.Row():
170
- gr.Markdown("""提醒:<br>
171
- [Chinese-LangChain](https://github.com/yanqiangmiffy/Chinese-LangChain) <br>
172
- 有任何使用问题[Github Issue区](https://github.com/yanqiangmiffy/Chinese-LangChain)进行反馈. <br>
173
- """)
174
- with gr.Column(scale=2):
175
- search = gr.Textbox(label='搜索结果')
176
-
177
- # ============= 触发动作=============
178
- file.upload(upload_file,
179
- inputs=file,
180
- outputs=None)
181
- set_kg_btn.click(
182
- set_knowledge,
183
- show_progress=True,
184
- inputs=[kg_name, chatbot],
185
- outputs=chatbot
186
- )
187
- # 发送按钮 提交
188
- send.click(predict,
189
- inputs=[
190
- message,
191
- large_language_model,
192
- embedding_model,
193
- top_k,
194
- use_web,
195
- use_pattern,
196
- state
197
- ],
198
- outputs=[message, chatbot, state, search])
199
-
200
- # 清空历史对话按钮 提交
201
- clear_history.click(fn=clear_session,
202
- inputs=[],
203
- outputs=[chatbot, state],
204
- queue=False)
205
-
206
- # 输入框 回车
207
- message.submit(predict,
208
- inputs=[
209
- message,
210
- large_language_model,
211
- embedding_model,
212
- top_k,
213
- use_web,
214
- use_pattern,
215
- state
216
- ],
217
- outputs=[message, chatbot, state, search])
218
-
219
- demo.queue(concurrency_count=2).launch(
220
- server_name='0.0.0.0',
221
- share=False,
222
- show_error=True,
223
- debug=True,
224
- enable_queue=True,
225
- inbrowser=True,
226
- )
spaces/CikeyQI/meme-api/docs/update_doc.py DELETED
@@ -1,145 +0,0 @@
1
- import asyncio
2
- from pathlib import Path
3
- from typing import Any, Dict
4
-
5
- import filetype
6
-
7
- from meme_generator import get_memes
8
- from meme_generator.meme import Meme
9
-
10
- memes = sorted(get_memes(), key=lambda meme: meme.key)
11
-
12
- image_path = Path("docs/images")
13
-
14
-
15
- async def generate_preview_images():
16
- for meme in memes:
17
-
18
- async def generate_image(name: str, args: Dict[str, Any] = {}):
19
- for path in image_path.iterdir():
20
- if name == path.stem:
21
- return
22
-
23
- result = await meme.generate_preview(args=args)
24
- content = result.getvalue()
25
- ext = filetype.guess_extension(content)
26
- filename = f"{name}.{ext}"
27
- with open(image_path / filename, "wb") as f:
28
- f.write(content)
29
-
30
- await generate_image(f"{meme.key}")
31
- if args := meme.params_type.args_type:
32
- if instances := args.instances:
33
- for i, instance in enumerate(instances):
34
- await generate_image(f"{meme.key}_instance{i}", instance.dict())
35
-
36
-
37
- def meme_doc(meme: Meme) -> str:
38
- keywords = "、".join([f"`{keyword}`" for keyword in meme.keywords])
39
-
40
- patterns = "、".join([f"`{pattern}`" for pattern in meme.patterns])
41
-
42
- image_num = f"`{meme.params_type.min_images}`"
43
- if meme.params_type.max_images > meme.params_type.min_images:
44
- image_num += f" ~ `{meme.params_type.max_images}`"
45
-
46
- text_num = f"`{meme.params_type.min_texts}`"
47
- if meme.params_type.max_texts > meme.params_type.min_texts:
48
- text_num += f" ~ `{meme.params_type.max_texts}`"
49
-
50
- default_texts = (
51
- f"{', '.join([f'`{text}`' for text in meme.params_type.default_texts])}"
52
- )
53
-
54
- def arg_info(name: str, info: Dict[str, Any]) -> str:
55
- text = (
56
- f" - `{name}`\n"
57
- f" - 描述:{info.get('description', '')}\n"
58
- f" - 类型:`{info.get('type', '')}`\n"
59
- f" - 默认值:`{info.get('default', '')}`"
60
- )
61
- if enum := info.get("enum", []):
62
- assert isinstance(enum, list)
63
- text += f"\n - 可选值:{'、'.join([f'`{e}`' for e in enum])}"
64
- return text
65
-
66
- if args := meme.params_type.args_type:
67
- model = args.model
68
- properties: Dict[str, Dict[str, Any]] = (
69
- model.schema().get("properties", {}).copy()
70
- )
71
- properties.pop("user_infos")
72
- args_info = "\n" + "\n".join(
73
- [arg_info(name, info) for name, info in properties.items()]
74
- )
75
- else:
76
- args_info = ""
77
-
78
- if args := meme.params_type.args_type:
79
- parser = args.parser
80
- parser_info = parser.format_help()
81
- parser_info = parser_info.replace("update_doc.py", f"meme generate {meme.key}")
82
- else:
83
- parser_info = ""
84
-
85
- def image_doc(name: str) -> str:
86
- for path in image_path.iterdir():
87
- if name == path.stem:
88
- img_path = path.relative_to(Path("docs"))
89
- return (
90
- '<div align="left">\n'
91
- f' <img src="{img_path}" width="200" />\n'
92
- "</div>"
93
- )
94
- return ""
95
-
96
- preview_image = ""
97
- if args := meme.params_type.args_type:
98
- if instances := args.instances:
99
- preview_image = "\n\n".join(
100
- [
101
- f"> 参数:{instance.json(exclude={'user_infos'})}\n"
102
- + image_doc(meme.key + f"_instance{i}")
103
- for i, instance in enumerate(instances)
104
- ]
105
- )
106
- if not preview_image:
107
- preview_image = image_doc(meme.key)
108
-
109
- return (
110
- f"## {meme.key}\n\n"
111
- + f"- 关键词:{keywords}\n"
112
- + (f"- 正则表达式:{patterns}\n" if patterns else "")
113
- + f"- 需要图片数目:{image_num}\n"
114
- + f"- 需要文字数目:{text_num}\n"
115
- + (f"- 默认文字:[{default_texts}]\n" if default_texts else "")
116
- + (f"- 其他参数:{args_info}\n" if args_info else "")
117
- + (f"- 其他参数(命令行选项):\n```shell\n{parser_info}```\n\n" if parser_info else "")
118
- + "- 预览:\n"
119
- + f"{preview_image}"
120
- )
121
-
122
-
123
- def generate_toc():
124
- return "\n".join(
125
- f"{i}. [{meme.key} ({'/'.join(meme.keywords)})](#{meme.key})"
126
- for i, meme in enumerate(memes, start=1)
127
- )
128
-
129
-
130
- def generate_doc():
131
- doc = "# 表情列表\n\n以下为内置表情的关键词、所需参数等信息及表情预览\n\n按照表情的 `key` 排列\n\n\n"
132
- doc += generate_toc() + "\n\n\n"
133
- doc += "\n\n".join(meme_doc(meme) for meme in memes) + "\n"
134
- with open("docs/memes.md", "w") as f:
135
- f.write(doc)
136
-
137
-
138
- async def main():
139
- await generate_preview_images()
140
- generate_doc()
141
-
142
-
143
- if __name__ == "__main__":
144
- loop = asyncio.new_event_loop()
145
- loop.run_until_complete(main())
spaces/Coweed/GoodTrip/README.md DELETED
@@ -1,10 +0,0 @@
1
- ---
2
- title: GoodTrip
3
- emoji: 🚀
4
- colorFrom: green
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
spaces/Cropinky/esrgan/realesrgan/data/realesrgan_dataset.py DELETED
@@ -1,192 +0,0 @@
1
- import cv2
2
- import math
3
- import numpy as np
4
- import os
5
- import os.path as osp
6
- import random
7
- import time
8
- import torch
9
- from basicsr.data.degradations import circular_lowpass_kernel, random_mixed_kernels
10
- from basicsr.data.transforms import augment
11
- from basicsr.utils import FileClient, get_root_logger, imfrombytes, img2tensor
12
- from basicsr.utils.registry import DATASET_REGISTRY
13
- from torch.utils import data as data
14
-
15
-
16
- @DATASET_REGISTRY.register()
17
- class RealESRGANDataset(data.Dataset):
18
- """Dataset used for Real-ESRGAN model:
19
- Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data.
20
-
21
- It loads gt (Ground-Truth) images, and augments them.
22
- It also generates blur kernels and sinc kernels for generating low-quality images.
23
- Note that the low-quality images are processed in tensors on GPUS for faster processing.
24
-
25
- Args:
26
- opt (dict): Config for train datasets. It contains the following keys:
27
- dataroot_gt (str): Data root path for gt.
28
- meta_info (str): Path for meta information file.
29
- io_backend (dict): IO backend type and other kwarg.
30
- use_hflip (bool): Use horizontal flips.
31
- use_rot (bool): Use rotation (use vertical flip and transposing h and w for implementation).
32
- Please see more options in the codes.
33
- """
34
-
35
- def __init__(self, opt):
36
- super(RealESRGANDataset, self).__init__()
37
- self.opt = opt
38
- self.file_client = None
39
- self.io_backend_opt = opt['io_backend']
40
- self.gt_folder = opt['dataroot_gt']
41
-
42
- # file client (lmdb io backend)
43
- if self.io_backend_opt['type'] == 'lmdb':
44
- self.io_backend_opt['db_paths'] = [self.gt_folder]
45
- self.io_backend_opt['client_keys'] = ['gt']
46
- if not self.gt_folder.endswith('.lmdb'):
47
- raise ValueError(f"'dataroot_gt' should end with '.lmdb', but received {self.gt_folder}")
48
- with open(osp.join(self.gt_folder, 'meta_info.txt')) as fin:
49
- self.paths = [line.split('.')[0] for line in fin]
50
- else:
51
- # disk backend with meta_info
52
- # Each line in the meta_info describes the relative path to an image
53
- with open(self.opt['meta_info']) as fin:
54
- paths = [line.strip().split(' ')[0] for line in fin]
55
- self.paths = [os.path.join(self.gt_folder, v) for v in paths]
56
-
57
- # blur settings for the first degradation
58
- self.blur_kernel_size = opt['blur_kernel_size']
59
- self.kernel_list = opt['kernel_list']
60
- self.kernel_prob = opt['kernel_prob'] # a list for each kernel probability
61
- self.blur_sigma = opt['blur_sigma']
62
- self.betag_range = opt['betag_range'] # betag used in generalized Gaussian blur kernels
63
- self.betap_range = opt['betap_range'] # betap used in plateau blur kernels
64
- self.sinc_prob = opt['sinc_prob'] # the probability for sinc filters
65
-
66
- # blur settings for the second degradation
67
- self.blur_kernel_size2 = opt['blur_kernel_size2']
68
- self.kernel_list2 = opt['kernel_list2']
69
- self.kernel_prob2 = opt['kernel_prob2']
70
- self.blur_sigma2 = opt['blur_sigma2']
71
- self.betag_range2 = opt['betag_range2']
72
- self.betap_range2 = opt['betap_range2']
73
- self.sinc_prob2 = opt['sinc_prob2']
74
-
75
- # a final sinc filter
76
- self.final_sinc_prob = opt['final_sinc_prob']
77
-
78
- self.kernel_range = [2 * v + 1 for v in range(3, 11)] # kernel size ranges from 7 to 21
79
- # TODO: kernel range is now hard-coded, should be in the configure file
80
- self.pulse_tensor = torch.zeros(21, 21).float() # convolving with pulse tensor brings no blurry effect
81
- self.pulse_tensor[10, 10] = 1
82
-
83
- def __getitem__(self, index):
84
- if self.file_client is None:
85
- self.file_client = FileClient(self.io_backend_opt.pop('type'), **self.io_backend_opt)
86
-
87
- # -------------------------------- Load gt images -------------------------------- #
88
- # Shape: (h, w, c); channel order: BGR; image range: [0, 1], float32.
89
- gt_path = self.paths[index]
90
- # avoid errors caused by high latency in reading files
91
- retry = 3
92
- while retry > 0:
93
- try:
94
- img_bytes = self.file_client.get(gt_path, 'gt')
95
- except (IOError, OSError) as e:
96
- logger = get_root_logger()
97
- logger.warn(f'File client error: {e}, remaining retry times: {retry - 1}')
98
- # change another file to read
99
- index = random.randint(0, self.__len__())
100
- gt_path = self.paths[index]
101
- time.sleep(1) # sleep 1s for occasional server congestion
102
- else:
103
- break
104
- finally:
105
- retry -= 1
106
- img_gt = imfrombytes(img_bytes, float32=True)
107
-
108
- # -------------------- Do augmentation for training: flip, rotation -------------------- #
109
- img_gt = augment(img_gt, self.opt['use_hflip'], self.opt['use_rot'])
110
-
111
- # crop or pad to 400
112
- # TODO: 400 is hard-coded. You may change it accordingly
113
- h, w = img_gt.shape[0:2]
114
- crop_pad_size = 400
115
- # pad
116
- if h < crop_pad_size or w < crop_pad_size:
117
- pad_h = max(0, crop_pad_size - h)
118
- pad_w = max(0, crop_pad_size - w)
119
- img_gt = cv2.copyMakeBorder(img_gt, 0, pad_h, 0, pad_w, cv2.BORDER_REFLECT_101)
120
- # crop
121
- if img_gt.shape[0] > crop_pad_size or img_gt.shape[1] > crop_pad_size:
122
- h, w = img_gt.shape[0:2]
123
- # randomly choose top and left coordinates
124
- top = random.randint(0, h - crop_pad_size)
125
- left = random.randint(0, w - crop_pad_size)
126
- img_gt = img_gt[top:top + crop_pad_size, left:left + crop_pad_size, ...]
127
-
128
- # ------------------------ Generate kernels (used in the first degradation) ------------------------ #
129
- kernel_size = random.choice(self.kernel_range)
130
- if np.random.uniform() < self.opt['sinc_prob']:
131
- # this sinc filter setting is for kernels ranging from [7, 21]
132
- if kernel_size < 13:
133
- omega_c = np.random.uniform(np.pi / 3, np.pi)
134
- else:
135
- omega_c = np.random.uniform(np.pi / 5, np.pi)
136
- kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
137
- else:
138
- kernel = random_mixed_kernels(
139
- self.kernel_list,
140
- self.kernel_prob,
141
- kernel_size,
142
- self.blur_sigma,
143
- self.blur_sigma, [-math.pi, math.pi],
144
- self.betag_range,
145
- self.betap_range,
146
- noise_range=None)
147
- # pad kernel
148
- pad_size = (21 - kernel_size) // 2
149
- kernel = np.pad(kernel, ((pad_size, pad_size), (pad_size, pad_size)))
150
-
151
- # ------------------------ Generate kernels (used in the second degradation) ------------------------ #
152
- kernel_size = random.choice(self.kernel_range)
153
- if np.random.uniform() < self.opt['sinc_prob2']:
154
- if kernel_size < 13:
155
- omega_c = np.random.uniform(np.pi / 3, np.pi)
156
- else:
157
- omega_c = np.random.uniform(np.pi / 5, np.pi)
158
- kernel2 = circular_lowpass_kernel(omega_c, kernel_size, pad_to=False)
159
- else:
160
- kernel2 = random_mixed_kernels(
161
- self.kernel_list2,
162
- self.kernel_prob2,
163
- kernel_size,
164
- self.blur_sigma2,
165
- self.blur_sigma2, [-math.pi, math.pi],
166
- self.betag_range2,
167
- self.betap_range2,
168
- noise_range=None)
169
-
170
- # pad kernel
171
- pad_size = (21 - kernel_size) // 2
172
- kernel2 = np.pad(kernel2, ((pad_size, pad_size), (pad_size, pad_size)))
173
-
174
- # ------------------------------------- the final sinc kernel ------------------------------------- #
175
- if np.random.uniform() < self.opt['final_sinc_prob']:
176
- kernel_size = random.choice(self.kernel_range)
177
- omega_c = np.random.uniform(np.pi / 3, np.pi)
178
- sinc_kernel = circular_lowpass_kernel(omega_c, kernel_size, pad_to=21)
179
- sinc_kernel = torch.FloatTensor(sinc_kernel)
180
- else:
181
- sinc_kernel = self.pulse_tensor
182
-
183
- # BGR to RGB, HWC to CHW, numpy to tensor
184
- img_gt = img2tensor([img_gt], bgr2rgb=True, float32=True)[0]
185
- kernel = torch.FloatTensor(kernel)
186
- kernel2 = torch.FloatTensor(kernel2)
187
-
188
- return_d = {'gt': img_gt, 'kernel1': kernel, 'kernel2': kernel2, 'sinc_kernel': sinc_kernel, 'gt_path': gt_path}
189
- return return_d
190
-
191
- def __len__(self):
192
- return len(self.paths)
spaces/Cyril666/ContourNet-ABI/maskrcnn_benchmark/layers/focal_loss.py DELETED
@@ -1,61 +0,0 @@
1
- import torch
2
- from torch import nn
3
- from torch.nn import functional as F
4
-
5
- def Focal_Loss(pred, gt):
6
- # print('yes!!')
7
-
8
-
9
-
10
- ce = nn.CrossEntropyLoss()
11
- alpha = 0.25
12
- gamma = 2
13
- # logp = ce(input, target)
14
- p = torch.sigmoid(pred)
15
-
16
- loss = -alpha * (1 - p) ** gamma * (gt * torch.log(p)) - \
17
- (1 - alpha) * p ** gamma * ((1 - gt) * torch.log(1 - p))
18
-
19
- return loss.mean()
20
-
21
-
22
-
23
-
24
-
25
-
26
-
27
-
28
-
29
-
30
-
31
- # pred =torch.sigmoid(pred)
32
- # pos_inds = gt.eq(1).float()
33
- # neg_inds = gt.lt(1).float()
34
- #
35
- # loss = 0
36
- #
37
- # pos_loss = torch.log(pred + 1e-10) * torch.pow(pred, 2) * pos_inds
38
- # # neg_loss = torch.log(1 - pred) * torch.pow(pred, 2) * neg_weights * neg_inds
39
- # neg_loss = torch.log(1 - pred) * torch.pow(1 - pred, 2) * neg_inds
40
- #
41
- # num_pos = pos_inds.float().sum()
42
- # num_neg = neg_inds.float().sum()
43
- #
44
- # pos_loss = pos_loss.sum()
45
- # neg_loss = neg_loss.sum()
46
- #
47
- # if num_pos == 0:
48
- # loss = loss - neg_loss
49
- # else:
50
- # # loss = loss - (pos_loss + neg_loss) / (num_pos)
51
- # loss = loss - (pos_loss + neg_loss )
52
- # return loss * 5
53
-
54
-
55
-
56
-
57
- # if weight is not None and weight.sum() > 0:
58
- # return (losses * weight).sum() / weight.sum()
59
- # else:
60
- # assert losses.numel() != 0
61
- # return losses.mean()
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/PIL/ImagePath.py DELETED
@@ -1,19 +0,0 @@
1
- #
2
- # The Python Imaging Library
3
- # $Id$
4
- #
5
- # path interface
6
- #
7
- # History:
8
- # 1996-11-04 fl Created
9
- # 2002-04-14 fl Added documentation stub class
10
- #
11
- # Copyright (c) Secret Labs AB 1997.
12
- # Copyright (c) Fredrik Lundh 1996.
13
- #
14
- # See the README file for information on usage and redistribution.
15
- #
16
-
17
- from . import Image
18
-
19
- Path = Image.core.path
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/misc/timeTools.py DELETED
@@ -1,88 +0,0 @@
1
- """fontTools.misc.timeTools.py -- tools for working with OpenType timestamps.
2
- """
3
-
4
- import os
5
- import time
6
- from datetime import datetime, timezone
7
- import calendar
8
-
9
-
10
- epoch_diff = calendar.timegm((1904, 1, 1, 0, 0, 0, 0, 0, 0))
11
-
12
- DAYNAMES = ["Mon", "Tue", "Wed", "Thu", "Fri", "Sat", "Sun"]
13
- MONTHNAMES = [
14
- None,
15
- "Jan",
16
- "Feb",
17
- "Mar",
18
- "Apr",
19
- "May",
20
- "Jun",
21
- "Jul",
22
- "Aug",
23
- "Sep",
24
- "Oct",
25
- "Nov",
26
- "Dec",
27
- ]
28
-
29
-
30
- def asctime(t=None):
31
- """
32
- Convert a tuple or struct_time representing a time as returned by gmtime()
33
- or localtime() to a 24-character string of the following form:
34
-
35
- >>> asctime(time.gmtime(0))
36
- 'Thu Jan 1 00:00:00 1970'
37
-
38
- If t is not provided, the current time as returned by localtime() is used.
39
- Locale information is not used by asctime().
40
-
41
- This is meant to normalise the output of the built-in time.asctime() across
42
- different platforms and Python versions.
43
- In Python 3.x, the day of the month is right-justified, whereas on Windows
44
- Python 2.7 it is padded with zeros.
45
-
46
- See https://github.com/fonttools/fonttools/issues/455
47
- """
48
- if t is None:
49
- t = time.localtime()
50
- s = "%s %s %2s %s" % (
51
- DAYNAMES[t.tm_wday],
52
- MONTHNAMES[t.tm_mon],
53
- t.tm_mday,
54
- time.strftime("%H:%M:%S %Y", t),
55
- )
56
- return s
57
-
58
-
59
- def timestampToString(value):
60
- return asctime(time.gmtime(max(0, value + epoch_diff)))
61
-
62
-
63
- def timestampFromString(value):
64
- wkday, mnth = value[:7].split()
65
- t = datetime.strptime(value[7:], " %d %H:%M:%S %Y")
66
- t = t.replace(month=MONTHNAMES.index(mnth), tzinfo=timezone.utc)
67
- wkday_idx = DAYNAMES.index(wkday)
68
- assert t.weekday() == wkday_idx, '"' + value + '" has inconsistent weekday'
69
- return int(t.timestamp()) - epoch_diff
70
-
71
-
72
- def timestampNow():
73
- # https://reproducible-builds.org/specs/source-date-epoch/
74
- source_date_epoch = os.environ.get("SOURCE_DATE_EPOCH")
75
- if source_date_epoch is not None:
76
- return int(source_date_epoch) - epoch_diff
77
- return int(time.time() - epoch_diff)
78
-
79
-
80
- def timestampSinceEpoch(value):
81
- return int(value - epoch_diff)
82
-
83
-
84
- if __name__ == "__main__":
85
- import sys
86
- import doctest
87
-
88
- sys.exit(doctest.testmod().failed)
spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/cdn/assets/index-03d58ab8.css DELETED
@@ -1 +0,0 @@
1
- .hide.svelte-ydeks8{display:none}
spaces/Davidsamuel101/PPTGenerator/src/app.py DELETED
@@ -1,18 +0,0 @@
1
- from src.summarizer import Summarizer
2
- import gradio as gr
3
-
4
- def inference(document):
5
- summarizer = Summarizer("sshleifer/distill-pegasus-cnn-16-4")
6
- slide_content = summarizer.extract_text(document)
7
- summarized_slides = summarizer(slide_content)
8
- markdown = summarizer.convert2markdown(summarized_slides)
9
- summarizer.remove_leading_empty_lines(markdown.file_name)
10
- return markdown.file_name
11
-
12
- with gr.Blocks() as demo:
13
- inp = gr.File(file_types=['pdf'])
14
- out = gr.File(label="Markdown File")
15
- inference_btn = gr.Button("Summarized PDF")
16
- inference_btn.click(fn=inference, inputs=inp, outputs=out, show_progress=True, api_name="summarize")
17
-
18
- demo.launch()
spaces/DemoLou/moe-tts/text/__init__.py DELETED
@@ -1,32 +0,0 @@
1
- """ from https://github.com/keithito/tacotron """
2
- from text import cleaners
3
-
4
-
5
- def text_to_sequence(text, symbols, cleaner_names):
6
- '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text.
7
- Args:
8
- text: string to convert to a sequence
9
- cleaner_names: names of the cleaner functions to run the text through
10
- Returns:
11
- List of integers corresponding to the symbols in the text
12
- '''
13
- _symbol_to_id = {s: i for i, s in enumerate(symbols)}
14
-
15
- sequence = []
16
-
17
- clean_text = _clean_text(text, cleaner_names)
18
- for symbol in clean_text:
19
- if symbol not in _symbol_to_id.keys():
20
- continue
21
- symbol_id = _symbol_to_id[symbol]
22
- sequence += [symbol_id]
23
- return sequence
24
-
25
-
26
- def _clean_text(text, cleaner_names):
27
- for name in cleaner_names:
28
- cleaner = getattr(cleaners, name)
29
- if not cleaner:
30
- raise Exception('Unknown cleaner: %s' % name)
31
- text = cleaner(text)
32
- return text
spaces/DragGan/DragGan-Inversion/training/training_loop.py DELETED
@@ -1,499 +0,0 @@
1
- # Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved.
2
- #
3
- # NVIDIA CORPORATION and its licensors retain all intellectual property
4
- # and proprietary rights in and to this software, related documentation
5
- # and any modifications thereto. Any use, reproduction, disclosure or
6
- # distribution of this software and related documentation without an express
7
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
8
-
9
- """Main training loop."""
10
-
11
- import os
12
- import time
13
- import copy
14
- import json
15
- import pickle
16
- import psutil
17
- import PIL.Image
18
- import numpy as np
19
- import torch
20
- import dnnlib
21
- from torch_utils import misc
22
- from torch_utils import training_stats
23
- from torch_utils.ops import conv2d_gradfix
24
- from torch_utils.ops import grid_sample_gradfix
25
-
26
- import legacy
27
- from metrics import metric_main
28
-
29
- # ----------------------------------------------------------------------------
30
-
31
-
32
- def setup_snapshot_image_grid(training_set, random_seed=0):
33
- rnd = np.random.RandomState(random_seed)
34
- gw = np.clip(7680 // training_set.image_shape[2], 7, 32)
35
- gh = np.clip(4320 // training_set.image_shape[1], 4, 32)
36
-
37
- # No labels => show random subset of training samples.
38
- if not training_set.has_labels:
39
- all_indices = list(range(len(training_set)))
40
- rnd.shuffle(all_indices)
41
- grid_indices = [all_indices[i %
42
- len(all_indices)] for i in range(gw * gh)]
43
-
44
- else:
45
- # Group training samples by label.
46
- label_groups = dict() # label => [idx, ...]
47
- for idx in range(len(training_set)):
48
- label = tuple(training_set.get_details(idx).raw_label.flat[::-1])
49
- if label not in label_groups:
50
- label_groups[label] = []
51
- label_groups[label].append(idx)
52
-
53
- # Reorder.
54
- label_order = sorted(label_groups.keys())
55
- for label in label_order:
56
- rnd.shuffle(label_groups[label])
57
-
58
- # Organize into grid.
59
- grid_indices = []
60
- for y in range(gh):
61
- label = label_order[y % len(label_order)]
62
- indices = label_groups[label]
63
- grid_indices += [indices[x % len(indices)] for x in range(gw)]
64
- label_groups[label] = [
65
- indices[(i + gw) % len(indices)] for i in range(len(indices))]
66
-
67
- # Load data.
68
- images, labels = zip(*[training_set[i] for i in grid_indices])
69
- return (gw, gh), np.stack(images), np.stack(labels)
70
-
71
- # ----------------------------------------------------------------------------
72
-
73
-
74
- def save_image_grid(img, fname, drange, grid_size):
75
- lo, hi = drange
76
- img = np.asarray(img, dtype=np.float32)
77
- img = (img - lo) * (255 / (hi - lo))
78
- img = np.rint(img).clip(0, 255).astype(np.uint8)
79
-
80
- gw, gh = grid_size
81
- _N, C, H, W = img.shape
82
- img = img.reshape([gh, gw, C, H, W])
83
- img = img.transpose(0, 3, 1, 4, 2)
84
- img = img.reshape([gh * H, gw * W, C])
85
-
86
- assert C in [1, 3]
87
- if C == 1:
88
- PIL.Image.fromarray(img[:, :, 0], 'L').save(fname)
89
- if C == 3:
90
- PIL.Image.fromarray(img, 'RGB').save(fname)
91
-
92
- # ----------------------------------------------------------------------------
93
-
94
-
95
- def training_loop(
96
- run_dir='.', # Output directory.
97
- training_set_kwargs={}, # Options for training set.
98
- data_loader_kwargs={}, # Options for torch.utils.data.DataLoader.
99
- G_kwargs={}, # Options for generator network.
100
- D_kwargs={}, # Options for discriminator network.
101
- G_opt_kwargs={}, # Options for generator optimizer.
102
- D_opt_kwargs={}, # Options for discriminator optimizer.
103
- # Options for augmentation pipeline. None = disable.
104
- augment_kwargs=None,
105
- loss_kwargs={}, # Options for loss function.
106
- metrics=[], # Metrics to evaluate during training.
107
- random_seed=0, # Global random seed.
108
- num_gpus=1, # Number of GPUs participating in the training.
109
- rank=0, # Rank of the current process in [0, num_gpus[.
110
- # Total batch size for one training iteration. Can be larger than batch_gpu * num_gpus.
111
- batch_size=4,
112
- batch_gpu=4, # Number of samples processed at a time by one GPU.
113
- # Half-life of the exponential moving average (EMA) of generator weights.
114
- ema_kimg=10,
115
- ema_rampup=0.05, # EMA ramp-up coefficient. None = no rampup.
116
- # How often to perform regularization for G? None = disable lazy regularization.
117
- G_reg_interval=None,
118
- # How often to perform regularization for D? None = disable lazy regularization.
119
- D_reg_interval=16,
120
- augment_p=0, # Initial value of augmentation probability.
121
- ada_target=None, # ADA target value. None = fixed p.
122
- ada_interval=4, # How often to perform ADA adjustment?
123
- # ADA adjustment speed, measured in how many kimg it takes for p to increase/decrease by one unit.
124
- ada_kimg=500,
125
- # Total length of the training, measured in thousands of real images.
126
- total_kimg=25000,
127
- kimg_per_tick=4, # Progress snapshot interval.
128
- # How often to save image snapshots? None = disable.
129
- image_snapshot_ticks=50,
130
- # How often to save network snapshots? None = disable.
131
- network_snapshot_ticks=50,
132
- resume_pkl=None, # Network pickle to resume training from.
133
- resume_kimg=0, # First kimg to report when resuming training.
134
- cudnn_benchmark=True, # Enable torch.backends.cudnn.benchmark?
135
- # Callback function for determining whether to abort training. Must return consistent results across ranks.
136
- abort_fn=None,
137
- # Callback function for updating training progress. Called for all ranks.
138
- progress_fn=None,
139
- ):
140
- # Initialize.
141
- start_time = time.time()
142
- device = torch.device('cuda', rank)
143
- np.random.seed(random_seed * num_gpus + rank)
144
- torch.manual_seed(random_seed * num_gpus + rank)
145
- # Improves training speed.
146
- torch.backends.cudnn.benchmark = cudnn_benchmark
147
- # Improves numerical accuracy.
148
- torch.backends.cuda.matmul.allow_tf32 = False
149
- # Improves numerical accuracy.
150
- torch.backends.cudnn.allow_tf32 = False
151
- # Improves training speed.
152
- conv2d_gradfix.enabled = True
153
- # Avoids errors with the augmentation pipe.
154
- grid_sample_gradfix.enabled = True
155
-
156
- # Load training set.
157
- if rank == 0:
158
- print('Loading training set...')
159
- training_set = dnnlib.util.construct_class_by_name(
160
- **training_set_kwargs) # subclass of training.dataset.Dataset
161
- training_set_sampler = misc.InfiniteSampler(
162
- dataset=training_set, rank=rank, num_replicas=num_gpus, seed=random_seed)
163
- training_set_iterator = iter(torch.utils.data.DataLoader(
164
- dataset=training_set, sampler=training_set_sampler, batch_size=batch_size//num_gpus, **data_loader_kwargs))
165
- if rank == 0:
166
- print()
167
- print('Num images: ', len(training_set))
168
- print('Image shape:', training_set.image_shape)
169
- print('Label shape:', training_set.label_shape)
170
- print()
171
-
172
- # Construct networks.
173
- if rank == 0:
174
- print('Constructing networks...')
175
- common_kwargs = dict(c_dim=training_set.label_dim,
176
- img_resolution=training_set.resolution, img_channels=training_set.num_channels)
177
- G = dnnlib.util.construct_class_by_name(**G_kwargs, **common_kwargs).train(
178
- ).requires_grad_(False).to(device) # subclass of torch.nn.Module
179
- D = dnnlib.util.construct_class_by_name(**D_kwargs, **common_kwargs).train(
180
- ).requires_grad_(False).to(device) # subclass of torch.nn.Module
181
- G_ema = copy.deepcopy(G).eval()
182
-
183
- # Resume from existing pickle.
184
- if (resume_pkl is not None) and (rank == 0):
185
- print(f'Resuming from "{resume_pkl}"')
186
- with dnnlib.util.open_url(resume_pkl) as f:
187
- resume_data = legacy.load_network_pkl(f)
188
- for name, module in [('G', G), ('D', D), ('G_ema', G_ema)]:
189
- misc.copy_params_and_buffers(
190
- resume_data[name], module, require_all=False)
191
-
192
- # Print network summary tables.
193
- if rank == 0:
194
- z = torch.empty([batch_gpu, G.z_dim], device=device)
195
- c = torch.empty([batch_gpu, G.c_dim], device=device)
196
- img = misc.print_module_summary(G, [z, c])
197
- misc.print_module_summary(D, [img, c])
198
-
199
- # Setup augmentation.
200
- if rank == 0:
201
- print('Setting up augmentation...')
202
- augment_pipe = None
203
- ada_stats = None
204
- if (augment_kwargs is not None) and (augment_p > 0 or ada_target is not None):
205
- augment_pipe = dnnlib.util.construct_class_by_name(
206
- **augment_kwargs).train().requires_grad_(False).to(device) # subclass of torch.nn.Module
207
- augment_pipe.p.copy_(torch.as_tensor(augment_p))
208
- if ada_target is not None:
209
- ada_stats = training_stats.Collector(regex='Loss/signs/real')
210
-
211
- # Distribute across GPUs.
212
- if rank == 0:
213
- print(f'Distributing across {num_gpus} GPUs...')
214
- for module in [G, D, G_ema, augment_pipe]:
215
- if module is not None and num_gpus > 1:
216
- for param in misc.params_and_buffers(module):
217
- torch.distributed.broadcast(param, src=0)
218
-
219
- # Setup training phases.
220
- if rank == 0:
221
- print('Setting up training phases...')
222
- loss = dnnlib.util.construct_class_by_name(
223
- device=device, G=G, D=D, augment_pipe=augment_pipe, **loss_kwargs) # subclass of training.loss.Loss
224
- phases = []
225
- for name, module, opt_kwargs, reg_interval in [('G', G, G_opt_kwargs, G_reg_interval), ('D', D, D_opt_kwargs, D_reg_interval)]:
226
- if reg_interval is None:
227
- opt = dnnlib.util.construct_class_by_name(
228
- params=module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
229
- phases += [dnnlib.EasyDict(name=name+'both',
230
- module=module, opt=opt, interval=1)]
231
- else: # Lazy regularization.
232
- mb_ratio = reg_interval / (reg_interval + 1)
233
- opt_kwargs = dnnlib.EasyDict(opt_kwargs)
234
- opt_kwargs.lr = opt_kwargs.lr * mb_ratio
235
- opt_kwargs.betas = [beta ** mb_ratio for beta in opt_kwargs.betas]
236
- opt = dnnlib.util.construct_class_by_name(
237
- module.parameters(), **opt_kwargs) # subclass of torch.optim.Optimizer
238
- phases += [dnnlib.EasyDict(name=name+'main',
239
- module=module, opt=opt, interval=1)]
240
- phases += [dnnlib.EasyDict(name=name+'reg',
241
- module=module, opt=opt, interval=reg_interval)]
242
- for phase in phases:
243
- phase.start_event = None
244
- phase.end_event = None
245
- if rank == 0:
246
- phase.start_event = torch.cuda.Event(enable_timing=True)
247
- phase.end_event = torch.cuda.Event(enable_timing=True)
248
-
249
- # Export sample images.
250
- grid_size = None
251
- grid_z = None
252
- grid_c = None
253
- if rank == 0:
254
- print('Exporting sample images...')
255
- grid_size, images, labels = setup_snapshot_image_grid(
256
- training_set=training_set)
257
- save_image_grid(images, os.path.join(run_dir, 'reals.png'),
258
- drange=[0, 255], grid_size=grid_size)
259
- grid_z = torch.randn([labels.shape[0], G.z_dim],
260
- device=device).split(batch_gpu)
261
- grid_c = torch.from_numpy(labels).to(device).split(batch_gpu)
262
- images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu()
263
- for z, c in zip(grid_z, grid_c)]).numpy()
264
- save_image_grid(images, os.path.join(
265
- run_dir, 'fakes_init.png'), drange=[-1, 1], grid_size=grid_size)
266
-
267
- # Initialize logs.
268
- if rank == 0:
269
- print('Initializing logs...')
270
- stats_collector = training_stats.Collector(regex='.*')
271
- stats_metrics = dict()
272
- stats_jsonl = None
273
- stats_tfevents = None
274
- if rank == 0:
275
- stats_jsonl = open(os.path.join(run_dir, 'stats.jsonl'), 'wt')
276
- try:
277
- import torch.utils.tensorboard as tensorboard
278
- stats_tfevents = tensorboard.SummaryWriter(run_dir)
279
- except ImportError as err:
280
- print('Skipping tfevents export:', err)
281
-
282
- # Train.
283
- if rank == 0:
284
- print(f'Training for {total_kimg} kimg...')
285
- print()
286
- cur_nimg = resume_kimg * 1000
287
- cur_tick = 0
288
- tick_start_nimg = cur_nimg
289
- tick_start_time = time.time()
290
- maintenance_time = tick_start_time - start_time
291
- batch_idx = 0
292
- if progress_fn is not None:
293
- progress_fn(0, total_kimg)
294
- while True:
295
-
296
- # Fetch training data.
297
- with torch.autograd.profiler.record_function('data_fetch'):
298
- phase_real_img, phase_real_c = next(training_set_iterator)
299
- phase_real_img = (phase_real_img.to(device).to(
300
- torch.float32) / 127.5 - 1).split(batch_gpu)
301
- phase_real_c = phase_real_c.to(device).split(batch_gpu)
302
- all_gen_z = torch.randn(
303
- [len(phases) * batch_size, G.z_dim], device=device)
304
- all_gen_z = [phase_gen_z.split(
305
- batch_gpu) for phase_gen_z in all_gen_z.split(batch_size)]
306
- all_gen_c = [training_set.get_label(np.random.randint(
307
- len(training_set))) for _ in range(len(phases) * batch_size)]
308
- all_gen_c = torch.from_numpy(
309
- np.stack(all_gen_c)).pin_memory().to(device)
310
- all_gen_c = [phase_gen_c.split(
311
- batch_gpu) for phase_gen_c in all_gen_c.split(batch_size)]
312
-
313
- # Execute training phases.
314
- for phase, phase_gen_z, phase_gen_c in zip(phases, all_gen_z, all_gen_c):
315
- if batch_idx % phase.interval != 0:
316
- continue
317
- if phase.start_event is not None:
318
- phase.start_event.record(torch.cuda.current_stream(device))
319
-
320
- # Accumulate gradients.
321
- phase.opt.zero_grad(set_to_none=True)
322
- phase.module.requires_grad_(True)
323
- for real_img, real_c, gen_z, gen_c in zip(phase_real_img, phase_real_c, phase_gen_z, phase_gen_c):
324
- loss.accumulate_gradients(phase=phase.name, real_img=real_img, real_c=real_c,
325
- gen_z=gen_z, gen_c=gen_c, gain=phase.interval, cur_nimg=cur_nimg)
326
- phase.module.requires_grad_(False)
327
-
328
- # Update weights.
329
- with torch.autograd.profiler.record_function(phase.name + '_opt'):
330
- params = [param for param in phase.module.parameters()
331
- if param.grad is not None]
332
- if len(params) > 0:
333
- flat = torch.cat([param.grad.flatten()
334
- for param in params])
335
- if num_gpus > 1:
336
- torch.distributed.all_reduce(flat)
337
- flat /= num_gpus
338
- misc.nan_to_num(flat, nan=0, posinf=1e5,
339
- neginf=-1e5, out=flat)
340
- grads = flat.split([param.numel() for param in params])
341
- for param, grad in zip(params, grads):
342
- param.grad = grad.reshape(param.shape)
343
- phase.opt.step()
344
-
345
- # Phase done.
346
- if phase.end_event is not None:
347
- phase.end_event.record(torch.cuda.current_stream(device))
348
-
349
- # Update G_ema.
350
- with torch.autograd.profiler.record_function('Gema'):
351
- ema_nimg = ema_kimg * 1000
352
- if ema_rampup is not None:
353
- ema_nimg = min(ema_nimg, cur_nimg * ema_rampup)
354
- ema_beta = 0.5 ** (batch_size / max(ema_nimg, 1e-8))
355
- for p_ema, p in zip(G_ema.parameters(), G.parameters()):
356
- p_ema.copy_(p.lerp(p_ema, ema_beta))
357
- for b_ema, b in zip(G_ema.buffers(), G.buffers()):
358
- b_ema.copy_(b)
359
-
360
- # Update state.
361
- cur_nimg += batch_size
362
- batch_idx += 1
363
-
364
- # Execute ADA heuristic.
365
- if (ada_stats is not None) and (batch_idx % ada_interval == 0):
366
- ada_stats.update()
367
- adjust = np.sign(ada_stats['Loss/signs/real'] - ada_target) * \
368
- (batch_size * ada_interval) / (ada_kimg * 1000)
369
- augment_pipe.p.copy_(
370
- (augment_pipe.p + adjust).max(misc.constant(0, device=device)))
371
-
372
- # Perform maintenance tasks once per tick.
373
- done = (cur_nimg >= total_kimg * 1000)
374
- if (not done) and (cur_tick != 0) and (cur_nimg < tick_start_nimg + kimg_per_tick * 1000):
375
- continue
376
-
377
- # Print status line, accumulating the same information in training_stats.
378
- tick_end_time = time.time()
379
- fields = []
380
- fields += [
381
- f"tick {training_stats.report0('Progress/tick', cur_tick):<5d}"]
382
- fields += [
383
- f"kimg {training_stats.report0('Progress/kimg', cur_nimg / 1e3):<8.1f}"]
384
- fields += [
385
- f"time {dnnlib.util.format_time(training_stats.report0('Timing/total_sec', tick_end_time - start_time)):<12s}"]
386
- fields += [
387
- f"sec/tick {training_stats.report0('Timing/sec_per_tick', tick_end_time - tick_start_time):<7.1f}"]
388
- fields += [
389
- f"sec/kimg {training_stats.report0('Timing/sec_per_kimg', (tick_end_time - tick_start_time) / (cur_nimg - tick_start_nimg) * 1e3):<7.2f}"]
390
- fields += [
391
- f"maintenance {training_stats.report0('Timing/maintenance_sec', maintenance_time):<6.1f}"]
392
- fields += [
393
- f"cpumem {training_stats.report0('Resources/cpu_mem_gb', psutil.Process(os.getpid()).memory_info().rss / 2**30):<6.2f}"]
394
- fields += [
395
- f"gpumem {training_stats.report0('Resources/peak_gpu_mem_gb', torch.cuda.max_memory_allocated(device) / 2**30):<6.2f}"]
396
- fields += [
397
- f"reserved {training_stats.report0('Resources/peak_gpu_mem_reserved_gb', torch.cuda.max_memory_reserved(device) / 2**30):<6.2f}"]
398
- torch.cuda.reset_peak_memory_stats()
399
- fields += [
400
- f"augment {training_stats.report0('Progress/augment', float(augment_pipe.p.cpu()) if augment_pipe is not None else 0):.3f}"]
401
- training_stats.report0('Timing/total_hours',
402
- (tick_end_time - start_time) / (60 * 60))
403
- training_stats.report0('Timing/total_days',
404
- (tick_end_time - start_time) / (24 * 60 * 60))
405
- if rank == 0:
406
- print(' '.join(fields))
407
-
408
- # Check for abort.
409
- if (not done) and (abort_fn is not None) and abort_fn():
410
- done = True
411
- if rank == 0:
412
- print()
413
- print('Aborting...')
414
-
415
- # Save image snapshot.
416
- if (rank == 0) and (image_snapshot_ticks is not None) and (done or cur_tick % image_snapshot_ticks == 0):
417
- images = torch.cat([G_ema(z=z, c=c, noise_mode='const').cpu()
418
- for z, c in zip(grid_z, grid_c)]).numpy()
419
- save_image_grid(images, os.path.join(
420
- run_dir, f'fakes{cur_nimg//1000:06d}.png'), drange=[-1, 1], grid_size=grid_size)
421
-
422
- # Save network snapshot.
423
- snapshot_pkl = None
424
- snapshot_data = None
425
- if (network_snapshot_ticks is not None) and (done or cur_tick % network_snapshot_ticks == 0):
426
- snapshot_data = dict(G=G, D=D, G_ema=G_ema, augment_pipe=augment_pipe,
427
- training_set_kwargs=dict(training_set_kwargs))
428
- for key, value in snapshot_data.items():
429
- if isinstance(value, torch.nn.Module):
430
- value = copy.deepcopy(value).eval().requires_grad_(False)
431
- if num_gpus > 1:
432
- misc.check_ddp_consistency(
433
- value, ignore_regex=r'.*\.[^.]+_(avg|ema)')
434
- for param in misc.params_and_buffers(value):
435
- torch.distributed.broadcast(param, src=0)
436
- snapshot_data[key] = value.cpu()
437
- del value # conserve memory
438
- snapshot_pkl = os.path.join(
439
- run_dir, f'network-snapshot-{cur_nimg//1000:06d}.pkl')
440
- if rank == 0:
441
- with open(snapshot_pkl, 'wb') as f:
442
- pickle.dump(snapshot_data, f)
443
-
444
- # Evaluate metrics.
445
- if (snapshot_data is not None) and (len(metrics) > 0):
446
- if rank == 0:
447
- print('Evaluating metrics...')
448
- for metric in metrics:
449
- result_dict = metric_main.calc_metric(metric=metric, G=snapshot_data['G_ema'],
450
- dataset_kwargs=training_set_kwargs, num_gpus=num_gpus, rank=rank, device=device)
451
- if rank == 0:
452
- metric_main.report_metric(
453
- result_dict, run_dir=run_dir, snapshot_pkl=snapshot_pkl)
454
- stats_metrics.update(result_dict.results)
455
- del snapshot_data # conserve memory
456
-
457
- # Collect statistics.
458
- for phase in phases:
459
- value = []
460
- if (phase.start_event is not None) and (phase.end_event is not None):
461
- phase.end_event.synchronize()
462
- value = phase.start_event.elapsed_time(phase.end_event)
463
- training_stats.report0('Timing/' + phase.name, value)
464
- stats_collector.update()
465
- stats_dict = stats_collector.as_dict()
466
-
467
- # Update logs.
468
- timestamp = time.time()
469
- if stats_jsonl is not None:
470
- fields = dict(stats_dict, timestamp=timestamp)
471
- stats_jsonl.write(json.dumps(fields) + '\n')
472
- stats_jsonl.flush()
473
- if stats_tfevents is not None:
474
- global_step = int(cur_nimg / 1e3)
475
- walltime = timestamp - start_time
476
- for name, value in stats_dict.items():
477
- stats_tfevents.add_scalar(
478
- name, value.mean, global_step=global_step, walltime=walltime)
479
- for name, value in stats_metrics.items():
480
- stats_tfevents.add_scalar(
481
- f'Metrics/{name}', value, global_step=global_step, walltime=walltime)
482
- stats_tfevents.flush()
483
- if progress_fn is not None:
484
- progress_fn(cur_nimg // 1000, total_kimg)
485
-
486
- # Update state.
487
- cur_tick += 1
488
- tick_start_nimg = cur_nimg
489
- tick_start_time = time.time()
490
- maintenance_time = tick_start_time - tick_end_time
491
- if done:
492
- break
493
-
494
- # Done.
495
- if rank == 0:
496
- print()
497
- print('Exiting...')
498
-
499
- # ----------------------------------------------------------------------------
spaces/DragGan/DragGan/stylegan_human/training_scripts/sg2/train.py DELETED
@@ -1,560 +0,0 @@
1
- # Copyright (c) SenseTime Research. All rights reserved.
2
-
3
- # Copyright (c) 2021, NVIDIA CORPORATION. All rights reserved.
4
- #
5
- # NVIDIA CORPORATION and its licensors retain all intellectual property
6
- # and proprietary rights in and to this software, related documentation
7
- # and any modifications thereto. Any use, reproduction, disclosure or
8
- # distribution of this software and related documentation without an express
9
- # license agreement from NVIDIA CORPORATION is strictly prohibited.
10
-
11
- """Train a GAN using the techniques described in the paper
12
- "Training Generative Adversarial Networks with Limited Data"."""
13
-
14
- import os
15
- import click
16
- import re
17
- import json
18
- import tempfile
19
- import torch
20
- import dnnlib
21
-
22
- import ast
23
- from training import training_loop
24
- from metrics import metric_main
25
- from torch_utils import training_stats
26
- from torch_utils import custom_ops
27
-
28
- #----------------------------------------------------------------------------
29
-
30
- class UserError(Exception):
31
- pass
32
-
33
- #----------------------------------------------------------------------------
34
-
35
- def setup_training_loop_kwargs(
36
- # General options (not included in desc).
37
- gpus = None, # Number of GPUs: <int>, default = 1 gpu
38
- snap = None, # Snapshot interval: <int>, default = 50 ticks
39
- metrics = None, # List of metric names: [], ['fid50k_full'] (default), ...
40
- seed = None, # Random seed: <int>, default = 0
41
-
42
- # Dataset.
43
- data = None, # Training dataset (required): <path>
44
- cond = None, # Train conditional model based on dataset labels: <bool>, default = False
45
- subset = None, # Train with only N images: <int>, default = all
46
- mirror = None, # Augment dataset with x-flips: <bool>, default = False
47
- square = None,
48
-
49
- # Base config.
50
- cfg = None, # Base config: 'auto' (default), 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar', 'shhq'
51
- gamma = None, # Override R1 gamma: <float>
52
- kimg = None, # Override training duration: <int>
53
- batch = None, # Override batch size: <int>
54
-
55
- # Discriminator augmentation.
56
- aug = None, # Augmentation mode: 'ada' (default), 'noaug', 'fixed'
57
- p = None, # Specify p for 'fixed' (required): <float>
58
- target = None, # Override ADA target for 'ada': <float>, default = depends on aug
59
- augpipe = None, # Augmentation pipeline: 'blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc' (default), ..., 'bgcfnc'
60
-
61
- # Transfer learning.
62
- resume = None, # Load previous network: 'noresume' (default), 'ffhq256', 'ffhq512', 'ffhq1024', 'celebahq256', 'lsundog256', <file>, <url>
63
- freezed = None, # Freeze-D: <int>, default = 0 discriminator layers
64
-
65
- # Performance options (not included in desc).
66
- fp32 = None, # Disable mixed-precision training: <bool>, default = False
67
- nhwc = None, # Use NHWC memory format with FP16: <bool>, default = False
68
- allow_tf32 = None, # Allow PyTorch to use TF32 for matmul and convolutions: <bool>, default = False
69
- nobench = None, # Disable cuDNN benchmarking: <bool>, default = False
70
- workers = None, # Override number of DataLoader workers: <int>, default = 3
71
-
72
- ):
73
- args = dnnlib.EasyDict()
74
-
75
- # ------------------------------------------
76
- # General options: gpus, snap, metrics, seed
77
- # ------------------------------------------
78
-
79
- if gpus is None:
80
- gpus = 1
81
- assert isinstance(gpus, int)
82
- if not (gpus >= 1 and gpus & (gpus - 1) == 0):
83
- raise UserError('--gpus must be a power of two')
84
- args.num_gpus = gpus
85
-
86
- if snap is None:
87
- snap = 50
88
- assert isinstance(snap, int)
89
- if snap < 1:
90
- raise UserError('--snap must be at least 1')
91
- args.image_snapshot_ticks = snap
92
- args.network_snapshot_ticks = snap
-
-    if metrics is None:
-        metrics = ['fid50k_full']
-    assert isinstance(metrics, list)
-    if not all(metric_main.is_valid_metric(metric) for metric in metrics):
-        raise UserError('\n'.join(['--metrics can only contain the following values:'] + metric_main.list_valid_metrics()))
-    args.metrics = metrics
-
-    if seed is None:
-        seed = 0
-    assert isinstance(seed, int)
-    args.random_seed = seed
-
-    # -------------------------------------------
-    # Dataset: data, cond, subset, mirror, square
-    # -------------------------------------------
-
-    print('square : ', square)
-
-    assert data is not None
-    assert isinstance(data, str)
-
-    args.training_set_kwargs = dnnlib.EasyDict(class_name='training.dataset.ImageFolderDataset', path=data, use_labels=True, max_size=None, xflip=False, square=square)
-    args.data_loader_kwargs = dnnlib.EasyDict(pin_memory=True, num_workers=3, prefetch_factor=2)
-    try:
-        training_set = dnnlib.util.construct_class_by_name(**args.training_set_kwargs) # subclass of training.dataset.Dataset
-        args.training_set_kwargs.resolution = training_set.resolution # be explicit about resolution
-        args.training_set_kwargs.use_labels = training_set.has_labels # be explicit about labels
-        args.training_set_kwargs.max_size = len(training_set) # be explicit about dataset size
-        desc = training_set.name
-        print('desc: ', desc)
-        del training_set # conserve memory
-    except IOError as err:
-        raise UserError(f'--data: {err}')
-
-
-    if square: desc += '-square'
-    else: desc += '-rectangle'
-
-    if cond is None:
-        cond = False
-    assert isinstance(cond, bool)
-    if cond:
-        if not args.training_set_kwargs.use_labels:
-            raise UserError('--cond=True requires labels specified in dataset.json')
-        desc += '-cond'
-    else:
-        args.training_set_kwargs.use_labels = False
-
-    if subset is not None:
-        assert isinstance(subset, int)
-        if not 1 <= subset <= args.training_set_kwargs.max_size:
-            raise UserError(f'--subset must be between 1 and {args.training_set_kwargs.max_size}')
-        desc += f'-subset{subset}'
-        if subset < args.training_set_kwargs.max_size:
-            args.training_set_kwargs.max_size = subset
-            args.training_set_kwargs.random_seed = args.random_seed
-
-    if mirror is None:
-        mirror = False
-    assert isinstance(mirror, bool)
-    if mirror:
-        desc += '-mirror'
-        args.training_set_kwargs.xflip = True
-
-    # ------------------------------------
-    # Base config: cfg, gamma, kimg, batch
-    # ------------------------------------
-
-    if cfg is None:
-        cfg = 'auto'
-    assert isinstance(cfg, str)
-    desc += f'-{cfg}'
-
-    cfg_specs = {
-        'auto': dict(ref_gpus=-1, kimg=25000, mb=-1, mbstd=-1, fmaps=-1, lrate=-1, gamma=-1, ema=-1, ramp=0.05, map=2),
-        'shhq': dict(ref_gpus=-1, kimg=25000, mb=-1, mbstd=-1, fmaps=-1, lrate=-1, gamma=-1, ema=-1, ramp=0.05, map=8), # Populated dynamically based on resolution and GPU count.
-        'stylegan2': dict(ref_gpus=8, kimg=25000, mb=32, mbstd=4, fmaps=1, lrate=0.002, gamma=10, ema=10, ramp=None, map=8), # Uses mixed-precision, unlike the original StyleGAN2.
-        'paper256': dict(ref_gpus=8, kimg=25000, mb=64, mbstd=8, fmaps=0.5, lrate=0.0025, gamma=1, ema=20, ramp=None, map=8),
-        'paper512': dict(ref_gpus=8, kimg=25000, mb=64, mbstd=8, fmaps=1, lrate=0.0025, gamma=0.5, ema=20, ramp=None, map=8),
-        'paper1024': dict(ref_gpus=8, kimg=25000, mb=32, mbstd=4, fmaps=1, lrate=0.002, gamma=2, ema=10, ramp=None, map=8),
-        'cifar': dict(ref_gpus=2, kimg=100000, mb=64, mbstd=32, fmaps=1, lrate=0.0025, gamma=0.01, ema=500, ramp=0.05, map=2),
-    }
-
-    assert cfg in cfg_specs
-    spec = dnnlib.EasyDict(cfg_specs[cfg])
-    if cfg == 'auto' or cfg == 'shhq':
-        desc += f'{gpus:d}'
-        spec.ref_gpus = gpus
-        res = args.training_set_kwargs.resolution
-        spec.mb = max(min(gpus * min(4096 // res, 32), 64), gpus) # keep gpu memory consumption at bay
-        spec.mbstd = min(spec.mb // gpus, 4) # other hyperparams behave more predictably if mbstd group size remains fixed
-        spec.fmaps = 1 if res >= 512 else 0.5
-        spec.lrate = 0.002 if res >= 1024 else 0.0025
-        spec.gamma = 0.0002 * (res ** 2) / spec.mb # heuristic formula
-        spec.ema = spec.mb * 10 / 32
-
-    args.G_kwargs = dnnlib.EasyDict(class_name='training.networks.Generator', z_dim=512, w_dim=512, mapping_kwargs=dnnlib.EasyDict(), synthesis_kwargs=dnnlib.EasyDict(), square=square)
-    args.D_kwargs = dnnlib.EasyDict(class_name='training.networks.Discriminator', block_kwargs=dnnlib.EasyDict(), mapping_kwargs=dnnlib.EasyDict(), epilogue_kwargs=dnnlib.EasyDict(), square=square)
-    args.G_kwargs.synthesis_kwargs.channel_base = args.D_kwargs.channel_base = int(spec.fmaps * 32768)
-    args.G_kwargs.synthesis_kwargs.channel_max = args.D_kwargs.channel_max = 512
-    args.G_kwargs.mapping_kwargs.num_layers = spec.map
-    args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 4 # enable mixed-precision training
-    args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = 256 # clamp activations to avoid float16 overflow
-    args.D_kwargs.epilogue_kwargs.mbstd_group_size = spec.mbstd
-
-    args.G_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
-    args.D_opt_kwargs = dnnlib.EasyDict(class_name='torch.optim.Adam', lr=spec.lrate, betas=[0,0.99], eps=1e-8)
-    args.loss_kwargs = dnnlib.EasyDict(class_name='training.loss.StyleGAN2Loss', r1_gamma=spec.gamma)
-
-    args.total_kimg = spec.kimg
-    args.batch_size = spec.mb
-    args.batch_gpu = spec.mb // spec.ref_gpus
-    args.ema_kimg = spec.ema
-    args.ema_rampup = spec.ramp
-
-    if cfg == 'cifar':
-        args.loss_kwargs.pl_weight = 0 # disable path length regularization
-        args.loss_kwargs.style_mixing_prob = 0 # disable style mixing
-        args.D_kwargs.architecture = 'orig' # disable residual skip connections
-
-    if gamma is not None:
-        assert isinstance(gamma, float)
-        if not gamma >= 0:
-            raise UserError('--gamma must be non-negative')
-        desc += f'-gamma{gamma:g}'
-        args.loss_kwargs.r1_gamma = gamma
-
-    if kimg is not None:
-        assert isinstance(kimg, int)
-        if not kimg >= 1:
-            raise UserError('--kimg must be at least 1')
-        desc += f'-kimg{kimg:d}'
-        args.total_kimg = kimg
-
-    if batch is not None:
-        assert isinstance(batch, int)
-        if not (batch >= 1 and batch % gpus == 0):
-            raise UserError('--batch must be at least 1 and divisible by --gpus')
-        desc += f'-batch{batch}'
-        args.batch_size = batch
-        args.batch_gpu = batch // gpus
-
-    # ---------------------------------------------------
-    # Discriminator augmentation: aug, p, target, augpipe
-    # ---------------------------------------------------
-
-    if aug is None:
-        aug = 'ada'
-    else:
-        assert isinstance(aug, str)
-        desc += f'-{aug}'
-
-    if aug == 'ada':
-        args.ada_target = 0.6
-
-    elif aug == 'noaug':
-        pass
-
-    elif aug == 'fixed':
-        if p is None:
-            raise UserError(f'--aug={aug} requires specifying --p')
-
-    else:
-        raise UserError(f'--aug={aug} not supported')
-
-    if p is not None:
-        assert isinstance(p, float)
-        if aug != 'fixed':
-            raise UserError('--p can only be specified with --aug=fixed')
-        if not 0 <= p <= 1:
-            raise UserError('--p must be between 0 and 1')
-        desc += f'-p{p:g}'
-        args.augment_p = p
-
-    if target is not None:
-        assert isinstance(target, float)
-        if aug != 'ada':
-            raise UserError('--target can only be specified with --aug=ada')
-        if not 0 <= target <= 1:
-            raise UserError('--target must be between 0 and 1')
-        desc += f'-target{target:g}'
-        args.ada_target = target
-
-    assert augpipe is None or isinstance(augpipe, str)
-    if augpipe is None:
-        augpipe = 'bgc'
-    else:
-        if aug == 'noaug':
-            raise UserError('--augpipe cannot be specified with --aug=noaug')
-        desc += f'-{augpipe}'
-
-    augpipe_specs = {
-        'blit': dict(xflip=1, rotate90=1, xint=1),
-        'geom': dict(scale=1, rotate=1, aniso=1, xfrac=1),
-        'color': dict(brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
-        'filter': dict(imgfilter=1),
-        'noise': dict(noise=1),
-        'cutout': dict(cutout=1),
-        'bg': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1),
-        'bgc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1),
-        'bgcf': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1),
-        'bgcfn': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1),
-        'bgcfnc': dict(xflip=1, rotate90=1, xint=1, scale=1, rotate=1, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1, imgfilter=1, noise=1, cutout=1),
-        'body': dict(xflip=1, rotate90=0, xint=1, scale=1, rotate=0, aniso=1, xfrac=1, brightness=1, contrast=1, lumaflip=1, hue=1, saturation=1)
-    }
-
-    assert augpipe in augpipe_specs
-    if aug != 'noaug':
-        args.augment_kwargs = dnnlib.EasyDict(class_name='training.augment.AugmentPipe', **augpipe_specs[augpipe])
-
-    # ----------------------------------
-    # Transfer learning: resume, freezed
-    # ----------------------------------
-
-    resume_specs = {
-        'ffhq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res256-mirror-paper256-noaug.pkl',
-        'ffhq512': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res512-mirror-stylegan2-noaug.pkl',
-        'ffhq1024': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/ffhq-res1024-mirror-stylegan2-noaug.pkl',
-        'celebahq256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/celebahq-res256-mirror-paper256-kimg100000-ada-target0.5.pkl',
-        'lsundog256': 'https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/transfer-learning-source-nets/lsundog-res256-paper256-kimg100000-noaug.pkl',
-    }
-
-    assert resume is None or isinstance(resume, str)
-    if resume is None:
-        resume = 'noresume'
-    elif resume == 'noresume':
-        desc += '-noresume'
-    elif resume in resume_specs:
-        desc += f'-resume{resume}'
-        args.resume_pkl = resume_specs[resume] # predefined url
-    else:
-        desc += '-resumecustom'
-        args.resume_pkl = resume # custom path or url
-
-    if resume != 'noresume':
-        args.ada_kimg = 100 # make ADA react faster at the beginning
-        args.ema_rampup = None # disable EMA rampup
-
-    if freezed is not None:
-        assert isinstance(freezed, int)
-        if not freezed >= 0:
-            raise UserError('--freezed must be non-negative')
-        desc += f'-freezed{freezed:d}'
-        args.D_kwargs.block_kwargs.freeze_layers = freezed
-
-    # -------------------------------------------------
-    # Performance options: fp32, nhwc, nobench, workers
-    # -------------------------------------------------
-
-    if fp32 is None:
-        fp32 = False
-    assert isinstance(fp32, bool)
-    if fp32:
-        args.G_kwargs.synthesis_kwargs.num_fp16_res = args.D_kwargs.num_fp16_res = 0
-        args.G_kwargs.synthesis_kwargs.conv_clamp = args.D_kwargs.conv_clamp = None
-
-    if nhwc is None:
-        nhwc = False
-    assert isinstance(nhwc, bool)
-    if nhwc:
-        args.G_kwargs.synthesis_kwargs.fp16_channels_last = args.D_kwargs.block_kwargs.fp16_channels_last = True
-
-    if nobench is None:
-        nobench = False
-    assert isinstance(nobench, bool)
-    if nobench:
-        args.cudnn_benchmark = False
-
-    if allow_tf32 is None:
-        allow_tf32 = False
-    assert isinstance(allow_tf32, bool)
-    if allow_tf32:
-        args.allow_tf32 = True
-
-    if workers is not None:
-        assert isinstance(workers, int)
-        if not workers >= 1:
-            raise UserError('--workers must be at least 1')
-        args.data_loader_kwargs.num_workers = workers
-
-    return desc, args
-
- #----------------------------------------------------------------------------
-
- def subprocess_fn(rank, args, temp_dir):
-     dnnlib.util.Logger(file_name=os.path.join(args.run_dir, 'log.txt'), file_mode='a', should_flush=True)
-
-     # Init torch.distributed.
-     if args.num_gpus > 1:
-         init_file = os.path.abspath(os.path.join(temp_dir, '.torch_distributed_init'))
-         if os.name == 'nt':
-             init_method = 'file:///' + init_file.replace('\\', '/')
-             torch.distributed.init_process_group(backend='gloo', init_method=init_method, rank=rank, world_size=args.num_gpus)
-         else:
-             init_method = f'file://{init_file}'
-             torch.distributed.init_process_group(backend='nccl', init_method=init_method, rank=rank, world_size=args.num_gpus)
-
-     # Init torch_utils.
-     sync_device = torch.device('cuda', rank) if args.num_gpus > 1 else None
-     training_stats.init_multiprocessing(rank=rank, sync_device=sync_device)
-     if rank != 0:
-         custom_ops.verbosity = 'none'
-
-
-     # Execute training loop.
-     training_loop.training_loop(rank=rank, **args)
-
- #----------------------------------------------------------------------------
-
- class CommaSeparatedList(click.ParamType):
-     name = 'list'
-
-     def convert(self, value, param, ctx):
-         _ = param, ctx
-         if value is None or value.lower() == 'none' or value == '':
-             return []
-         return value.split(',')
-
- #----------------------------------------------------------------------------
-
- @click.command()
- @click.pass_context
-
- # General options.
- @click.option('--outdir', help='Where to save the results', required=True, metavar='DIR')
- @click.option('--gpus', help='Number of GPUs to use [default: 1]', type=int, metavar='INT')
- @click.option('--snap', help='Snapshot interval [default: 50 ticks]', type=int, metavar='INT')
- @click.option('--metrics', help='Comma-separated list or "none" [default: fid50k_full]', type=CommaSeparatedList())
- @click.option('--seed', help='Random seed [default: 0]', type=int, metavar='INT')
- @click.option('-n', '--dry-run', help='Print training options and exit', is_flag=True)
-
- # Dataset.
- @click.option('--data', help='Training data (directory or zip)', metavar='PATH', required=True)
- @click.option('--cond', help='Train conditional model based on dataset labels [default: false]', type=bool, metavar='BOOL')
- @click.option('--subset', help='Train with only N images [default: all]', type=int, metavar='INT')
- @click.option('--mirror', help='Enable dataset x-flips [default: false]', type=bool, metavar='BOOL')
- @click.option('--square', help='True for square, False for rectangle', type=bool, metavar='BOOL', default=False)
-
- # Base config.
- @click.option('--cfg', help='Base config [default: auto]', type=click.Choice(['auto', 'stylegan2', 'paper256', 'paper512', 'paper1024', 'cifar', 'shhq']))
- @click.option('--gamma', help='Override R1 gamma', type=float)
- @click.option('--kimg', help='Override training duration', type=int, metavar='INT')
- @click.option('--batch', help='Override batch size', type=int, metavar='INT')
-
- # Discriminator augmentation.
- @click.option('--aug', help='Augmentation mode [default: ada]', type=click.Choice(['noaug', 'ada', 'fixed']))
- @click.option('--p', help='Augmentation probability for --aug=fixed', type=float)
- @click.option('--target', help='ADA target value for --aug=ada', type=float)
- @click.option('--augpipe', help='Augmentation pipeline [default: bgc]', type=click.Choice(['blit', 'geom', 'color', 'filter', 'noise', 'cutout', 'bg', 'bgc', 'bgcf', 'bgcfn', 'bgcfnc', 'body']))
-
- # Transfer learning.
- @click.option('--resume', help='Resume training [default: noresume]', metavar='PKL')
- @click.option('--freezed', help='Freeze-D [default: 0 layers]', type=int, metavar='INT')
-
- # Performance options.
- @click.option('--fp32', help='Disable mixed-precision training', type=bool, metavar='BOOL')
- @click.option('--nhwc', help='Use NHWC memory format with FP16', type=bool, metavar='BOOL')
- @click.option('--nobench', help='Disable cuDNN benchmarking', type=bool, metavar='BOOL')
- @click.option('--allow-tf32', help='Allow PyTorch to use TF32 internally', type=bool, metavar='BOOL')
- @click.option('--workers', help='Override number of DataLoader workers', type=int, metavar='INT')
-
-
-
-
- def main(ctx, outdir, dry_run, **config_kwargs):
-     """Train a GAN using the techniques described in the paper
-     "Training Generative Adversarial Networks with Limited Data".
-
-     Examples:
-
-     \b
-     # Train with custom dataset using 1 GPU.
-     python train.py --outdir=~/training-runs --data=~/mydataset.zip --gpus=1
-
-     \b
-     # Train class-conditional CIFAR-10 using 2 GPUs.
-     python train.py --outdir=~/training-runs --data=~/datasets/cifar10.zip \\
-         --gpus=2 --cfg=cifar --cond=1
-
-     \b
-     # Transfer learn MetFaces from FFHQ using 4 GPUs.
-     python train.py --outdir=~/training-runs --data=~/datasets/metfaces.zip \\
-         --gpus=4 --cfg=paper1024 --mirror=1 --resume=ffhq1024 --snap=10
-
-     \b
-     # Reproduce original StyleGAN2 config F.
-     python train.py --outdir=~/training-runs --data=~/datasets/ffhq.zip \\
-         --gpus=8 --cfg=stylegan2 --mirror=1 --aug=noaug
-
-     \b
-     Base configs (--cfg):
-       auto           Automatically select reasonable defaults based on resolution
-                      and GPU count. Good starting point for new datasets.
-       stylegan2      Reproduce results for StyleGAN2 config F at 1024x1024.
-       paper256       Reproduce results for FFHQ and LSUN Cat at 256x256.
-       paper512       Reproduce results for BreCaHAD and AFHQ at 512x512.
-       paper1024      Reproduce results for MetFaces at 1024x1024.
-       cifar          Reproduce results for CIFAR-10 at 32x32.
-
-     \b
-     Transfer learning source networks (--resume):
-       ffhq256        FFHQ trained at 256x256 resolution.
-       ffhq512        FFHQ trained at 512x512 resolution.
-       ffhq1024       FFHQ trained at 1024x1024 resolution.
-       celebahq256    CelebA-HQ trained at 256x256 resolution.
-       lsundog256     LSUN Dog trained at 256x256 resolution.
-       <PATH or URL>  Custom network pickle.
-     """
-     dnnlib.util.Logger(should_flush=True)
-
-     # Setup training options.
-     try:
-         run_desc, args = setup_training_loop_kwargs(**config_kwargs)
-     except UserError as err:
-         ctx.fail(err)
-
-     # Pick output directory.
-     prev_run_dirs = []
-     if os.path.isdir(outdir):
-         prev_run_dirs = [x for x in os.listdir(outdir) if os.path.isdir(os.path.join(outdir, x))]
-     prev_run_ids = [re.match(r'^\d+', x) for x in prev_run_dirs]
-     prev_run_ids = [int(x.group()) for x in prev_run_ids if x is not None]
-     cur_run_id = max(prev_run_ids, default=-1) + 1
-     args.run_dir = os.path.join(outdir, f'{cur_run_id:05d}-{run_desc}')
-     assert not os.path.exists(args.run_dir)
-
-     # Print options.
-     print()
-     print('Training options:')
-     print(json.dumps(args, indent=2))
-     print()
-     print(f'Output directory: {args.run_dir}')
-     print(f'Training data: {args.training_set_kwargs.path}')
-     print(f'Training duration: {args.total_kimg} kimg')
-     print(f'Number of GPUs: {args.num_gpus}')
-     print(f'Number of images: {args.training_set_kwargs.max_size}')
-     print(f'Image resolution: {args.training_set_kwargs.resolution}')
-     print(f'Conditional model: {args.training_set_kwargs.use_labels}')
-     print(f'Dataset x-flips: {args.training_set_kwargs.xflip}')
-     print()
-
-     # Dry run?
-     if dry_run:
-         print('Dry run; exiting.')
-         return
-
-     # Create output directory.
-     print('Creating output directory...')
-     os.makedirs(args.run_dir, exist_ok=True)
-     with open(os.path.join(args.run_dir, 'training_options.json'), 'wt') as f:
-         json.dump(args, f, indent=2)
-
-     # Launch processes.
-     print('Launching processes...')
-     torch.multiprocessing.set_start_method('spawn')
-     with tempfile.TemporaryDirectory() as temp_dir:
-         if args.num_gpus == 1:
-             subprocess_fn(rank=0, args=args, temp_dir=temp_dir)
-         else:
-             torch.multiprocessing.spawn(fn=subprocess_fn, args=(args, temp_dir), nprocs=args.num_gpus)
-
- #----------------------------------------------------------------------------
-
- if __name__ == "__main__":
-     main() # pylint: disable=no-value-for-parameter
-
- #----------------------------------------------------------------------------
 
spaces/ECCV2022/bytetrack/yolox/tracking_utils/evaluation.py DELETED
@@ -1,113 +0,0 @@
- import os
- import numpy as np
- import copy
- import motmetrics as mm
- mm.lap.default_solver = 'lap'
-
- from yolox.tracking_utils.io import read_results, unzip_objs
-
-
- class Evaluator(object):
-
-     def __init__(self, data_root, seq_name, data_type):
-         self.data_root = data_root
-         self.seq_name = seq_name
-         self.data_type = data_type
-
-         self.load_annotations()
-         self.reset_accumulator()
-
-     def load_annotations(self):
-         assert self.data_type == 'mot'
-
-         gt_filename = os.path.join(self.data_root, self.seq_name, 'gt', 'gt.txt')
-         self.gt_frame_dict = read_results(gt_filename, self.data_type, is_gt=True)
-         self.gt_ignore_frame_dict = read_results(gt_filename, self.data_type, is_ignore=True)
-
-     def reset_accumulator(self):
-         self.acc = mm.MOTAccumulator(auto_id=True)
-
-     def eval_frame(self, frame_id, trk_tlwhs, trk_ids, rtn_events=False):
-         # results
-         trk_tlwhs = np.copy(trk_tlwhs)
-         trk_ids = np.copy(trk_ids)
-
-         # gts
-         gt_objs = self.gt_frame_dict.get(frame_id, [])
-         gt_tlwhs, gt_ids = unzip_objs(gt_objs)[:2]
-
-         # ignore boxes
-         ignore_objs = self.gt_ignore_frame_dict.get(frame_id, [])
-         ignore_tlwhs = unzip_objs(ignore_objs)[0]
-
-         # remove ignored results
-         keep = np.ones(len(trk_tlwhs), dtype=bool)
-         iou_distance = mm.distances.iou_matrix(ignore_tlwhs, trk_tlwhs, max_iou=0.5)
-         if len(iou_distance) > 0:
-             match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
-             match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
-             match_ious = iou_distance[match_is, match_js]
-
-             match_js = np.asarray(match_js, dtype=int)
-             match_js = match_js[np.logical_not(np.isnan(match_ious))]
-             keep[match_js] = False
-             trk_tlwhs = trk_tlwhs[keep]
-             trk_ids = trk_ids[keep]
-         #match_is, match_js = mm.lap.linear_sum_assignment(iou_distance)
-         #match_is, match_js = map(lambda a: np.asarray(a, dtype=int), [match_is, match_js])
-         #match_ious = iou_distance[match_is, match_js]
-
-         #match_js = np.asarray(match_js, dtype=int)
-         #match_js = match_js[np.logical_not(np.isnan(match_ious))]
-         #keep[match_js] = False
-         #trk_tlwhs = trk_tlwhs[keep]
-         #trk_ids = trk_ids[keep]
-
-         # get distance matrix
-         iou_distance = mm.distances.iou_matrix(gt_tlwhs, trk_tlwhs, max_iou=0.5)
-
-         # acc
-         self.acc.update(gt_ids, trk_ids, iou_distance)
-
-         if rtn_events and iou_distance.size > 0 and hasattr(self.acc, 'last_mot_events'):
-             events = self.acc.last_mot_events # only supported by https://github.com/longcw/py-motmetrics
-         else:
-             events = None
-         return events
-
-     def eval_file(self, filename):
-         self.reset_accumulator()
-
-         result_frame_dict = read_results(filename, self.data_type, is_gt=False)
-         #frames = sorted(list(set(self.gt_frame_dict.keys()) | set(result_frame_dict.keys())))
-         frames = sorted(list(set(result_frame_dict.keys())))
-         for frame_id in frames:
-             trk_objs = result_frame_dict.get(frame_id, [])
-             trk_tlwhs, trk_ids = unzip_objs(trk_objs)[:2]
-             self.eval_frame(frame_id, trk_tlwhs, trk_ids, rtn_events=False)
-
-         return self.acc
-
-     @staticmethod
-     def get_summary(accs, names, metrics=('mota', 'num_switches', 'idp', 'idr', 'idf1', 'precision', 'recall')):
-         names = copy.deepcopy(names)
-         if metrics is None:
-             metrics = mm.metrics.motchallenge_metrics
-         metrics = copy.deepcopy(metrics)
-
-         mh = mm.metrics.create()
-         summary = mh.compute_many(
-             accs,
-             metrics=metrics,
-             names=names,
-             generate_overall=True
-         )
-
-         return summary
-
-     @staticmethod
-     def save_summary(summary, filename):
-         import pandas as pd
-         writer = pd.ExcelWriter(filename)
-         summary.to_excel(writer)
-         writer.save()