diff --git a/names.txt b/names.txt
deleted file mode 100644
index 276a2c6747d1b409c8111798509dce23e564c4c2..0000000000000000000000000000000000000000
--- a/names.txt
+++ /dev/null
@@ -1,19913 +0,0 @@
-stabilityai/stable-diffusion -HuggingFaceH4/open_llm_leaderboard -dalle-mini/dalle-mini -facebook/MusicGen -jbilcke-hf/ai-comic-factory -AP123/IllusionDiffusion -pharmapsychotic/CLIP-Interrogator -microsoft/HuggingGPT -Gustavosta/MagicPrompt-Stable-Diffusion -camenduru-com/webui -DeepFloyd/IF -sanchit-gandhi/whisper-jax -suno/bark -ysharma/ChatGPT4 -mteb/leaderboard -damo-vilab/modelscope-text-to-video-synthesis -huggingface-projects/QR-code-AI-art-generator -CompVis/stable-diffusion-license -timbrooks/instruct-pix2pix -ysharma/Explore_llamav2_with_TGI -akhaliq/AnimeGANv2 -togethercomputer/OpenChatKit -anzorq/finetuned_diffusion -openai/whisper -fffiloni/img-to-music -sczhou/CodeFormer -hysts/ControlNet -DragGan/DragGan -fffiloni/CLIP-Interrogator-2 -huggingface-projects/diffuse-the-rest -tiiuae/falcon-180b-demo -JohnSmith9982/ChuanhuChatGPT -hysts/ControlNet-v1-1 -Vision-CAIR/minigpt4 -Logspace/Langflow -lnyan/stablediffusion-infinity -facebook/seamless_m4t -huggingchat/chat-ui -google/sdxl -HuggingFaceH4/starchat-playground -merve/ChatGPT-prompt-generator -microsoft/visual_chatgpt -fffiloni/zeroscope -akhaliq/ArcaneGAN -coqui/xtts -haoheliu/audioldm-text-to-audio-generation -lambdalabs/image-mixer-demo -vinthony/SadTalker -runwayml/stable-diffusion-v1-5 -HuggingFaceH4/zephyr-chat -PKUWilliamYang/VToonify -Xintao/GFPGAN -fffiloni/Image-to-Story -sd-concepts-library/stable-diffusion-conceptualizer -Salesforce/BLIP2 -HuggingFaceH4/falcon-chat -prodia/fast-stable-diffusion -PaddlePaddle/ERNIE-ViLG -zomehwh/vits-models -CarperAI/StableVicuna -camenduru-com/webui-docker -THUDM/GLM-130B -CVPR/Image-Animation-using-Thin-Plate-Spline-Motion-Model -multimodalart/LoraTheExplorer -multimodalart/latentdiffusion -skytnt/moe-tts -openai/point-e -uwnlp/guanaco-playground-tgi -CVPR/ml-talking-face -darkstorm2150/Stable-Diffusion-Protogen-x3.4-webui -tloen/alpaca-lora -multimodalart/dreambooth-training -runwayml/stable-diffusion-inpainting -lmsys/chatbot-arena-leaderboard -jbilcke-hf/AI-WebTV -huggingface-projects/diffusers-gallery -Xenova/whisper-web -Salesforce/BLIP -fffiloni/Pix2Pix-Video -Anonymous-sub/Rerender -nielsr/comparing-captioning-models -fffiloni/ControlNet-Video -jeffistyping/Youtube-Whisperer -BlinkDL/RWKV-World-7B -hysts/Shap-E -Sanster/Lama-Cleaner-lama -Yuliang/ICON -kakaobrain/karlo -elevenlabs/tts -vumichien/Whisper_speaker_diarization -BilalSardar/Voice-Cloning -lambdalabs/stable-diffusion-image-variations -akhaliq/GFPGAN -shi-labs/OneFormer -daspartho/prompt-extend -BlinkDL/ChatRWKV-gradio -shi-labs/Versatile-Diffusion -ysharma/OpenAI_TTS_New -Plachta/VITS-Umamusume-voice-synthesizer -project-baize/chat-with-baize -shariqfarooq/ZoeDepth -felixrosberg/face-swap -huggingface-projects/llama-2-13b-chat -bigcode/bigcode-playground -akhaliq/Real-ESRGAN -skytnt/anime-remove-background -warp-ai/Wuerstchen -huggingface-projects/stable-diffusion-multiplayer -HuggingFaceM4/idefics_playground -fffiloni/spectrogram-to-music -editing-images/ledits -ArtGAN/Diffusion-API -qingxu98/gpt-academic -marcop/musika -olivierdehaene/chat-llm-streaming -flax-community/dalle-mini -multimodalart/ChatGLM-6B -bigcode/bigcode-models-leaderboard -One-2-3-45/One-2-3-45 -huggingface-projects/llama-2-7b-chat -hf-accelerate/model-memory-usage -mosaicml/mpt-30b-chat -ydshieh/Kosmos-2 -zomehwh/vits-uma-genshin-honkai
-ECCV2022/dis-background-removal -guoyww/AnimateDiff -dvruette/fabric -PAIR/Text2Video-Zero -hysts/ControlNet-with-Anything-v4 -Vokturz/can-it-run-llm -aadnk/whisper-webui -huggingface/bloom_demo -camenduru-com/one-shot-talking-face -doevent/prompt-generator -multimodalart/stable-diffusion-inpainting -AIGC-Audio/AudioGPT -ArkanDash/rvc-models-new -flamehaze1115/Wonder3D-demo -jiawei011/dreamgaussian -fffiloni/MS-Image2Video -adept/fuyu-8b-demo -hysts/SD-XL -fffiloni/Music-To-Image -cvlab/zero123-live -awacke1/Image-to-Line-Drawings -h2oai/h2ogpt-chatbot -Plachta/VALL-E-X -microsoft/Promptist -xinyu1205/recognize-anything -impira/docquery -ArtGAN/Video-Diffusion-WebUI -SteveDigital/free-fast-youtube-url-video-to-text-using-openai-whisper -sambanovasystems/BLOOMChat -doevent/Face-Real-ESRGAN -fffiloni/stable-diffusion-img2img -mandar100/chatbot_dialogpt -hakurei/waifu-diffusion-demo -lora-library/LoRA-DreamBooth-Training-UI -badayvedat/LLaVA -radames/stable-diffusion-depth2img -aliabid94/AutoGPT -ardha27/rvc-models -microsoft-cognitive-service/mm-react -codellama/codellama-13b-chat -haoheliu/audioldm2-text2audio-text2music -Manjushri/SDXL-1.0 -deepwisdom/MetaGPT -huggingface-projects/Deep-Reinforcement-Learning-Leaderboard -FaceOnLive/Face-Recognition-SDK -THUDM/CodeGeeX -nightfury/Image_Face_Upscale_Restoration-GFPGAN -akhaliq/Real-Time-Voice-Cloning -SemanticTypography/Word-As-Image -togethercomputer/GPT-JT -SpacesExamples/ComfyUI -trl-lib/stack-llama -jbilcke-hf/webapp-factory-wizardcoder -radames/dpt-depth-estimation-3d-obj -segmind/Segmind-Stable-Diffusion -tonyassi/face-swap -mattthew/SDXL-artists-browser -codeparrot/code-generation-models -huggingface-projects/magic-diffusion -ysharma/nougat -SimianLuo/Latent_Consistency_Model -akhaliq/demucs -VideoCrafter/VideoCrafter -prodia/sdxl-stable-diffusion-xl -Surn/UnlimitedMusicGen -diffusers/stable-diffusion-xl-inpainting -Matthijs/speecht5-tts-demo -optimum/llm-perf-leaderboard -An-619/FastSAM -Audio-AGI/AudioSep -ronvolutional/ai-pokemon-card -hwchase17/chat-langchain -songweig/rich-text-to-image -ai-forever/Kandinsky2.1 -mfidabel/controlnet-segment-anything -fffiloni/instant-TTS-Bark-cloning -darkstorm2150/protogen-web-ui -zomehwh/sovits-models -kevinwang676/Bark-with-Voice-Cloning -mms-meta/MMS -TencentARC/T2I-Adapter-SDXL -Voicemod/Text-to-Sing -TempoFunk/makeavid-sd-jax -EleutherAI/VQGAN_CLIP -hysts/DeepDanbooru -radames/Real-Time-Latent-Consistency-Model -phenomenon1981/DreamlikeArt-PhotoReal-2.0 -Audio-AGI/WavJourney -TencentARC/T2I-Adapter-SDXL-Sketch -ai-guru/composer -autoevaluate/model-evaluator -yizhangliu/Grounded-Segment-Anything -chansung/zero2story -FaceOnLive/ID-Document-Recognition-SDK -Adapter/T2I-Adapter -wangrongsheng/ChatPaper -hf4all/bingo -MAGAer13/mPLUG-Owl -xdecoder/Instruct-X-Decoder -codellama/codellama-playground -AP123/Upside-Down-Diffusion -akhaliq/JoJoGAN -bigcode/santacoder-demo -mike-ravkine/can-ai-code-results -pytorch/MiDaS -Open-Orca/Mistral-7B-OpenOrca -sudo-ai/zero123plus-demo-space -akhaliq/anything-v3.0 -DGSpitzer/TXT-2-IMG-2-MUSIC-2-VIDEO-w-RIFFUSION -coqui/CoquiTTS -jonigata/PoseMaker2 -hf-audio/open_asr_leaderboard -osanseviero/mistral-super-fast -chansung/co-write-with-llama2 -ThomasSimonini/Huggy -OFA-Sys/OFA-Image_Caption -ikechan8370/vits-uma-genshin-honkai -akhaliq/frame-interpolation -THUDM/CogVideo -Linaqruf/Animagine-XL -FaceOnLive/Face-Liveness-Detection-SDK -Rothfeld/stable-diffusion-mat-outpainting-primer -pharmapsychotic/sd-prism -multimodalart/mariogpt -carolineec/informativedrawings 
-fffiloni/SplitTrack2MusicGen -sanchit-gandhi/whisper-large-v2 -thomas-yanxin/LangChain-ChatLLM -upstage/open-ko-llm-leaderboard -CVPR/DualStyleGAN -NoCrypt/DeepDanbooru_string -bhaskartripathi/pdfChatter -weizmannscience/tokenflow -ysharma/Low-rank-Adaptation -VIPLab/Track-Anything -JingyeChen22/TextDiffuser -coqui/voice-chat-with-mistral -Gradio-Blocks/Story_and_Video_Generation -akiyamasho/AnimeBackgroundGAN -SmilingWolf/wd-v1-4-tags -fffiloni/VideoRetalking -Shuang59/Composable-Diffusion -osanseviero/i-like-flan -bookbot/Image-Upscaling-Playground -Curranj/Words_To_SQL -fffiloni/DragGAN -competitions/aiornot -weizmannscience/multidiffusion-region-based -jonigata/PoseMaker -NeuralInternet/Text-to-Video_Playground -openflamingo/OpenFlamingo -anzorq/chatgpt-demo -ngoctuanai/chatgptfree -gligen/demo -autoevaluate/leaderboards -anzorq/point-e_demo -abhishek/first-order-motion-model -internships/internships-2023 -nateraw/animegan-v2-for-videos -nielsr/dit-document-layout-analysis -huggingface-projects/wordalle -aadnk/faster-whisper-webui -h2oai/h2ogpt-chatbot2 -fffiloni/Image-to-MusicGen -yuntian-deng/ChatGPT -facebook/cotracker -EleutherAI/clip-guided-diffusion -keras-io/Enhance_Low_Light_Image -Gradio-Blocks/DualStyleGAN -yizhangliu/chatGPT -shikunl/prismer -PaddlePaddle/ERNIE-Layout -lmsys/chatbot-arena -akhaliq/lama -nielsr/text-based-inpainting -albarji/mixture-of-diffusers -BAAI/SegGPT -shgao/EditAnything -ArkanDash/rvc-models -nielsr/dpt-depth-estimation -chansung/llama2-with-gradio-chat -ml6team/controlnet-interior-design -laion/CoCa -seungheondoh/LP-Music-Caps-demo -artificialguybr/qwen-vl -ChenyangSi/FreeU -abhishek/StableSAM -facebook/ov-seg -xdecoder/SEEM -DAMO-NLP-SG/Video-LLaMA -flax-community/chef-transformer -tomg-group-umd/pez-dispenser -fffiloni/whisper-to-stable-diffusion -vllab/controlnet-hands -pszemraj/summarize-long-text -Lykon/DreamShaper-webui -kdrkdrkdr/ProsekaTTS -huggingface-projects/stable-diffusion-latent-upscaler -RamAnanth1/ControlNet -curt-park/segment-anything-with-clip -LinkSoul/Chinese-Llama-2-7b -radames/edit-video-by-editing-text -nyanko7/sd-diffusers-webui -georgefen/Face-Landmark-ControlNet -csuhan/LLaMA-Adapter -lykeven/visualglm-6b -fffiloni/prompt-converter -CikeyQI/QQsign -fffiloni/zeroscope-XL -vumichien/Generate_human_motion -RamAnanth1/Dolly-v2 -harmonai/dance-diffusion -vumichien/Lip_movement_reading -artificialguybr/video-dubbing -multimodalart/mindseye-lite -nupurkmr9/custom-diffusion -camenduru-com/converter -whitead/paper-qa -BAAI/AltDiffusion -nota-ai/compressed-stable-diffusion -ChallengeHub/Chinese-LangChain -sanchit-gandhi/musicgen-streaming -multimodalart/lora-roulette -hysts/BLIP2-with-transformers -Ekimetrics/climate-question-answering -Yntec/ToyWorld -hf-vision/object_detection_leaderboard -SkalskiP/SAM_and_MetaCLIP -ilumine-AI/Insta-3D -manhkhanhUIT/Image_Restoration_Colorization -facebook/incoder-demo -DEEMOSTECH/ChatAvatar -TencentARC/Caption-Anything -camel-ai/camel-agents -IDEA-CCNL/Taiyi-Stable-Diffusion-Chinese -22h/vintedois-diffusion-v0-1 -hackathon-pln-es/BioMedIA -safetensors/convert -deepset/retrieval-augmentation-svb -LinoyTsaban/edit_friendly_ddpm_inversion -katielink/biogpt-large-demo -fffiloni/image-to-sound-fx -tomofi/EasyOCR -aipicasso/cool-japan-diffusion-latest-demo -hysts/zeroscope-v2 -Matthijs/whisper_word_timestamps -radames/MusicGen-Continuation -mikeee/chatglm2-6b-4bit -sanchit-gandhi/whisper-jax-diarization -cocktailpeanut/AudioGen -radames/candle-segment-anything-wasm -Gradio-Blocks/neon-tts-plugin-coqui 
-deepdoctection/deepdoctection -gradio/theme-gallery -yuntian-deng/ChatGPT4 -Awiny/Image2Paragraph -MirageML/dreambooth -ThomasSimonini/Check-my-progress-Deep-RL-Course -weizmannscience/MultiDiffusion -diffusers/controlnet-openpose -Clebersla/RVC_V2_Huggingface_Version -mindee/doctr -nateraw/background-remover -skytnt/full-body-anime-gan -Pie31415/rome -RASMUS/Whisper-youtube-crosslingual-subtitles -IDEA-Research/Grounded-SAM -Deci/DeciLM-6b-instruct -aravinds1811/neural-style-transfer -balacoon/tts -xvjiarui/ODISE -radames/dpt-depth-estimation-3d-voxels -akhaliq/yolov7 -Manjushri/PhotoReal-V3.6 -bennyguo/threestudio -phenomenon1981/DreamlikeArt-Diffusion-1.0 -tetrisd/Diffusion-Attentive-Attribution-Maps -jbilcke-hf/VideoQuest -flax-community/image-captioning -society-ethics/about -SRDdev/Image-Caption -adirik/OWL-ViT -hf4h/biomedical-language-models -huggingface-projects/video-composer-gpt4 -mishig/jsonformer -huggingface-projects/repo_duplicator -doevent/dis-background-removal -Ella2323/Positive-Reframing -dwarkesh/whisper-speaker-recognition -patrickvonplaten/instruct-pix2pix -radames/PIFu-Clothed-Human-Digitization -zhigangjiang/3D-Room-Layout-Estimation_LGT-Net -OlaWod/FreeVC -segments/panoptic-segment-anything -zomehwh/rvc-models -mikonvergence/theaTRON -fffiloni/text-to-gif -simonduerr/ProteinMPNN -Matthijs/speecht5-vc-demo -ShilongLiu/Grounding_DINO_demo -shi-labs/Prompt-Free-Diffusion -fffiloni/zeroscope-img-to-video -mithril-security/blind_chat -ykilcher/apes -umm-maybe/AI-image-detector -innnky/nene-emotion -abhishek/dreambooth -Silentlin/DiffSinger -fffiloni/langchain-chat-with-pdf -huggingface/data-measurements-tool -ronvolutional/sd-spritesheets -Tune-A-Video-library/Tune-A-Video-Training-UI -TachibanaYoshino/AnimeGANv3 -AttendAndExcite/Attend-and-Excite -davila7/filegpt -chansung/LLM-As-Chatbot -Xenova/the-tokenizer-playground -r3gm/RVC_HF -hf-audio/whisper-large-v3 -akhaliq/SwinIR -kamiyamai/stable-diffusion-webui -Yuliang/ECON -tomg-group-umd/lm-watermarking -ShiwenNi/ChatReviewer -DreamSunny/stable-diffusion-webui-cpu -HuggingFaceM4/AI_Meme_Generator -prithivida/Gramformer -Hazzzardous/RWKV-Instruct -GMFTBY/PandaGPT -HuggingFaceH4/human_eval_llm_leaderboard -weizmannscience/text2live -sweetcocoa/pop2piano -deepset/should-i-follow -XCLiu/InstaFlow -facebook/Hokkien_Translation -Fantasy-Studio/Paint-by-Example -aipicasso/emi-latest-demo -competitions/ship-detection -InpaintAI/Inpaint-Anything -sentence-transformers/embeddings-semantic-search -havas79/Real-ESRGAN_Demo -taesiri/BLIP-2 -ysharma/ChatGPTwithAPI -brjathu/HMR2.0 -competitions/movie-genre-prediction -tonyassi/image-to-image-SDXL -PixArt-alpha/PixArt-alpha -lambdalabs/text-to-naruto -Deci/DeciDiffusion-v1-0 -naver-clova-ix/donut-base-finetuned-cord-v2 -ysharma/Talk_to_Multilingual_AI_WhisperBloomCoqui -modelscope/FaceChain -artificialguybr/qwen-14b-chat-demo -fffiloni/ProPainter -Xenova/distil-whisper-web -pyannote/pretrained-pipelines -huggingface/hf-speech-bench -THUDM/CogView2 -pszemraj/pdf-ocr -sophiamyang/Panel_PDF_QA -radames/whisper-word-level-trim -ysharma/InstructPix2Pix_Chatbot -GitMylo/bark-voice-cloning -lmz/candle-llama2 -dongsiqie/gptnb -PaddlePaddle/UIE-X -Mathux/TMR -deepseek-ai/deepseek-coder-33b-instruct -KenjieDec/RemBG -haotiz/glip-zeroshot-demo -bigcode/in-the-stack -kadirnar/yolov8 -vivien/clip -bigscience/bloom-book -DGSpitzer/DGS-Diffusion-Space -anzorq/sd-space-creator -jbilcke-hf/ai-clip-factory -Gradio-Blocks/Ask_Questions_To_YouTube_Videos -lambdalabs/text-to-pokemon 
-BatuhanYilmaz/Whisper-Auto-Subtitled-Video-Generator -maxmax20160403/vits_chinese -merle/PROTEIN_GENERATOR -OptimalScale/Robin-7b -LuChengTHU/dpmsolver_sdm -ybelkada/i-like-flan-ul2 -pcuenq/uncanny-faces -ArtGAN/Segment-Anything-Video -fffiloni/langchain-chat-with-pdf-openai -fffiloni/clone-voice-for-bark -FlowiseAI/Flowise -SpacesExamples/Fooocus -akhaliq/BlendGAN -nielsr/TrOCR-handwritten -YueMafighting/FollowYourPose -bguisard/stable-diffusion-nano -declare-lab/tango -justin-zk/Personalize-SAM -ThomasSimonini/SnowballFight -akhaliq/Music_Source_Separation -zama-fhe/encrypted_sentiment_analysis -nateraw/lavila -liuyuan-pal/SyncDreamer -hf-vision/nougat-transformers -valhalla/glide-text2im -hysts/Text2Human -nateraw/deepafx-st -ysharma/ChatGLM-6b_Gradio_Streaming -diffusers/controlnet-3d-pose -anzorq/hf-spaces-semantic-search -lmsys/mt-bench -Narrativaai/NLLB-Translator -doevent/Stable-Diffusion-prompt-generator -bigscience/promptsource -facebook/speech_matrix -openai/openai-detector -Intel/ldm3d -nielsr/LayoutLMv2-FUNSD -HarlanHong/DaGAN -aryadytm/remove-photo-object -nielsr/donut-docvqa -xdecoder/Demo -pritish/BookGPT -diffusers/controlnet-canny -NeuralInternet/BabyAGI -Dragonnext/Unicorn-proxy -radames/Real-Time-Latent-Consistency-Model-Text-To-Image -nateraw/yolov6 -huggingface-projects/color-palette-generator-sd -wpeebles/DiT -smangrul/peft-lora-sd-dreambooth -kadirnar/Tune-A-Video -coffeeee/nsfw-c0ffees-erotic-story-generator2 -sail/lorahub -Open-Orca/OpenOrca-Platypus2-13B -hackaprompt/playground -Monster/GPT4ALL -cncanon/locusts -fffiloni/Music-To-Zeroscope -Deci/DeciCoder-Demo -CompVis/text2img-latent-diffusion -huggingface/Model_Cards_Writing_Tool -pszemraj/document-summarization -zlc99/M4Singer -Kangarroar/ApplioRVC-Inference -nielsr/comparing-VQA-models -trysem/SD-2.1-Img2Img -Adapter/CoAdapter -owkin/substra -treadon/prompt-fungineer-355M -Vision-CAIR/MiniGPT-v2 -hackathon-pln-es/poem-generation-es -Pinwheel/GLIP-BLIP-Object-Detection-VQA -microsoft/GODEL-Demo -clem/Image_Face_Upscale_Restoration-GFPGAN -RamAnanth1/visual-chatGPT -DeepFloyd/deepfloyd-if-license -LinkSoul/LLaSM -CVPR/drawings-to-human -sayakpaul/cartoonizer-demo-onnx -mingyuan/MotionDiffuse -diffusers/sd-to-diffusers -kadirnar/diifusion-ad-template -ELITE-library/ELITE -PKUWilliamYang/StyleGANEX -ysharma/Zero123PlusDemo -PaddlePaddle/PaddleOCR -Alican/pixera -juancopi81/multilingual-stable-diffusion -Xhaheen/ChatGPT_HF -fishaudio/fish-diffusion -Salesforce/EDICT -DragGan/DragGan-Inversion -juancopi81/multitrack-midi-music-generator -yentinglin/Taiwan-LLaMa2 -anaxagoras7/gauravgs-text-summarizer -pcuenq/paella -anzorq/riffusion-demo -microsoft/ChatGPT-Robotics -ClueAI/ChatYuan-large-v2 -coreml-projects/transformers-to-coreml -zomehwh/vits-models-genshin-bh3 -ngthanhtinqn/Segment_Anything_With_OWL-ViT -akhaliq/PaintTransformer -akhaliq/VoiceFixer -prithivida/WhatTheFood -microsoft/document-image-transformer -hysts/list-of-demos -Warvito/diffusion_brain -teticio/audio-diffusion -akhaliq/Analog-Diffusion -Hello-SimpleAI/chatgpt-detector-single -Gladiaio/Audio-Transcription -jykoh/fromage -FrozenBurning/SceneDreamer -openaccess-ai-collective/rlhf-arena -Writer/instruct-palmyra-20b -PaddlePaddle/wav2lip -eugenesiow/remove-bg -huggingface/datasets-tagging -Gradio-Blocks/Codex_OpenAI -fcakyon/zero-shot-video-classification -fffiloni/gpt-talking-portrait -unity/ML-Agents-SoccerTwos -Tune-A-Video-library/Tune-A-Video-inference -vumichien/canvas_controlnet -CrucibleAI/ControlNetMediaPipeFaceSD21 
-dylanebert/gaussian-viewer -fffiloni/coqui-bark-voice-cloning-docker -osanseviero/draw_to_search -juancopi81/whisper-demo-es-medium -riffusion/riffusion-playground -Algoworks/Image_Face_Upscale_Restoration-GFPGAN_pub -abyildirim/inst-inpaint -ioclab/brightness-controlnet -dhkim2810/MobileSAM -pycui/RealChar -jph00/pets -nickmuchi/semantic-search-with-retrieve-and-rerank -jjourney1125/swin2sr -Manjushri/SDXL-1.0-Img2Img-CPU -yizhangliu/Text-to-Image -thu-ml/unidiffuser -bigcode/bigcode-editor -OpenShape/openshape-demo -monra/freegpt-webui -Epoching/3D_Photo_Inpainting -akhaliq/DPT-Large -akhaliq/Pyxelate -deepklarity/poster2plot -eugenesiow/super-image -spacy/healthsea-demo -sxela/ArcaneGAN-video -hylee/White-box-Cartoonization -DucHaiten/webui -facebook/MaskCut -muhammadzain/AI_Resolution_Upscaler_And_Resizer -PulsarAI/huggingface-leaderboard -anton-l/rudall-e -microsoft/unispeech-speaker-verification -fffiloni/stable-diffusion-inpainting -simonduerr/diffdock -DianXian/Real-CUGAN -yangheng/Super-Resolution-Anime-Diffusion -hysts/LoRA-SD-training -camenduru-com/jupyter -Intel/Stable-Diffusion -rlancemartin/auto-evaluator -exbert-project/exbert -taesiri/ClaudeReadsArxiv -fffiloni/ControlVideo -kevinwang676/Personal-TTS -LinkSoul/AutoAgents -r3gm/AICoverGen -Norod78/Apocalyptify -akhaliq/CLIP_prefix_captioning -ml6team/Knowledge-graphs -EleutherAI/magma -multimodalart/rudalle -CVPR/MonoScene -Amrrs/openai-whisper-live-transcribe -fffiloni/imagic-stable-diffusion -merve/chatgpt-prompt-generator-v12 -JustinLin610/ImageBind_zeroshot_demo -kevinwang676/Voice-Changer -fffiloni/Image-Caption-2-Shap-E -TheStinger/Ilaria_RVC -nielsr/CLIPSeg -vumichien/Img_to_prompt -RamAnanth1/photoguard -giswqs/Streamlit -cbg342/GPT4-Unlimited-Plugins -nota-ai/compressed-wav2lip -RamAnanth1/InstructBLIP -radames/UserControllableLT-Latent-Transformer -monster-labs/Controlnet-QRCode-Monster-V1 -OFA-Sys/OFA-Visual_Grounding -keras-io/ocr-for-captcha -danielsapit/JPEG_Artifacts_Removal -ysharma/text-to-ner-to-image-to-video -society-ethics/DiffusionBiasExplorer -Pinwheel/SuperGlue-Image-Matching -megaaziib/hololive-rvc-models-v2 -WinterGYC/BaiChuan-13B-Chat -haoheliu/AudioLDM_48K_Text-to-HiFiAudio_Generation -ICCV2023/ICCV2023-papers -XzJosh/Azuma-Bert-VITS2 -ilumine-AI/Retro-to-3D -neuralmagic/sparse-mpt-7b-gsm8k -NATSpeech/DiffSpeech -microsoft/wavlm-speaker-verification -nickmuchi/article-text-summarizer -robinhad/ukrainian-tts -awacke1/Image-to-Multilingual-OCR -CVPR/Text2Human -anzorq/sd-to-diffusers -ysharma/Playground_AI_Exploration -hOTZR/new-Bing-with_your_cookies -wangrongsheng/ChatImprovement -fl399/deplot_plus_llm -Baptlem/UCDR-Net -Intel/Q8-Chat -qiantong-xu/toolbench-leaderboard -Xenova/text-to-speech-client -tonyassi/video-face-swap -Iker/Translate-100-languages -codeparrot/codeparrot-generation -CompVis/celeba-latent-diffusion -myscale/visual-dataset-explorer -bigscience-data/roots-search -whisper-event/whisper-demo -Intel/Stable-Diffusion-Side-by-Side -pszemraj/FLAN-grammar-correction -kadirnar/BioGpt -baulab/Erasing-Concepts-In-Diffusion -fffiloni/Video-Matting-Anything -zwq2018/Data-Copilot -mithril-security/TCO_calculator -hysts/daily-papers -fffiloni/train-dreambooth-lora-sdxl -Manmay/tortoise-tts -huggan/wikiart-diffusion-mini -k2-fsa/automatic-speech-recognition -kornia/Image-Stitching -JammyMachina/the-jam-machine-app -dreambooth-hackathon/leaderboard -dory111111/babyagi-streamlit -bkhmsi/Font-To-Sketch -SpacesExamples/nerfstudio -ought/raft-leaderboard -14-26AA/sovits_aishell3 -onnx/export 
-zama-fhe/encrypted_image_filtering -kazuk/image-to-video-film -TEXTurePaper/TEXTure -deprem-ml/deprem-ocr -chansung/LLaMA-7B -fffiloni/video2mmpose -shi-labs/Matting-Anything -GrandaddyShmax/AudioCraft_Plus -flax-community/code-clippy-problem-solver -sujitpal/clip-rsicd-demo -rendchevi/nix-tts -huggan/huggingnft -Gradio-Blocks/StyleGAN-NADA -CVPR/regionclip-demo -EuroPython2022/Step-By-Step-With-Bloom -JavaFXpert/GPT-3.5-Express-inator -Ryukijano/CatCon-One-Shot-Controlnet-SD-1-5-b2 -hirol/controlnetOverMask -kevinwang676/ChatGLM2-SadTalker-VC -fffiloni/DA-CLIP -Flux9665/SpeechCloning -radames/Depth-Image-to-Autostereogram -Gradio-Blocks/GPTJ6B_Poetry_LatentDiff_Illustration -impira/invoices -fffiloni/speech-to-image -OFA-Sys/OFA-OCR -huggingface/transformers-chat -ysharma/LangchainBot-space-creator -jyseo/3DFuse -jonjhiggins/MiDaS -runa91/bite_gradio -magicr/BuboGPT -LinkSoul/Chinese-LLaVa -competitions/wyze-rule-recommendation -openchat/openchat_3.5 -AILab-CVC/SEED-Bench_Leaderboard -Sharathhebbar24/One-stop-for-Open-source-models -distil-whisper/whisper-vs-distil-whisper -OFA-Sys/OFA-vqa -keras-io/Monocular-Depth-Estimation -hshr/DeepFilterNet2 -bigscience/license -rajistics/Financial_Analyst_AI -akhaliq/openjourney -fcakyon/video-classification -MirageML/point-e -keras-dreambooth/minecraft-landscape-demo -nateraw/voice-cloning -llamaindex/llama_agi_auto -maxmax20160403/sovits5.0 -litagin/rvc_okiba_TTS -gsaivinay/open_llm_leaderboard -showlab/Show-1 -Datatrooper/zero-shot-image-classification -mrm8488/FlappyBirds -Gradio-Blocks/HairCLIP -hysts/ViTPose_video -anakin87/fact-checking-rocks -ruslanmv/Clone-Your-Voice -SalML/TableTransformer2CSV -speechbox/whisper-speaker-diarization -joaogante/transformers_streaming -kevinwang676/Voice-Cloning-for-Bilibili -jbilcke-hf/Panoremix -artificialguybr/artificialguybr-demo-lora -Truepic/watermarked-content-credentials -dylanebert/igf -deepseek-ai/deepseek-coder-7b-instruct -PaddlePaddle/deoldify -facebook/XLS-R-2B-22-16 -ml6team/distilbart-tos-summarizer-tosdr -spacy/pipeline-visualizer -bigscience/BigScienceCorpus -Gradio-Blocks/latent_gpt2_story -Geonmo/nllb-translation-demo -nielsr/donut-cord -joaogante/contrastive_search_generation -MaxReimann/Whitebox-Style-Transfer-Editing -Matthijs/speecht5-asr-demo -cvlab/zero123 -yotamsapi/face-swap -mikonvergence/mask-and-sketch -auto-academic/auto-draft -bigcode/search -OpenGVLab/InternGPT -ennov8ion/3dart-Models -Dragonnext/scylla-proxy -radames/Candle-Phi-1.5-Wasm -merve/owlv2 -tonyassi/text-to-image -artificialguybr/VIDEO-TRANSLATION-TRANSCRIPTION -OFA-Sys/OFA-Generic_Interface -Daniton/MidJourney -lxe/simple-llm-finetuner -kunishou/Rapid-GPT -philschmid/igel-playground -rewoo/ReWOO-Demo -cownclown/Image-and-3D-Model-Creator -mikefish/CharacterMaker -Detomo/Lighten_dark_image -OFA-Sys/OFA-Text2Image_Generation -davertor/colorizing_images -stephenleo/stripnet -huggan/FastGan -doevent/3D_Photo_Inpainting -mattiagatti/image2mesh -johko/capdec-image-captioning -JavaFXpert/gpt-math-techniques -facebook/CutLER -carloscar/stable-diffusion-webui-controlnet-docker -LabelStudio/LabelStudio -autotrain-projects/dreambooth -competitions/CryCeleb2023 -stevengrove/GPT4Tools -wf-genius/Control-A-Video -vorstcavry/stable-diffusion-webui -akhaliq/ESPnet2-TTS -algomuffin/neural-search-engine -clip-italian/clip-italian-demo -osanseviero/tips-and-tricks -pleonova/multi-label-summary-text -facebook/StyleNeRF -hackathon-pln-es/Spanish-Nahuatl-Translation -EuroPython2022/Translate-with-Bloom 
-PaddlePaddle/chinese-stable-diffusion -nickmuchi/Earnings-Call-Analysis-Whisperer -AlexWortega/Kandinsky2.0 -Manjushri/SDXL-1.0-CPU -Xhaheen/Baith-al-suroor -taesiri/DeticChatGPT -nateraw/fuego -lunarring/latentblending -hadisalman/photoguard -sahil2801/CodeAlpaca -zomehwh/sovits-teio -Linly-AI/Linly-ChatFlow -Artrajz/vits-simple-api -SkalskiP/SAM_and_ProPainter -flax-community/DietNerf-Demo -shibing624/pycorrector -swzamir/Restormer -hysts/StyleGAN-Human -kn1ghtf1re/Photo-Realistic-Image-Stylization -ChenWu98/Stable-CycleDiffusion -ybelkada/image-to-music -phenomenon1981/MagicPrompt-Stable-Diffusion -ameerazam08/zoe-depth -NagaSaiAbhinay/UnCLIP_Image_Interpolation_Demo -DrSong/ChatGLM-6B-ChatBot -phenomenon1981/Dreamlikeart-Anime-1.0 -PAIR/PAIR-Diffusion -artificialguybr/freedom -julien-c/nllb-translation-in-browser -Xenova/doodle-dash -mrmocciai/rvc-genshin-v2 -descript/vampnet -Jacopo/ToonClip -NATSpeech/PortaSpeech -akhaliq/Mask2Former -bipin/image2story -huggingface/text-data-filtering -nielsr/perceiver-optical-flow -pytorch/YOLOv5 -ECCV2022/PARSeq-OCR -Gustavosta/MagicPrompt-Dalle -rajesh1729/youtube-video-transcription-with-whisper -maiti/stable-fashion -hkunlp/Binder -OAOA/DifFace -nielsr/swin2sr-image-super-resolution -jerpint/buster -joaogante/color-coded-text-generation -RamAnanth1/FairDiffusion -lamini/instruct-playground -ghoskno/ColorCanny-Controlnet -sam-hq-team/sam-hq -LibreChat/LibreChat -lmz/candle-yolo -r3gm/Ultimate-Vocal-Remover-WebUI -LeoLM/leo-hessianai-13b-chat -r3gm/Aesthetic_RVC_Inference_HF -asgaardlab/CLIPxGamePhysics -vishnun/CLIPnCROP -Gradio-Blocks/protGPT2_gradioFold -CVPR/LIVE -NimaBoscarino/playlist-generator -IoannisTr/Tech_Stocks_Trading_Assistant -Amrrs/yt-shorts-video-captioning -anzorq/openai_whisper_stt -adirik/image-guided-owlvit -BilalSardar/Text-To-image-AllModels -kazuk/youtube-whisper-10 -hwchase17/chat-your-data-state-of-the-union -gaviego/removebg -takuma104/multi-controlnet -fffiloni/lama-video-watermark-remover -tsungtao/controlnet-mlsd-for-livingroom -IlyaGusev/saiga_13b_llamacpp_retrieval_qa -IDEA-CCNL/Ziya-BLIP2-14B-Visual-v1-Demo -ICML2023/ICML2023_papers -ibm-nasa-geospatial/Prithvi-100M-sen1floods11-demo -skytnt/midi-composer -nt3awnou/Nt3awnou-rescue-map -merve/BLIP2-with-transformers -Flux9665/IMS-Toucan -akhaliq/FaceMesh -akhaliq/MT3 -nateraw/stylegan3 -hysts/StyleGAN-Human-Interpolation -awacke1/Art-Generator-and-Style-Mixer -nanomenta/sketch_frame_interpolation -keithhon/nllb-translation-demo-1.3b-distilled -Geonmo/socratic-models-image-captioning-with-BLOOM -daspartho/is-it-huggable -ryparmar/fashion-aggregator -RamAnanth1/chatGPT_voice -NoCrypt/pixelization -anonymous-pits/pits -mmlab-ntu/relate-anything-model -balacoon/revoice -justest/gpt4free -fffiloni/AnimateDiff-Image-Init -ibm-nasa-geospatial/Prithvi-100M-multi-temporal-crop-classification-demo -fffiloni/Music-To-Lyrics -HuggingFaceM4/ai_dad_jokes -fffiloni/sdxl-control-loras -Detomo/Japanese_OCR -abhibisht89/neural-search-engine -derina/MusicSpleeter -mfrashad/ClothingGAN -yhshin/latex-ocr -ml6team/keyphrase-extraction -neongeckocom/neon-tts-plugin-coqui -Gradio-Blocks/ViTPose -YiYiXu/it-happened-one-frame-2 -CVPR/unicl-zero-shot-img-recog -YoannLemesle/CLIPictionary -ECCV2022/Screen_Image_Demoireing -miesnerjacob/Multi-task-NLP -mozilla-foundation/youtube_video_similarity -davidtsong/whisper-demo -haofeixu/unimatch -WiNE-iNEFF/MinecraftSkin-Diffusion -AIML-TUDA/semantic-diffusion -fabiogra/moseca -RamAnanth1/ZoeDepth -chenyangqi/FateZero -Xanthius/llama-token-counter 
-fffiloni/animated-audio-visualizer -OpenGenAI/parti-prompts-leaderboard -lilacai/lilac -soggys/pompoms -librarian-bots/ranker -merve/pix2struct -Epoching/GLIDE_Inpaint -Norod78/VintageStyle -merve/chatbot-blog -nielsr/vilt-vqa -fabiochiu/text-to-kb -Gradio-Blocks/clip-guided-faces -spacy/gradio_pipeline_visualizer -ECCV2022/PSG -flowers-team/Interactive_DeepRL_Demo -jackyliang42/code-as-policies -fffiloni/text-2-music -kadirnar/yolox -Jayyydyyy/english-tokipona-translator -argilla/live-demo -camenduru-com/vscode -deepghs/wd14_tagging_online -susunghong/Self-Attention-Guidance -alvanlii/FROMAGe -giswqs/maxar-open-data -CobaltZvc/HyperBot -keras-dreambooth/ignatius -ShiwenNi/ChatResponse -zomehwh/vits-models-pcr -fffiloni/video_frame_interpolation -Xenova/react-translator -openaccess-ai-collective/wizard-mega-ggml -GuyYariv/AudioToken -tangshitao/MVDiffusion -FrankZxShen/so-vits-svc-models-ba -fb700/chatglm-fitness-RLHF -ysharma/WizardCoder34b -openskyml/dreamdrop-sd -DarwinAnim8or/Mistral-Chat -FriendlyJew/GoyimProxy -Detomo/ai-comic-generation -tonyassi/text-to-image-story-teller -fcakyon/sahi-yolov5 -keras-io/NeRF -ntt123/WaveGRU-Text-To-Speech -aryadytm/photo-colorization -aryadytm/remove-photo-background -visakh7843/Sheet_Music_Generator -innnky/soft-vits-vc -jamescalam/ask-youtube -Flux9665/ThisSpeakerDoesNotExist -juancopi81/youtube-music-transcribe -AdamOswald1/finetuned_diffusion -society-ethics/disaggregators -BAAI/dreambooth-altdiffusion -AP123/ai-avatars -huggingface-projects/AIvsAI-SoccerTwos -awacke1/Prompt-Refinery-Text-to-Image-Generation -huggingface-projects/Deep-RL-Course-Certification -yahma/rwkv-14b -hysts/PnP-diffusion-features -marlenezw/audio-driven-animations -Junity/TokaiTeio-SVC -cbg342/GPT-4-To-Midi -shengyi-qian/3DOI -bigcode/Reasoning-with-StarCoder -OpenBuddy/ChatWithBuddy -laogou717/bing -guanghap/nob-hill-noir -zenafey/fast-stable-diffusion -AP123/CerealBoxMaker -nateraw/stylegan3-interpolation -vivien/clip-slip -chrisjay/afro-speech -LilyF/Generate_Text_and_Audio -lukbl/LaTeX-OCR -huggingface-projects/dataset-profiler -LDY/ImageToLine -ysharma/Bloom-Creates-Meme -FinanceInc/Financial_Analyst_AI -taneemishere/html-code-generation-from-images-with-deep-neural-networks -CjangCjengh/Shanghainese-TTS -MirageML/lowpoly-world -dylanebert/FarmingGame -Mahiruoshi/Lovelive_Nijigasaki_VITS -AP123/text-to-3D -akhaliq/anything-v4.0 -pix2pix-zero-library/pix2pix-zero-demo -jbrinkma/segment-anything -longlian/llm-grounded-diffusion -zama-fhe/encrypted_health_prediction -Wazzzabeee/image-video-colorization -Voicemod/Text-To-Speech -r3gm/SoniTranslate_translate_audio_of_a_video_content -lmz/candle-whisper -elyza/ELYZA-japanese-Llama-2-7b-instruct-demo -Alifarsi/news_summarizer -devendergarg14/Paraphrasing_with_GPT_Neo -haakohu/DeepPrivacy -nazianafis/Extract-Tables-From-PDF -huggan/butterfly-gan -evaluate-metric/rouge -evaluate-metric/bleu -Theivaprakasham/layoutlmv3_invoice -ml6team/logo-generator -ruslanmv/TextToVideo-Dalle -kornia/edge_detector -EleutherAI/polyglot-ko-1.3b -RamAnanth1/whisper_to_emotion -innnky/nyaru-svc2.0 -CarlDennis/Lovelive-VITS-JPZH -sayakpaul/maxim-spaces -saltacc/anime-ai-detect -kinyugo/msanii -PirateXX/AI-Content-Detector -andzhk/PNGInfo -ysharma/pix2pix-zero-01 -competitions/SnakeCLEF2023 -kunishou/Japanese-Alpaca-LoRA-7b-DEMO -huggingface/devs -jax-diffusers-event/leaderboard -llamaindex/llama_index_sql_sandbox -presidio/presidio_demo -hackathon-somos-nlp-2023/SalpiBloomZ-1b7-v1 -ysharma/RedPajama-Chat-3B 
-openaccess-ai-collective/manticore-ggml -ysharma/ChatGPT-Plugins-UI-with-Langchain -ioclab/ai-qrcode-api -IDEA-CCNL/ziya2-13B-base -cncanon/gpt4 -Illia56/Youtube-Whisper-Llama -XzJosh/Taffy-Bert-VITS2 -Dragonnext/charybdis -mithril-security/starcoder_memorization_checker -Willow123/InternLM-XComposer -Modfiededition/Writing_Assistant -ThePixOne/open_domain_qa -akhaliq/GPEN -aubmindlab/Arabic-NLP -luca-martial/neural-style-transfer -obi/Medical-Note-Deidentification -osanseviero/fork_a_repo -saber2022/Real-CUGAN -hackathon-pln-es/Audio-Sentiment-Classifier -hysts/mediapipe-pose-estimation -rinong/StyleGAN-NADA -EuroPython2022/Write-Stories-Using-Bloom -SIGGRAPH2022/sketch2pose -Kororinpa/Amadeus_Project -menghanxia/disco -MirageML/sjc -OFA-Sys/chinese-clip-zero-shot-image-classification -sanchit-gandhi/chatGPT -innnky/nyaru4.0 -Qosmo/GPT-Infinite-Radio -p1atdev/AdverseCleaner -competitions/ChaBuD-ECML-PKDD2023 -Ziqi/ReVersion -gradio/theme_builder -Kevin676/VoiceFixer -RamAnanth1/stable-diffusion-xl -TencentARC/MasaCtrl -yuvalkirstain/PickScore -SpacesExamples/InvokeAI -openskyml/remove-background-on-image -opencompass/opencompass-llm-leaderboard -OpenMotionLab/MotionGPT -artificialguybr/pixel-art-generator -Mahiruoshi/BangDream-Bert-VITS2 -AlekseyKorshuk/huggingartists -Amrrs/image-to-text-app -dt/ascii-art -flax-sentence-embeddings/sentence-embeddings -shibing624/text2vec -hysts/Anime2Sketch -keras-io/bert-semantic-similarity -EuroPython2022/rev -nielsr/donut-rvlcdip -power2/sketch -tomrb/bettercallbloom -cafeai/cafe_aesthetic_demo -kadirnar/yolov7 -pragnakalp/one_shot_talking_face_from_text -AIFILMS/Pix2Pix-Video -stable-diffusion-ai/upscaling -jhtonyKoo/music_mixing_style_transfer -video-p2p-library/Video-P2P-Demo -Mishyface/image-to-video-film-3-kazuk-hugorowan-mishyface -abidlabs/music-separation -aicg/Moxxie-Proxy -MariaK/Check-my-progress-Audio-Course -dahaoGPT/ChatGLM2-6B-chatbot -sanchit-gandhi/musicgen-negative-prompting -IlyaGusev/saiga2_13b_gguf -Xenova/semantic-image-search -fffiloni/diffBIR -openskyml/super-fast-sdxl-stable-diffusion-xl -AlexWortega/food_calories -Cropinky/gpt2-rap-songs -kornia/Kornia-LoFTR -keras-io/Human-Part-Segmentation -nielsr/imagegpt-completion -pytorch/Tacotron2 -speech-recognition-community-v2/Leaderboard -awacke1/Sentence2Paragraph -EPFL-VILAB/MultiMAE -jph00/testing -kurianbenoy/audioclassification -Gradio-Blocks/EmojiGAN -evaluate-metric/bertscore -Gradio-Blocks/Create_GIFs_from_Video -HuSusu/SuperResolution -nightfury/StableDiffusion-Img2Img -JayRaghav/Image_segmentation -mohamedabdullah/Arabic-Spelling-Checker -johnrobinsn/MidasDepthEstimation -nakas/audio-diffusion_style_transfer -Loren/Streamlit_OCR_comparator -kazuk/youtube-whisper-04 -abidlabs/gradio-discord-bot-server -ramkamal2000/voice-cloning-yourtts -open-spaced-repetition/fsrs4anki_app -hongfz16/EVA3D -kermitt2/grobid -sparanoid/milky-green-sovits-4 -souljoy/ChatPDF -ysharma/OSChatbots_ChatGPT_ToeToToe -fffiloni/video2openpose2 -zetavg/LLaMA-LoRA-Tuner-UI-Demo -Acapellas/Extract_Vocals_Instrumentals -jcenaa/Segment-Any-RGBD -matthoffner/starchat-ui -lj1995/vocal2guitar -AlphaDragon/Voice-Clone -Robert001/UniControl-Demo -johnhelf/roop -HopeMan/DoomGuy -lykeven/CogVLM -fffiloni/sd-xl-lora-fusion -Detomo/Depth_estimation -Gladiator/Text-Summarizer -Norod78/ComicsHeroHD -Xenova/sponsorblock-ml -abidlabs/chatbot-stylized -akhaliq/Video_Search_CLIP -akhaliq/gpt-j-6B -hysts/stylegan3-anime-face-exp002 -julien-c/coqui -merve/write-with-transformer -mishig/smarter_npc 
-psistolar/pop-music-transformer -kornia/kornia-image-enhancement -SIGGRAPH2022/StyleGAN-XL -seduerr/semantic_search -AlekseyKorshuk/thin-plate-spline-motion-model -mattiagatti/mars_dtm_estimation -NAACL2022/CLIP-Caption-Reward -JMalott/ai_architecture -milyiyo/reimagine-it -itsyoboieltr/anpr -ml6team/Speaker-Diarization -innnky/vits-nyaru -Rongjiehuang/ProDiff -Epoching/DocumentQA -wukevin/foldingdiff -innnky/nyaru-svc2.0-advanced -esb/leaderboard -Catmeow/AI_story_writing -uwx/waveformer -afmck/stable-diffusion-inpainting-segmentation -tombetthauser/astronaut-horse-concept-loader -ringhyacinth/Nail-Diffuser -hugging-fellows/paper-to-pokemon -pragnakalp/OCR-image-to-text -salmanmapkar/audio-video-transcriber -johnslegers/epic-diffusion -team7/talk_with_wind -Hello-SimpleAI/chatgpt-detector-qa -akhaliq/small-stable-diffusion-v0 -kazuk/youtube-whisper-03 -hojining/Ultra_Fast_Anything_V4k_resolution -kamayali/anything-v4.0 -Kaludi/ChatGPT-BingChat-GPT3-Prompt-Generator_App -SpacesExamples/docker-examples -cyllum/soccertwos-analytics -zhangjf/chatbot -SjoerdTeunisse/upscaler -hackathon-somos-nlp-2023/PodcastNER-GPTJ -BAAI/vid2vid-zero -megaaziib/hololive-rvc-models -Nixtla/transfer-learning-time-series -yuchenlin/Rebiber -cloversid/rvc-ai -Realcat/image-matching-webui -myscale/ChatData -zideliu/styledrop -docparser/Text_Captcha_breaker -monra/freegpt-webui-chimera -CatNika/New_Cat_Proxy -damo-vilab/MS-Vid2Vid-XL-demo -YuxinJ/Scenimefy -Proxy1/Turbo -BridgeEight/internlm-20B-chat-w4-turbomind -bpHigh/AI-Research-Buddy -Mysterykey/Orange -HugoDzz/super-godot-galaxy -Deci/YOLO-NAS-Pose-Demo -MrBodean/Depthmap -Norod78/ComicsHero -fcakyon/sahi-yolox -nateraw/quickdraw -pierreant-p/huggingfab -tmabraham/fastai_pet_classifier -lkeab/transfiner -njanakiev/gradio-openai-clip-grad-cam -ysharma/text-to-image-to-video -ai-forever/mGPT -unity/ML-Agents-Pyramids -hysts/diffusers-anime-faces -gradio/xgboost-income-prediction-with-explainability -fffiloni/Stable-Diffusion-CPU -innnky/soft-vits-singingvc -beki/pii-anonymizer -fffiloni/stable-diffusion-color-sketch -NikeZoldyck/green-screen-composition-transfer -akhooli/poetry -OneAfterlife/MubertTTM -vivym/image-matting-app -akhaliq/dreamlike-diffusion-1.0 -deepghs/ml-danbooru-demo -society-ethics/model-card-regulatory-check -diffusers/convert -naotokui/TR-ChatGPT -assemblyai/Conformer1-Demo -keras-dreambooth/keras-dreambooth-riffusion-currulao -taishi-i/awesome-ChatGPT-repositories-search -sander-wood/text-to-music -alex-mindspace/gpt-agents -bigcode/bigcode-model-license-agreement -fffiloni/BedtimeStory -mrmocciai/rvc-models -HuggingFaceH4/falcon-chat-demo-for-blog -wyysf/GenMM -Yntec/fast_diffusion -fiz123321/nah -georgesung/llama2_7b_uncensored_chat -Iceclear/StableSR -soggys/tavern -R3DI/Uber_Realistic_Porn_Merge_V1.3 -turing-motors/heron_chat_blip -limcheekin/Mistral-7B-Instruct-v0.1-GGUF -ilumine-AI/AI-Creepypastas -Otter-AI/OtterHD-Demo -MAGAer13/mPLUG-Owl2 -Pavankunchala/Depth-Estimation-App -akhaliq/Style_Transfer -ECCV2022/bytetrack -flax-community/SentenceSimplifier -radames/sentence-embeddings-visualization -givkashi/SwinIR-Super-resolution -jjeamin/ArcaneStyleTransfer -templates/fastapi-uvicorn -probing-vits/attention-heat-maps -mecevit/english-to-sql -Tuana/PDF-Summarizer -Gradio-Blocks/anime-colorization -ICML2022/OFA -bigscience/petals-api -rkoushikroy2/portrait_photo_generator -sklearn-docs/anomaly-detection -tryolabs/norfair-demo -gradio/neon-tts-plugin-coqui -nielsr/TrOCR-Scene-Text-Recognition -open-source-metrics/models-explorer 
-awacke1/CB-GR-Chatbot-Blenderbot -itmorn/face_keypoint_3d -nateraw/stable-diffusion-music-videos -tomas-gajarsky/facetorch-app -yangheng/PyABSA -lojban/text-to-speech -SerdarHelli/SDF-StyleGan-3D -cynika/taffy -SteveDigital/free-mp3-to-text-using-openai-whisper -nlphuji/whoops-explorer -appl044/Chat-GPT-LangChain -SpacesExamples/vscode -RamAnanth1/REaLTabFormer -AIARTCHAN/openpose_editor -Thafx/sdrv20 -lifan0127/zotero-qa -RamAnanth1/conformer-asr -hugforziio/chat-gpt-ui -kazuk/youtube-whisper-19 -hahahafofo/image2text_prompt_generator -competitions/news-unmasked -navervision/Graphit-SD -SoulAbi/text-to-voice -fffiloni/LangChain-ChatGPT-plugins -lauraibnz/midi-audioldm -Masutxrxd/Masutxrxd -GrandaddyShmax/MusicGen_Plus -h2oai/wave-chatbot-ui -melihunsal/demogpt -avans06/whisper-webui-translate -GrandaddyShmax/MusicGen_Plus_hfv2 -kevinwang676/ChatGLM2-VC-SadTalker -ibm-nasa-geospatial/Prithvi-100M-demo -dongsiqie/bing -librarian-bots/huggingface-datasets-semantic-search -imseldrith/DeepFakeAI -radames/Gradio-llama2.mojo -Politrees/RVC_V2_Huggingface_Version -Jean-Baptiste/email_parser -NeuralStyleTransfer/neural-style-transfer -Ron0420/EfficientNetV2_Deepfakes_Image_Detector -akhaliq/convnext -akhaliq/coqui-ai-tts -breezedeus/CnOCR-Demo -julien-c/persistent-data -ntt123/vietTTS -samuelinferences/transformers-can-do-bayesian-inference -sunwaee/MT5-Questions-Answers-Generation-Extraction -RTLAI/BLIPsinki -awacke1/Image-Semantic-Search -osanseviero/tortoisse-tts -evaluate-metric/wer -Gradio-Blocks/document-qa -Gradio-Blocks/Multilingual-Aspect-Based-Sentiment-Analysis -doevent/FullSubNet-plus -microsoft/unicl-img-recog-demo -sklearn-docs/clustering -EuroPython2022/BayesCap -Team-PIXEL/PIXEL -mfumanelli/Stable-Diffusion-Loves-Cinema -tumuyan/vits-miki -innnky/nanami -sayakpaul/gopro-deblurring-maxim -camenduru-com/seamless -SpacesExamples/fastapi_t5 -JosephusCheung/ACertainsStrategyTalk -ybelkada/blip-image-captioning-space-large -unixpickle/car-data -SceneDiffuser/SceneDiffuserDemo -playgrdstar/compare-llms -vinid/webplip -hfl/VQA_VLE_LLM -22h/vintedois-diffusion-v0-2 -NeuralInternet/Audio-to-Text_Playground -josStorer/ChatGLM-6B-Int4-API-OpenAI-Compatible -Kevin676/ChatGPT-with-Voice-Cloning-for-All -kenton-li/chatdoctor_csv -UCAS/ChatGPT4 -FrankZxShen/vits-fast-finetuning-pcr -openMUSE/MUSE -uonlp/open_multilingual_llm_leaderboard -kevinwang676/Bark-Voice-Cloning -Yntec/PrintingPress -KarmKarma/rvc-models-genshinimpact -cncanon/freeturbo -lvwerra/harms-law -fiz123321/dumbcutie -RVVY/test01 -Oppenheimer57/claude-proxy -thirdai/BOLT2.5B -Eddycrack864/Applio-Inference -radames/OHIF-Medical-Imaging-Viewer -merve/compare_docvqa_models -openskyml/mistral-7b-chat -52Hz/SRMNet_real_world_denoising -Hellisotherpeople/HF-BERTopic -akhaliq/T0pp -farukozderim/Model-Comparator-Space-Builder -jonatasgrosman/asr -sohaibcs1/Image-to-Text-Summary -davidpiscasio/unpaired-img2img -jipenaflor/Youtube-Transcript-Summarizer -hackathon-pln-es/clasificador-comentarios-suicidas -d0r1h/youtube_summarization -bertin-project/bertin-gpt-j-6B -multimodalart/vqgan -gradio/pictionary -Tuana/GoT-QA-Haystack -evaluate-metric/seqeval -Gradio-Blocks/StyleGAN-Human -codeparrot/codegen-subspace -osanseviero/latent-video -aliabid94/GPT-Golf -CVPR/BrAD -Matthijs/mobilevit-deeplab-demo -EuroPython2022/Zero-Shot-SQL-by-Bloom -ICML2022/YourTTS -vivien/clip-owlvit -huggingface/transformers-stats -dbirks/diffuse-the-rest -fffiloni/sd-img-variations -smajumdar/nemo_multilingual_language_id -Catmeow/Face2Painting_From_Photo 
-kdrkdrkdr/ShirokoTTS -Sybghat/resume-parser -morenolq/galactica-base -Norod78/sd2-simpsons-blip -tomsoderlund/rest-api-with-gradio -camenduru-com/riffusion -abhishek/diffuzers -akhaliq/dreamlike-photoreal-2.0 -dotmet/Real-ESRGAN-Enhanced-Anime-Diffusion -Fr33d0m21/Music_Splitter -kadirnar/torchyolo -alvanlii/RDM-Region-Aware-Diffusion-Model -Nickhilearla135095/maximum_diffusion -SpacesExamples/jupyterlab -radames/nginx-gradio-reverse-proxy -IDEA-CCNL/Taiyi-BLIP -AlignmentResearch/tuned-lens -nyanko7/openai-translator -competitions/FungiCLEF2023 -idosal/oai-proxy -coffeeee/nsfw-c0ffees-erotic-story-generator -openaccess-ai-collective/manticore-13b-chat-pyg -shaocongma/faiss_chat -matthoffner/wizardcoder-ggml -Yntec/Dreamlike-Webui-CPU -jykoh/gill -ezioruan/roop -Truepic/ai-content-credentials -llSourcell/doctorGPT -imseldrith/FaceSwap -manavisrani07/gradio-lipsync-wav2lip -SpacesExamples/llama-cpp-python-cuda-gradio -ashhhh23/lordofthemysteries -librarian-bots/base_model_explorer -cakewalk/splat -Xenova/semantic-image-search-client -Illia56/fastest-whisper-v2-large -librarian-bots/recommend_similar_papers -worldsoupkitchen/lollipop -editing-images/ai-halloween-photobooth -thinkall/autogen-demos -Illia56/Chat-with-Youtube-video-Mistal-7b -openskyml/fast-sdxl-stable-diffusion-xl -artificialguybr/OPENHERMES-V2.5-DEMO -akhaliq/stylegan3_clip -frapochetti/blurry-faces -hysts/stylegan3-anime-face-exp001 -keras-io/low-light-image-enhancement -codeparrot/codeparrot-highlighting -spacy/healthsea-pipeline -spark-nlp/SparkNLP_NER -training-transformers-together/Dashboard -valhalla/minDALLE -vivien/trompeloeil -yangheng/Multilingual-Aspect-Based-Sentiment-Analysis -tomofi/Tesseract-OCR -ml6team/post-processing-summarization -NimaBoscarino/climategan -chuxiaojie/NAFNet -Gradio-Blocks/Leaderboard -evaluate-metric/perplexity -huggingface/library-metrics -Gradio-Blocks/zero-and-few-shot-reasoning -awacke1/TTS-STT-Blocks -Pentameric/DalleClone -belinghy/character-animation-motion-vaes -baudm/PARSeq-OCR -dhansmair/flamingo-mini-cap -yuntian-deng/latex2im -Hexii/Neural-Style-Transfer -nightfury/Colorizer_Models -Geonmo/laion-aesthetic-predictor -SWHL/RapidOCRDemo -xu1998hz/sescore -pyesonekyaw/faceforgerydetection -akhaliq/Evel_Space -beyond/genius -carlgira/dreambooth-image-editor -hf-accelerate/accelerate_examples -lambdalabs/text-to-avatar -bigcode/santacoder-search -daspartho/MagicMix -Hello-SimpleAI/chatgpt-detector-ling -SmilingWolf/danbooru2022_image_similarity -SUPERSHANKY/Finetuned_Diffusion_Max -society-ethics/StableBias -Noobian/PDF-QA -hysts/DDNM-HQ -shigel/aiemo -Xhaheen/Hyper_Bot_openai -avid-ml/bias-detection -akdeniz27/pix2struct-DocVQA -ParityError/Anime -Manjushri/SD-2X-And-4X-CPU -HuggingFaceH4/Falcon-vs-LLaMA -IoMa/stable-diffusion-webui-cpu-the-best -nasttam/Image-and-3D-Model-Creator -Riksarkivet/htr_demo -mshukor/UnIVAL -ibm-nasa-geospatial/Prithvi-100M-Burn-scars-demo -memef4rmer/llama2-7b-chat-uncensored-ggml -hf4h/bio-chem-foundation-models -nuttella/Otakumusic -elyza/ELYZA-japanese-Llama-2-7b-fast-instruct-demo -BraydenMoore/a-random-unsecured-camera -giswqs/solara-maxar -ProteinDesignLab/protpardelle -Illia56/Llama-2-voice -ngoctuanai/gpt4 -tonyassi/image-story-teller -coqui/ml-trivia -ysharma/Zephyr-Playground -LLMRiddles/LLMRiddles -Pclanglais/MonadGPT -OOlajide/common-nlp-tasks -Vijish/Crop-CLIP -akhaliq/bizarre-pose-estimator -aliabd/Anime2Sketch -autonomousvision/projected_gan -edemgold/conversation-bot -hshr/DeepFilterNet -kingabzpro/savtadepth -merve/GPT-2-story-gen 
-SerdarHelli/Segmentation-of-Teeth-in-Panoramic-X-ray-Image-Using-U-Net -conciomith/RetinaFace_FaceDetector_Extractor -brogelio/air_draw -hackathon-pln-es/es_nlp_gender_neutralizer -awacke1/Video-Summary -dataroots/SofaStyler -StanfordAIMI/radiology_report_generation -issam9/sumy_space -evaluate-metric/accuracy -nazneen/datasets-explorer -Gradio-Blocks/video_nca -huggingface/HuggingDiscussions -Pippoz/Hugging_Space -Gradio-Blocks/Object-Detection-With-DETR-and-YOLOS -Gradio-Blocks/pokemon-move-generator-app -Gradio-Blocks/pubmed-abstract-retriever -bigscience/ethical-charter -scikit-learn/baseline-trainer -runa91/barc_gradio -EuroPython2022/Scratchpad-w-BLOOM -DeepLabCut/MegaDetector_DeepLabCut -nickmuchi/DeepFace -theodotus/ukrainian-voices -Amrrs/podscript -Rothfeld/textual-inversion-init-token -ajayhk/colorize -igashov/DiffLinker -Xhaheen/meme_world -skytnt/anime-aesthetic-predict -lewtun/galactica-demo -Manjushri/SDXL-1.0-Inpainting-CPU -skytnt/waifu-gan -tryolabs/blogpost-cqa -hareshhecker/midjourney-v5 -SerdarHelli/StyleSDF-3D -speechbox/whisper-restore-punctuation -argilla/argilla-template-space -shivi/mask2former-demo -kazuk/youtube-whisper-00 -argilla/argilla-streamlit-customs -to-be/invoice_document_headers_extraction_with_donut -zeno-ml/diffusiondb -Thafx/Demucs_v4_2s_HT -Dao3/DreamlikeArt-PhotoReal-2.0 -freddyaboulton/dracula_revamped -Alpaca233/ChatPDF-GUI -keras-dreambooth/dreambooth_diffusion_hokusai -liujch1998/vera -gstaff/xkcd -JohnSmith9982/small_and_pretty -merve/starter_pack_generator -xswu/align_sd -bethecloud/storj_theme -Gradio-Themes/text2video2storj -gyrojeff/YuzuMarker.FontDetection -autotrain-projects/autotrain-advanced -sanchit-gandhi/bark -nickmuchi/DocGPT -SimFG/LangChain-Zilliz-Cloud -Xenos14/XenoEngine-SD-webui -huggingface-projects/huggingbots -ashrma/Chat-with-Docs -chansung/test-multi-conv -AIGText/GlyphControl -ayymen/Amazigh-tts -Faridmaruf/rvc-Blue-archives -ysharma/baichuan-7B -ThomasSimonini/SmartRobot -iitolstykh/age_gender_estimation_demo -iamAI123/whisper_model_speech_to_text -victor/SDXL-0.9 -mikeee/qwen-7b-chat -librarian-bots/dataset-to-model-monitor -kevinwang676/VALLE -dylanebert/list-of-splats -LeoLM/leo-hessianai-7b-chat -HusseinHE/psis -toshas/repainting_3d_assets -Illia56/llama-2-7b-chat -AIatUIUC/CodeLATS -abidlabs/gradio-lite-classify -mkrzyzan/face-swap -52Hz/CMFNet_deblurring -Harveenchadha/en_to_indic_translation -Hellisotherpeople/Unsupervised_Extractive_Summarization -MrBodean/VoiceClone -TitleGenerators/ArxivTitleGenerator -akhaliq/Detic -akhaliq/Spleeter -bertin-project/bertin -flax-community/koclip -hysts/Yet-Another-Anime-Segmenter -hysts/anime-face-detector -marshmellow77/contract-review -merve/KerasBERTv1 -mrm8488/GPT-J-6B -team-writing-assistant/grammar-correction -team-zero-shot-nli/zero-shot-nli -hackathon-pln-es/readability-assessment-spanish -hysts/StyleGAN3 -ybelkada/FocusOnDepth -hysts/gan-control -suvash/food-101-resnet50 -FrankAst/image_mixer -osanseviero/hugging-gallery -keras-io/Generating-molecular-graphs-by-WGAN-GP -seduerr/personality -CVPR/CVPR2022_papers -sklearn-docs/classification -keras-io/video-transformers -tfwang/PITI-Synthesis -fffiloni/stablediffusion-interpolation -patrickvonplaten/vq-vs-stable-diffusion -sayakpaul/video-classification-ucf101-subset -manu/the-rap-god-test -myscale/object-detection-safari -Podtekatel/ArcaneSVK2 -pxiaoer/ChatGPT -AlStable/AlPrompt -Kamtera/Persian-tts-CoquiTTS -JavaFXpert/GPT-3.5-Table-inator -sayakpaul/pokemon-sd-kerascv -pinecone/openai-ml-qa 
-SpacesExamples/streamlit-docker-example -russellc/comparing-captioning-models -Shad0ws/Voice_Cloning -mindspore-ai/Wukong-Huahua -kazuk/youtube-whisper-05 -kazuk/youtube-whisper-07 -hossay/image-to-sketch -kadirnar/Anime4k -katielink/compare-bio-llm -YazawaSunrise/so-vits-svc-LoveLive -kadirnar/Multilingual-Translation -bluelu/Product-Photo-Analyzer -ybelkada/detoxified-lms -zetabyte/text-to-voice -ashhadahsan/whisperX -Jayabalambika/my-app-space -librarian-bots/notebooks-on-the-hub -Rifd/ngees_doang -Gradio-Themes/theme_builder -zomehwh/sovits-tannhauser -ysharma/whisper-diarization -svdiff-library/SVDiff-Training-UI -snpranav/karenai -awacke1/ChatGPT-Memory-Chat-Story-Generator -ynhe/AskAnything -fffiloni/video-to-sound-fx -yenniejun/tokenizers-languages -huggingface-tools/text-to-video -sabman/map-diffuser -joaogante/assisted_generation_demo -ludwigstumpp/llm-leaderboard -OpenGVLab/VideoChatGPT -OpenGenAI/open-parti-prompts -ennov8ion/comicbook-models -nttdataspain/Image-To-Text-Lora-ViT -alaa-lab/InstructCV -tmaham/DS-Fusion-Express -Xenova/ai-code-playground -OpenGVLab/all-seeing -bigcode/OctoCoder-Demo -stabilityai/japanese-instructblip-alpha -FantasticGNU/AnomalyGPT -wffcyrus/falcon-180b-demo -PY007/TinyLlama-Chat -cncanon/chud -XzJosh/Nana7mi-Bert-VITS2 -AgentVerse/agentVerse -KoboldAI/KoboldAI-Lite -enzostvs/hub-api-playground -Roboflow/webcamGPT -sczhou/ProPainter -NeuML/txtai -Norod78/Face2Doll -Ron0420/EfficientNetV2_Deepfakes_Video_Detector -ThomasSimonini/Chat-with-Gandalf-GPT-J6B -ThomasSimonini/Stable-Baselines3 -Wootang01/text_generator -aakashb95/paraphrase-sentences -abnerh/video-to-subs -akhaliq/kogpt -akhaliq/mlsd -akhaliq/neural-waveshaping-synthesis -akhaliq/openpose -akhaliq/speechbrain-speech-seperation -architext/Architext_deployed -chuanenlin/pdf2preview -fcakyon/streamlit-image-comparison -flax-community/clip-reply-demo -julien-c/streamlit-cheatsheet -katanaml/table-query -keras-io/question_answering -peterbonnesoeur/pose_demo -razakhan/text-summarizer -speech-recognition-community-v2/FinalLeaderboard -tomofi/MMOCR -tomofi/ABINet-OCR -akhaliq/animeganv2-blocks -hackathon-pln-es/sonnet-poetry-generator-spanish -kmacdermid/RpgRoomGenerator -PaddlePaddle/U2Net -cakiki/keyword-extraction -vivien/depth-aware-caption -awacke1/AI-MovieMaker-Comedy -aryadytm/photo-low-light-enhance -Andy1621/uniformer_image_detection -Gradio-Blocks/uniformer_image_segmentation -ntranoslab/esm_variants -Gradio-Blocks/Story-to-video -CVPR/Bamboo_ViT-B16_demo -hysts/AnimeGANv3_PortraitSketch -nanom/syntactic_tree -SIGGRAPH2022/DCT-Net -mrdbourke/foodvision_big_video -GIZ/SDSN-demo -Fia/StableDiffusionCPU -mrm8488/OpenAI_Whisper_ASR -AIZ2H/Gradio331-3D-Models-AI-1 -ysharma/Voice-to-Youtube -sensahin/YouWhisper -beihai/Remove-Background-By-U2Net -simonduerr/ProteinMPNNESM -malteos/emnlp2022-papers -akhaliq/Inkpunk-Diffusion -AIML-TUDA/safe-stable-diffusion -matttrent/stable-diffusion-image-variations-embeds -cjayic/sovits-overwatch2 -triple-t/ttt-space -hwchase17/langchain-demo -awacke1/CloneAnyVoice -h2oai/h2o_wave_whisper -kazuk/youtube-whisper-02 -kazuk/youtube-whisper-08 -zomehwh/sovits-xiaoke -sblumenf/PDF-text-extractor -Eriberto/whisper-to-chatGPT -sasha/Image_Upscaling_Restoration_Colorization -deprem-ml/deprem_satellite_test -pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v1 -abidlabs/ControlNet -LaoCzi/YouTube_Summarize -ysharma/Gradio-demo-streaming -adt/models-table -ahmedxeno/depth_estimation -xuwenhao83/simple_chatbot -xp3857/text-to-image 
-JosefJilek/loliDiffusionSpace -baixing/hackathon_chatbot_simple -antonovmaxim/text-generation-webui-space -coraKong/voice-cloning-demo -NeuralInternet/ChatLLMs -darienacosta/chatgpt-coverwhale -josevalim/livebook -reach-vb/music-spectrogram-diffusion -llamaindex/llama_index_vector_demo -rishiraj/GPT4All -Kevin676/Voice-Cloning-with-Voice-Fixer -oguzakif/video-object-remover -junchenmo/OpenAI-Manager -nigeljw/ViewDiffusion -srush/GPTWorld -portal/guanaco-playground -Searchium-ai/Video-Search -davila7/try-gorilla -arbml/Ashaar -MackDX/Neptunia -hysts/Kandinsky-2-2 -mike-ravkine/can-ai-code-compare -diffusers/sdxl-to-diffusers -zamasam/hentai -xuqinyang/Baichuan-13B-Chat -Open-Orca/OpenOrcaxOpenChat-Preview2-13B -zej97/AI-Research-Assistant -TeraTTS/TTS -cummuniZm/kalfablyadki-sosut -chenxiYan/ChatHaruhi-OpenAI -eson/tokenizer-arena -SenY/GalGameUI -shivammehta25/Matcha-TTS -nupurkmr9/concept-ablation -XzJosh/LittleTaffy-Bert-VITS2 -r3gm/Advanced-RVC-Inference -banana-dev/demo-illusion-diffusion-hq -PIISA/PIISA_Demo -openskyml/midjourney-mini -AisingioroHao0/anime-fanwork -52Hz/SRMNet_AWGN_denoising -52Hz/SUNet_AWGN_denoising -AmazonScience/QA-NLU -GroNLP/neural-acoustic-distance -KPatrick/PaddleSpeechASR -PaddlePaddle/MiDaS_Large -Wootang01/question_generator_three -akhaliq/AnimeGANv1 -akhaliq/Speechbrain-Speech-enhancement -akhaliq/mdetr -bipin/multipurpose-ai -marcelcastrobr/CLIP-image-search -nbeuchat/actors_matching -obsei/obsei-demo -reach-vb/asr-pyctcdecode -savasy/SentimentHistogramForTurkish -team-indain-image-caption/Hindi-image-captioning -z-uo/monocular_depth_estimation -rowel/22k-image-classification -Hellisotherpeople/Gadsby -Aanisha/Image_to_story -StevenLimcorn/fastspeech2-TTS -beihai/GFPGAN-V1.3-whole-image -lukemelas/deep-spectral-segmentation -ShivamShrirao/CLIP-Zero-Shot-Classifier -awacke1/Zoom-Clip-Toon-Image-to-Image -anakin87/who-killed-laura-palmer -gradio/question-answering -evaluate-metric/code_eval -rajistics/receipt_extractor -Hila/RobustViT -GoodStuff/Cool -valurank/keyword_and_keyphrase_extraction -duchaba/120dog_breeds -unity/ML-Agents-PushBlock -chansung/segformer-tf-transformers -skytnt/lyric-generator-ja -taesiri/CLIPScore -nateraw/video-to-sketch -NAACL2022/GlobEnc -g8a9/ferret -cmarkea/sentiment-analysis -mrdbourke/foodvision_mini -gradio/depth_estimation -schibsted/facial_expression_classifier -fffiloni/scene-edit-detection -PaddlePaddle/PP-OCRv3-ch -osanseviero/TheMLGame -Armandoliv/whisper-biomedical-ner -chinhon/whisper_transcribe -taskswithcode/salient-object-detection -spacerini/gaia -ClueAI/CLUE_AIGC -Evel/Evel_Space -BAAI/AltDiffusion-m9 -Sentdex/LookingGlassRGBD -nakas/demucs_playground -SankarSrin/image-matting-app -pragnakalp/Question_Generation_T5 -RamAnanth1/prompt-extend-2 -binery/Table_Transformer_PaddleOCR -FredZhang7/paint-journey-demo -deelerb/3dselfie -akhaliq/webui-orangemixs -kazuk/youtube-whisper -faisalhr1997/blip-image-captioning-space-large -taesiri/CLIPSeg -society-ethics/featured-spaces-submissions -ysharma/LangChain_GradioBot -katielink/biogpt-qa-demo -ChrisPreston/diff-svc_minato_aqua -shubhajit07/dreamlike-photoreal-2.0 -fffiloni/x-decoder-video -dotmet/chatgpt_webui -wl-zhao/unipc_sdm -GT4SD/multitask-text-and-chemistry-t5 -M52395239m/Image_Face_Upscale_Restoration-GFPGAN -L0SG/BigVGAN -Willder/chatgpt-streamlit -awacke1/Image-to-Text-Salesforce-blip-image-captioning-base -RamAnanth1/roomGPT -fffiloni/simple-animation-doodle -zhangliwei7758/vits-uma-genshin-honkai -yuan2023/Stable-Diffusion-ControlNet-WebUI 
-hohonu-vicml/DirectedDiffusion -bigcode/near-deduplication -Hugorowan/image-to-video-film-2-og-by-kazuk -Kevin676/Voice-Cloning -NKU-AMT/AMT -keras-dreambooth/dreambooth-pug-ace -zomehwh/vits-models-ow2 -kenjiqq/aesthetics-scorer -kira4424/Tacotron-zero-short-voice-clone -AIBoy1993/segment_anything_webui -hackathon-somos-nlp-2023/T5unami-small-v1 -fengmuxi/ChatGpt-Web -StephanST/WALDOonline -rezaarmand/Perp-Neg -dexxxed/remove-object-from-photo -MuhammadHanif/Stable-Diffusion-High-Resolution -jurgendn/table-extraction -AiMimicry/sovits-models -OFA-Sys/ONE-PEACE_Multimodal_Retrieval -FrankZxShen/vits-fast-fineturning-models-ba -JUNGU/talktosayno -CognitiveLabs/GPT-auto-webscraping -estusgroup/ai-qr-code-generator-beta-v2 -Ricecake123/RVC-demo -allknowingroger/Image-Models-Test27 -coomdoomer/doomer-reverse-proxy -superdup95/su -JosephusCheung/LL7M-JS-Tokenizer -Sentdex/StableBeluga-7B-Chat -awacke1/PromptSuperHeroImageGenerator -openskyml/pigeon-chat -BasToTheMax/voicechange -mishig/phind-wizardcoder-playground -radames/TinyStories-Candle-Wasm-Magic -4com/stable-diffusion -Illia56/Code-Interpreter-Palm2 -microsoft/LLMLingua -juuxn/SimpleRVC -PulsarAI/thebloke-quantized-models -pseudolab/KOMUChat -latent-consistency/lcm-LoraTheExplorer -pseudolab/AI_Tutor_BERT -52Hz/CMFNet_deraindrop -AdamGustavsson/AnimeganV2Webcam -Babelscape/rebel-demo -EXFINITE/BlenderBot-UI -HridayKharpude/Tabla-Transcriber -Kodiks/turkish-news-classification -Narrativaai/GPT-J-6B-Demo -Yassine/Stego -abidlabs/The-Acquisition-Post-Generator -akhaliq/Face_Mesh -akhaliq/PAMA -akhaliq/TensorFlowTTS -akhaliq/midi-ddsp -akhaliq/steerable-nafx -bluebalam/paper-rec -chinhon/News_Summarizer -dnth/webdemo-fridge-detection -facebook/xm_transformer_600m -gradio/chatbot -jkang/demo-artist-classifier -mohitmayank/SummarizeLink -robinhad/ukrainian-stt -samarthagarwal23/QuestionAnswering_on_annual_reports -sunwaee/Perceiver-Multiclass-Emotion-Classification -team-ai-law-assistant/CUAD -vishnun/Colorify -Theivaprakasham/layoutlmv2_invoice -tomofi/CRAFT-TrOCR -hackathon-pln-es/gastronomia_para_to2 -Shruhrid/Next_Word_Prediction -huggan/projected_gan_art -multimodalart/diffusion -Gradio-Blocks/uniformer_video_demo -Gradio-Blocks/Gradio_YOLOv5_Det -hysts/mmdetection -keras-io/neural-style-transfer -bigscience-data/corpus-map -simonduerr/metal3d -doevent/background-remover -jw2yang/unicl-img-recog-demo -meeww/Minecraft_Skin_Generator -chrisjay/mnist-adversarial -robinhad/ukrainian-ai -keras-io/Object-Detection-Using-RetinaNet -flava/flava-multimodal-zero-shot -ALM/CALM -sasha/BiasDetection -joaogante/tf_xla_generate_benchmarks -TabPFN/TabPFNPrediction -pcuenq/latent-diffusion-seed -pritish/Image-Captioning -hank1996/yolopv2 -saadkiet/AI_Blog_generation_Powered_by_GPT_NEO_1.3B -mkutarna/audiobook_gen -ysharma/ernie_vilg_english -ugaray96/neural-search -mareloraby/topic2poem -gradio/image_segmentation -schibsted/Facial_Recognition_with_Sentiment_Detector -CjangCjengh/Sanskrit-TTS -mdnestor/media-downloader -Samhita/geolocator -johnslegers/stable-diffusion-1-5 -nateraw/music-visualizer -ysharma/lets_make_meme -osanseviero/esmfold -livebook-dev/livebook -riccardogiorato/playground_diffusion -alankabisov/youtube-video-summary -kevinszeto/stable-diffusion-animation -tracinginsights/F1-analysis -alibaba-pai/pai-diffusion-artist-xlarge-zh -0x90e/ESRGAN-MANGA -gblinc111/Intelligent-Photo-Blur-Using-Dichotomous-Image-Segmentation -achterbrain/Intel-Generative-Image-Dashboard -Xhaheen/Lexica_prompt_search -osanseviero/streamlit_1.15 
-Yusin/Speech-ChatGPT-Speech -gojiteji/NAGISystem -ItsJayQz/GTA5_Artwork_Diffusion -rodolfoocampo/InfiniteStories -lvwerra/hf-review -Mahiruoshi/Lovelive-Nijigasaku-Chat-iSTFT-GPT3 -nightfury/img2audio_video_prompt_tags -multimodalart/finetuned-text-to-music -awacke1/Webcam-Object-Recognition-Yolo-n-Coco -hra/chatgpt-stock-news-snapshots -juliensimon/table_questions -kazuk/youtube-whisper-09 -tornadoslims/instruct-pix2pix -decodemai/chatgpt_prompts -bigcode/santa-explains-code -kadirnar/AnimeSR -society-ethics/Average_diffusion_faces -leave7/kazunaAI2.0 -gaspar-avit/Movie_Poster_Generator -CobaltZvc/Docs_Buddy -HuggingFaceH4/chatty-lms-old -nikitalokhmachev-ai/line-art-colorization -demo-crafters/leaderboard -Manjushri/Instruct-Pix-2-Pix -davila7/youtubegpt -Manjushri/OJ-V4-CPU -RamAnanth1/Video2Video-models -lukestanley/streaming_chat_with_gpt-3.5-turbo_using_langchain_sorta -AIML-TUDA/does-clip-know-my-face -jackculpan/chatwebpage.com -luongphamit/DreamShaper-webui -ZeroTech/ChatGPT -orpatashnik/local-prompt-mixing -zomehwh/sovits-goldship -llamaindex/llama_index_term_definition_demo -huggingfacejs/streaming-text-generation -WorldlineChanger/sayashi-vits-uma-genshin-honkai -rockeycoss/Prompt-Segment-Anything-Demo -sasha/find-my-pedro -sklearn-docs/MLP-Regularization -Kevin676/Raven-with-Voice-Cloning-2.0 -character-aware-diffusion/charred -TencentARC/VLog -kevinwang676/Bark-New-Version -huggingface-tools/text-to-image -ulasdilek/gpt_claude_dialogue -kevinwang676/rvc-models-new -kevinwang676/web-singer-2 -SeViLA/SeViLA -aaronb/DragGAN -giswqs/solara-geospatial -stanfordnlp/Backpack-Demo -yuhangzang/ContextDet-Demo -FrankZxShen/so-vits-svc-models-pcr -Aki004/herta-so-vits -ygtxr1997/ReliableSwap_Demo -blanchon/qrcode-diffusion -silk-road/ChatHaruhi -PSLD/PSLD -xnetba/text2image -smangrul/peft-codegen25 -EmilyBrat/ATF -kevinwang676/SadTalker -hsdcs/bingchat -foduucom/table-extraction-yolov8 -Yntec/DreamAnything -kevinwang676/VoiceChanger -foduucom/CandleStickScan-Stock-trading-yolov8 -Logspace/LangflowView -pankajmathur/psmathur-orca_mini_v3_7b -ntt123/Vietnam-male-voice-TTS -multimodalart/civitai-to-hf -gorilla-llm/gorilla-demo -jbilcke-hf/observer -optimum/optimum-benchmark-ui -eaglelandsonce/simplevectorization -artificialguybr/instagraph-gradio -SmileyTatsu/Smile -Wauplin/gradio-user-history -limcheekin/Mistral-7B-OpenOrca-GGUF -etri-vilab/Ko-LLaVA -pseudolab/Balanced-News-Reading -lavita/medical-question-answering-datasets -radames/Candle-BLIP-Image-Captioning -ylacombe/accessible-mistral -yuntian-deng/ChatGPT4Turbo -Sangmin/OpenAI_TTS -solara-dev/wanderlust -freddyaboulton/gradio_pdf -latent-consistency/lcm-lora-for-sdxl -pseudolab/SonGPT -Giuliano/Conversational-Datasets -JLD/clip-image-search -jiangjiechen/loren-fact-checking -NimaBoscarino/aot-gan-inpainting -abhilash1910/CartoonGAN -abidlabs/vision-transformer -akhaliq/VideoGPT -akhaliq/deepface -ck46/qg-qa -coolspaces/windows3.1 -edugp/perplexity-lenses -filio/animate -jerryyan21/wav2lip_demo_test -kaushalya/medclip-roco -mrm8488/summarizer_mlsum -nateraw/dino-clips -osanseviero/Apocalyptify_webcam -radames/NYTimes-homepage-rearranged -ucinlp/autoprompt -umichVision/virtex-redcaps -xvjiarui/GroupViT -osanseviero/6DRepNet -EdBianchi/JustMovie -Sultannn/YOLOX-Demo -poccio/ExtEnD -navervision/KELIP -hackathon-pln-es/DemoAcosoTwitter -nazneen/interactive-model-cards -awacke1/MusicMaker -hysts/StyleGAN2 -templates/flask -hysts/CelebAMask-HQ-Face-Parsing -huggan/crypto-gan -huggan/night2day -hysts/mediapipe-face-mesh 
-brentspell/hifi-gan-bwe -multimodalart/styleganxlclip -pplonski/interactive-presentation -emilylearning/causing_gender_pronouns -pie/Joint-NER-and-Relation-Extraction -dbuscombe/SatelliteSuperResolution -mfrashad/CharacterGAN -Gradio-Blocks/magnificento -evaluate-metric/cer -evaluate-metric/chrf -Gradio-Blocks/uniformer_image_detection -codeparrot/incoder-subspace -Gradio-Blocks/Hip_Hop_gRadio -kleinay/qasem-demo -Gradio-Blocks/gen-code-comparer -CVPR/Leaderboard -Spjkjlkkklj/dalle -CVPR/GroupViT -GooglyBlox/DalleFork -CVPR/SPOTER_Sign_Language_Recognition -sasha/WinoBiasCheck -unity/ML-Agents-Walker -keras-io/denoising-diffusion-implicit-models -NAACL2022/papers -mrm8488/bloom-spanish-prompts -codeparrot/code-explainer -fusing/celeba-diffusion -cffl/Exploring_Intelligent_Writing_Assistance -Zengyf-CVer/FaceRecognition -nev/CoNR -RoCobo/WiggleGAN -hasibzunair/fifa-tryon-demo -abdulmeLINK/programmer-bloom -ccolas/TastyPiano -gradio/timeseries-forecasting-with-prophet -Accel/media-converter -lambdalabs/LambdaSuperRes -pythiccoder/FastCoref -wenet/wespeaker_demo -HaloMaster/chinesesummary -FelixLuoX/codeformer -doevent/colorizator -vict0rsch/climateGAN -jinhybr/OCR-LayoutLM-v3-Document-Parser -NCSOFT/harim_plus -akhaliq/hassanblend1.4 -yo2266911/DeepDanbooru_string -bofenghuang/whisper-demo-french -SpacesExamples/fastapi_dummy -wavymulder/Analog-Diffusion -JavaFXpert/NimGPT-3.5 -wdcqc/wfd -Sakukaze/VITS-Umamusume-voice-synthesizer -akhooli/poetry2023 -syedusama5556/Real-ESRGAN-Demo -Miuzarte/SUI-svc-3.0 -dromerosm/gpt-info-extraction -TheWolf/Image-Upscaling-Playground -radames/whisper.cpp-wasm -kazuk/youtube-whisper-01 -decodemai/devils_advocate -bigbio/dataset-explore -thoucentric/Big-Five-Personality-Traits-Detection -Denliner/wd-v1-4-tags -BilalSardar/Lyrics-Text_to_music -Wauplin/pynecone-on-spaces-template -juliensimon/bridgetower-video-search -nikitalokhmachev-ai/interior-semantic-segmentation -SerdarHelli/Pix2Pix3D -pedrogengo/pixel_art -lhoestq/datasets-explorer -WitchHuntTV/WinnieThePoohSVC_sovits4 -asescodes/midjourney-prompt-generator-using-chatgpt -fffiloni/controlnet-animation-doodle -yuan2023/stable-diffusion-webui-controlnet-docker -JacobLinCool/tiktoken-calculator -Wauplin/bloomz.cpp-converter -keras-dreambooth/dreambooth-diffusion-akita-dog -nlphuji/whoops-explorer-full -SamiKoen/ChatGPT444 -lxe/lora-cerebras-gpt2.7b-alpaca-shortprompt -chatarena/chatarena-demo -hackathon-somos-nlp-2023/GIPBERT -chomakov/GPT-4_PDF_summary -ochyai/alo -qingxu98/academic-chatgpt-beta -dylanebert/UnityDemo -ORI-Muchim/BlueArchiveTTS -gradio/chatbot_streaming -Layer6/TR0N -deepghs/anime_object_detection -Phips/upscale_demo -zeno-ml/chatbot-report -Make-A-Protagonist/Make-A-Protagonist-inference -EduardoPacheco/DINOv2-Features-Visualization -NMEX/rvc-hoyo-game -Salavat/Interslavic-Translator-NLLB200 -IDEA-CCNL/Ziya-v1 -awacke1/ChatGPT-Streamlit-2 -dpc/mmstts -c-s-ale/ArxivChainLitDemo -imseldrith/Imagine -leonelhs/faceshine -safetensors/convert_large -thesven/image-to-story -fun-research/FC-CLIP -NeonLion92/nsfw-c0ffees-erotic-story-generator2 -wildoctopus/cloth-segmentation -jbilcke-hf/VideoChain-API -CoreyMorris/MMLU-by-task-Leaderboard -FFusion/FFusionXL-SDXL-DEMO -0xSynapse/PixelFusion -Hazem/Image_Face_Upscale_Restoration-GFPGAN -diffle/sd-xl -Shizune/neko-proxy -HuggingFaceM4/OBELICS-Interactive-Map -Gen-Sim/Gen-Sim -kneelesh48/Tesseract-OCR -ntt123/Vietnam-female-voice-TTS -trl-lib/trl-text-environment -qingxu98/grobid -InstaDeepAI/nucleotide_transformer_benchmark 
-techasad/midjourney-lite -Illia56/book-mind-ai -mingyuan/ReMoDiffuse -zenafey/prodia-studio -naver-ai/DenseDiffusion -Latryna/roop -eaglelandsonce/loglinecreator -mrm8488/xtts-spanish -radames/Candle-BERT-Semantic-Similarity-Wasm -kirp/tinyllama-chat -XzJosh/LAPLACE-Bert-VITS2 -derek-thomas/arabic-RAG -MultiTransformer/autogen-tutorials -Wataru/Miipher -XzJosh/otto-Bert-VITS2 -hysts/mistral-7b -XzJosh/Eileen-Bert-VITS2 -ilumine-AI/AI-3D-Explorable-Video -library-samples/zephyr-7b -enzostvs/stable-diffusion-tpu -pseudolab/KorLearnGame -limcheekin/zephyr-7B-beta-GGUF -limcheekin/openchat_3.5-GGUF -TeamTonic/MultiMed -KoboldAI/Koboldcpp-Tiefighter -pseudolab/interviewer_chat -fiz2/cloudy -BigSalmon/Paraphrase -DrishtiSharma/Text-to-Image-search-using-CLIP -Emanuel/twitter-emotions-demo -GEM/DatasetCardForm -Harveenchadha/hindi-speech-recognition-vakyansh-wav2vec2 -Hellisotherpeople/Interpretable_Text_Classification_And_Clustering -Huertas97/Inpaint_Me -MTTR/MTTR-Referring-Video-Object-Segmentation -Norod78/Dragness -Rules99/YouRadiologist -Wootang01/next_sentence -abidlabs/Echocardiogram-Segmentation -abidlabs/chatbot-minimal -akhaliq/SOAT -akhaliq/SpecVQGAN_Neural_Audio_Codec -akhaliq/TokenCut -akhaliq/animeganv2-onnx -anuragshas/restore-punctuation-demo -cahya/persona-chatbot -chinhon/fake_tweet_detector -chinhon/headline_writer -docs-demos/gpt2 -durgaamma2005/fire_detector -ehcalabres/EMOVoice -ethzanalytics/gpt2-xl-conversational -hgrif/rhyme-with-ai -hysts/bizarre-pose-estimator-tagger -jsylee/adverse-drug-reactions-ner -keras-io/super-resolution -moflo/nftGAN -nateraw/detr-object-detection -osanseviero/Neural_Image_Colorizer -pritamdeka/health-article-keyphrase-generator -simayhosmeyve/Image_Enhancement -team-language-detector/LanguageDetector -valhalla/XGLM-zero-shot-COPA -vishnun/CRAFT-OCR -ysharma/TranslateQuotesInImageForwards -zihaoz96/shark-classifier -dariush-bahrami/color_transfer -vobecant/DaS -hysts/StyleSwin -katanaml/LayoutLMv2-CORD -52Hz/SRMNet_thesis -ANDRYHA/FakeNewsClassifier -johnowhitaker/waterface -osanseviero/llama-leaderboard -ybelkada/interfacegan_pp -SIGGRAPH2022/Self-Distilled-StyleGAN -hysts/insightface-SCRFD -hysts/mediapipe-face-detection -cakiki/tensorflow-coder -edaiofficial/mmtafrica -AlekseyKorshuk/accompaniment-generator -evaluate-metric/sacrebleu -evaluate-metric/bleurt -evaluate-metric/squad -versae/gradio-blocks-rest-api -valurank/keyword-extraction-demo -Gradio-Blocks/Anime-BigGAN -codeparrot/codeparrot-subspace -Gradio-Blocks/stylish_ape -Himanshi/Face-Cartoonify-for-Video-Call-Privacy -bigscience/data_host_provider_agreement -Gradio-Blocks/Alexa-NLU-Clone -jho/MonocularDepth -awacke1/SimPhysics -aaronespasa/deepfake-detection -jeremyrmanning/multitext-to-video -misterbrainley/generate_dnd_images -CVPR/VizWiz-CLIP-VQA -cye/dalle-mini -temandata/ecommurz-talent-search-engine -CVPR/Object-Detection-With-DETR-and-YOLOS -hugginglearners/Paddy-Doctor -unity/ML-Agents-Worm -julien-c/push-model-from-web -keras-io/dual-encoder-image-search -hugginglearners/Multi-Object-Classification -hugginglearners/image-style-transfer -EuroPython2022/pulsar-clip -awsaf49/gcvit-tf -Kameswara/TextToVideo -NSC9/Artificial_Calculus_Teacher -ali-ghamdan/colorizer -sidharthism/fashion-eye-try-on-demo -osanseviero/VNext -ryanj/clothing_recommender -innat/Google-MediaPipe -dhansmair/flamingo-tiny-cap -Curranj/FlowerDiffusion -emilylearning/llm_uncertainty -wenet/wenet_demo -mareloraby/meter2poem-1 -taskswithcode/semantic_similarity -gradio/webcam 
-oconnoob/audio-intelligence-dashboard -open-source-metrics/repository-statistics -BatuhanYilmaz/Youtube-Transcriber -nightfury/Image-Colorization -emilyalsentzer/SHEPHERD -mjdolan/Holiday-StyleGAN-NADA -pierreguillou/question-answering-portuguese-with-BetterTransformer -Tahsin-Mayeesha/Bangla-Question-Generation -abhijitguha/chatbot_gpt3 -AI-DHD/Youtube-Whisperer -Matthijs/image2reverb -biodatlab/whisper-thai-demo -bayartsogt/whisper-demo-mongolian -Jumon/whisper-zero-shot-audio-classification -patrickvonplaten/convert -camenduru-com/webui-api -fffiloni/audio-to-spectrogram -mohitmayank/sentenceviz -aimstack/aim -whisper-event/winners -whisper-event/leaderboard -wavymulder/portraitplus -spiritupbro/text-to-3D -joeddav/zero-shot-demo -ThomasSimonini/ML-Agents-SnowballTarget -EDGAhab/VITS-Aatrox-AI -hjs8/CogVideo -Wryley1234/textual-inversion-training -deepghs/auto_image_censor -radames/instruct-pix2pix -myscale/Protein-Structure-Modeling -theintuitiveye/HARDblend -mano96/content_rewrite -sohojoe/soho-clip-embeddings-explorer -sayakpaul/evaluate-sd-schedulers -WiNE-iNEFF/HF_Simple_Prompt_Generator -johnnygreco/the-gpt-who-lived -asim266/image-background-remover -Mileena/PIFu-Clothed-Human-Digitization -user238921933/stable-diffusion-webui -taesiri/ChatGPT-ImageCaptioner -lint/anime_controlnet -Vastness0813/decapoda-research-llama-65b-hf -hwberry2/WhisperDemo -CactiStaccingCrane/OpenAssistant-oasst-sft-1-pythia-12b -salahIguiliz/ControlLogoNet -radames/gradio-request-get-client-ip -baixing/hackathon_test -Xhaheen/chatgpt_meme_world_ -Sortoite/PDFChatGpt -gradio/monochrome -ljsabc/Fujisaki -abidlabs/cinemascope -ja-818/speech_and_text_emotion_recognition -abidlabs/twitter-scorer -zomehwh/sovits-rudolf -adhisetiawan/anime-voice-generator -dawood/Kanye-AI -tomaarsen/span-marker-bert-base-fewnerd-fine-super -AutoBG/Auto-BoardGame -kazuk/youtube-whisper-12 -shivi/dolly-v2-demo -hahahafofo/prompt_generator -ArchitSharma/Digital-Photo-Color-Restoration -fffiloni/audioldm-text-to-audio-generation-copy -fffiloni/CoCa-clone -sklearn-docs/Gradient_Boosting_regression -zdxiaoda/sovits-4.0-V1-anime-character-model -PhilPome/seo-analysis-tool -firzaelbuho/rvc-models -hanzportgas/rvc-models -hahahafofo/ChatGLM-Chinese-Summary -leemeng/stablelm-jp-alpha -diffusers/controlnet-canny-tool -Oddity/ehartford-WizardLM-13B-Uncensored -colonelwatch/abstracts-index -segestic/HuggingChat -allinaigc/GPTAdvanceTemp0801 -vivlavida/generative-disco -sdart/SD_txt2img -AutoLLM/AutoAgents -AutoLLM/ArxivDigest -noamrot/FuseCap-image-captioning -mindtube/Diffusion50XX -rustformers/mpt-7b-instruct -failfast/2D-GameCreator -phoenix-1708/stable-diffusion-webui-cpu -HUBioDataLab/DrugGEN -attention-refocusing/Attention-refocusing -Aabbhishekk/MistralQnA -dekk-i386/pdflangchain -Royir/SynGen -huggingchat/chat-ui-template -HawkEye098432/Vocals_seperator -AI-Hobbyist/Hoyo-RVC -Dagfinn1962/stablediffusion-models -Manjushri/MusicGen -Raaniel/Audiomaister -Pontonkid/Real-Time-Multilingual-sentiment-analysis -keithhon/tortoise-tts-webui -jbilcke-hf/media-server -maknee/minigpt4.cpp -hf4all/web-ui -Vageesh1/Voice_Cloner -renumics/stable-diffusion-select-best-images -talhaty/Faceswapper -thecentuaro/oai-proxy-geoblock-zov-edition -Artples/llama-2-7b-chat -abhishek/sketch-to-image -jeonchangbin49/De-limiter -bilgeyucel/captionate -akdeniz27/LLaMa-2-70b-chat-hf-with-EasyLLM -MrKetchupp/nerijs-pixel-art-xl -allknowingroger/Image-Models-Test59 -Justin-Choo/Multi_diffuser-quick-diffusion-CN-ZH -linhdo/document-layout-analysis 
-smangrul/PEFT-Docs-QA-Chatbot -qoobeeshy/yolo-document-layout-analysis -Prof-Reza/Audiocraft_Music-Audio_Generation -sweepai/chunker -Justin-Choo/Waifu-Diffusion_WEB_UI -seanpedrickcase/Light-PDF-Web-QA-Chatbot -mlpc-lab/BLIVA -Yntec/ToyWorldXL -simonw/datasette-thebloke -4com/SD-XL-CPU -okeanos/uptimefactoryai -sdadas/pirb -catgirlss/kittens -hysts/BLIP-Diffusion -merve/Grounding_DINO_demo -librarian-bots/new-datasets-in-machine-learning -allknowingroger/Image-Models-Test193 -openaccess-ai-collective/jackalope-7b -IlyaGusev/saiga_mistral_7b_gguf -TheKitten/Fast-Images-Creature -mila-quebec/SAI -library-samples/InstructBLIP -SkalskiP/MetaCLIP -jbochi/madlad400-3b-mt -OpenDILabCommunity/LLMRiddlesChatGPTCN -choimirai/whisper-large-v3 -ADRXtractor/ADR_Xtractor -TheBritishLibrary/British-Library-books-genre-classifier-v2 -CALM/Dashboard -Ebost/animeganv2-self -Harveenchadha/Hindi_TTS -Hellisotherpeople/HF-SHAP -HugsVision/Skin-Cancer -Jacobo/syntax -Newtral/toxic-tweets-in-spanish-politics -akhaliq/Kapao -akhaliq/Keypoint_Communities -akhaliq/U-2-Net -akhaliq/poolformer -anton-l/youtube-subs-wav2vec -aseifert/writing-assistant -basakbuluz/turkish-question-answering -chuanenlin/foodnet -edemgold/IFA-summarizer -edugp/embedding-lenses -eugenesiow/mandarin-tts -frgfm/torch-cam -gorkemgoknar/moviechatbot -hysts/anime_face_landmark_detection -hysts/danbooru-pretrained -Gradio-Blocks/multilingual-asr -isabel/mental-health-project -jone/GFPGAN -keras-io/involution -keras-io/patch-conv-net -mbahrami/Auto-Complete_Semantic -ml6team/byt5_ocr_corrector -nielsr/perceiver-image-classification -osanseviero/HUBERT -pierreguillou/ner-bert-pt-lenerbr -qanastek/Etiqueteur-Morphosyntaxique-Etendu -rexoscare/Speech_to_Text_Hindi -rileho3909/Real-Time-Voice-Cloning -sbhatti2009/stock-analysis -severo/voronoi-cloth -smangrul/Text-To-Image -sunwaee/Face-Mask-Detection -wilmerags/tweet-snest -xiongjie/realtime-SRGAN-for-anime-example -hongaik/service_text_classification -atsantiago/Monocular_Depth_Filter -gryan-galario/manga-ocr-demo -iSky/Speech-audio-to-text-with-grammar-correction -abidlabs/streaming-asr -abidlabs/streaming-asr-paused -cakiki/doom -hackathon-pln-es/clasificador-de-tesis -awacke1/Streamlit-ASR-Video -julien-c/cube -awacke1/AI-Quantum -anegi/Comparing-dialogue-summarization-models -probing-vits/attention-rollout -huggan/sefa -ecarbo/deoldify-demo -huggan/sim2real -sunshineatnoon/TextureScraping -HighCWu/colorful-ascii-art -bigscience/SourcingCatalog -evaluate-metric/matthews_correlation -Gradio-Blocks/uniformer_image_demo -nagolinc/npcGenerator -nagolinc/styleGanHuman_and_PIFu -Gradio-Blocks/SlowMo_n_Timelapse_Your_Video -CVPR/BigDL-Nano_inference -valurank/Article_Summarizer_12_6_testing -awacke1/ASRGenerateStoryandVideo -Theivaprakasham/wildreceipt -yhavinga/pre-training-dutch-t5-models -hlydecker/MegaDetector_v5 -ThomasSimonini/Compare-Reinforcement-Learning-Agents -duchaba/skin_cancer_diagnose -taka-yamakoshi/tokenizer-demo -hugginglearners/rice-image-classification -big-kek/NeuroKorzh -awacke1/SentenceToGeneratedVideo -hugginglearners/brain-tumor-detection-mri -EuroPython2022/clickbaitonator -VietAI/En2Vi-Translation -keras-io/PointNet-Classification -pinecone/semantic-query-trainer -Qilex/EnglishToMiddleEnglish -nazneen/seal -Blaise-g/summarize-biomedical-papers-long-summary-or-tldr -josuelmet/Metal_Music_Interpolator -nickmuchi/Netflix-Semantic-Search-Whisperer -Vertaix/vendiscore -ECCV2022/ECCV2022_papers -PaddlePaddle/ERNIE-Zeus -autonomous019/image_story_generator -gradio/text_generation 
-ThomasSimonini/atari_agents -kornia/kornia-image-filtering -kornia/kornia-resize-antialias -breezedeus/pix2text -Shamima/extract-color-from-image -Msp/Document_Parser -juancopi81/mutopia-guitar-composer -nazneen/model-usage -AI-Zero-to-Hero/06-SL-AI-Image-Music-Video-UI-UX-URL -YaYaB/text-to-onepiece -imseldrith/Article-Rewriter -MarketINK/MarketINK -adirik/kakao-brain-vit -GIZ/embedding_visualisation -Chenyuwen/playground2 -sparanoid/demucs-gpu -tomaseo2022/imagen-a-pixel-art -cc1234/stashface -Adapting/TrendFlow -celebrate-ai/face-detection-cnn -Podtekatel/Arcane_Style_Transfer -nakas/Time-Domain-Audio-Style-Transfer -robinhad/qirimtatar-tts -dpe1/beat_manipulator -BoomerangGirl/MagicPrompt-Stable-Diffusion -BilalSardar/Object-Color-Detection-in-Video -binery/Donut_Receipt_v2 -akhaliq/wavyfusion -johnowhitaker/color-guided-wikiart-diffusion -Datasculptor/ImageGPT -TacosHero/flax-midjourney-v4-diffusion-2 -Tuana/find-the-animal -MirageML/depth2img -zwv9/webui-cpu -pragnakalp/Audio_Emotion_Recognition -alvanlii/whisper-small-cantonese -sayakpaul/demo-docker-gradio -Yasu55/stable-diffusion-webui -dreambooth-hackathon/dreambooth-hackathon-evaluator -Intel/qa_sparse_bert -Jojelf/dreamlike-photoreal-2.0 -unstructuredio/receipt-parser -awacke1/Biomed-NLP-AI-Clinical-Terminology -leuschnm/CrowdCounting-with-Scale-Adaptive-Selection-SASNet -ivelin/ui-refexp -Gxia/Lama-Cleaner-lama -sovitrath/pothole_yolov8_nano -Qosmo/video2music-demo -jamesliu1217/midjourney-v5 -h2oai/ner_annotation -thiagohersan/maskformer-satellite-trees-gradio -fcakyon/yolov8-segmentation -ai-moroz/webui-cpu -huggingface-projects/auto-retrain -wanglishan/pic-repaire2 -nickmuchi/fintweet-GPT-Search -juancopi81/whisper-youtube-2-hf_dataset -sayakpaul/convert-kerascv-sd-diffusers -sophiamyang/Panel_InstructPix2Pix -decodemai/Stable-Diffusion-Ads -0xhimzel/Detect-AI-Plagiarism -Everymans-ai/GPT-knowledge-management -ofikodar/chatgpt-resume-builder -neel692/NSFW-VS-SFW-Image-Classification -reach-vb/speech-t5-this-speaker-does-not-exist -Eriberto/chatGPT -HuggingFaceH4/instruction-model-outputs-filtered -gradio-tests/Image_Upscaling_Restoration_Colorization -kufei/nllb-translation-demo-1.3b-distilled -mdj1412/stock_news_summaries_AI -JYskyp/wildcards -kobkrit/openthaigpt -keremberke/awesome-yolov8-models -yujieq/RxnScribe -AIML-TUDA/FairDiffusionExplorer -podsni/Coverter-PDF-to-TXT -calmgoose/Talk2Book -keras-dreambooth/pink-floyd-division-bell -szk1ck/image-matting -buildingai/youtube-video-transcription-with-whisper -king007/GPT-Prompt-Generate-2 -hu-po/speech2speech -rakibulbd030/GFPGAN -gradio/soft -itacaiunas/remove-photo-object -DKDohare/Chat-GPT4-MAX -CGMatter/modelscope-text-to-video-synthesis -JenkinsGage/WritingHelper -p4vv37/CodeBERT_CodeReviewer -rakibulbd030/old_photo_restoration -ajndkr/boilerplate-x -kastan/ai-teaching-assistant -osanseviero/osanseviero-llama-alpaca-guanaco-vicuna -sudeepshouche/minimalist -keras-dreambooth/dreambooth-bored-ape -Mrchuw/text-to-image_6_by_6 -aiditi/nvidia_denoiser -sakasegawa/whisper-gijiroku-summary -lemonshochu/JPEG_Artifacts_Removal -hackathon-somos-nlp-2023/demo_DiagTrast -sklearn-docs/MNIST_classification_using_multinomial_logistic_L1 -kira4424/VITS-fast-fine-tuning -kxqt/Expedit-SAM -dromerosm/autogpt-agents -SJTU-CL/argugpt-detector -Dao3/image-to-video -posit/shiny-for-r-template -camel-ai/camel-data-explorer -innev/whisper-Base -posit/shiny-for-python-template -hsm-kd-master/photorealistic-images -Gladiator/gradient_dissent_bot -dorkai/singpt-2.0 
-Celestinian/Topic-Detection -taesiri/HuggingGPT-Lite -sklearn-docs/Ordinary_Least_Squares_and_Ridge_Regression_Variance -AlekseyKorshuk/model-evaluation -MarcusSu1216/XingTong -Ash123/stable-diffusion-nano -philmui/globe -Zenne/chatbot_for_files_langchain -OpenDILabCommunity/DI-sheep -failfast/nextjs-hf-spaces -RoundtTble/dinov2-pca -luohy/SAIL-7B -internetsignal/Bark-w-voice-clone -BartPoint/VoiceChange -Annotation-AI/fast-segment-everything-with-image-prompt -szukevin/VISOR-GPT -new4u/whisper_large_v2_Audio_YT_to_text -OFA-Sys/expertllama -matthoffner/web-llm-embed -zhuolisam/resume-ranker -rakhlin/Coqui.ai -k1ngtai/MMS -meraih/English-Japanese-Anime-TTS -vinid/fashion-clip-app -mpatel57/WOUAF-Text-to-Image -michaelthwan/digest-everything-gpt -kevinwang676/M4Singer -teelinsan/aclpubcheck -HappyElephant/TextToSpeech -Walterchamy/Virtual_Assistant_v1 -visheratin/laion-nllb -DravensCursed/OPENAI-REVERSE-PROXY -Dreamsome/HuggingFace-Datasets-Text-Quality-Analysis -fartsmellalmao/combined-GI-RVC-models -mithril-security/poisongpt -TFanon/TFanon -kevinwang676/FreeVC -shuhulhandoo/face-swap -Dormin22/Proxy -Gananom/claudeisms -EnigmaOfTheWorld/Power_AI_Point -jbilcke-hf/LifeSim -Zaxxced/rvc-random-v2 -bhaskartripathi/pdfGPT_Turbo -Branon/oai-proxy -konverner/deep-voice-cloning -dongsiqie/sydney -yangfeixue/newbing -KevinQHLin/UniVTG -HuggingFaceM4/IDEFICS-bias-eval -Junity/Genshin-World-Model -Open-Orca/LlongOrca-7B-16k -yuangongfdu/ltu-2 -Grasswort/BingAI -Brasd99/TTS-Voice-Cloner -pomudachi/spoiled-brrats -NoCrypt/miku -jaumaras/Text-2-Speech -allknowingroger/Image-Models-Test92 -Sapphire-356/Video2MC -giskardai/giskard -tiiuae/falcon-180b-license -0xqtpie/doodle2vid -universeTBD/astrollama -fffiloni/sd-xl-custom-model -Olivier-Truong/XTTS_V1_CPU_working -hysts/ViTMatte -mrm8488/idefics-9b-ft-describe-diffusion-mj -Coweed/GoodTrip -xuyingliKepler/AI_News_Podcast -ysharma/LLaVA_v1 -AkitoP/umamusume_bert_vits2 -deniandriancode/zephyr-7b-alpha-chatbot -pseudolab/2023-Hackathon-Certification -cis-lmu/glotlid-space -guardiancc/video-face-swap -pseudolab/huggingface-korea-theme -pxiaoer/papers -FinGPT/FinGPT-Forecaster -novita-ai/Face-Stylization-Playground -Illia56/fastest-whisper-v3-large -52Hz/HWMNet_lowlight_enhancement -Amrrs/pdf-table-extractor -Amrrs/textsummarizer -CVPR/GFPGAN-example -DeepDrivePL/PaddleSeg-Matting -Flux9665/PoeticTTS -UNIST-Eunchan/Summarizing-app -kili-technology/plastic_in_river -Prathap/summarization -RobotJelly/Text_Or_Image-To-Image_Search -Shankhdhar/Rap-Lyric-generator -Wootang01/text_summarizer -abidlabs/Gradio-MNIST-Realtime -abidlabs/flagging -ajitrajasekharan/Bio-medical-NER-Model-Gradio-Demo -ajitrajasekharan/Image-Text-Detection -akdeniz27/contract-understanding-atticus-dataset-demo -akhaliq/Image_Search -akhaliq/MobileStyleGAN -akhaliq/mae -benthecoder/news-summarizer -bentrevett/emotion-prediction -bharat-raghunathan/song-lyrics-classifier -cdleong/langcode-search -Surfrider/surfnet -chrisjay/masakhane-benchmarks -crylake/img2poem -DebateLabKIT/deepa2-demo -deep-learning-analytics/GrammarCorrector -dnth/webdemo-microalgae-counting -docs-demos/openai-gpt -elozano/news-analyzer -flax-community/Multilingual-VQA -flax-community/gpt2-indonesian -flax-community/multilingual-image-captioning -gagan3012/ViTGPT2 -hysts/age-estimation-APPA-REAL -johnpaulbin/top_0 -juliensimon/voice-queries -keras-io/AdaIN -keras-io/ner_with_transformers -lewtun/twitter-sentiments -liminghao1630/TrOCR-printed -merve/streamlit-dataset-demo -nateraw/cryptopunks-generator -nielsr/DINO 
-osanseviero/gpt2_for_music -paulbricman/cybersalience -prithivida/neuspell-demo -pszemraj/ballpark-trivia -rajesh1729/live-twitter-sentiment-analysis -raynardj/modern-chinese-to-ancient-translate-wenyanwen -sonoisa/irasuto_search -tyang/electra_wikipedia_qa -Sa-m/Neural-Style-Transfer-Image-Stylization -it5/it5-demo -templates/gradio_opencv -ml6team/toxic-comment-detection-dutch -hackathon-pln-es/Sexismdetection -course-demos/Sketch-Recognition -Harveenchadha/Vakyansh-Hindi-TTS -egmaminta/indoor-scene-recognition-to-speech -akhaliq/ArcaneGAN-blocks -reach-vb/text-iterater -hackathon-pln-es/Spanish-Medical-NER -abhibisht89/Med7 -Harveenchadha/Vakyansh-Odia-TTS -hackathon-pln-es/modelo-juridico-mexicano -hackathon-pln-es/AbstractGen_ES -ecarbo/paddleOCR-demo -tomofi/Hive-OCR -huggingface/metric-explorer -huggingface/speech-bench-metrics-editor -huggan/pix2pix-uavid -huggan/ArtGAN -awacke1/Memory-Shared -shi-labs/FcF-Inpainting -h4d35/CLiPcrop -huggan/NeonGAN_Demo -lysandre/github-release -strickvl/redaction-detector -wenpeng/Sod_Inpaint -fabiochiu/title-generation -awacke1/AI-BigGAN-Image-Gen -Casio991ms/MathBot -Gradio-Blocks/RickandMorty-BlockParty -evaluate-metric/exact_match -evaluate-metric/meteor -evaluate-metric/google_bleu -flava/semantic-image-text-search -keras-io/EDSR -emilylearning/spurious_correlation_evaluation -iakarshu/docformer_for_document_classification -aseifert/ExplaiNER -GIZ/sdg_classification -keras-io/TabTransformer_Classification -keras-io/GauGAN_Conditional_Image_Generation -kargaranamir/ColorHarmonization -webshop/amazon_shop -scikit-learn/sentiment-analysis -noelshin/selfmask -CVPR/time -innat/HybridModel-GradCAM -hugginglearners/malayalam-news-classify -hugginglearners/pokemon-card-checker -CVPR/winoground-explorer -ml6team/semantic-search-demo -amarkc/Youtube-Transcript-Summarizer -AnkitGaur2811/Image_Conversion_app_using_Opencv -huggingface-projects/easy-analysis -PaulHilders/CLIPGroundingExplainability -awacke1/VideoSwap -sofmi/semantic-segmentation-revamped -awacke1/ASRGenerateStory -udion/BayesCap -hugginglearners/grapevine-leaves-classification -hugginglearners/emotion_in_tweets -mbarnig/lb_de_fr_en_pt_COQUI_VITS_TTS -EuroPython2022/Fin-Eng-ASR-autosubtitles -EuroPython2022/automatic-speech-recognition-with-next-gen-kaldi -keras-io/deit -katielink/brain_tumor_segmentation -vibey/article-summariser-for-final-project -ali-ghamdan/realesrgan-models -ldkong/TranSVAE -geraltofrivia/deoldify_videos -ivan-savchuk/medical-search -sidharthism/fashion-eye -ali-ghamdan/gfp-Gans -therealcyberlord/abstract-art-generation -SIGGRAPH2022/Text2Human -mascIT/AgeGuesser -mrdbourke/foodvision_big -CK42/sentiment-model-comparison -hasibzunair/LaTeX-OCR-demo -lfolle/DeepNAPSI -evaluate-measurement/toxicity -EuroSciPy2022/arxiv-cards -FluxWaveCorp/Ghostwriter-Bloom -pinecone/abstractive-question-answering -ruslanmv/Youtube-Video-Translator -chuanenlin/which-frame -Armandoliv/document_parser -gradio/animeganv2 -gradio/clustering -Shredder/CONBERT-3 -gradio/automatic-speech-recognition -ECCV2022/storydalle -awacke1/3DModelEditorWithAIV1 -keithhon/Real-Time-Voice-Cloning -jphwang/colorful_vectors -samusander/Transcribe.AI -Rothfeld/kmeans-pixelartifier -tafxle/Bloom_chat -rdp-studio/waifu-generator -kivantium/danbooru-pose-search -johnslegers/stable-diffusion-gui-test -crytion/DeepNude -imseldrith/Article-Generator -Eemansleepdeprived/Study_For_Me_AI -jiedong-yang/Speech-Summarization-with-Whisper -jamescalam/dream-cacher -terrierteam/splade -breadlicker45/Text-to-music-longer 
-jinhybr/OCR-layoutLM-Demo -Podtekatel/JoJo_Style_Transfer -hamel/hfspace_demo -siddh4rth/audio_to_text -Longliveruby/Spotify-Recommendation-System -yizhangliu/ImgCleaner -Andy1621/uniformerv2_demo -akhaliq/EimisAnimeDiffusion_1.0v -alibaba-pai/pai-diffusion-artist-large-zh -SerdarHelli/diffusion-point-cloud -Aphrodite/stable-diffusion-2 -crumb/sd2-prompter-aesthetic -GT4SD/regression_transformer -akhaliq/test-chatgpt -clem/dreambooth-pareidolia -ConceptArtHouse/webui-gameasset -victor/prompthero-openjourney -Bingsu/color_textual_inversion -kboaten/MIDI-Audio-Extension -bardsai/whisper-demo-pl -bradarrML/stablediffusion-infinity -xiaoyinqu/dreambooth -NbAiLab/whisper-norwegian-small -akhaliq/riffusion-riffusion-model-v1 -Artgor/digit-draw-detect -kadirnar/bsrgan -abidlabs/whisper-large-v2 -nooji/ImpCatcher -Korakoe/convert-sd-ckpt-cpu -Joom/Front-end-code-generation-from-images -vs4vijay/stable-diffusion -wavymulder/timeless-diffusion -AnnasBlackHat/Image-Similarity -peterkros/videomatting -sohojoe/soho-clip -ChrisPreston/meaqua -group2test/Protogen_x3.4_Official_Release -kdrkdrkdr/YuukaTTS -antonbol/vocal_remover -AIML-TUDA/unsafe-vs-safe-stable-diffusion -Zengyf-CVer/Gradio-YOLOv8-Det -SweetLuna/Kenshi-WebUI -trysem/Colorizer_Models -abcde1234www/tts -harmdevries/bigcode_planning -awacke1/WikipediaUltimateAISearch -theintuitiveye/FantasyMix-v1 -trysem/nuclearfu -mamiksik/commit-message-generator -akhaliq/basil_mix -katanaml-org/sparrow-ui -RamAnanth1/co_chat_voice -Korakoe/OpenNiji -rsunner/GPT-Index_simple_upload -samthakur/stable-diffusion-2.1 -lint/sdpipe_webui -mrm8488/santacoder-bash-completion -AI-Dashboards/Graph.Visualization.Plotly.Sunbursts.Treemaps.WebGL -tumuyan/Alist1 -huggingface/rlhf-interface -PirateXX/ChatGPT-Content-Detector -pierreguillou/DocLayNet-image-viewer -abcde1234www/ChatGPT-prompt-generator -Gertie01/MusicLM -Daniton/MagicPrompt-Stable-Diffusion -maliozer/microsoft-biogpt -shogi880/ChatGPT-StableDiffusion-CharacterDesign -akhaliq/Counterfeit-V2.5 -camenduru-com/wav2lip -huggingface-projects/diffusers-gallery-bot -ysharma/Chat_With_Blip2 -AsakuraMizu/moe-tts -keras-dreambooth/leaderboard -Datasculptor/Image2LineDrawing -alvanlii/domain-expansion -radames/aesthetic-style-nsfw-classifier -ysharma/Blip_PlaygroundAI -FooYou/marvel -RealTimeLiveAIForHealth/WebcamObjectRecognition -portal/Control-Net-Video -apruvd/Realtime_Speech_to_Image_Generator -pyInter/Liyuu_sovits4 -rabiyulfahim/Prompt-Refinery-Text-to-Image-Generation -hyoo/imagine -awacke1/RLHF.Cognitive.Episodic.Semantic.Memory -bachpc/table-structure-recognition -aodianyun/stable-diffusion-webui -fffiloni/RAFT -azer123456789/nicky007-stable-diffusion-logo-fine-tuned -Sloth-Alchemist/SlothAi.xyz -adirik/ALIGN-zero-shot-image-classification -kmaurinjones/wordle_wizard -anhnv125/recipe_generation -danielcwq/chat-your-data-trial -wanglettes/zw_chatgpt_01 -Pranay009/FACE2COMIC -Allakhazam/anythingV4 -keras-dreambooth/dreambooth_teddy -AIGC-Audio/Make_An_Audio -nithinraok/titanet-speaker-verification -baixing/hackathon_chatbot_openai_api -yixin6178/ChatPaper -hamacojr/CAT-Seg -totalbogus/prompthero-openjourney-v4 -deepparag/DreamlikeArt-Diffusion-1.0 -xiaolv/new-bings -MichaelT8093/Mandarin-TTS -Shocky/Pink-Anime -mikebars/huggingface -text-generation-inference/oasst-sft-1-pythia-12b -hackathon-somos-nlp-2023/learning-assistance -gstaff/sketch -Gradio-Themes/neural-style-transfer-whiteboard-style -finlaymacklon/smooth_slate -RamAnanth1/videocrafter -mrtlive/segment-anything-model 
-ImagineAI-Real/ImagineAI-Image-Generator -hackathon-somos-nlp-2023/flan-T5unami-base-v1 -hackathon-somos-nlp-2023/vg055-demo_analisis_de_sentimientos_textos_turisticos_mx_polarity -fl399/matcha_chartqa -gradio-client-demos/stable-diffusion -kazuk/youtube-whisper-17 -kazuk/youtube-whisper-18 -meyabase/oshiwambo-speech-greetings -Monster/Alpaca-LoRa -sklearn-docs/A_demo_of_the_Spectral_Bi-Clustering_algorithm -scutcyr/BianQue -abhi1nandy2/AI_Music_Team -Altinas/vits-uma-genshin-honkais -maurypb/mean_psychiatrist -sushmanth/hand_written_to_text -Kaori1707/Image-enhancement -shvuuuu/twitter-sentiment-analysis -mmlab-ntu/Segment-Any-RGBD -kfahn/Animal_Pose_Control_Net -lamini/instruct-3b-playground -sklearn-docs/regularization-path-l1 -alamin655/g-TTS -Fisharp/starcoder-playground -AtlasUnified/DeforumPromptGenerator -dhof/shapetest -hamacojr/SAM-CAT-Seg -HuggingFaceM4/obelics_visualization -MeiJuice/CheckGPT -fakezeta/pdfchat -Francesco/FairytaleDJ -lint/streaming_chatbot -Palplatine/artefact_memes -xfys/yolov5_tracking -deepghs/nsfw_prediction -huybery/deep-thinking -OpenGVLab/DragGAN -theaster/RVC-New-Arknights -m-a-p/Music-Descriptor -Caoyunkang/Segment-Any-Anomaly -matthoffner/starchat-ggml -neloreis/TheBloke-Wizard-Vicuna-13B-Uncensored-HF -ChanceFocus/FLARE -olivierdehaene/chat-ui-example -42digital/DeepFashion_Classification -sasaki-saku/www_www -h2oai/wave-university -ysharma/function-to-JSON -jbilcke-hf/webapp-factory-any-model -glt3953/app-text_generation_chatglm2-6b -mlfoundations/VisIT-Bench-Leaderboard -Tuana/what-would-mother-say -allknowingroger/Image-Models-Test14 -KyanChen/RSPrompter -jbilcke-hf/video-upscaling-server-1 -sagarkarn/text2image -Xorbits/xinference -allknowingroger/Image-Models-Test32 -allknowingroger/Image-Models-Test33 -wouaf/WOUAF-Text-to-Image -backrock/meta-llama-Llama-2-70b-hf -AIxPha/QSign -merve/my-own-llama-v2 -jbilcke-hf/AnimateDiff -allknowingroger/Image-Models-Test44 -Xenova/next-server-example-app -Ababababababbababa/poetry -jbilcke-hf/image-server -AIZero2HeroBootcamp/StaticHTML5Playcanvas -AIZero2HeroBootcamp/3DHuman -wwydmanski/esmfold -amagastya/SPARK -Thafx/sdrv51 -SenY/Civitai -allknowingroger/Image-Models-Test62 -Xuan2060320350/BingAI -Sumsub/Sumsub-ffs-demo -brainblow/AudioCreator_Music-Audio_Generation -AnonAndDesu/Desu_Proxy -pknez/face-swap-docker -HopeMan/3301 -viait/multi-fusion-sd-dalle -Brasd99/TTS-Voice-Conversion -dolphinchat/dolphinchat-llm-gpt-ui -Open-Orca/LlongOrca-13B-16k -ucanbaklava/stablediffusionapi-disney-pixar-cartoon -alfredplpl/ChatZMD -navdeeps002/codellama-CodeLlama-34b-hf -MohamedRashad/Audio-Separator -HoangHa/llama2-code -chilleverydaychill/roop -CatNika/Asian_Proxy -toloka/open-llm-leaderboard -ecker/vall-e -taejunkim/all-in-one -HopeMan/Claude -ysharma/open-interpreter -MrYXJ/calculate-model-flops -ysharma/falcon-180b-demo -librarian-bots/metadata_request_service -llmonitor/benchmarks -ennet/ChatDev -MakiAi/Image2VideoProcessingPipelin -digitalxingtong/Xingtong-Longread-Bert-VITS2 -EasyEasy/EasyProxy -Goutam982/RVC_V2_voice_clone -banana-dev/demo-faceswap -smakamali/summarize_youtube -allknowingroger/Image-Models-Test180 -Amiminoru/whoreproxy -XzJosh/Azusa-Bert-VITS2 -TLME/Bert-VITS-Umamusume-Genshin-HonkaiSR -ZachNagengast/vid2grid -tonyassi/image-segmentation -SeaLLMs/SeaLLM-Chat-13b -k2-fsa/text-to-speech -umoubuton/atri-bert-vits2 -pseudolab/GaiaMiniMed -pseudolab/GaiaMiniMed_ChatWithFalcon -openskyml/zephyr-7b-chat -pseudolab/schoolrecord_gen -hayas/CALM2-7B-chat -limcheekin/deepseek-coder-6.7B-instruct-GGUF 
-teowu/Q-Instruct-on-mPLUG-Owl-2 -aifartist/sdzoom-Latent-Consistency-Model -latent-consistency/Real-Time-LCM-ControlNet-Lora-SD1.5 -antigonus/cosmos -7Vivek/Next-Word-Prediction-Streamlit -Amrrs/image-caption-with-vit-gpt2 -BigSalmon/FormalInformalConciseWordy -BigSalmon/InformalToFormal -TheBritishLibrary/British-Library-books-genre-classifier -Huertas97/LeetSpeak-NER -KJMAN678/text_generate -Kirili4ik/chat-with-Kirill -NbAiLab/maken-clip-image -PaddlePaddle/U-GAT-IT-selfie2anime -PaddlePaddle/photo2cartoon -khoj/NSE -hunkim/DialoGPT -Theivaprakasham/layoutlmv2_sroie -Vaibhavbrkn/Question-gen -Wootang01/question_answer -Wootang01/text_generator_two -Yah216/Arabic-Sentiment-Analyser -YuAnthony/Voice-Recognition -abby711/FaceRestoration -abidlabs/Draw -akhaliq/SWAG -akhaliq/VQGAN_CLIP -akhaliq/VQMIVC -akhaliq/ctrl-sum -akhaliq/genji-python-6b -akhaliq/omnivore -arijitdas123student/meeting-summarizer -cesar/demoIAZIKA -chinhon/Headlines_Generator -darkproger/propaganda -docs-demos/bart-large-mnli -docs-demos/pegasus_paraphrase -elonmuskceo/persistent-data -espejelomar/Identify-the-breed-of-your-pet -fabiod20/italian-legal-ner -flax-community/roberta-hindi -flax-community/spanish-gpt2 -frapochetti/fast-neural-style-transfer -gogamza/kobart-summarization -gulabpatel/GFP_GAN -gulabpatel/Real-ESRGAN -huspacy/demo -hysts/bizarre-pose-estimator-segmenter -azizalto/vanilla-ml-algorithms -ibaiGorordo/Lane-Shape-Prediction-with-Transformers -joaopdrm/Emotion_Analisys -keras-io/CycleGAN -keras-io/conv-lstm -keras-io/deep-dream -kingabzpro/Rick_and_Morty_Bot -luisoala/glide-test -m3hrdadfi/gpt2-persian-qa -manhkhanhUIT/BOPBTL -mizoru/Japanese_pitch -muhtasham/legalBERT -osanseviero/AnimeGANv2-webcam -pytorch/ResNet -rajesh1729/text-summarization-gradio -raphaelsty/games -rashmi/sartorius-cell-instance-segmentation -rexoscare/Styleformer_demo -shawon100/text-paraphrasing -shujianong/pkm-card -taesiri/ConvolutionalHoughMatchingNetworks -vasudevgupta/BIGBIRD_NATURAL_QUESTIONS -wietsedv/xpos -xin/PatentSolver -yavuzkomecoglu/Turkish-Speech-Recognition -yhavinga/netherator -yrodriguezmd/Surgical_instruments_app -onnx/EfficientNet-Lite4 -akhaliq/RealBasicVSR -sarulab-speech/UTMOS-demo -tomofi/trocr-captcha -course-demos/audio-reverse -KPatrick/PaddleSpeechTTS -egmaminta/python-code-summarizer -malteos/aspect-based-paper-similarity -Belligerent/word-sense-disambiguation -IanNathaniel/Zero-DCE -unity/Indoor-Pet-Detection -onnx/mask-rcnn -onnx/faster-rcnn -kazimsayed/News-Article-Summarizer -CVPR/Demo-Balanced-MSE -godot-demo/godot-3d-trucks -godot-demo/godot-3d-voxel -Harveenchadha/Vakyansh-Malayalam-TTS -jw2yang/focalnet-modulators -hackathon-pln-es/extractive-qa-biomedicine -hackathon-pln-es/spanish-to-quechua-translation -templates/http-server -hysts/insightface-person-detection -hysts/ibug-face_alignment -huggan/Sketch2Shoes -nikhedward/TL-DR_summarize_it -huggan/pix2pix-map -SaulLu/diff-visualizer -yangheng/PyABSA-APC -huggan/cryptopunk-captcha -hysts/Manga-OCR -rajesh1729/animated-visualization-with-mercury-ipyvizzu -huggan/StyleGAN3 -SerdarHelli/Brain-MR-Image-Generation-with-StyleGAN -merve/anonymization -merve/fill-in-the-blank -merve/uncertainty-calibration -prairie-guy/Seasonal_Mood -ysharma/RickandLex_Interview_GPTJ6B -bigscience-data/bigscience-tokenizer -bigscience-data/bigscience-corpus -gradio/Echocardiogram-Segmentation -shibing624/nerpy -Finnish-NLP/Finnish-Automatic-Speech-Recognition -wahaha/u2net_portrait -BernardoOlisan/vqganclip -abdulmatinomotoso/Article_paraphraser -KenjieDec/GPEN 
-sanzgiri/cartoonify -qanastek/Alexa-NLU-Clone -Gradio-Blocks/are-you-wearing-a-mask -ntt123/vietnamese-handwriting -Gradio-Blocks/Pipeline-Tester -evaluate-metric/comet -evaluate-metric/sari -Gradio-Blocks/poor-mans-duplex -awacke1/AIDocumentUnderstandingOCR -Zengyf-CVer/Gradio_YOLOv5_Det_v4 -lopushanskyy/music-generation -johnowhitaker/whistlegen_v2 -basicv8vc/learning-rate-scheduler-online -angelina-wang/directional_bias_amplification -nateraw/modelcard-creator -bigscience-data/process-pipeline-visualizer -miesnerjacob/text-emotion-detection -keras-io/Credit_Card_Fraud_Detection -keras-io/MelGAN-spectrogram-inversion -ybelkada/bloom-1b3-gen -ZhangYuanhan/Bamboo_ViT-B16_demo -HALLA/HALL-E -awacke1/ASR-High-Accuracy-Test -cybernatedArt/Skin_disease_detection -alistairmcleay/cambridge-masters-project -CVPR/TokenCut -BigDL/bigdl_nano_demo -juliensimon/keyword-spotting -smangrul/Chat-E -Theivaprakasham/yolov6 -codeparrot/apps_metric -Shue/DIGIMAP-Group4-Animefy -hugginglearners/Identify_which_flower -carblacac/chatbot -awacke1/VideoSummary2 -AlexWortega/MailruQA -Msp/Document_Classification_DIT -huggingface/bloom-test-flax -ICML2022/ICML2022_papers -EuroPython2022/PaddleOCR -codeparrot/code-complexity-predictor -kornia/kornia-augmentations-tester -EuroPython2022/swinunetr-dicom-video -fabiochiu/semantic-search-medium -alphacep/asr -ghosthamlet/Write-Stories-Using-Bloom -platzi/platzi-curso-streamlit-segmentacion-imagenes -twigs/simplifier -omri374/presidio -ICML2022/PointCloudC -ysr/blurryAI -sidharthism/fashion-eye-try-on -vinai/VinAI_Translate -Enutrof/English-NigerianPidgin-Translator -ybelkada/petals -ky2k/image_denoise_demo -jorge-henao/ask2democracycol -oniati/mrt -EnzoBustos/IC-2022-Classificacao-de-Dados-Financeiros -JavierFnts/clip-playground -owaiskha9654/Video_Summarization -tner/NER -doevent/blip -RishShastry/ArtStyleClassifier -RoyalEagle/ArtGenerator -mohsayed/arabic_text_detection -jonathanli/youtube-sponsor-detection -daspartho/anime-or-not -kornia/homography-warping -pratikskarnik/face_problems_analyzer -BlitzEsports/TextToImage -AfrodreamsAI/afrodreams -yhavinga/rosetta -mideind/textaleidretting -johngoad/Face-Mesh -AIZ2H/06-Streamlit-NLP-Image-Semantic-Search-Images -jthteo/Whisper -ysharma/Voice-to-jokes -p208p2002/Question-Group-Generator -sneedium/dvatch_captcha_sneedium -itmorn/detect_face -NealCaren/transcript -nateraw/stable_diffusion_gallery -ai-danger/hot-or-not -MikailDuzenli/vilt_demo -dmvaldman/ICLR2023 -imseldrith/Article_Rewrite-Paraphrasing_Tool -YaYaB/text-to-magic -kotori8823/Real-CUGAN -awacke1/ASR-SOTA-NvidiaSTTMozilla -bwconrad/anime-character-classification -bowtiedhal/essay_outline_generator -kabita-choudhary/audio_to_text -luost26/DiffAb -digitiamosrl/recsys-and-customer-segmentation -tomaseo2022/Mejorar-Resolucion-Imagen -sayakpaul/fivek-retouching-maxim -Catmeow/Text_Generation_Fine_Tune -dentadelta123/grammarly -Yukki-Yui/moe-tts -jspr/autodrummer -huy-ha/semabs-relevancy -anonymousauthorsanonymous/uncertainty -tryolabs/transformers-optimization -RamAnanth1/Youtube-to-HF-Dataset -segadeds/Medical_Diagnosis -akhaliq/Text-to-Music -j43fer/MagicPrompt-Stable-Diffusion -zswvivi/ChineseMedicalT5 -Sup3r/Image-Upscaling-Playground -morenolq/galactica-base-api -TuringAgency/anic_gui -AnonymousForSubmission/Graphic_Score_and_Audio -pcuenq/dreambooth-training -clem/stable-diffusionv2_test -nightfury/Stable_Diffusion_2 -nakas/musika_api -Xhaheen/stable-diffusionv2_test_2 -datasciencedojo/YouTube-video-transcript-generator -guohuiyuan/Text-to-Music 
-os1187/free-fast-youtube-url-video-to-text-using-openai-whisper -hysts/multiresolution-textual-inversion -rizam/rakeebjaufer -sayakpaul/fetch-similar-images -YeOldHermit/Super-Resolution-Anime-Diffusion -muellerzr/accelerate-presentation -taquynhnga/CNNs-interpretation-visualization -mbazaNLP/kinyarwanda-nemo-asr-demo -vincentclaes/art-search-engine -Ramos-Ramos/visual-emb-gam-probing -Randolph/hadenjax-dreams -amitkayal/Article-Rewriter -imseldrith/Text-to-Image2 -HIT-TMG/dialogue-bart-large-chinese -drift-ai/art-search-engine -osanseviero/mishigify -tomsoderlund/text-summarizer -kadirnar/yolov6 -AIDHD/audio-video-transcriber -arbml/whisper-largev2-ar -hasibzunair/masksup-segmentation-demo -sayakpaul/tensorrt-tf -zachriek/chatgpt-clone -kouenYoung/anime-tts -xelu3banh/dpt-depth16 -Shad0ws/Videoclassifier-ZEROSHOT -abidlabs/whisper -Abhilashvj/haystack_QA -GIanlucaRub/DoubleResolution -DrHakase/full-body-anime-gan -BigData-KSU/VQA-in-Medical-Imagery -om-app/magic-diffusion -om-app/Promt-to-Image-diffusions -Rmpmartinspro2/Comic-Diffusion -keremberke/license-plate-object-detection -biodatlab/whisper-thai-yt-subtitles -umair007/all_in_one_converter -ccds/vits_onnx -HutzHoo/dreamlike-photoreal-2.0 -patrickvonplaten/protogen-web-ui -deepghs/deepdanbooru_online -vicalloy/GFPGAN -Arafath10/chatcode -kazuk/youtube-whisper-06 -b3xxf21f/A3Private -akhaliq/Counterfeit-V2.0 -Smithjohny376/Orangemixes -diffusers/check_pr -derek-thomas/top2vec -alirezamsh/small100 -redpeacock78/anything-v5.0 -Tirendaz/background-remover -ClassCat/YOLOS-Object-Detection -librarian-bot/webhook_metadata_reviewer -Elbhnasy/Foodvision_mini -mpuig/gpt3-email-generator -competitions/create -camenduru-com/tensor-rt -ai-forever/NotebooksRecognition -sheldon/xiaolxl-GuoFeng3 -juliensimon/bridgetower-demo -jbrinkma/deepmind-pushworld -active-learning/labeler -sheikyerbouti/riffusion-playground -hysts/DETA -multimodalart/Tune-A-Video-Training-UI-poli -spaces-ci-bot/webhook -yahma/rwkv-instruct -king007/invoices -hysts/Compare-DETA-and-YOLOv8 -mrm8488/santacoder-swift-completion -HuggingFaceH4/Elo -awacke1/AutoMLUsingStreamlit-Plotly -deprem-ml/ner-active-learning -DataScienceEngineering/1-SimPhysics-HTML5 -Blealtan/clip-guided-binary-autoencoder -AlphonseBrandon/speecht5-tts-demo -RamAnanth1/T2I-Adapter -JcRolling/cartoon-converter -PierreSHI/YOLOS_traffic_object_detection -Thafx/sdrv1_4 -deeplearning/audioldm-text-to-audio-generation -harkov000/peft-lora-sd-dreambooth -jskim/paper-matching -jin-nin/artist -RamAnanth1/human_preference -text-generation-inference/chat-ui -Mayank-02/Matching-job-descriptions-and-resumes -mbazaNLP/Speech-recognition-east-african-languages -bigjoker/stable-diffusion-webui -dylanmeca/ChatGPT-Assistant -cxeep/PaddleOCR -awacke1/AIZTH-03-09-2023 -jarvisx17/YouTube-Video-Summarization -WiNE-iNEFF/WebUI-Counterfeit-V2.5 -NoCrypt/SomethingV2 -Daextream/Whisper-Auto-Subtitled-Video-Generator -xp3857/Image_Restoration_Colorization -Hexequin/dreamlike-photoreal-2.0 -chriscelaya/streaming_chat_gpt-3.5-turbo_langchain -ashhadahsan/ai-book-generator -cooelf/Multimodal-CoT -keras-dreambooth/traditional-furniture-demo -suko/nsfw -keras-dreambooth/Pokemon-dreambooth -unilight/s3prl-vc-vcc2020 -patrawtf/shopify_csv_qa -burakaytan/turkish_typo_correction -mindart/infinite-zoom-stable-diffusion -ranjangoel/GPT-PDF -Alpaca233/ChatGPT-PPT-Generate -pszemraj/generate-instructions -gradio/seafoam -hackathon-somos-nlp-2023/leaderboard -zenml/zenml -LeoLeoLeo1/ChuanhuChatGPT -AI4PD/hexviz -somosnlp/somos-alpaca-es 
-silentchen/layout-guidance -ns2001/pdfgpt -bobu5/SD-webui-controlnet-docker -aliabid94/gpt_who -gstaff/whiteboard -Notalib/GPT-Whisper-Wolfram-Google-Test -drift-ai/faq-website -fffiloni/video2canny -SamerKharboush/chatGPT-Sam-Turbo -yxmnjxzx/Lama-Cleaner-lama -peterwisu/lip_synthesis -hugforziio/chat-gpt-batch -ParityError/Interstellar -superwise/elemeta -sakasegawa/whisper-speaker-diarization-assign -Aaaaaaaabdualh/poetry -Cletrason/Cletrason-toad-mario-movie -declare-lab/flan-t5-xl-lora -henryu/Clip-image2text -jjzha/skill_extraction_demo -fffiloni/mmpose-estimation -ochyai/ochyai_food -ieuniversity/Clothes_image_captioning -marinap/multilingual-image-search -gradio-client-demos/comparing-captioning-models -HaoFeng2019/DocTr -weijiawu/ImageEditAnything -niizam/sovits-models -Volkopat/arXivGPT -king007/Voice-Cloning -sajornad/ZoeDepth -lithiumice/SadTalker -HaoFeng2019/DocGeoNet -AI-Dashboards/Memory-Chat-Story-Generator-ChatGPT -satyamg1620/PCA-Image-Reconstruction -ardigen/ardisplay-i -HLasse/textdescriptives -mohsenfayyaz/DecompX -nsarrazin/serge -HighCWu/Style2Paints-4.5-Gradio -hra/Curriculum-BabyAGI -zeno-ml/openai-evals -Kevin676/Shanghainese-TTS-demo -cogcorp/assignment1 -mattmdjaga/segment_anything_base -Future-AI/image-matting -Celestinian/Prompt-Generator -Kaori1707/Depth-estimation -sander-wood/clamp_semantic_music_search -sander-wood/clamp_zero_shot_music_classification -mthsk/sovits-models-misc -Zhenhong/text-to-speech-SpeechT5-demo -lmattingly/cartoonify-yourself -Pie31415/control-animation -JFoz/Dog-Pose-Editor-Controlnet -joaogante/assisted_generation_benchmarks -lamini/instruct-playground-12b -ahmetfirat/KORKUT_A_Spacetime_Odyssey -Volkopat/SegmentAnythingxGroundingDINO -dy2dx2/Physics-Assistant -cc38300/constructionGPT -anisharitakula/sentiment_classifier -thecho7/deepfake -instruction-tuning-sd/instruction-tuned-sd -cloixai/stable-diffusion-webui-cpu -radames/gradio_window_localStorage -h2oai/wave-tour -koajoel/PolyFormer -kevinwang676/web-singer-new-2 -JCTN/controlnet-segment-anything -dorkai/text-generation-webui-main -voices/VCTK_British_English_Females -sradc/visual-content-search-over-videos -voices/voice-directory -vishnu23/OCR_with_image -Khaled27/Naptah -anzorq/spaces-semantic-search-api -yoinked/da_nsfw_checker -Superlang/ImageProcessor -nicehero/ManualMask -zou-code/gorilla-llm-gorilla-7b-hf-delta-v0 -shgao/MDT -llamaindex/text2image_prompt_assistant -renumics/cifar100-enriched -noamelata/Nested-Diffusion -Mountchicken/MAERec-Gradio -Cloudyy/bark-voice-cloning -raghavtwenty/cyber-attack-prediction -allen-eric/radiology-gpt -yuangongfdu/whisper-at -RitaParadaRamos/SmallCapDemo -Wauplin/space_to_dataset_saver -nomic-ai/atlas -leonelhs/GFPGAN -Yntec/DucHaiten-Webui-CPU -p1atdev/waifu_aesthetics -artificialguybr/liberte -ADOPLE/Adopleai-DocumentQA -MAPS-research/GEMRec-Gallery -Shad0ws/AI-Agent-with-Google-Search-APIs -Voicemod/Speech-to-Speech -NealCaren/TranscribeX -matthoffner/falcon-mini -TrustSafeAI/RADAR-AI-Text-Detector -odettecantswim/rvc-mlbb-v2 -h2oai/theme-generator -YaTharThShaRma999/WizardLM7b -allknowingroger/Image-Models-Test11 -cvsys/upscale -verkaDerkaDerk/face-mesh-workflow -Matthijs/mms-tts-demo -allknowingroger/Image-Models-Test16 -yaoshining/text-generation-webui -jbilcke-hf/MusicGen -course-demos/speech-to-speech-translation -nomic-ai/zhengyun21_PMC-Patients -hesha/upscaler -jbilcke-hf/video-interpolation-server -Ababababababbababa/SD-2.1-Img2Img -Dragonnext/Drago-Proxy -theaster/imoitari -oppappi/wd-v1-4-tags -TTT-9552/Y7cLhT3pE9gV4xW2nQ5 
-HuggingAlgorithms/PDF-TextExtractor -luisotorres/wine-quality-predictions -VoiceHero69/changer -allknowingroger/Image-Models-Test38 -benzel34/fun -gradio/chatinterface_streaming_echo -tcfly/Flowise -xswu/HPSv2 -asas-ai/Arabic-LLM-Leaderboard -NMEX/rvc-hoyogame-v2 -r3gm/ConversaDocs -KAIST-Geometric-AI-Lab/salad-demo -mikeee/llama2-7b-chat-uncensored-ggml -mikeee/nousresearch-nous-hermes-llama2-13b-ggml -grzegorz2047/fast_diffusion -WinterGYC/Baichuan-13B-Chat-Int8 -grass-eater/grassproxy -foduucom/plant-leaf-detection-classification-yolov8 -Thafx/sdrv50 -PeepDaSlan9/stabilityai-stable-diffusion-xl-base-1.0 -vanderbilt-dsi/free-speech-app -qblocks/Monster-SD -huaiji3y/BingAI-Public -jbilcke-hf/upscaling-server -harshitv804/LawGPT -s3nh/WizardLM-1.0-Uncensored-Llama2-13b-GGML -Blessing/Asphalt-Pavement-Distresses-Detector -Manjushri/SDXL-1.0-Doodle-to-Image -remotewith/image-to-text-app -Justin-Choo/Counterfeit_WEB_UI -allknowingroger/Image-Models-Test83 -viait/stable-diffusion -felixz/meta_open_llm_leaderboard -mrspinn/goofyai-3d_render_style_xl -doncamilom/ChemCrow -damo-vilab/MS-Image2Video-demo -ridges/WizardLM-WizardCoder-Python-34B-V1.0 -Alfasign/dIFFU -thnqls/Phind-Phind-CodeLlama-34B-v2 -mofu-team/ggl-chk -Yntec/photoMovieX -radames/gradio-blender-bpy -BraydenMoore/MARCI-NFL-Betting -chengli-thu/ChatHaruhi-OpenAI -xeonm/image-to-audio-story -asigalov61/Allegro-Music-Transformer -insomniac0/Midnight -jordonpeter01/ai-comic-factory -daishen/LAiW -eaglelandsonce/QueryaWebsite -AIWaves/Software_Company -lalashechka/sdxl2 -XzJosh/nine1-Bert-VITS2 -openkg/llm_leaderboard -librarian-bots/collection-reading-list-generator -deepkyu/multilingual-font-style-transfer -eaglelandsonce/chromadbmeetupdemo -AzumaSeren100/XuanShen-Bert-VITS2 -kevinwang676/Voice-Cloning-for-YouTube -XzJosh/nanami-Bert-VITS2 -andreped/AeroPath -tonyassi/fashion-stylist-bot -LanguageBind/LanguageBind -jbochi/Candle-CoEdIT-Wasm -TheStinger/ILARIA_UVR -3B-Group/ConvRe-Leaderboard -innat/VideoMAE -SoAp9035/mistral-7b-fast-chat -Roboflow/Annotators -tonyassi/controlnet-explorer -XzJosh/Diana-Bert-VITS2 -rishiraj/zephyr -ennov8ion/500models -nagolinc/spritesheet_to_gif -chikoto/Umamusume-DeBERTa-VITS2-TTS-JP -abidlabs/gradio-lite-image -ethan-ai/goofyai-3d_render_style_xl -xuyingliKepler/nexaagent -AILab-CVC/SEED-LLaMA -library-samples/image-captioning-with-git -autotrain-projects/llm-merge-adapter -pseudolab/MistralMED_Chat -r3gm/Fast_Stable_diffusion_CPU -INDONESIA-AI/Lobe -nsarrazin/chat-ui-idefics -pseudolab/PatentClaimsExtraction -deepset/search-all-the-docs -Siyuan0730/OmniTutor -codelion/Grounding_DINO_demo -Dentro/face-swap -huolongguo10/chatglm3.cpp-int4 -openskyml/image-upscaler -AI-ANK/PaLM-Kosmos-Vision -MoonQiu/LongerCrafter -markllego/openai-gpt4-vision -xuyingliKepler/openai_play_tts -mrm8488/whisper-large-v3 -pseudolab/Colorful-illustration -52Hz/CMFNet_dehazing -AlgoveraAI/dcgan-crypto-punks -AlgoveraAI/web3-wallet -Andy1621/uniformer_image_demo -Andy1621/uniformer_video_demo -BigSalmon/MASKK -Buckeyes2019/NLP_Demonstration -CVPR/lama-example -chainyo/Translator -Detomo/Car_part_classification -Detomo/voice-japanese -Egrt/LicenseGAN -Francesco/torch-cam-transformers -Giuliano/T0 -GotAudio/Understanding-Women -HamidRezaAttar/gpt2-home -Harveenchadha/oiTrans -MarcBrun/basque-qa -Narrativa/fake-news-detection-spanish -NeuML/articlesummary -NeuML/wikisummary -Norod78/ComicsHeroU2Net -Norod78/Hebrew-GPT-Neo-Small -Qiwei97/Pubmed_Analyzer -SajjadAyoubi/CLIPfa-Demo -Sakil/essay_generator_app 
-Shreyas3006/Text-Summarizer-sdp -Wootang01/chatbot -abidlabs/image-classifier -abidlabs/keras-image-classifier -abidlabs/live-sketch-recognition -akdeniz27/turkish-zero-shot-text-classification-with-multilingual-models -akhaliq/AppleNeuralHash2ONNX -akhaliq/Speechbrain-audio-classification -akhaliq/Swin-Transformer -akhaliq/espnet2_asr -akhaliq/pedalboard -akhaliq/t5-base-fine-tuned-on-jfleg -akhaliq/yolov3 -am4nsolanki/hateful-memes -ansfarooq7/l4-project -bankholdup/rugpt3_song_writer -bentrevett/named-entity-recognition -cdleong/phonemize-text -chinhon/Commentaries_Headlines_Generator -chinhon/translation_eng2ch -davidefiocco/GPT3-summary -ebgoldstein/FRF_Coarse -edemgold/Tone-Transfer -elonmuskceo/sparknlp -elozano/tweet_eval -emrecan/zero-shot-turkish -erwanlc/Barman-T5 -eugenesiow/yolo-v5 -facebook/XLS-R-2B-EN-15 -flax-community/roberta-base-mr -flax-community/t5-vae -gogamza/kogpt2-base-v2 -gradio/GANsNRoses -gradio/HuBERT -gradio/gpt-neo -hysts/stylegan3-food101 -hysts/yolov5_anime -iamkb/zero-shot-nlp-classifier-multi-lang -ibaiGorordo/hugging-face-me -j-hartmann/emotion-classification-from-csv -jb2k/bert-base-multilingual-cased-language-detection -jkang/demo-gradcam-imagenet -jkang/demo-painttransformer -juliensimon/song-lyrics -keras-io/Flowers-Classification-MobileViT -keras-io/conditional-GAN -keras-io/multimodal_entailment -keras-io/supervised-contrastive-learning -luisoala/raw2logit -m3hrdadfi/zabanshenas -masterak25/LSTM_stock_prediction -mayhug/Real-CUGAN -mayhug/rainchan-anime-image-label -mayhug/rainchan-image-porn-detection -merve/BigGAN-ImageNET -merve/t5-playground -mrm8488/PromptSource -nateraw/huggingpics-explorer -nateraw/spotify-pedalboard-demo -ncduy/emotion-classifier -ncoop57/clifs -nielsr/vilt-nlvr -nlp-en-es/bertin-sqac -philsark/clip-guided-diffusion-identity -pierreguillou/question-answering-portuguese-t5-base -pritamdeka/pubmed-abstract-retriever -pytorch/3D_ResNet -qanastek/French-Part-Of-Speech-Tagging -r2d2/speech2text -risingodegua/wine_quality_predictor -samarthagarwal23/Scotch_recommendation -satpalsr/grammar-correction -spotify/huggingface-demo-song-lyrics -stevenkolawole/T5-multitasks-streamlit -taesiri/DeepSimilarity -tareknaous/arabic-empathetic-response-generation -tcapelle/wandb -trnt/twitter_emotions -ttheland/demo-butterfly-spaces -vaibhavarduino/anime-plus -widged/bart-generation -wolfrage89/company_segments_ner -xiatao/microsoft-trocr-base-printed -xiongjie/u2net_rgba -yseop/financial-relation-extractor-demo -abidlabs/remove-bg -akhaliq/MTTR -huggan/anime-face-generator -PaddlePaddle/animegan_v2_shinkai_53 -mertguvencli/trending-techs-on-data-science -Sakil/Humanoid_robot -xiaosu-zhu/McQuic -merve/sorting_hat -Wootang01/image_classifier -samueldomdey/ClipCosineSimilarityURL -kingabzpro/Urdu-ASR-SOTA -saefro991/aet_demo -vitaliykinakh/Galaxy_Zoo_Generation -tomofi/MaskTextSpotterV3-OCR -jervinjosh68/vit-age-classifier -hackathon-pln-es/jurisbert-test-finetuning-ner -apoorvumang/kgt5 -shibing624/similarities -shawarmabytes/stream-your-emotions -st0bb3n/Cam2Speech -aaronherrera/Calorie_Counter -sophiaaez/BLIPvOFAde -hysts/1adrianb-face-alignment -erikacardenas300/Company_Classifier -naver/SuperFeatures -SerdarHelli/Knee-View-Merchant-Landmark-Detection -senior-sigan/vgg_style_transfer -gdn/Question-Answer-Demo -mustapha/ACSR -hysts/TADNE -Manimaran/pokemon-classifier -osanseviero/food_classifier_v1 -ecarbo/text-generator-demo -Zengyf-CVer/gradio_yolov5_det -hysts/TADNE-image-search-with-DeepDanbooru -dnouri/crowd-counting -Vijish/SkinDeep 
-chuxiaojie/NAFSSR -merve/dataset-worldviews -AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz -aware-ai/german-asr -espejelomar/cat_or_dog_fastai -DrSnowbird/clip-image-search -jph00/minimal -prairie-guy/Art_Mood -davidrd123/WikiArt_20genre -awacke1/AI-Wikipedia-Search -gradio/same-person-or-different -nateraw/simple-video-to-video -nazneen/error-analysis -aryadytm/paraphrase -hasibzunair/image-recognition-demo -yerfor/SyntaSpeech -Fawaz/nlx-gpt -ReneeYe/ConST-speech2text-translator -pierreguillou/pdf-firstpage-to-img -bingbingbing/ImageEnhancement -awacke1/ChemistryMoleculeModeler -evaluate-metric/super_glue -evaluate-metric/f1 -evaluate-metric/mauve -Xhaheen/GPTJ_PLUS_DALL_E -AlexWortega/ruImageCaptionong -pierreguillou/layout-parser -Gradio-Blocks/beat-interpolator -GDavila/GIFify_OpenCV -Gradio-Blocks/ML-Aided-Code-Analysis -natdon/Michael_Scott_Bot -Gradio-Blocks/Michael_Scott_Bot_Gradio_Blocks -Gradio-Blocks/minority-asr -Gradio-Blocks/CBNetV2 -Pippoz/All_in_one -Gradio-Blocks/illustrated-spanish-poem -Gradio-Blocks/Speech-to-text -Gradio-Blocks/spurious_correlation_evaluation -keras-io/collaborative-filtering-movielens -osanseviero/hugging_eats -Theivaprakasham/layoutlmv3_sroie -nagolinc/LatentDiffusion_and_ESRGan -julien-c/merve-data-report -neurotech/Swahili-sentiment-analysis -hysts/MangaLineExtraction_PyTorch -keras-io/molecular-property-prediction -CVPR/v-doc_abstractive_mac -najoungkim/round-trip-dalle-mini -awacke1/GradioBlocksDemo-Transformers -Axolotlily/Interpolate -mindwrapped/pokemon-card-checker -denisp1/ChemistryMoleculeModeler -Zengyf-CVer/ocr_translate -Paaz/gpt2-lyrics -Madhuri/vqa_audiobot -milai-tk/clip-human-action-img2txt -mantisnlp/SearchMesh -zhezh/mm-commerce -ModIA/FrenchDroneKeyword -d4data/Bias-Fairness-in-AI -awacke1/NLP-Lyric-Chorus-Image -CVPR/monoscene_lite -trysem/AnimeGANv2 -kamalkraj/min-dalle -awacke1/ArtStyleLineDrawing -permutans/LayoutLMv3-FUNSD -doevent/Image2LineDrawing -keras-io/structured-data-classification-grn-vsn -nmud19/Sketch2ColourDemo -codeparrot/code-generator -EuroPython2022/mmocr-demo -kmkarakaya/Auto_Review_Generation_in_Turkish -EuroPython2022/OCR-Translate -Sangmin/Eiken-Essay-with-GPT3 -Curranj/Regex_Generator -Zengyf-CVer/Gradio_YOLOv5_Det_v5 -XAI/CHM-Corr -ICML2022/resefa -dnouri/monai-demo -aiEDUcurriculum/introtoAI-mental-health-project -cap99/ocr -User1342/WatchTower -owaiskha9654/Multi-Label-Classification-of-Pubmed-Articles -eldoraboo/zero-shot -lewiswu1209/MockingBird -themasterbetters/the-master-betters-translator -hasibzunair/melanoma-detection-demo -hhim8826/vits-ATR -Archan/ArXivAudio -owaiskha9654/Custom_Yolov7 -radames/gradio-url-params -shibing624/chinese-couplet-generate -AIZeroToHero/03-ImageSearchSimilar -panpan06/ImageSearchSimilar -jracca/04-learning-space -hysts/space-that-creates-model-demo-space -keithhon/logo-generator -wing-nus/SciAssist -MrSinan/Reconstruction -doevent/cartoonizer-demo-onnx -jrahn/yolochess -gradio/autocomplete -ruslanmv/Video-Translator -nschenone/lyric-buddy -Ammar-alhaj-ali/LayoutLMv3-FUNSD -anasanchezf/cloome -scikit-learn/gradio-skops-integration -NotFungibleIO/GFPGAN -kornia/line-segment-matching -ruslanmv/Text2Lip -gradio/diff_texts -daspartho/predict-subreddit -coledie/Fashion_VAE -Kamtera/Persian_Automatic_Speech_Recognition_and-more -RMeli/gnina-torch -theodotus/streaming-asr-uk -djgoettel/01-3DModel-GradioDemo -akhaliq/VideoMAE -manishjaiswal/05-SOTA-Question-Answer-From-TextFileContext-Demo -bassazayda/Whisper -fsdlredteam/BuggingSpace 
-freddyaboulton/gradio-google-forms -nightfury/StableDiffusion.Img2Img-Gradio -evaluate-measurement/honest -julien-c/nvidia-smi -RTL/videomatch -venz/AW-05-GR-NLP-Image2Text-Multilingual-OCR -awacke1/BlackjackSimulatorCardGameAI -jayesh95/Voice-QA -rodolfoocampo/IllustratedNarrativeDevice -awacke1/StoryWriterTextGenMem -cjayic/sd-dreambooth-jerma -MLSquad-TWCN/near-continuous-whispering -BilalSardar/StoryGenerator -mdnestor/URL-to-Whisper -johnslegers/stable-diffusion -freddyaboulton/atari_agents -Gazoche/text-to-gundam -Avkash/WhisperUI -diagaiwei/ir_chinese_medqa -sayakpaul/lol-enhancement-maxim -sayakpaul/sidd-denoising-maxim -sayakpaul/sots-indoor-dehazing-maxim -lewtun/stable-diffusion-demo -candlend/vits-hoshimi -jmparejaz/Audio_to_text_classification -GV05/stable-diffusion-mingle-prompts -omarelsayeed/SentenceSimilarity-Quran-v2 -chansung/segmentation-training-pipeline -FathomNet/MBARI_Monterey_Bay_Benthic -elonmuskceo/docker-aimstack -neko941/YOLOv5-Hololive_Waifu_Classification -lsmyrtaj/cse6242-dataminers -ianpan/bone-age-greulich-and-pyle -shivi/ChequeEasy -oucgc1996/Antimicrobial-peptide-generation -abidlabs/speak -unb-lamfo-nlp-mcti/nlp-mcti-preprocessing-single -BilalSardar/YoutubeVideoLink-To-MCQs-Generation -awacke1/DatasetAnalyzer -daspartho/text-emotion -haakohu/deep_privacy2 -akhaliq/Nitro-Diffusion -IDEA-CCNL/Erlangshen-UniMC-Zero-Shot -fxmarty/bettertransformer-demo -autoevaluator/shoes-vs-boots-vs-sandals -TopdeckingLands/Diffusion_Space -breadlicker45/galactica-base -montagekoko/anything-v3.0 -lingbionlp/PhenoTagger-Demo -aayushmnit/diffedit -Rahorus/openjourney -ORI-Muchim/PowerTTS -ORI-Muchim/RaidenTTS -AlishbaImran/Redox-Flow-Battery-Prediction -lvkaokao/INC-Dicoo-Diffusion -jpwahle/plagiarism-detection -JUNGU/VToonify -loralora/sovits_aishell3 -akhaliq/woolitize -plasmo/woolitize -armanokka/nllb-translation-demo -ahmedghani/svoice_demo -hpi-dhc/FairEval -gradio/chatbot_multimodal -tennant/MUG_caption -alexandrainst/zero-shot-classification -eskayML/mask_segmentation -dataminers/dataminers -Svngoku/TableTransformer2CSV -MoyAI/ProfNet -JohnnyPittt/audio-styling -ECE1786-AG/ArtIstic-GENREator -cmotions/beatlify -pragnakalp/Emotion_Detection -cahya/indonesian-whisperer -remzicam/voicebot_german -quantumiracle-git/OpenBiDexHand -lambdalabs/generative-music-visualizer -CodeDoes/FrostAura-gpt-neox-20b-fiction-novel-generation -RaviRaj988/Asking-question-to-video -robmarkcole/yolov5-ui -Knowles-Lab/tiger -lianzhou/stable-diffusion-webui -ORI-Muchim/NahidaTTS -kdrkdrkdr/HutaoTTS -ygangang/Image-Animation-using-Thin-Plate-Spline-Motion-Model -Shad0ws/imagetomusic -ygangang/CodeFormer -YeOldHermit/StableDiffusion_AnythingV3_ModelCamenduru -FloydianSound/Wlop_Diffusion -imseldrith/txt2img -jhlfrfufyfn/old-bel-tts -avirathtibrewala/YTToText -Malifex/CPU-Anything-V3.0-WebUI -SpacesExamples/secret-example -neuralmagic/question-answering -pragnakalp/Huggingface_Sentiment_Analysis -juancopi81/sd-riffusion -musicians/deepharmony -erwann/Face-editor -ybelkada/blip-image-captioning-space -Roxza/DialoGPT -abidlabs/images -ItsJayQz/Marvel_WhatIf_Diffusion -MountLiteraSwd/sd-dreambooth-library-riffusion-rage -cagatayodabasi/dreamlike-photoreal-1.0-CPU -breadlicker45/the-jam-machine-app -Joeythemonster/Text-To-image-AllModels -adpro/dpt-depth04 -adpro/dpt-depth16 -ThirdEyeData/Text-Summarization -kyuubi08/22h-vintedois-diffusion-v0-1 -teo-sanchez/prompt_specifier_recognizer -spookyspaghetti/Speech-Analyser -Missinginaction/stablediffusionwithnofilter -jackvial/frozen-lake 
-Khalida1w/denoising -awacke1/Docker-FlanT5-TextGeneratorTranslator -MCkernick/Image_Restoration_Colorization -deedax/Change-Your-Style -robertoberagnoli/openai-jukebox-1b-lyrics -vietvd/image-enhance -sophiamyang/Panel_apps -hrishikeshagi/ImagePromptGenerator -CYSD/AI-image-detector -sophiamyang/panel_example -tumuyan/Night_Enhancement -Groq/mlagility -adirik/ChangeIt -GT4SD/patent_generative_transformers -juliensimon/battle_of_image_classifiers -SalahZa/Tunisian-ASR-v0 -feizhengcong/video-stable-diffusion -pinecone/diffusion-image-search -ahmedale/Youtube-Whisperer -ClassCat/Medical-Image-Classification-with-MONAI -decodemai/business_tech_ideas -deepghs/anime-ai-detect-fucker -robjm16/domain_specific_ChatGPT -philschmid/furiosa-ai-ocr -vladocar/Text-to-Speech -ClassCat/Brain-tumor-3D-segmentation-with-MONAI -JUNGU/SuperGlue-Image-Matching -JUNGU/Whisper-Auto-Subtitled-Video-Generator -caffeinum/VToonify -roseyai/Chat-GPT-LangChain -KwabsHug/Language-Learn-Idea -vigneshv/TrOCR-handwritten -tumuyan/RealSR -julien-c/nbconvert -GT4SD/molecular_properties -decodemai/market_sizing -reha/Stick_Tech -StefanHex/simple-trafo-mech-int -Didisoftwares/GFPGAN -vialibre/edia -DataScienceGuild/ChatbotWithDataframeMemory -ClassCat/DETR-Object-Detection -GeneralNewSense/Text-to-Music -pcuenq/lora-pokemon -huggingface-projects/InstructPix2Pix-Chatbot-ui -tobiaspires/ad-image-generation -DarwinAnim8or/GPT-Greentext-Playground -yizhangliu/DalleClone -mariashay/DataViz-Mermaid -stable-bias/diffusion-bias-explorer -davanstrien/Doc-UFCN -sgonzalezsilot/Fake-News-Twitter-Detection_from-my-Thesis -singhk28/nocodeml -UVA-MSBA/Employee_Turnover_Ex -spacerini/imdb-search -Yusin/ChatGPT-Speech -breadlicker45/gpt-ya-gen -zjunlp/KGEditor -Raspberry-ai/main -keras-dreambooth/example-submission -prajdabre/CreoleM2M -tizze/websitechatbot -fffiloni/image-to-sound-fx-debug -Goodsea/deprem-ocr-paddleocr -rynod/LangChain_ChatGPTSlackBotBot -OFA-Sys/FAST-CPU-small-stable-diffusion-v0 -alsrbdni/magic-to-diffusion -mbazaNLP/Kinyarwanda-text-to-speech -juliensimon/xlm-v-base-language-id -HaloMaster/ChineseLLM -unstructuredio/unstructured-invoices -maxime/chat-with-your-telegram-chat -nickmuchi/Investor-Education-ChatChain -Future-Tense/Slo-Mo-YOLO-Video -AUST001/ChatGPT -bfh-nlp-circle/nlp-cirlce-demo -pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v1 -AIFILMS/speecht5-tts-demo -giswqs/geospatial -GipAdonimus/Real-Time-Voice-Cloning -mindspore-ai/Zidongtaichu -lfoppiano/grobid-superconductors -ysharma/Stream_PlaygroundAI_Images -codejin/diffsingerkr -vincentclaes/DocumentQAComparator -kermitt2/grobid-crf -felixz/LLM-as-continuous-chat -Thorsten-Voice/demo -hwang1/anime-gan -hra/ChatGPT-Tech-Radar -Miuzarte/SUI-svc-4.0 -jeffeux/zhtwbloomdemo -Jackflack09/finetuned_diffusion2 -zjunlp/MolGen -awinml/2-qa-earnings-sentencewise -mushroomsolutions/chatgpt-3 -taesiri/CLIPSeg2 -yefengzi/vits-models -Teklia/doc-ufcn -hyoo/translate -nmaina/EleutherAI-gpt-j-6B -GeemiW/pdb_answers -svjack/ControlNet-Pose-Chinese -tecnolitas/MJ-prompt-generator -raghuram13/extract_text_from_image -akhaliq/multi-modal_chinese_stable_diffusion_v1.0 -2hack2furious/anonymizer -Shad0ws/Chat-with-Files -pierreguillou/Inference-APP-Document-Understanding-at-linelevel-LiLT-base-LayoutXLM-base-v1 -chansung/LLaMA-13B -hysts/cv_diffusion_text-to-image-synthesis_tiny -thelou1s/chat_gpt_space -30Kanika/Animal_Image_Classifier -dromerosm/chatgpt-info-extraction -jhj0517/Whisper-WebUI-Easy-Subtitle-Generator -simpx/chatdemo 
-keras-dreambooth/lowpoly-world-demo -Cartof/Chatbot -NeuralInternet/chattensor-prompt-generator-v12 -R-001/HumanAI -qinzhu/diy-girlfriend -mikaelbhai/GPTBhai_text -Detomo/Object_detection -safetensors/safetensors-checker -GingerBreadXD/trading-bot -echarlaix/openvino-export -Legal-ease/legal-ease -hack46/46jobs -priyanshu02/Linguistics-Accents -dteam/chatgpt-dteam -ispast/Genshin_MB_VITS_TTS -shigel/recipe -abidlabs/pakistan -DESUCLUB/BLLAMA -keras-dreambooth/piranesi-monument-art -thirdai/FoodUDT-1B -mikeion/research_guru -Gameel/TextToSpeech -PKaushik/Human-Part-Segmentation -yfkm/chat_gpt_space_public -meowingamogus69/stable-diffusion-webui-controlnet-docker -starsdeep/NAFNet -jonatanklosko/chai -JunchuanYu/SydneyAI-plus -ruanchaves/portuguese-offensive-language-detection -zuhuri/OpenAI_chatgpt-turbo-UI -radames/live-pose-maker-gradio -keras-dreambooth/galaxy-mergers -davanstrien/notebooks_on_the_hub -ragha108/aiyogi_text_to_audio -hra/ChatGPT-SEC-Filings-QA -crimeacs/phase-hunter -eIysia/VITS-Umamusume-voice-synthesizer -Kevin676/midjourney-v5 -Mrchuw/Image-Animation-using-Thin-Plate-Spline-Motion-Model -Gradio-Themes/informativedrawings-sketch-style -sawblade/prompt-extend -TerrificTerry/Club_Review_Antidetector -Aaaaaaaabdualh/topic2poem -sklearn-docs/IsolationForest-Model-for-Anomaly-Detection -abdvl/datahub_qa_bot -TencentARC/TagGPT -ruboin/faster-whisper-webui -gryhkn/free-fast-youtube-url-video-to-text-using-openai-whisper -GT4SD/PatentToolkit -hackathon-somos-nlp-2023/suicide-comments-es -dineshb/Speech2Text -jsebdev/stock_predictor -sklearn-docs/Out-of-Bag-estimates -sklearn-docs/SGD_Penalties -hra/ChatGPT-MindMap -JanDalhuysen/whisper-speaker-recognition -unstructuredio/irs-manuals -kazuk/youtube-whisper-15 -onursavas/Chinese_Document_Layout_Analysis -huggingfacejs/image-to-text -lakshmi324/DocuAI -k8tems/LangChain_AgentGPTBot -ixxan/multilingual-vqa -cuiyuan605/Text-to-Image -ThirdEyeData/Object-Detection-Using-FRCNN -shivi/calm_seafoam -segments/segment-anything-image-embedding -asiffarhankhan/custom-gpt-voice-assistant -congxin95/BMTools-demo -simonduerr/gradio-2dmoleculeeditor -ShawnAI/VectorDB-ChatBot -suchun/chatGPT_acdemic -atyshka/ai-detector -fedor-ch/langchain-ynp-test -leurez/moss -thestasi/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper -aadarsh-af/text_to_image -MrD05/text-generation-webui-space -sysf/Edge-TTS -sander-wood/clamp_similar_music_recommendation -merve/voice-cloning -devloverumar/AI-Content-Detector -bergrozen1213/3d-obj -hra/GPT4-makes-BabyAGI -FourthBrainGenAI/AI-Superstar-Space -mvnhat/langchain-agent-demo -bioriAsaeru/text-to-voice -HgMenon/Transcribe_V0.2 -vanderbilt-dsi/langchain-assistant -diivien/Music-Popularity-Prediction -HaleyCH/HaleyCH_Theme -Smithsonian/amazonian_fish_classifier -huggingface-tools/text-download -mouaddb/image2text-comp -paulokewunmi/omowe.ai -p1atdev/Anime-to-Sketch -Ikaros521/so-vits-svc-4.0-ikaros -pythainlp/wangchanglm-demo-cpu -SantiagoTesla/Rai_AI -arundevops47/chatbot-with-langchain-and-pinecone -lukesteuber/textual -asciicorp/Legal-ai -DJQmUKV/rvc-inference -achyuth1344/stable-diffusion-webui -optigesr/Bark-with-Voice-Cloning -zxcgqq/nsfw -jinlinyi/PerspectiveFields -alsrbdni/pdf-chat -awacke1/OpenAssistant-Chatbot-FTW-Open-Source -Moxxie-nolastname/Not-Moxxie-Proxy -HuangLab/CELL-E_2-Image_Prediction -openaccess-ai-collective/ggml-ui -muhammadzain/Background-changer-remover-backend -Carlosito16/aitGPT -onlyswan/swan-voice -HuangLab/CELL-E_2-Sequence_Prediction -aaronb/Anything2Image 
-mkshing/rinna-japanese-gpt-neox-3.6b-instruction-x -matthoffner/serp-chat -Willder/GPT-Token-Calculator -mirroring/upload_civitai_model -Selim321/youtube-summarizer -heliosbrahma/ai-pdf-assistant -Spark808/rvc-demo -YueMafighting/mmpose-estimation -izumi-lab/llama-13b-japanese-lora-v0-1ep -younver/speechbrain-speech-separation -FrankZxShen/vits-fast-finetuning-umamusume -xiaofenglingreal/Remove-Animation-Figures-Background -Adithedev/Text-Summarization-Tool -Vikas01/gender-age-detection -AFOL/GigaGan -justest/vicuna-ggml -lain-iwakura/lainchan-proxy -wtarit/nllb-th-en-translation -neural-ti/NeTI -ritikjain51/pdf-question-answering -heliosbrahma/product-description-generator -BIOML-SVM/SVM -Xalphinions/tab-cot -ramkamal2000/voice-conversion-ddp -haseeb-heaven/AutoBard-Coder -quinnpertuit/drake-ai-v1 -openaccess-ai-collective/ggml-runpod-ui -ayymen/MMS-ASR -FrexG/MMS-Ethiopian_Language-ASR -IoMa/stable-diffusion-webui-cpu -librarian-bots/MetaRefine -EmilyBrat/bratty-space-needs-correction -dhanushreddy29/Remove_Background -March07/PromptBench -porntech/sex-position-video -awacke1/ChatGPTStreamlit7 -zeno-ml/translation-report -muttalib1326/Human-Voice-To-Text -seedmanc/batch-laion-aesthetic-predictor -kevinwang676/DreamlikeArt-PhotoReal-2.0 -thomwolf/hf-star-history -deepghs/anime_image_classification -thefcraft/prompt-generator-stable-diffusion -rohanshaw/Bard -ashpepel/ashpepel -Workhack/chatgpt-prompt-playground -Kalvin-5/WizardLM-WizardCoder-15B-V1.0 -mike-ravkine/llm-webapps-results -renumics/cifar10-outlier -Selim321/image2image-stable-diffusion -udayvarma/Image-to-Line-Drawings -osanseviero/persistent-data-final -malper/taatiknet -allknowingroger/Image-Models-Test8 -realvest/realvest-app -OsituKengere/Sauti-Midjourney -primodata/all_in_gpt -SungBeom/chatwine-korean -mrrandom123/mattmdjaga-segformer_b2_clothes -huggingface-timeseries/time-series-score -editing-images/project -awinml/vicuna-7b-ggml-api -xnetba/MMS -allknowingroger/Image-Models-Test15 -awacke1/MultiPDF-QA-ChatGPT-Langchain -0xSpleef/openchat-openchat_8192 -coreml-community/ControlNet-v1-1-Annotators-cpu -allknowingroger/Image-Models-Test17 -arnold-anand/chat-with-pdf -AFRAC/NCM_DEMO -lijiacai/stable-diffusion-webui-cpu -abhishekgawade/Skin_disease_detection -davanstrien/label-studio-to-hub -Manjushri/PhotoReal-V2.0 -chansung/llm-discord-bot -nomic-ai/vicgalle_alpaca-gpt4 -FFusion/FFusionAI-Streamlit-Playground -mikeee/falcon-7b-ggml -zenafey/prodia -noes14155/img_All_models -Poupeto/RVC_Ryu7ztv -hysts-samples/space-monitor -Username85/G3 -Ekimetrics/Biomap -gsaivinay/Llama-2-13B-GGML-server -Hmjz100/MT3 -allknowingroger/Image-Models-Test41 -mikeee/llama2-7b-chat-ggml -faisalhr1997/chat-ggml -syaz01/rvc-anigames-v2 -jbilcke-hf/webapp-factory-llama2 -Sandiago21/speech-to-speech-translation-german -ZX9966/LLM-Research -Xenova/next-example-app -Tester002/Claudette -mbear/code-playground -gradio-discord-bots/Llama-2-70b-chat-hf -Artples/LLaMA-2-CHAT -qblocks/Monster-LLMs -freddyaboulton/llama-chat-discord-bot -Ababababababbababa/topic2poem -Ababababababbababa/poetry2023 -NeuroSenko/tts-silero -simsa/Fashion-Image-Captioning-using-BLIP-2 -Mashhoor/stabilityai-stable-diffusion-image-generator -donjuanplatinum/code -merve/MusicGen -whyu/MM-Vet_Evaluator -Justin-Choo/Dreamlikeart-Anime-ZH -diffle/ComfyUI -Dagfinn1962/stablediffusion-articlera -StarFox7/Llama-2-ko-7B-chat-ggml -allknowingroger/Image-Models-Test65 -zac/Coding_with_LLAMA_CPU -TheRealZoink/Zoink_OV3RL0AD -Thanaphit/yolov8-car-parts-and-damage-segmentation 
-erastorgueva-nv/NeMo-Forced-Aligner -victor/AudioGen -imageomics/Andromeda -hf-vision/detection_metrics -allknowingroger/Image-Models-Test79 -foduucom/product-detect-in-shelf-yolov8 -avaco/stablediffusionapi-disney-pixal-cartoon -Sidharthan/VideoSummarizer -gojiteji/LLM-Comparer -Andyrasika/Andyrasika-dreamshaper-sdxl-1.0 -allknowingroger/Image-Models-Test90 -merve/gradio-tgi -reach-vb/animated-audio-visualizer-1024 -pankajmathur/psmathur-orca_mini_v3_70b -allknowingroger/Image-Models-Test98 -krystaltechnology/image-video-colorization -bhaskartripathi/Llama-2-70b-chatbot -mygyasir/digiplay-PotoPhotoRealism_v1 -Liky1234/Bilibili -xiantian/xiantian -dolphinfusion/dolphinfusion-diffusion -dolphinfusion/SD-XL -allknowingroger/Image-Models-Test102 -pigeonchat-community/pigeon-chat -eson/bert-perplexity -sky24h/Free-View_Expressive_Talking_Head_Video_Editing -DataHunter/ostris-crayon_style_lora_sdxl -crystalai/FFusion-FFusionXL-09-SDXL -dongsiqie/bingai -techasad/image-to-audio-story -Nick1/rvc-models -allknowingroger/Image-Models-Test120 -krystian-lieber/codellama-34b-chat -rt33/terry -hhhwmws/ChatHaruhi-Xinghuo -mateuseap/magic-vocals -AxelBell/EasyOCR_text_recognition -GoAPI/Midjourney-zoom-video-generator-GoAPI -h1r41/OpenBuddy-Gradio -jbilcke-hf/space-factory -Cran-May/yugangVI -unstructuredio/unstructured-chipper-app -freddyaboulton/falcon-180b-demo-gradio-discord-bot -limcheekin/WizardCoder-Python-13B-V1.0-GGUF -Catspindev/monadical-labs-minecraft-skin-generator -allknowingroger/Image-Models-Test131 -hf4all/chatgpt-next-web-bing -yuchenlin/llama-token-counter -Lavena/claude -EronSamez/RVC_HFmeu -laiyer/llm-guard-playground -Ilzhabimantara/rvc-Blue-archives -chansung/palm-with-gradio-chat -alibaba-pai/easyphoto -SalahZa/Code-Switched-Tunisian-SpeechToText -tonyassi/nsfw-safety-checker -radames/Candle-T5-Generation-Wasm -GuujiYae/Grand-Narukami-Shrine -AchyuthGamer/ImMagician -tonyassi/vogue-runway-scraper -foduucom/stockmarket-future-prediction -pharma-IA/PharmaWise_Experto_GMP_V2C -44brabal/runwayml-stable-diffusion-v1-5 -zamasam/loligod -tsfeng/DeepDanbooru-string -distil-whisper/hallucination-analysis -Tomoniai/Demo_Mistral_Chat -deepliteai/yolobench -Detomo/ai-avatar-frontend -familytrain/upscaler2 -hzwluoye/gptnextweb-LangChain -zenafey/illusion -awacke1/MistralCoder -Nephele/bert-vits2-multi-voice -XzJosh/ranran-Bert-VITS2 -badayvedat/AudioSep -librarian-bots/new_hub_datasets -Felladrin/MiniSearch -dblasko/blip-dalle3-img2prompt -lalashechka/video2 -codys12/MergeLlama-7b -thuanz123/peft-sd-realfill -Sijuade/Stable-Diffusion -radames/gradio-lite-candle-SAM -realfill-library/RealFill-Training-UI -LaynzKunz/Advanced-RVC-Inference -arsalagrey/image-classfication-vue -deinferno/Latent_Consistency_Model_OpenVino_CPU -hacknc23/hacknc23 -leogabraneth/text-generation-webui-main -miracle01/speechemotion -librarian-bots/claim-papers -FL33TW00D/whisper-turbo -limcheekin/OpenHermes-2.5-Mistral-7B-GGUF -Amiminoru/Deus -Vithika/ISRO -Abhaykoul/Youtube_video_downloader -ngoctuanai/stable-diffusion -pseudolab/MiniMed_EHR_Analyst -Roboflow/HotDogGPT -Illia56/OpenAI_TTS -AUBMC-AIM/OCTaGAN -AlgoveraAI/ocean-marketplace -BigSalmon/GPTJ -Detomo/Image-Classification -DrishtiSharma/ASR_using_Wav2Vec2 -EdanMizrahi/OpenAItest -Enutrof/GenreClassifier -GEM/results -GEM/submission-form -Gabriel/Swe_summarizer -Giuliano/breast_cancer_prediction_tfjs -Hellisotherpeople/Reassuring_parables -Hitmanny/BigGAN-text-to-image -JLD/image-search -MilaNLProc/wordify -MrAI-Rohan/three-dog-breeds-detector 
-NbAiLab/maken-clip-sketch -NegativeSector/News_Article_Generator -NeuML/imagesearch -NeuML/similarity -OmarN121/NLP_for_Jobs -OthmaneJ/transcribe-distil-wav2vec2 -PaddlePaddle/paddlespeech -Sa-m/manifesto-explainer -Sadhaklal/coreference-neuralcoref -Sakil/image_generator -Sakil/sakil_text_summarization_app -Shruhrid/IMDB_movie_review -ThomasSimonini/Murder-on-horsea-island-prototype -ThomasSimonini/SB3_Atari -Wootang01/URL_news_summarizer -Wootang01/paraphraser_one -YuAnthony/Audio-Caption -abidlabs/Webcam-background-remover -abidlabs/crowd-speech -abidlabs/pytorch-image-classifier -adalbertojunior/image_captioning_portuguese -akdeniz27/turkish-named-entity-recognition -akhaliq/BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext -akhaliq/SummerTime -akhaliq/codet5 -akhaliq/distilbart-cnn-12-6 -akhaliq/encoder4editing -akhaliq/kan-bayashi_ljspeech_joint_finetune_conformer_fastspeech2_hifigan -akhaliq/layout-parser -aliabd/blocks-image-audio -breathingcyborg/word2vec-for-products -cahya/indonesian-story -cakiki/arxiv-downloads -cakiki/facets-overview -ceshine/t5-paraphrasing -chinhon/malay_headlines_writer -ck46/extractive_summaries -danielferreira/emotion-text-classification -davidwisdom/la-metro -deep-learning-analytics/segformer_semantic_segmentation -docs-demos/bert-base-uncased -docs-demos/mt5-small-finetuned-arxiv-cs-finetuned-arxiv-cs-full -dragonSwing/wav2vec2-vi-asr -dtsh4rk/neural-style-transfer -echolee/faceanime4u -erc/entity-referring-classifier -facebook/XLS-R-300m-EN-15 -graceaiedu/Coffee -gracjans/Game-or-book-cover-classifier -haotieu/en-vi-translation -hitz02/TableQA -hysts/lbpcascade_animeface -azizalto/youtube_downloader -isabel/anime-project -isabel/pug-or-cat-image-classifier -ivanlau/language-detection-xlm-roberta-base -j-hartmann/emotion-similarity -jason9693/m2m-100 -jkang/demo-image-completion -jkang/demo-image-pyxelate -jkang/espnet2_librispeech_100h_word_vs_bpe_vs_char -julien-c/svelte-demo -julien-c/sveltekit-demo -julien-c/tailwind-gradient -kTonpa/Text2Cryptopunks -karolmajek/Detectron2-MaskRCNN -keras-io/keras-image-classifier -keras-io/keras-video-classification-cnn-rnn -keras-io/text-generation -keras-io/vit-small-ds -khxu/pegasus-text-summarizers -legoandmars/glide-inpainting -m3hrdadfi/typo-detector -malloc/OpenNMT-EN-DE-Translation -malmal/dog-breed-identifier -mariagrandury/roberta-qa-es -mariakatosvich/security -merve/taskmaster -micole66/test -mikeee/radiobee-aligner -ml6team/toxic-comment-detection-german -monsoon-nlp/spanish-flip -mrm8488/write-with-spanish-gpt-2 -muhammadayman/gradio-demo -muhtasham/TajBERTo -nateraw/pictionary -neurotech/cat_dog_audio_classifier -nikhilmane007/text_dissection -osanseviero/EfficientNetV2 -osanseviero/danfojs-test -osanseviero/test_gradio -patrickvonplaten/asv -pierreguillou/question-answering-portuguese -pytorch/AlexNet -pytorch/DCGAN_on_fashiongen -pytorch/X3D -rajesh1729/NER-using-spacy-gradio -rajesh1729/gradio-realtime-news-app -raynardj/duguwen-classical-chinese-to-morden-translate -rexoscare/Resume_screener -rg089/NewsHelper -risingodegua/hate-speech-detector -sagittariusA/media_bias_detection_CS -salti/arabic-question-paraphrasing -savasy/Twitter2SentiForTurkish -savasy/text-classification-for-Turkish -scp4950/fastspeech2-en-ljspeech-Demo -shauryaDugar/gradio-Note-Classifier -sohomghosh/FiNCAT_Financial_Numeral_Claim_Analysis_Tool -sonoisa/Irasuto_search_CLIP_zero-shot -stevenkolawole/T5-multitasks-gradio -ttj/t0-generation -ttj/wordle-helper -ubamba98/clipsimilarimagesearch -vasudevgupta/BigGAN 
-vasudevgupta/GOOGLE_SUMMER_OF_CODE -vishnun/SpellCorrectorT5 -widged/named-entity-recognition -widged/text-classification -xiatao/microsoft-trocr-large-printed -yoyololicon/Danna-Sep -zhenwusw/AnimeGAN -tensorflow/imagenet-efficientnet_v2_imagenet1k_b0-classification -awacke1/SOTA-Plan -eubinecto/idiomify -cakiki/netlogo-ants -calvininterview/bart-question-interactive -ivanlau/IntelliLabel -PaddlePaddle/animegan_v1_hayao_60 -PaddlePaddle/stylepro_artistic -Wootang01/keyword_extractor -akdeniz27/turkish-pos-tagging-with-xlm_roberta -opetrova/face-frontalization -mrm8488/Amazon-reviews-classification-es -manan/Score-Clinical-Patient-Notes -neuronys/distractors -BigSalmon/BackTranslation -onnx/GPT-2 -awacke1/SOTA-Summary -jsjuan/PlateNumberRecognition -amielle/patent-summarizer -webis/chat-noir -awacke1/VisualCluster -paulengstler/interpretable-vertebral-fracture-diagnosis -khizon/ActiveTransportDetection -huggan/BigGAN -Cahlil/Speech-Recognition-with-Speaker-Segmentation -browndw/docuscope-demo-spacy -godot-demo/godot-2d -smajumdar/nemo_conformer_rnnt_large_streaming -kingabzpro/real-time-Urdu-ASR -hackathon-pln-es/Sentence-Embedding-Bertin -awacke1/SOTA-MedEntity -NimaBoscarino/gradio-secrets -codeslake/RefVSR -pyodide-demo/gpt2-tokenizer -rajesh1729/toonify-mercury -FritsLyneborg/kunstnerfrits -satpalsr/RegNet-Image-Classification -webis-huggingface-workshop/chris_code_generation -awacke1/Search_Streamlit -tom-doerr/logo_generator -hysts/Hopenet -hylee/artline -hushell/pmf_with_gis -eetn/DALL-E -malper/unikud -hysts/atksh-onnx-facial-lmk-detector -Nooshinbr/story_generation -pplonski/deploy-mercury -ecarbo/text-generator-gpt-neo -MarcSkovMadsen/awesome-panel -Eddevs/brian-challenge -jungealexander/uspppm-demo -capstonedubtrack/Indiclanguagedubbing -jcjurado/DaVinci -akhaliq/arcanestyletransfer -n-e-w/glock_classifier -aeamaea/beard-detector -NataKaichkina/PredictSalary -Yehor/wav2vec2-uk-demo -strickvl/fastai_redaction_classifier -ofig/live-lm-critic -strickvl/redaction-detector-streamlit -gylleus/icongen -hylee/u2net_portrait -ashishraics/NLP -leakyrelu/MobilenetV2SSDLite_LPRnet -azizmma/question_generator -NimaBoscarino/hotdog-gradio -Jorgvt/CycleGAN-GTA-REAL -subatomicseer/2022-AdaIN-pytorch-Demo -emilylearning/causing_gender_pronouns_two -ntt123/handwriting -imamnurby/RecipeGen -evaluate-metric/precision -gputrain/UrbanSounds8K -gurgenblbulyan/video-based-text-generation -Andy1621/uniformer_image_segmentation -autoevaluate/error-analysis -GDavila/textblob_sentiment -pierreguillou/tesseract-ocr-pt -awacke1/AIandSmartTools -unlisboa/bart_qa_assistant -flava/zero-shot-image-classification -pyimagesearch/nmt-bahdanau -awacke1/StoryGenerator-MythsandLegends -rajistics/library_metrics_forecasting -awacke1/Seq2Seq-QAGenerator -reinformator/LL -sidphbot/Researcher -doevent/animegan-v2-for-videos -evaluate-measurement/perplexity -evaluate-metric/trec_eval -Gradio-Blocks/CloudSaveText2Speech -CShorten/Last-Week-on-ArXiv -MarcCote/ScienceWorld -breynolds1247/StarryNight_StyleTransfer -jordyvl/ece -Gradio-Blocks/speech-to-text-app -tinkoff-ai/caif -keras-io/timeseries-anomaly-detection-autoencoders -nateraw/helpful-snippets -keras-io/what-convnets-learn -keras-io/Attention_based_Deep_Multiple_Instance_Learning -shoukaku/movie_recommendation -ntt123/Connect-4-Game -erer/anima_pose_crop -bikemright/overweight-AI -Axolotlily/SketchThing -lambdaofgod/huggingface_explorer -segments-tobias/conex -keras-io/Self-supervised-learning-SimSiam -halilumutyalcin/spam-email-classifier-app 
-jharrison27/StoryWritingTransformers -ThomasSimonini/Unity-MLAgents-Pyramids -CVPR/Dual-Key_Backdoor_Attacks -jorge-henao/ask2democracy -Comet/txt2im-models -harish3110/emotion_detection -rosenthal/chess -shaneweisz/AutoCounterspeech -wvangils/Beatles_Poetry -MilesCranmer/PySR -logasja/LowKey -SerdarHelli/ThyroidTumorClassification -innat/Global.Wheat.Detection.MaskRCNN -CVPR/WALT -KyanChen/BuildingExtraction -j0hngou/vision-diffmask -kristyc/mediapipe-hands -hugginglearners/Hearts_Leaderboard -rajistics/Ask-Wiki -HighCWu/GPEN -HighCWu/GFPGAN-1.3 -DarkCeptor44/neural-art -awacke1/3DVirtualFood -hylee/AnimeGANv2 -awacke1/NLPSentenceSimilarityHeatmap -Wootang01/paraphraser_three -keras-io/drug-molecule-generation-with-VAE -QuoQA-NLP/KoQuillBot -fffiloni/ArcaneStyleTransfer_Webcam -shajmaan/movies_recommender -iakarshu/latr-vqa -EuroPython2022/bloom-prompts-spanish -hugginglearners/Ethiopian-Food-Classifier -hugginglearners/kvasir-seg -boli-ai/OIT -EuroPython2022/Model-Recommendation -EuroPython2022/illustrated-lyrics-generator -fcossio/measure-fiber-diameter -ICML2022/selection_bias_induced_spurious_correlations -0x7194633/nllb-1.3B-demo -derina/BartSummarizer -djl234/UFO -LDY/Text-To-Image -awacke1/CarePlanQnAWithContext -Ivanrs/batch-image-bg-remover -mikeee/gradio-deepl -awesomepotato2016/recommender -betheredge/air-vibrations -kunwarsaaim/Self-Debiasing -dawood/Model3D -shorthillstech/pybanking_churn -denisp1/Streamlit-Grammar-Corrector-Styler -xiang-wuu/yolov5 -rsatish1110/AudioToTextToStoryToImageToVideo -Norod78/SillyTedTalkSnippetGenerator -kamezawash/rembg -TabPFN/TabPFNEvaluation -keithhon/macaw-large-onnx-demo -nickmuchi/license-plate-detection-with-YOLOS -postbot/autocomplete-emails -Zengyf-CVer/watermarking_lab -awacke1/StreamlitTimerTest -relbert/Analogy -cymic/VITS-Tokaiteio -canturan10/satellighte -keithhon/Tesseract-OCR -abhibisht89/Donut_DocVQA -ipvikas/ALL_NLP_Tasks -Mwebrania/classification_of_maize_diseases -chaninder/SmartWaste -nafisehNik/mt5-persian-summary -mrfakename/Chat -AIZeroToHero/04-Image2OCR -AIZeroToHero/Video-Automatic-Speech-Recognition -cchaun/music_tagging -Sa-m/Vehicles-Detection-Custom-YoloV7 -versus666/play_with_stable_diffusion_v1-4 -yaelvinker/CLIPasso -PascalNotin/Tranception_design -ML-unipi/TermsOfServiceSummarization -nathanluskey/twitter_sentiment -Priyabrata017/Flamingo -pappymu/question-gen -ai4bharat/IndicNLG -mariofilho/gradio_tutorial -pinecone/extractive-question-answering -mrfakename/Translate -yfyangd/PictureBookUnderstanding -baaastien/AudioSpleeter -clemsou/pokemon_generator -VasudevaK/Information_Extractor -justYu2001/furniture-detection -TrLOX/img2img -kornia/kornia-edge-detection -Manjushri/Dall-E-Mini -owaiskha9654/PICO-Evidence-Based-Classification-Inference -ppsingh/annotation_dev -ipvikas/ImageProcessing -cmotions/new_beatles_songs -kornia/morphological_operators -kornia/total_variation_denoising -binery/Donut_Receipt -kornia/Line-Fitting -devfinwiz/Dynamic-QR -mrm8488/speech-to-diffusion -sarinam/speaker-anonymization -noeljb/hashtag-recommendation-engine -tbxg34/Satellite-Image-Recognition -gradio/model3D -gradio/spectogram -mfranzon/MagicBoard -nazneen/datapoints-explorer -Armandoliv/gpt2-tweets-generation-app -evaluate-measurement/regard -jone/Music_Source_Separation -aakashgoel12/nlp1 -simecek/is_promoter -wby/human-photo-3dize -isaiah08/dalle-mini-test -ysharma/test_speech_to_text -PaddlePaddle/LSeg -johngoad/Image-Caption -amsterdamNLP/CLIP-attention-rollout -AIZ2H/02-Gradio-Art-From-Text-And-Images 
-AIZ2H/05-SOTA-Question-Answer-From-TextFileContext -cadige/03-Streamlit-Video -tgohblio/stable-diffusion-basic -freddyaboulton/latent-diffusion-seed -akhaliq/sd-pokemon-diffusers -TRI-ML/risk_biased_prediction -sebastian-hofstaetter/fid-light-explorer -nightfury/SD-InPainting -coutant/detect-signature -UmairSyed/ObjectDetection -Marne/MockingBird -lulmer/paraphraser_ai -test-org-q/stable-diffusion -TusharNautiyal/Dynamic-Movie-Recommender-With-Sentiment-Analysis -ai-forever/PeterRecognition -jharrison27/streamlit-blenderbot -williambr/AIChatBot-SL-Chatbot-Blenderbot -amarjeets/OCR -SudhanshuBlaze/text-generation-gpt-neo -FahadAlam/Zero-Shot-Text-Classification -altryne/vidtranslator -darveen/text_summarizer -khaclinh/self-driving-anonymization -datasciencedojo/Paraphrasing -datasciencedojo/Question-Generator -datasciencedojo/Text-Generator -datasciencedojo/Zero-Shot-Text-Classification -AFCMEgypt/colorimetric_analyzer -datasciencedojo/Wikipedia-Article-Scrape -abidlabs/GFPGAN -ml6team/dynamic-pricing -shichen1231/Real-CUGAN -FSDL-Fashion/fashion_img_search -bigscience-data/pyserini-demo -awaawawawa/iurf7irfuyytruyyugb -ai-forever/scrabblegan-peter -AISuperheroes/09SL-AI-Image-Music-Video-AIUIUX -Akmyradov/dost.ai -sayakpaul/sots-outdoor-dehazing-maxim -nightfury/SD_Studio_AI_Text2Image_Image2Image_Generation -sil-ai/model-license -doevent/swin2sr -sparanoid/milky-green-svc -impira/flan-playground -lgrobol/troer -FathomNet/MBARI_Benthic_Supercategory_Object_Detector -hnmensah/Ghanaian-Language-Translator -cmudrc/lattice-interpolation -sayakpaul/raindrop-deraining-maxim -wvle/speech_to_text -terrierteam/doc2query -bigscience-data/scisearch -milyiyo/paraphrase_es -mesolitica/ms-tts-VITS -mikegarts/lotr -Jayeshbhaal/news_filter_for_social_wellbeing -tsambo/Demo_Sentiment_analysis -egumasa/engagement-analyzer-demo -osanseviero/esmfold_st -rondel/image-to-text-app -bofenghuang/speech-to-text -mrfakename/tts -shripadbhat/Question_Answering_Document -dvitel/codebleu -Lee008/PixelDayReal -yujieq/MolScribe -awacke1/Gradio-Gallery-Health-Medical-Icon-Sets -sanchit-gandhi/enhanced_direct_s2st -User1342/Ivory -TomLemsky/this_skin_does_not_exist -Emanuel/porttagger -naver-clova-ix/donut-base-finetuned-kuzushiji -freddyaboulton/dataset-viewer -Amrrs/fashion-aggregator-duplicated -DrGabrielLopez/GPT2_Chatbot -spiritupbro/Voice-Cloning -PublicPrompts/Pixel_diffusion -akhaliq/Ghibli-Diffusion -julien-c/dreambooth-training -bigcode/pii-public-demo -Avkash/WebcamFaceProcessing -kdrkdrkdr/AzusaTTS -mgxwrites/Mgx-Diffusion-v3.0 -priyank-m/m_OCR -fffiloni/mr-and-misses -windmaple/stable-diffusion-2 -NobleEpuz/openjourney -intelliarts/Car_parts_damage_detection -Southstar1/img-to-music -royyy/text_generator -kdrkdrkdr/HoshinoTTS -awacke1/PrompTart -christinac/text-decorator -MirageML/lowpoly-environment -IgorSense/Diffusion_Space2 -aidiary/tts-ljspeech-demo -akhaliq/supermarionation -Ramos-Ramos/albef-vqa -mandar100/blenderbot_chat -drift-ai/question-answer-text -deaf1296/finetuned_diffusion -fcakyon/timesformer -nerijs/coralchar-diffusion -AmrElsayeh/Interior_style_detector -EvgenyK/Text-To-Image -SudhanshuBlaze/neural-style-transfer-streamlit -yangheng/Waifu2X-Image-Scale -qisan/whisper-small-CN-YouTube-video-transcribe -whispy/Italian-ASR -akhaliq/runwayml-stable-diffusion-v1-5 -datasciencedojo/Mental_Health_Bot -PBJ/image_colorization_app -Nithila77/fashion-mnist -emilios/whisper-greek-demo -Harsh23Kashyap/StockMarketPredictor -Zaid/whisper-large-v2-ar -binarycache/voice_to_image -Svngoku/GFPGAN 
-emmetmayer/Large-Context-Question-and-Answering -morganreese8/rhymethyme -Kirokowa/hakurei-waifu-diffusion -robmarkcole/fire-detection-from-images -AvinashRamesh23/AIEditor -teamnassim/emotion-detection-app -fkunn1326/CoolJapaneseDiffusion -fkunn1326/waifu2x -MKFMIKU/Bi-Noising.Diffusion -ThirdEyeData/Network_Data_Anomaly -FloydianSound/Nixeu_Diffusion -ORI-Muchim/BarKeYaeTTS -sussahoo/table_extraction -livebook-dev/single_file_phx_bumblebee_ml -akhaliq/seek.art_MEGA -ThirdEyeData/Price_Optimization -SpacesExamples/single_file_phx_bumblebee_ml -rizam/rjgpt -tarteel-ai/demo-whisper-base-ar-quran -mueller-franzes/medfusion-app -Yusin/talking-stable-diffusion -kdrkdrkdr/ZhongliTTS -neuralmagic/nlp-ner -luigisaetta/whisper-demo -neuralmagic/cv-yolact -ales/whisper-small-belarusian-demo -froginsect/Lama-Cleaner-lama -Fatima990/text_generator1 -bofenghuang/whisper-demo-german -alexander1i/dreamlike-art-dreamlike-diffusion-1.0 -Autopixel/blurry-faces -adirik/stylemc-demo -facebook/Hokkien_Demo_on_GPU -kdrkdrkdr/HinaTTS -RASMUS/Youtube-videos-with-crosslingual-transcriptions -Curranj/GPT-QRI -bigcode/license -Joeythemonster/flax-midjourney-v4-diffusion -Zephyr65/Envvi-Inkpunk-Diffusion -ThirdEyeData/Retail-Anomaly -abdalrahmanshahrour/Summarization -djillegal/illegal_stable_img2img -jbrinkma/video-transcription -pieeetre/stable-diffusion-webui -kohbanye/pixel-art-style -rifkat/uz_news_classifer -FKBaffour/Streamlit_App_for_Sales_Forecasting -Joeythemonster/prompt-extend -morenolq/italian-summarization -JammyMachina/streamlit-jam-machine -keremberke/valorant-object-detection -ysharma/LiveScatterPlot -DeepLabCut/DeepLabCutModelZoo-SuperAnimals -gstaff/MagicGen -IzumiSatoshi/sketch2img-FashionMNIST -davidscripka/openWakeWord -amgross01/Stocks_Trading_Assistant -abdalrahmanshahrour/questionanswering -carlosabadia/face_detection -luluneko1/stable-diffusion-webui -keremberke/blood-cell-object-detection -ItsJayQz/Roy_PopArt_Diffusion -peteralexandercharles/Auto-Subtitled-Video-Generator -theintuitiveye/modernartstyle -pinecone/find-your-celebrity-match -kael558/InPaintAPI -HighCWu/anime-colorization-with-hint -nightfury/img2music -Hexequin/claudfuen-photorealistic-fuen-v1 -dafqi/indo_twitter_sentiment_app -camenduru-com/VITS-Umamusume-voice-synthesizer -pngwn/Stable-Diffusion-prompt-generator -murbas/Litmus-Voice-Age-Prediction -ThirdEyeData/Entity-Extraction -Axesys/Private-WebUI -trl-internal-testing/rlhf_dialog_experiment -perilli/tortoise-tts-v2 -ulysses115/ulysses115-pmvoice -datasciencedojo/Twitter-Scraper-with-Time-Series-Visualization -vinayreddy10/gpt3 -mohdelgaar/Clinical_Decisions -Lianjd/stock_dashboard -Rojastopher/Image-to-3D -umair007/all_in_one_converter_modified -fbrynpk/image-caption-generator -zjrwtx/xiaoyi_image_variations -Aditya9790/yolo7-object-tracking -leonel1122/openai-jukebox-5b-lyrics -markski/reddit-roast-me -Adapting/YouTube-Downloader -gauravgarg/youtube_transcript -toonist/DualStyleGAN -freddyaboulton/license-plate-reader -aichina/MagicPrompt-Stable-Diffusion -decodemai/future_in_words -decodemai/intersection_scenarios -faisalhr1997/wd14_tagging_online -Curranj/GPT-SQL -unilm/Promptist-faster -alsrbdni/speaker-diarization -Frorozcol/music_recommedation -CharyWind/webui-docker -discussion-bot/webhook -Lyra121/finetuned_diffusion -TrustSafeAI/NCTV -BilalSardar/karlo-cpu-api -ngxson/poet-cat -DReAMy-lib/dream -odhier/MGX-Midjourney-v4 -GT4SD/protein_properties -language-tools/language-demo -awacke1/Writing-Grammar-And-Paraphrase-w-Pegasus -grumpkin/cats -Div99/Chat-with-Div 
-amit-scans/Image-Text-Detection -Fr33d0m21/google-flan-t5-xxl -EmbeddedAndrew/examin8 -Shad0ws/Information_Extraction_with_ChatGPT -jonathang/Protein-Family-Ensemble -Harveenchadha/BioGPT -seawolf2357/kochatgpt -chewing/liandan -merzigo/MKAtaturkv2 -mariashay/DataViz-Plotly -xuanzang/prompthero-openjourney-v2 -camenduru-com/terminal -ericanthonymitchell/detectgpt -voltcutter/stable-diffusion-webui -omdenalagos/job_skill_cat -gfhayworth/hack_qa -multimodalart/coca-captioning -abidlabs/mic_or_file -camenduru-com/audioldm-text-to-audio-generation -MBZ/LoRA-DreamBooth-Training-UI -oschan77/animalsvision -vanessa9178/anime-anything-v4.0 -Re1e9/Flower_Classification_using_InceptionV3 -Deevyankar/Deep-AD -celise88/Pathfinder -james-oldfield/PandA -abidlabs/gpt-talking-portrait -anhnv125/FRN -awacke1/ResnetPytorchImageRecognition -harshasurampudi/gender-and-age -imseldrith/AI-Rephraser -jayyd/nlpconnect-vit-gpt2-image-captioning -emanlapponi/sound-refukculator -Grezz/generate_human_motion -Dipl0/Dipl0-pepe-diffuser -dukecsxu/chatGPT -gouravs300/ANPR -Solomon-y/img-to-music -mehradans92/decode-elm -multimodalart/pix2pix-zero -Neilblaze/WhisperAnything -end000/sberbank-ai-FRED-T5-1.7B -imseldrith/AI-Rewriter -Yeno/text-to-3D -zwormgoor/stock-photo-recognizer -Duskfallcrew/textual-inversion-training -Detomo/AnimeGAN -BramVanroy/text-to-amr -DataScienceEngineering/7-NER-Biomed-ClinicalTerms -Duskfallcrew/newdreambooth-toclone -Duskfallcrew/duskfall-tarot-card -Kaludi/Stable-Diffusion-Prompt-Generator_App -bkhmsi/AraPoet -awacke1/sileod-deberta-v3-base-tasksource-nli -NagaSaiAbhinay/unclip_text_interpolation_demo -awacke1/File-Memory-Operations-Human-Feedback-Gradio -Elbhnasy/ASD_Diagnosis -open-source-metrics/audio-stats -society-ethics/DiffusionClustering -lfoppiano/grobid-quantities -Crossper6/stable-diffusion-webui -awacke1/Bloom.Big.Science.Continual.Generator -ORI-Muchim/ONFIRETTS -GAS17/Dream-awAI-Image-Upscaling -rayan-saleh/whisper2notion -lfoppiano/grobid -zeno-ml/imagenette -joaogante/generate_quality_improvement -Guinnessgshep/AI_story_writing -giswqs/geemap -JacobLinCool/create-3d-icon -Thafx/sdAnalog -Thafx/sdrv1_3 -awacke1/Player-Card-Monster-Battler-For-Math-and-AI -vaibhavarduino/ChatGPT -akshatsanghvi/movie-recommender-system -FadouaFGM/Stackoverflow_Questions_Categorisation -Samuelblue/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator -GT6242Causion/Causion -paimeng/anime-remove-background -fyodorschnotzdinger/paraphraser -ridai/img-to-music -szzzzz/toxic_detection -simplyjaga/neural_style_tranfer_using_dense_net -ddstua/Enhance_Low_Light_Image -awaiss/vits-models -Harsh12/Netflix-Movie-Recommender -ysharma/ControlNet_Image_Comparison -animesh651/ChatAPT_v1 -kuhnma2026/FortniteSkinPackAI -EtheE/SecurityAgent -podsnigame/twitter-scrapping -Thaweewat/ControlNet-Architecture -Alinadi98/movie_recommendation_system -Firefly777a/openai-moderation-api-demo -deepsynthbody/deepfake-ecg-generator -omlakhani/endoai -aijack/hair -Semii/OpenPoseSkeleton -ivanpc/Youtube_Audio -abidlabs/supabase -awacke1/Ontology-Gradio -AlexWelcing/MusicLM -ashawkey/chatgpt_please_improve_my_paper_writing -ivanmeyer/DreamlikeArt-PhotoReal-2.0 -gptbase/GPTBase -awacke1/HTML5-Aframe-3dMap-Flight -sciling/Face_and_Plate_License_Blur -mohit-217/invoice_by_mohit -ekatra/mobius-v2 -derek-thomas/QADemo -Laronix/Laronix_ASR_TTS_VC -pkiage/fast_arbitrary_image_style_transfer -youngs3/coqui-ai-tts-ko -GazeLocation/Visualization_Saliency -suigyu/AItest -cscan/CodeFormer -firefighter/PdfSumGPT 
-phlippseitz/Image-Text-Extraction-PaddleOCR -radames/diffusers-classifier-labeling -awacke1/CodeGen-YurtsAI-yurts-python-code-gen-30-sparse -liuxiaopai/background-remover -zhangjf/chatbot_code_friendly -nmaina/ChatGPTwithAPI -supertori/files -wwydmanski/meeting-summarizer -Allakhazam/Home -rishi9440/remove-photo-background -MZhaovo/Llama_Difu -darthPanda/Social_media_sentiment_tracker -bhaskartripathi/Text2Question -productizationlabs/MyChatGPTTurbo -jelly21/claudfuen-photorealistic-fuen-v1 -HiepPhuocSS/TimeSFormer -IISRFactCheck/claim_detection -Yasbok/Flan-T5-Chatbot -pyimagesearch/gif-creator -JohnTan38/NLLB-translation -RGBD-SOD/bbsnet -lthero/ChatGPT-lthero -nithinraok/NeMo-Offline-Speaker-Diarization -FreeGPT/FreeGPT -bahjat-kawar/time-diffusion -JUNGU/latex-ocr-wthGPT -safetensors/convert2 -jofaichow/shiny-numerati -cloudqi/MultisourceChat -Dao3/Top-20-Models -keras-dreambooth/dreambooth-kedis -keras-dreambooth/dreambooth_fantasy -keras-dreambooth/dreambooth_diffusion_toy -tbboukhari/Chatbot-produit-fr -awacke1/Maps.Markers.Honor.Iceland -keras-dreambooth/bengali_clay_universe -Lykon/NeverEnding-Dream-webui -lu2000/anything-midjourney-v4-1 -awacke1/Knowledge-graphs -AI-Dashboards/Topic-Modeling-Clusters-Free-Text -dawood/microsoft_windows -xc9/VITS-Umamusume-voice-synthesizer -ysharma/steampunk -swcrazyfan/ppt-generator -liuxiaopai/BelleGroup-BELLE-7B-2M -Saturdays/deepfake-detection -JohnSmith9982/ChuanhuChatGPT_Beta -nullzero-live/python-project-generator -yooch/yooch -zouguojun/chatPDF -awacke1/4.RealTime-MediaPipe-AI-From-Video-On-Any-Device -KarloDarlo/3D_Photo_Inpainting -keras-dreambooth/dreambooth_monkey_island -grosenthal/aineid -EinfachOlder/ChatGPT-prompt-generator -Saturdays/CiclopeIA -POPSICLE/pdfChatter -mikaelbhai/GPTBhai_TextToImage_DreamStudio -deadash/BelleGroup-BELLE-LLAMA-7B-2M -Marshalls/testmtd -keras-dreambooth/dreambooth_eighties_cars -digitalOSHO/webui -Uday007/startup-profit-predictor -keras-dreambooth/dreambooth_hogwarts_legacy -MisterZee/PIFu-Clothed-Human-Digitization -librarian-bots/dashboard -vjain/Trading-Chatbot -derful/Chatgpt-academic -mncai/chat-doctor-kr -tammm/vits-models -awacke1/Bloom.Generative.Writer -keras-dreambooth/seymour-cat-diffusion -Mrchuw/MagicPrompt-Stable-Diffusion -Lihuchen/AcroBERT -gojiteji/thatGPT -BilalSardar/Gpt4All -haonanzhang/ChatGPT-BOT -jackycedar/pdfs -LEL-A/translated-german-alpaca-validation -rishabh062/DocumentQuestionAnswerModel -hungln1102/emotion_classification_surreynlp_2023 -zixian/Zhenhuan-VITS -Gradio-Themes/guessing-game -phoenix1203/club_record_in_3_min -Woocy/541GPT -derek-thomas/dataset-creator-reddit-bestofredditorupdates -ParityError/LimeFace -aryadytm/chatmagic-ai -ayaanzaveri/faster-whisper-api -datasciencedojo/AudioTranscription -fastx/Gpt-4-chatbot -mthsk/sovits-models -finlaymacklon/boxy_violet -Aaaaaaaabdualh/meter2poem-1 -sklearn-docs/A_demo_of_the_Spectral_Co-Clustering_algorithm -raoyang111/speecht5-tts-demo -shiyuleixia/yolov8-segmentation -ceckenrode/Human.Feedback.Dynamic.JSONL.Dataset.Download -FourthBrainGenAI/GenerAd-AI -Kevin676/ChatGPT-with-Voice-Cloning-2.0 -LightSY/W2L-TD -VincentZB/Stable-Diffusion-ControlNet-WebUI -JanDalhuysen/ChatPDF -sklearn-docs/plot-k-means-digits -kkinc/gsdf-Counterfeit-V2.5 -TechWithAnirudh/eachadea-vicuna-13b -dominguesm/alpaca-ptbr-7b -PirateXX/Sentencewise-Perplexity -stupidog04/Video-to-Multilingual-OCR -Fazzie/PokemonGAI -umair007/ChatGPT-prompt-generator -snoop2head/Gomoku-GPT2 -AquaSuisei/ChatGPTXE -lyhue1991/yolov8_demo -sino72/Passenger_Reconization 
-VikramSingh178/MedicalImagingApplication -Aluxes/anime-remove-background -marinap/multimodal_similarity -sklearn-docs/post-pruning-decision-trees -ieuniversity/ScienceBrief_summarization -sklearn-docs/ensemble-trees-decision-surface -Cloudfaith/anon8231489123-gpt4-x-alpaca-13b-native-4bit-128g -sklearn-docs/SGD-convex-loss -sklearn-docs/gaussian-quantile-adaboost -ThirdEyeData/Supply-Chain-Causal-Analysis -JohnSmith9982/VITS-Umamusume-voice-synthesizer -silaseic/sheet_music_transpose_v2 -ieuniversity/Sciencebrief_translation -sklearn-docs/Lasso-model-aic-bic -gshotwell/multi-query-sentiment -Bostoncake/ChatAssistant -sklearn-docs/Lasso-dense-sparse-data -paddle-diffusion-hackathon/Neolle_Face_Generator -sklearn-docs/t-SNE-perplexity -taesiri/ImageNet-Hard-Browser -seekeroftruth/CognitoMaxima -Priyanka-Kumavat/Object-Detection -hamzapehlivan/StyleRes -cuiyuan605/chatgpt-demo -codertoro/gpt-academic -ztudy/chatbot -nota-ai/theme -ioanniskarkanias/chatbot-with-sources -yanli01/gpt01 -laksithakumara/stabilityai-stable-diffusion-2 -CVH-vn1210/make_hair -viveknarayan/Image_Colorization -KarmaCST/English-To-Dzongkha-Translation-NLLB-Fine-tuning -django-ochain/youtube-q-and-a -markburn/stack-llama -learnanything/stable-diffusion-xl -andreassteiner/robo-call -ZiyadCodes/ArabicGPT -Manjushri/Erebus -akoksal/LongForm-OPT-125M -fastx/customer-support-chatbot -JFoz/dog-controlnet -davanstrien/arch_demo -abidlabs/docquery -aimstack/bloom -SoulAbi/ChatGPT4 -UVA-GCOM/Group_1 -whiskyboy/CogsGPT -rxn4chemistry/synthesis-protocol-extraction -abidlabs/Acapellify-Frontend -hydai/InterviewPrepGPT -Intel/intel-xai-tools-cam-demo -jackyccl/segment-anything -SebastianBravo/simci_css -eldhoskj/speechbrain-tts-tacotron2-ljspeech -ppsingh/cpu-demo -typesdigital/TwitterPRO -DuckyPolice/StormDrainMega -JanBabela/Riffusion-Melodiff-v1 -rottenlittlecreature/Moon_Goblin -a-v-bely/spanish-task-generator -Yina/google-pix2struct-base -prath/low_light_image_enhancement -AutoGeneralAI/ChatGPT -Hazem/Pub_face -bird-watching-society-of-greater-clare/brainy -ahmadprince007/HolyBot -it-at-m/image-anonymizer -fynn3003/image_to_text -huggingfacejs/doc-vis-qa -AutoGeneralAI/voice-assistant -MathysL/AutoGPT4 -fynn3003/python_code_generator -nkasmanoff/SearchingFace -Arjav/TOS-Summarization -sunder-ali/Image_Denoising_Demo -posit/quarto-template -FourthBrainGenAI/TalkToMyDoc-Hitch-Hikers-Guide -Roboflow/web-demo -onursavas/langchain-chat-with-pdf -Mahiruoshi/vits-chatbot -hahahafofo/ChatPDF -IvaElen/nlp_proj -simplyjaga/movie_genius_openai -vinay123/panoptic-segment-anything -dstackai/dstack-template -ImagineAI-Real/ImagineAI-Image-Generator2 -kevinwang676/Voice-Cloning-Demo -softcatala/comparativa-tts-catala -ohmyteeth/seo-tools -thealphhamerc/text-to-speech -varun500/MBZUAI-LaMini-GPT-1.5B -inplisQlawa/anything-midjourney-v4-1 -suppsumstagza/text-to-image-stable-diffusion-v1-5 -pjmartorell/AnimeGANv3 -DiffusionArtco/RealisticPhotoModels -IvaElen/find_my_pic -abhishekmamdapure/llama-cpp-python -vama09/HashtagAndCaption -bell-tommy/SG161222-Realistic_Vision_V1.4 -Zulqrnain/FAST_NU_PAST_PAPERS -codeparrot/gradio-playground -dapeng629/simple_chatbot -yamashiro3/Whisper-gpt-voicescribe -DiffusionArtco/Diffusion200Max -liyucheng/selective_context -Zenne/chatbot_llama_index -Nahrawy/ControlLight -p-baleine/metaanalyser -Ameaou/academic-chatgpt3.1 -birkancelik18/chatbot -bergrozen1213/3d-obj-v2 -Scakmak/Chatbot -jayparmr/ICBINP_OG -abbbbbbbbbbbbbb/poetry -NeuralInternet/InfiniteGPT -sklearn-docs/SVM-Kernels -jatin-tech/SkinZen 
-Vageesh1/clip_gpt2 -aus10powell/TwitterAccounts -maxmon/auto_anno -ysharma/Effectively_Using_IF -hylee/finetuned_diffusion -paulbauriegel/voice-coe-data -neuroliptica/2ch_captcha -Masa-digital-art/planning-proposal-gpt-4 -HugoDzz/spaceship_drift -Gokul14/impira-layoutlm-document-qa -Hunter731/Unity3D-RTS -banana-projects/web3d -and-effect/Musterdatenkatalog -befozg/stylematte -tubui/rosteal -Not-Grim-Refer/huggingface-transformers-agents -Forbu14/LoiLibreQA -noes14155/runwayml-stable-diffusion-v1-5 -conceptofmind/PaLM_models -jhlfrfufyfn/bel-tts -Heshwa/html-code-generation-from-images-with-deep-neural-networks -yuanzhoulvpi/chinese_bloom_560_chat -dfalbel/gptneox-chat -Farazquraishi/pendora -nimadez/grammbot -ennov8ion/Scifi-Models -ennov8ion/semirealistic-models -doluvor/faster-whisper-webui -hjv28158/stable-diffusion-webui-cpu -ennov8ion/FantasyArt-Models -ennov8ion/dreamlike-models -Proveedy/dreambooth-trainingv15 -BramVanroy/mai-simplification-nl-2023-demo -empulse/ehartford-WizardLM-30B-Uncensored -mstager/ChileanGPT -sabirbagwan/WhatsappGroupAnalysis -SoulAbi/whisper-youtube-video-text -rubend18/ChatGPT-Prompt-Generator -Al-Chan/Vits_League_of_Legends_Yuumi_TTS -Manjushri/Nerybus -MISATO-dataset/Adaptability_protein_dynamics -dragonSwing/video2slide -onereal/Voice-Cloning-for-you -ChatGPT-GAIA/GAIA-GPT -asoria/duckdb-parquet-demo -Ababababababbababa/AraPoet -matthoffner/storywriter -Annotation-AI/fast-segment-everything -lgaleana/toolkit -SurendraKumarDhaka/Text-to-speech-converter -Kyo-Kai/Fsg-pp -Rardilit/Rardilit-Ciffusion_v0.1 -Shriharshan/Image-Caption-Generator -amasad/Replit-v1-CodeInstruct-3B -maksymalist/junk-judge -punith-098/controlnet-interior-design -sohomghosh/FinLanSer_Financial_Language_Simplifier -DonDoesStuff/Bing-AI-demo -janshah/demo-app-FALCON40b -Brasd99/JustClothify -petervavank/VoiceConvertion -aksj/Dreamland-GenAI-Music -potsawee/multiple-choice-QG -yash-srivastava19/insta_captions -Vipitis/ShaderCoder -matthoffner/chatbot -RuthBebe/sentiment_analysis -massi/prompter -melazab1/ChatGPT4 -gli-mrunal/GPT_instruct_chatbot -isaakkamau/whisper-video-caption -dukujames/Text-Image -vivianinhugging/TheBloke-guanaco-65B-HF -gaia-benchmark/leaderboard -Slep/CondViT-LRVSF-Demo -Nixic/rvc-models -Dxtrmst/TheBloke-WizardLM-Uncensored-Falcon-7B-GPTQ -MetaWabbit/Auto-GPT -amanatid/PubMedGPT -openaccess-ai-collective/arena-archived -Chaitanya01/InvestingPlatform -dragonSwing/isr -thegovind/LangFlow -yxmnjxzx/PubMedGPT -distbit/NousResearch-Nous-Hermes-13b -rakhlin/SpeechT5 -marusia/img_styler -kevinwang676/Voice-Changer-Light -oschan77/virtualoscar -BartPoint/VoiceChange_Beta -MikeTrizna/bhl_clip_classifier -internetsignal/audioLDMtext -NeuML/baseball -kevinwang676/test-1 -simonduerr/rosettafold2 -StephanST/OpenLanderONNXonline -alexrame/rewardedsoups -syf2023/chatbot -hanzportgas/rvc-models-v2 -Saturdays/Starchat_Saturdays -vasu0508/Meena_Chatbot -Jackflack09/diffuse-custom -hrdtbs/rvc-mochinoa -keivan/Is_he_fat -chenbowen-184/Martin-Valen-Text-to-Image -bingbing520/ChatGPT2 -anpigon/talktosayno -Icar/AICompanion -Inderdev07/facerecognition -simsantonioii/MusicGen-Continuation -zwhe99/MAPS-mt -HI915/Test02 -raseel-zymr/Document-QandA -mrstuffandthings/Bark-Voice-Cloning -Anish13/characterGPT -osanseviero/voice-cloning-public -SRDdev/EchoSense -xuxw98/TAPA -leonelhs/superface -patgpt4/MusicGen -bilgeyucel/prompt-lemmatizer -RockmanYang/vocal_remover -allknowingroger/Image-Models-Test2 -ElainaFanBoy/MusicGen -leonelhs/remove-background 
-lekkalar/chatgpt-for-pdfs-without-chat-history -UjjwalVIT/Text_analysis_and_metadata_app -willgibs/ControlNet-v1-1 -majinyu/recognize-detect-segment-anything -Babelscape/mrebel-demo -paulhebo/smart_qa -allknowingroger/Image-Models-Test5 -isaakkamau/Text-To-Speech -GirishKiran/sentiment -jbilcke-hf/template-node-wizardcoder-express -allknowingroger/Image-Models-Test6 -zhsso/roop -BertChristiaens/blip-diffusion -allknowingroger/Image-Models-Test7 -MSLAB/PaperGPT -Syrahealthorg/HealthCare_workforce -PineSearch/generateAudio -radames/OpenAI-CLIP-JavaScript -jeffyang123/ctheodoris-Geneformer -sooolee/beer-sommelier -ammarnasr/Code-Generation-with-Language-Specific-LoRa-Models -allknowingroger/Image-Models-Test10 -Sarath2002/YouTube_Video_Summarizer -justest/chatglm2-6b-int4 -shivammehta25/Diff-TTSG -yuzu34/rvc-hololive -randstad/Resume_Analyser -phamson02/tho_ai -allknowingroger/Image-Models-Test12 -nsarrazin/agents-js-oasst -kl08/personality_detectionV2 -rstallman/langchain-chat-with-pdf-openai -propilot/seo-powered-by-ia -Fredithefish/PixelRevive -songdaooi/Swap -wseo/i18n-huggingface -FFusion/FFusion.AI-beta-Playground -allknowingroger/Image-Models-Test19 -Shriharsh/Text_To_Image -ShoaibMajidDar/Blog_generator -stamps-labs/stamp2vec -nomic-ai/Gustavosta_Stable-Diffusion-Prompts -nomic-ai/WizardLM_WizardLM_evol_instruct_V2_196k -nomic-ai/hakurei_open-instruct-v1 -johko/NSQL-Text-To-SQL -Wayne-lc/drive_like_human -Ababababababbababa/Arabic_poem_classifier -lijiacai/chatgpt-next-web -Thunderstone/trial -Metal079/wd-v1-4-tags -1111u/oai-reverse-proxy -merve/my_own_oasst_falcon -renumics/whisper-commonvoice-speaker-issues -openaccess-ai-collective/oo-preview-gpt4-200k -JohanDL/GPT4Readability -KarmKarma/genshinimpact-rvc-models-v2 -allknowingroger/Image-Models-Test34 -allknowingroger/Image-Models-Test35 -CloudOrc/SolidUI -allknowingroger/Image-Models-Test36 -multimodalart/upload_to_hub_folders_progress_bar -gyugnsu/DragGan-Inversion -paulokewunmi/jumia_product_search -Amrrs/DragGan-Inversion -Sandiago21/speech-to-speech-translation-greek-with-transcription -leafShen/CodeFormer -allknowingroger/Image-Models-Test39 -allknowingroger/Image-Models-Test40 -CofAI/chat.v1 -jonathang/WeatherBoy -jbilcke-hf/VideoChain-UI -PeepDaSlan9/meta-llama-Llama-2-70b-chat-hf -allknowingroger/Image-Models-Test42 -openbmb/viscpm-paint -nmitchko/AI-in-Healthcare -WindVChen/INR-Harmon -richardr1126/sql-skeleton-wizardcoder-demo -allknowingroger/Image-Models-Test43 -gaodrew/constellation -Laden0p/Joeythemonster-anything-midjourney-v-4-1 -t0int/ehartford-Wizard-Vicuna-30B-Uncensored -Ababababababbababa/Ashaar -ZX9966/Fintech -FFusion/FFXL-SDXL-Convert-diffusers -rossellison/kpop-face-generator -allknowingroger/Image-Models-Test47 -renumics/stable-diffusion-strengths-weaknesses -lj1995/trump -freddyaboulton/echo-chatbot -bochen0909/speech-to-speech-translation-audio-course -johnberg/CLIPInverter -CofAI/optor -XxXBobMarleyXxX/oai-proxy -AIZero2HeroBootcamp/Memory -OptorAI/gen -justest/wav2lip -allknowingroger/Image-Models-Test50 -allknowingroger/Image-Models-Test51 -OuroborosM/STLA-BABY -LLaMaWhisperer/LegalLLaMa -s3nh/LLaMA-2-7B-32K-GGML -princessty/stabilityai-stable-diffusion-xl-base-1.0 -NohTow/LLM_watermarking -DVLH/consciousAI-question-answering-roberta-vsgshshshsbase-s-v2 -BaitMan/abroader-otters -CofAI/sd-2.1 -NohTow/Llama2_watermarking -towardsai-buster/buster -pikto/Diffuser -a121440357/bingAI -Abhay834/my_genai_chatbot -allknowingroger/Image-Models-Test57 -MattiaSangermano/IncentiveAI 
-tanishqvashisht/colorizeAnime -shayakh/sdrv50 -Insightly/web_scraper -Ekohai/bingAI -TechnoByte/ComfyUI-Kybalico -haouarin/pdftotext -omdena-lc/omdena-ng-lagos-chatbot-interface -0xSynapse/Segmagine -imjunaidafzal/LoRA-DreamBooth-Training-UI -linhdo/checkbox-detector -metricspace/OcTra -gorkemgoknar/moviechatbot-v2 -allknowingroger/Image-Models-Test67 -allknowingroger/Image-Models-Test69 -konbraphat51/Kato-DB -JoPmt/Txt-to-video -Manjushri/AudioGen-CPU -allknowingroger/Image-Models-Test71 -valeriylo/saiga_rag -gradio-discord-bots/StableBeluga-7B-Chat -NoCrypt/mikuTTS -YouLiXiya/Mobile-SAM -allknowingroger/Image-Models-Test76 -Jamel887/Rvc-tio887 -RoversX/Stable-Platypus2-13B-GGML -allknowingroger/Image-Models-Test77 -RedValis/Music-Helix -Aristore/Warp -cloudtheboi/Lofi4All -flatindo/generate5 -allknowingroger/Image-Models-Test84 -BramVanroy/llama-2-13b-chat-dutch-space -itxh888/Summarize-Webpage-Link -mygyasir/deep-voice-cloning -allknowingroger/Image-Models-Test85 -ai-maker-space/Barbie-RAQA-Application-Chainlit-Demo -randomtable/SD-WebUI -mmnga/vocabviewer -radames/transformers-js-sveltekit-static-example-app -allknowingroger/Image-Models-Test95 -aliabid94/idefics_playground -viait/dolphinchat-chatgpt-demo-ui -radames/transformers-js-sveltekit-server-example-app -aaaaaabbbbbbbdddddddduuuuulllll/poetry -aaaaaabbbbbbbdddddddduuuuulllll/topic2poem -aaaaaabbbbbbbdddddddduuuuulllll/AraPoet -themanas021/Youtube-Video-Summarizer -Codecooker/rvcapi -dolphinchat/global -sandrocalzada/swap_face -MechaXYZ/Audio-to-Text -walterclozet/invisiblecat-Uber_Realistic_Porn_Merge_V1.3 -kquote03/lama-video-watermark-remover -crobbi/LipNet -AhmedM20/Email_Marketing_Content_Generator -Polyhronis/codellama-CodeLlama-34b-Instruct-hf -ngaggion/Chest-x-ray-HybridGNet-Segmentation -codewithbalaji/WizardLM-WizardCoder-Python-34B-V1.0 -Myuu-tastic1/Myuung -bayartsogt/real-time-tokenizer -Statical/STC-IDM -aTrapDeer/Img2TimDillonRant -llm-blender/LLM-Blender -NemesisAlm/GeolocationCountryClassification -Omnibus/EZ-Voice-Clone -gustproof/sd_prompts -zamasam/death -hoyinli/demo-app -4com/4com-license -paufeldman/vv -Dify-AI/Baichuan2-13B-Chat -truong-xuan-linh/auto-comment-generation -CosmoAI/BhagwatGeeta -allknowingroger/Image-Models-Test126 -codefuse-ai/README -Yash911/IMAGEALCHEMY-TEXT-TO-VISUALS -nyanko7/niji-playground -tomandandy/MusicGen3 -TabbyML/tabby-template-space -pourmand1376/Seamlessm4t_diarization_VAD -exaggerated/PaddleOCR -cubzh/cubzh -wzhouxiff/RestoreFormerPlusPlus -kevinwang676/Bert-VITS2 -IlyasMoutawwakil/llm-bar-race -ntt123/vietnam-male-voice-wavegru-tts -AnticPan/Clothes2Human -digitalxingtong/Azuma-Bert-VITS2 -Statical/STC-LLM-CHAT -huggingface-projects/falcon180b-bot -colornative/goofyai-3d_render_style_xl -Loreleihunny/total_capy-love -Mysterykey/Mystery -banana-dev/demo-clip-interrogator -PeepDaSlan9/hpcai-tech-Colossal-LLaMA-2-7b-base -miittnnss/dcgan-image-generator -ylacombe/children-story -MultiTransformer/EZChat -prthgo/Spam-Message-Classifier -librarian-bots/collection_papers_extractor -cr7-gjx/Suspicion-Agent-Data-Visualization -alwayse/MMD_MP_Text_Dection -eaglelandsonce/weatherQnA -Kvikontent/kandinsky2.2 -Paresh/Facial-feature-detector -cr7-gjx/Suspicion-Agent-Demo -pharma-IA/PharmaWise_Experto_Data_Integrity_V2C -bunkalab/bunka-map -newgpt/chatgpt-4 -pharma-IA/PharmaWise_Prospecto_Generico_Acetilsalicilico_V2C -pharma-IA/PharmaWise_Prospecto_Generico_Vortioxetina_V2C -totemko/ostris-ikea-instructions-lora-sdxl -XzJosh/Gun-Bert-VITS2 -ShaLee/gpt35 -colbyford/evodiff -desudes/desu 
-AMR-KELEG/ALDi -blanchon/gaussian-splatting-kit -BirdL/DONOTUSEDemo -ura-hcmut/ura-llama-playground -XzJosh/XingTong-Bert-VITS2 -editing-images/ledtisplusplus -stevhliu/inpaint-mask-maker -AIWaves/SOP_Generation-single -shibing624/CLIP-Image-Search -SUSSYMANBI/nerijs-pixel-art-xl-sdxl -allknowingroger/Image-Models-Test197 -jiangjiechen/Auction-Arena-Demo -Manglik-R/PDF-ChatBot-BCS -duchaba/kinship_llm -xuyingliKepler/xuying_falcon -awacke1/MusicGenStreamFacebook -victorisgeek/SwapFace2Pon -freecs/A.I.R.S -megaaziib/RVC-V2-Huggingface-Version -hpa666/ham -vih-v/SDXL-1.0-Inpainting -Felladrin/Web-LLM-Mistral-7B-OpenOrca -XzJosh/Bella-Bert-VITS2 -Kvikontent/kviimager -DarwinAnim8or/Blip-Dalle3 -cdavenpo822/ToyWorld -arsalagrey/streaming-text-generation-vue -enzostvs/hair-colour -alonsosilva/tokenizer -silk-road/ChatHaruhi-RoleLLM-English -Gh6st66/invisiblecat-Uber_Realistic_Porn_Merge_V1.3 -Tonic/MistralMED_Chat -AdityaVishwakarma/LiveChecker -AILab-CVC/EvalCrafter -arsalagrey/object-detection-vue -xuyingliKepler/VecDBCompare -awacke1/CanAICode-Leaderboard-Customized -XzJosh/Wenjing-Bert-VITS2 -adumrewal/mtcnn-face-landmarks -parthb3/YouTube_Podcast_Summary -lunarflu/falcon-180b-demo-duplicate -gstaff/KiteWind -tonyassi/selfie-fashion-magazine -TheStinger/Ilaria_Upscaler -pseudolab/K23MiniMed -jerpint/RAGTheDocs -BREWDAcademy/Brewd-Diffusion -aftonrobotics/sisterlocation -pseudolab/moogeulmoogeul -OpenDILabCommunity/LLMRiddlesChatGPTEN -satrn088/Gender_Recognition -Roboflow/DINO-GPT4V -kevinwang676/OpenAI-TTS-Voice-Conversion -nus-cs5647-team-5/Mandarin_Tone_Evaluation -AhmedSSoliman/MarianCG-CoNaLa -Amrrs/gradio-sentiment-analyzer -Atsushi/kinoko-mini-AI -Bagus/speaker-verification-demo -Baishali/Pneumonia-Detection -BradSegal/Literature-Rating -Brayan/CNN_Tumor_Cerebral -CVPR/Example-Echocardiogram-Segmentation -Daniele/forma-locutionis -DarshanMM/OpenAICodexSummarizer -DeepDrivePL/BEiT-Semantic-Segmentation -Demonic/Text_Summarizer -Didier/Semantic_Search_arXiv -DrishtiSharma/Diarization -DrishtiSharma/Image-search-using-CLIP -Emanuel/pos-tag-bosque-br-demo -ErenYeager01/Traffic_sign_recognition -Giuliano/Conversational-Wikipedia -Harveenchadha/speech2speech -Ignahugging/Plants_classification -JonatanGk/cyberbullying-detector -Kuaaangwen/auto-grader -LegacyLeague/Legacy_League -MarkusDressel/cord -Nipun/KL-Divergence-1d -PeerChristensen/TrumpTweetsDevice -Recognai/veganuary_ner -Rick458/Desi-Food-Vision -Rules99/Bioinformatics_Project -RyanX/BookSearch -SLU-CSCI5750-SP2022/homework03_DigitClassificationKNN -Sa-m/Dogs-vs-Cats -Sa-m/YoloV5-Party-Symbol-Detector-V1 -Sakil/english_audio_transcriptor -Sakil/tweetlib6_app -Sammy03/neuralserach -Sanan/Infrared_Object_Detection_YOLOv5 -Saturdays/ReconocimientoEmociones -SaulLu/test -SebastianEnger/textgenerator -Sense-X/uniformer_image_demo -Sense-X/uniformer_video_demo -Siddhant/ESPnet2-SLU -Sultannn/Text_summarization_with-MBART -Vrk/SeeFood -Vrk/SkimLit -Wikidepia/IndoPara-Gen -Wootang01/Paraphraser_two -Wootang01/Punctuation_capitalization_corrector -Wootang01/part_of_speech_categorizer -Wootang01/vocabulary_categorizer -Wootang01/vocabulary_categorizer_two -abhibisht89/ADR_XTRACTER -abhilash1910/QA_Albert -abidlabs/english2german -abidlabs/voice-verification -aditi2222/Summarization_english -afry-south/lowlight-enhancement -agueroooooooooo/Transport_Mode_Detector -ahmedJaafari/Annarabic -ajitrajasekharan/NER-Biomedical-PHI-Ensemble -akdeniz27/turkish-qna-with-xlm-roberta -akhaliq/Car_Keypoints -akhaliq/DeBERTa-v3-base-mnli -akhaliq/Holistic 
-akhaliq/Pop_Music_Transformer -akhaliq/SimCSE -akhaliq/brain_segmentation -akhaliq/deeplab2 -akhaliq/fairseqs2 -akhaliq/pgan -akhaliq/t5-base-lm-adapt -albertvillanova/datasets-tagging -aliabd/SummerTime -gradio/calculator-flagging-options -aniket/gradsflow-text-classification -arampacha/chat-with-simpsons -arijitdas123student/gpt2-demo -asimokby/cv-parser-huggingface -austin/adr-detection -autosummproject/autosumm -ayaanzaveri/mnist -aymm/Task-Exploration-Hate-Speech -begar/amazon-reviews-demo -bguberfain/Detic -bhanu4110/Lungs_CT_Scan_Cancer -bipin/mltwitter -birdortyedi/instagram-filter-removal -breathingcyborg/reviews-actionable-insights -buio/attr-cond-gan -cahya/image-search -cakiki/facets-dive -carlosaguayo/cats_vs_dogs -cdleong/phonemize-audio -chaitanya9/emotion_recognizer -chicham/query_analysis -chinhon/Chinese_News_Headlines_Generator -chinhon/Speech_Sentiment_Analysis -danijelpetkovic/test-tts-inference-api -dechantoine/PokeGAN -deep-learning-analytics/Title_Generation -docs-demos/distilbert-base-uncased -dpc/vien -Datatrooper/sentimiento -Datatrooper/wine -dumitrescustefan/NamedEntityRecognition-Romanian -eddydecena/cat-vs-dog -equ1/mnist_interface -facebook/XLS-R-1B-EN-15 -facebook/XLS-R-2B-21-EN -flax-community/Mongolian-GPT2 -flax-community/TamilLanguageDemos -flax-community/alberti -gagan3012/IMD -gagan3012/project-code-py -gossminn/fillmorle-app -haotieu/Vietnamese-News-Summarizer -hi9/core4testing -hlopez/Waste-Detector -huggingface-course/amazon-reviews-demo -isabel/club-project -isabel/image-test -isabel/pet-project -jason9693/KoreanHateSpeechClassifier -jason9693/Soongsil-Bot-KoGPT -jmansfield89/Tweet_NLP_Sentiment_Analysis -jositonaranja/glide-text2img -joyson072/Stock_market_prediction -jrichez/disaster_tweets -jruneofficial/text2pixel -karolmajek/PaddleHub-BiSeNetV2 -keras-io/pixelcnn-mnist-image-generation -kingfisher/spacy-ner -kpriyanshu256/acronym-disambiguation -lev/nlp -levandong/MNIST-detect-deploy-webapp -lewtun/hslu-demo -mawady/Demo-integrated-gradients-alibi-gradio -mawady/demo-catsvsdogs-gradio -mayhug/rf5-anime-image-label -mbahrami/AutoComplete -merve/fourier-transform -merve/spaces-demo -mgczacki/toxicspans -mikeee/radiobee-dev -mikeee/ttw -mlkorra/competitive-analysis -mmcquade11/Image-to-Text -mmcquade11/autonlp-reuters-summarization -mmeendez/cnn_transformer_explainability -monsoon-nlp/AntiExplanation -muhammadayman/data_science_content_en_to_ar -napoles3d/st_parade -nata0801/RuEn_ASR_with_Voice_Recorder -nateraw/host-a-blog-on-huggingface-spaces -nlp-en-es/roberta-qa-es -nazianafis/Sentiment-Analysis -osanseviero/SMILES_RDKit_Py3DMOL_FORK -paulbricman/decontextualizer -piecurus/Summarizer -pietrolesci/wordify -prateekagrawal/roberta-testing -pushkaraggrawal/Summarizer -pytorch/DeepLabV3 -pytorch/NTSNET -pytorch/PGAN -pytorch/SSD -pytorch/WaveGlow -ra2w/TableQandA -rajesh1729/Text-analysis-with-spacy-and-streamlit -rajesh1729/question-answering-gradio -rebolforces/jcastles -sanjana/Loan-Prediction-Analysis -savasy/Multilingual-Zero-Shot-Sentiment-Classification -savasy/SentimentHistogramForEnglish -sentencebird/audio-noise-reduction -sentencebird/translation-word-order -seyia92coding/video-games-recommender -shahukareem/Wav2Vec2-Large-XLSR-53-Dhivehi -shashankanand13/used_car_prediction -shibing624/code-autocomplete -shubh2014shiv/Japanese_NLP -snoop2head/KoGPT-Conditional-Generation -springml111/T5_Paraphrase_demo -surendraelectronics/weatherApp -swcrazyfan/DeKingify -taesiri/LatexDiff -temp-late/manga-anime-premium -temp-late/manga-anime 
-tobiascz/SDSdemo -un-index/textgen6b -versae/modernisa -vesteinn/Bird-Classifier-CLIP-NABirds -vivien/causal-simulator -warwickai/fin-perceiver-demo -widged/gender-bias-evaluation -widged/text-paraphrasing -widged/text-summarization -xiaoxuezi/spleeter -xiongjie/face-expression-ja-example -yashsrivastava/speech-to-text-yash -tensorflow/esrgan-tf2 -yangtaowang/TokenCut -osanseviero/draw-minimal-copy3 -DataDoggo/Visionary -RivianG/Asis -atticus/image-text-retrival-huster -templates/fastapi_with_streamlit -Wootang01/chatbot_four -taka-yamakoshi/bert-priors-demo -tareknaous/Chatbot-DialoGPT -123harsh/gradio-easywriter -onnx/ArcFace -tareknaous/Empathetic-DialoGPT -davidmasip/racism-gr -samueldomdey/ClipCosineSimilarityUpload -EricaCorral/Chinese-To-English-Tools -farukozderim/zero-shotts -EricaCorral/Chinese-Tools-FAST -course-demos/distilbert-base-uncased-finetuned-imdb -z-uo/streamlit_music_demo -virender74/plant-disease -adlozano1/gibberish_detector -CVPR/visual-clustering -arpm01/financial-summarization -vivien/semanticsearch -ncats/EpiPipeline4RD -epdavid2/morsecode -calvininterview/interview-streamlit -NahuelCosta/DTW-CNN -bensonsantos/CANnet_Crowd_Counting -onnx/FCN -harveysamson/wav2vec2-speech-emotion-recognition -johnowhitaker/CLIPRGB-ImStack -iSky/spam-detector -cedssama/I3D_Sign_Language_Classification -abbylagar/multilingual_keyword_extractor -DerrylNessie/MangaCleaner -vanessbut/tldr_keywords -josedolot/HybridNet_Demo2 -gbach1lg/PhotoStyleTransfer -Sa-m/Auto-Translation -baguioni/Voice-Activity-Detection -utec/FedericoRodriguezDetectorSentimentalTwitter -chrismay/Sentiment-demo-app -pplonski/mercury-test-2 -johnowhitaker/orbgan_demo -ronvolutional/iframe-test -IPN/demo_cms_1 -nickil/weakly-supervised-parsing -hackathon-pln-es/Paraphrase-Bertin -AdityaMahimkar/PlagiarismChecker -Saturdays/spanish-quechua-detector -hackathon-pln-es/itama-app -radames/Jupyter-Kernel-Gateway-Flask -huggan/Colorb_GAN -awacke1/StreamlitCookies -hysts/MobileStyleGAN -awacke1/TimerASRLive -tomofi/Google-Drive-OCR -d0r1h/Hindi_News_Summarizer -awacke1/Video-View-Download -asdasdasdasd/Face-forgery-detection -GuiltySpark/amikus_text_summarizer -awacke1/HTML5-AR-VR -sil-ai/aqua-semantic-sim -kargaranamir/parstdex -Egrt/MaskGAN -webis-huggingface-workshop/f_demo_question_gen -hysts/ibug-face_parsing -hysts/TADNE-interpolation -huggan/cityscapes-pix2pix -nateraw/test-pix2pix-load -ecarbo/AutomaticSpeechRecognition -vikiiiii/musical-tone-123 -kargaranamir/visual-clutter -hysts/TADNE-image-selector -awacke1/Grammar-Styler -NahuelCosta/RUL-Variational -ma-xu/LIVE -PaddlePaddle/resnext101_32x16d_wsl -ulysse/lyme -awacke1/TextImg2Art -awacke1/QandAGenerator -jy46604790/Fake-News-Recognition -h4d35/CosineSim -yangy50/garbage-image-classification -ThomasSimonini/Conversation-in-a-Tavern -Bijoy2001/real-time-voice-recognition -pie/NER -jacklindsai/is_it_elon_musk -tficar/amazon-rating-calculator -yhshin/kr-article-summarizer -tomofi/NDLOCR -mgfrantz/pii_masking -Zengyf-CVer/Gradio_YOLOv5_Det_v2 -satpalsr/TransPose -rajesh1729/NLP-with-mercury-spacy -Epitech/AiOnIot-Antoine-Quentin-Valentin-Maxime -iamkb/voc-demo -BennoKrojer/imagecode-demo -DanteOz/Minimal-Endpoint -hylee/photo2cartoon -CVMX-jaca-tonos/YouTube-Video-Streaming-Spanish-ASR -thisisanshgupta/solo-coder-20B -CVMX-jaca-tonos/Identificar-lenguas-y-frases -bencoman/WhichWatersport -feng2022/Time-TravelRephotography -jbetker/tortoise -malteos/gpt-german -JerynC/catloaf -eduardofv/multilang_semantic_search_wikisimple -HighCWu/starganv2vc-paddle 
-Ezi/ModelCardsAnalysis -volen/nft-search -VietAI/ViNewsSum -dnth/gpt-neo-paraphrase -course-demos/draw2 -awacke1/AnimationAI -smc/pole_or_trafo -publichealthsurveillance/PHS-BERT -course-demos/Remove-bg -seduerr/text_analytics -Bavesh/Oral_Cancer_Detection -bankholdup/stylegan_petbreeder -valurank/Article_Summarizer -ntt123/mnist-rnn -azaninello/gpt2-general -ashishraics/MCQ-Generator -pierreguillou/document-layout-detection-dit-image-instances -evaluate-metric/mean_iou -evaluate-metric/squad_v2 -nihaldsouza1/clearlydefined_license_summarizer -armgabrielyan/search-in-video -nobrowning/M2M -Aniemore/Russian-Emotion-Recognition -Gradio-Blocks/Dog-Breed-Identification-App -HarryLee/eCommerceImageCaptioning -Ritvik19/SentiNet -tarteel-ai/latest-demo -awacke1/GenerativeWordsandImages -rushic24/DialoGPT-Covid-Help-Doctor -flava/neural-style-transfer -butterswords/nlc-explorer -dipesh/JarvisAI-Intent-Classification-Bert-Base-Cased -awacke1/WordGames -Yah216/Arabic_poem_classifier -awacke1/FirestorePersistence -teticio/inBERTolate -doevent/AnimeGANv2 -Gradio-Blocks/EDSR -matjesg/deepflash2 -evaluate-metric/rl_reliability -tinkoff-ai/response-quality-classifiers -hf-maintainers/README -SusiePHaltmann/HaltmannDiffusionv0 -yl4579/StyleTTS -nagolinc/liteDungeon -viktor-enzell/wav2vec2-large-voxrex-swedish-4gram -arunavsk1/Pubmed-Named-Entity-Recognition -czkaiweb/StarryNight -huspacy/example-applications -ConorDY/feedback-chatbot -mohitmayank/law-finder-ipc -hongaik/hc_text_classification -keras-io/3D_CNN_Pneumonia -unco3892/real_estate_ie -summerstay/vectorAPI -ashrestha/auto-multi-class -keras-io/metric-learning-image-similarity-search -neurotech/Swahili-NER-Tagger -bigscience-data/document-sizes -osanseviero/latent-converter -keras-io/text-classification-with-transformer -valurank/Article_summarizer_cnn_large_testing -Axolotlily/TextGen -dfskGT/parrot-paraphraser -sarunas856/tinder -rajistics/finbert_forwardlooking -dmccreary/spaces-demo -keras-io/Node2Vec_MovieLens -mullikine/ilambda -keras-io/semantic-image-clustering -keras-io/SpeakerRecognition -armandnlp/gpt2-TOD_app -rajeshradhakrishnan/malayalam-news-classify -jmcob/Transformers-StoryWriting -awacke1/AR-VR-IOT-Demo -awacke1/ChemistryModelerSMILES -sasha/MetricCompare -SergioMtz/MNIST_Digit_Recognition -Slender/image_editing_app -ThunderJames/PhotoRealistic -kkawamu1/huggingface_code_generator -nickmuchi/Face-Mask-Detection-with-YOLOS -beihai/PDF-Table-Extractor -WZT/DigiProj -ekojs/ml_food10 -CVPR/flava-multimodal-zero-shot -Akinade/Iris_App -maker57sk/linkedin_analysis -Axolotlily/DalleMini -mahidher/comment_toxicity -SusiePHaltmann/GPT-DALL-X -AlvearVanessa/Edad_biologica_retina -Chemsseddine/summarisation -keras-io/timeseries-classification-from-scratch -CVPR/transfiner -pyimagesearch/nmt-luong -mikachou/dog-breed-identification -alan-chen-intel/dagan-demo -djsull/aha-multi-label -ilan541/OncUponTim -DiweshUIT/Spectrometer -NLTM/IndicBART -keras-io/timeseries_forecasting_for_weather -j-m/formality_tagging -mrosinski/risk-predictor -jph00/daniel-img-fix -sumit12/SHIPMENT_PRICING_PREDICTION -anirudhmittal/humour-detection -ossaili/architectural_styles -ICML2022/Leaderboard -awacke1/Gradio-Blocks-Demo -awacke1/ArtStyleFoodsandNutrition -Hassan175/suicide-detection -MohamedRafik/Password_Generator -twobob/imagegpt -mrchtr/semantic-demo -saadob12/Chart_Data_Summarization -Msp/invoice_processing_layoutlmv3_custom -furrutiav/beto_coherence -ysharma/testing_llm -EuroPython2022/YOLOv5 -ahnafsamin/GroTTS-FastSpeech2 -QuoQA-NLP/QuoQaGo 
-amsterdamNLP/attention-rollout -EuroPython2022/latr-vqa -EuroPython2022/gpt2-TOD_app -BenjaminB/pyscript-demo -EuroPython2022/Face-Mask-Detection-with-YOLOS -darragh/swinunetr-dicom-video -EuroPython2022/ToxicCommentClassification -milyiyo/testing-diffusers -EuroPython2022/alpha-on-ridge-regression -nev/dalle-6D -missmeyet/Translate_Text_In_Images -zion581/sentiment_analysis_by_rohan -mrm8488/hf-diffusers -EuroPython2022/mediapipe-hands -flynster/FeinbergQuizNotes -nickprock/banking_intent_classification -EuroPython2022/banking_intent_classification -NomiWai/anime-collaborative-filtering-space -pythainlp/pythainlp -LDY/Chinese-Question-Answering -Sangmin/Eiken-Essay-Using-BLOOM -EuroPython2022/Sketch2ColourDemo -clarin-pl/datasets-explorer -nkatraga/7.22.CarePlanQnAWithContext -awacke1/ASRtoTexttoStorytoImagestoVideo -Preetesh/VideoSummaryfromYouTubeVideo -awacke1/VideoSummaryYoutube3 -Zengyf-CVer/color_generator -kyled/PhraseSentimentEmotionAnalysis -platzi/platzi-curso-streamlit-butterfly-gan -omlab/VL_checklist_demo -ekenkel/dog-identifier -Paatiii1712/stock_market_forcasting -aiEDUcurriculum/introtoAI-anime-project -aiEDUcurriculum/introtoAI-climate-change-project -aiEDUcurriculum/introtoAI-pets-project -keithhon/google-universal-sentence-encoder-v4-similarity-score -keithhon/T0pp -katielink/spleen_segmentation -samuelinferences/TabPFN -jmcob/StreamlitGrammarCorrectorStyler -micole66/video -KneeKhan/DSSG_Test -gstaff/test_space -ky2k/summarize_text -JulesBelveze/concepcy -nakamura196/yolov5-kunshujo -leumastai/BackgroundChanger -Akshat-1812/Dog-Vision -nagolinc/minDalle_GFPGAN -bahman/labequip -azadranjith/emotional_damage -MFawad/Emergency_vehicle_classifier -anikfaisal/weather_image_classifier -RobPruzan/automaticlitassesment -ShAnSantosh/Chatbot_Using_Pytorch -fedihch/InvoiceReceiptClassifierDemo -Einmalumdiewelt/German_text_summarization -simonduerr/3dmol.js -pustozerov/poc_call_transcription -suddu21/Garbage-Classification-VGG19 -cmu-adcs/videogenic -awacke1/LED-Long-Form-SummariesBeamLengthTokenRepNgramVariantsTDDGradio -anonymous-demo/Anonymous-TranSVAE-Demo -MrVicente/RA-BART -brayden-gg/decoupled-style-descriptors -eforebrahim/Cassava-Leaf-Disease-Classification -freddyaboulton/all_demos_3 -bigscience-data/bloom-tokens -sandeepmajumdar/Bloom-Slim-Text-Generation -versus666/ml_message_moderation -rbk1990/PersianChatRobot -Dimitre/sentence-similarity-use -CorvaeOboro/gen_ability_icon -afiz/sepia-image -panpan06/Image2OCR -loss4Wang/architecture_styles -mvsrujan/Damage_Type_Classifier -fornaxai/RNet -joao-victor-campos/netflix-recommendation-model -marioboy/doom -bigscience-data/bloom-tokenizer-multilinguality -Detomo/audio-stream-translate -eson/kplug -TheTimeTraveller/StableDiffusion -kevintang513/watch-watcher -freddyaboulton/Model3D -EuroSciPy2022/xgboost-income-prediction-with-explainability -EuroSciPy2022/timeseries-forecasting-with-prophet -GoldMan/img2prompt -Gurudev/youtube_timestamper -charlesnchr/ML-SIM -EuroSciPy2022/clustering -Norod78/WoWQuestTextGenerator -rainfly/test_speed -ai-forever/mGPT-armenian -paragon-analytics/Persuade -nrjvarshney/quiz -topcla/img-similarity -akpoflash/product-categories -lbourdois/Language-tags-demo -KevinGeng/Laronix_voice_quality_checking_system_FILEIO -Shredder/CONBERT-2 -DelinteNicolas/SDG -rrighart/color-tags -gradio/text_analysis -penpen/chinese-webnovel-translator -roborovski/Diffusle -coltonalexander/datasets -doevent/VintageStyle -tjburns/ask_marcus_aurelius -adirik/maskformer-demo -amsterdamNLP/contrastive-pairs 
-kkpathak91/Image_to_Text_Conversion -Timjo88/toy-board-game-QA -hashb/object-detection-yolo -gradio/musical_instrument_identification -Armandoliv/cars-parts-segmentation-resnet18 -ThankGod/anime-gan -merve/gradio-analysis-dashboard -Billyosoro/ESRGAN -PaddlePaddle/solov2 -1nferno/Imdb_sentiment -gradio/fake_diffusion -buzzChukomi/sd_grad -unilux/ASR_for_Luxembourgish -DanielPinsk/StableDiffusion -freddyaboulton/structured-data-classification -gradio/fake_gan -gradio/blocks_kinematics -gradio/image_classifier -sneedium/PaddleOCR-ULTRAFAST -datnth1709/FantasticFour-S2T-MT-demo -coutant/yolo-person -prismosoft/wav2lip -rachith/ZeroShot_StanceDetection -priyank-m/vit-bert-ocr -multimodalart/saymyname -fffiloni/stable-diffusion-touch-of-paint -taskswithcode/semantic_search -vincentclaes/emoji-predictor -ysharma/gradio_sketching_inpainting_LaMa -TeamHaltmannSusanaHWCEO/Fire-DiffusionV0.1Beta -nickmuchi/Plant-Health-Classifier -taskswithcode/semantic_clustering -coutant/back-translation -sohomghosh/FLUEnT -ltgoslo/ssa-perin -jeonsworld/whisper-medium-ko -ashiqabdulkhader/GPT2-Poet -Imran1/Yelp-reviews -introduck/introduck -Msp/Invoice_DocQA -AIZ2H/04-Gradio-SOTA-Seq2Seq-AutoQA -andresgtn/bean-leaf-health-classifier -manishjaiswal/01-3DModel-GradioDemo -manishjaiswal/02-Gradio-Art-From-Text-And-Images-Demo -Jonni/03-Streamlit-Vido_ASR-NLP -manishjaiswal/03-Stremlit-Video-ASR-NLP-Demo -leilaglewis/03-Streamlit-Video-ASR-NLP -texantech/03StreamlitVideoASRNLP -djgoettel/03-Streamlit-Video-ASR-NLP -djgoettel/04-Gradio-SOTA-Seq2Seq-AutoQA -texantech/04-Gradio-SOTA-Seq2Seq-AutoQA -manishjaiswal/04-Gradio-SOTA-Demo -manishjaiswal/06-Streamlit-NLP-Image-Semantic-Search-Images-Demo -Corran/qnagenerator -manishjaiswal/07-GraphViz-PyDeck-Map-AIUIUX-Demo -manishjaiswal/08-Search-Streamlit-Session-State-QueryParameters-Demo -manishjaiswal/09-Gradio-Multilingual-ImageToOCR-Demo -freddyaboulton/gradio-subapp -ahmedghani/whisper_asr -eliwill/ask-a-philosopher -steysie/sc_whisper -tkurtulus/sea-animals-classification -awacke1/BlenderbotGradioChatbotSOTA -micole66/hhhhhhhhh -lcw99/test_korean_chit_chat -kornia/geometry_image_transform_with_kornia -vonewman/my-sentiment-analyzer-app -AI-Zero-to-Hero/01-H5-Play-Canvas-Sim-Physics -AI-Zero-to-Hero/09-SL-Live-RealTime-Dashboard -open-source-metrics/transformers-checkpoints -sergiomar73/nlp-gpt3-zero-shot-classification-app -avatar2k/02-H5-AR-VR-IOT -venz/AW-02-H5-AR-VR-IOT -venz/AW-04-GR-Seq-2-Seq-QA-Auto-Gen -skura/sk-06-SL-AI-Image-Music-Video-UI-UX-URL -SantoshKumar/06-SD-SL-AI-Image-Music-Video-UI-UX -venz/AW-06-SL-AI-Image-Music-Video-UI-UX-URL -tomaseo2022/mp3-a-texto -itmorn/face_keypoint -lcw99/ko-dialoGPT-Korean-Chit-Chat -damilojohn/text-descrambler -osanseviero/riiaa -tumuyan/demucs -evawade17/Skin_cancer_detecter -kivantium/anime-pose-estimator -innocent-charles/Swahili-Question-Answer-App -ambreshrc/Docx_File_Translator -fsdl2022emotion/meme-manipulation-gradio-space -samusander/Create.Ai -AFCMEgypt/AFCM_iGEM_LFA -Joabutt/Colourizer -Tanapol/object_detection -ajayhk/JPEGArtifactRemover -egan/clothing-attribute-recognition -pratikskarnik/Indian-Food-Recognition -FelixLuoX/stable_diffusion_test -TusharNautiyal/Music-Genre-Classification -Callimethee/Imagine-CR -shripadbhat/Clinical_Note_Question_Answering -Ivanrs/canny-edge-detector -marmg/zshot -tomaseo2022/Text-a-Voz -datasciencedojo/Describe-Dataset -abdellatif/pokemon-detector -eskayML/cat-and-dog-classifier -shainis/book_reviews -AFCMEgypt/WCB -sourav11295/Blockchain -csanjay/DR_Predictor 
-r1391819/financial-researcher -for876543/plant-id-3 -binxu/Ziyue-GPT -gradio/stt_or_tts -gradio/video_component -ClaudioX/mg_sd_esp -17TheWord/RealESRGAN -williambr/NLPSentenceSimilarityHeatmap -williambr/CSVAnalyzer -cxeep/whisper-webui -razfar/anything-counter -nikhedward/ask_me_anything -binxu/Ancient-Chinese-Add-Punctuation -Andy1621/IAT_enhancement -lkw99/K_AnimeGANv2 -Colbe/basketball -evawade17/acne_detector -hshetty/movie-poster-generator -maisarah1109/autism_screening_on_adults -micole66/bloomz -determined-ai/detsd_demo -gbharti/fastai-model-deploy -kabita-choudhary/get_text_from_video -HemanthSai7/IntelligentQuestionGenerator -awacke1/AW-01ST-CSV-Dataset-Analyzer -cadige/05GR-Image-To-Multilingual-OCR -jthteo/hokkientranslator -indichealth/indic-health-demo -infinfin/style-transfer -jaybeeja/age_predictor -tomaseo2022/Eliminar-Fondo-Imagen -sswam/photo-checker -gradio/dashboard -ierhon/codegen -TusharNautiyal/BTC-Prediction -gradio/reverse_audio_main -souljoy/chinese_lyric_generation -breezedeus/antiOCR -awacke1/Biomed-NER-SNOMED-LOINC-CQM -thapasushil/Multiverse -nedtheminx/nllb-translation -airus/ss -Akshay-Vs/GPT-Based-Generator -xszqxszq/sovits-svc-mix -Lwhieldon/Fall22_UMBC606_AbstractSummarization -joaofranca13/CESAR-NN-Human-Expression-HF -KrishnaBakshi1/YoutubeVideoSummarizer -Ramos-Ramos/emb-gam-dino -datasciencedojo/Transcription -galopyz/Alien_vs_Ghost -eradhea/spanish_chat -elonmuskceo/shiny-orbit-simulation -AllAideas/SegmentacionVideo -awacke1/Z-3-ChatbotBlenderBot-GR -jinhybr/OCR-Receipt-Donut-Demo -Soumen/transform_image -maisarah1109/stock-prediction -Soumen/Text-Summarization-and-NLP-tasks -hzrr/dal_audio_inference -Arnaudding001/OpenAI_whisperLive -jinhybr/OCR-Invoice-LayoutLMv3 -spondej/stabel-diffusion-z-1.5 -brooksjordan/galadriel -mrfakename/neon-tts-plugin-coqui -ShapeNet/shapenet-explorer -BairaS/Tabular_ML -knkarthick/Meeting-Demo -Junlinh/memorability_prediction -johngoad/prompt-extend -barretto/sd4fun -pmgautam/english-to-nepali-translation -HenryNavarre/CarlosDrummondAndradeGenerator -ryu-akm/PetVision_37 -lvwerra/in-the-stack-gr -profoz/index_demo -RamAnanth1/Transcript_PDF -JackerKun/Text-to-Image-search-using-CLIP -AIZerotoHero-Health4All/01-Gradio-Speech2Text2Speech-AIPipeline -tomaseo2022/Whisper-Youtube -alecmueller/01-Speech2Text2Speech-GR -CarperAI/pile-v2-eda -Sup3r/img-to-music -jeycov/IsaTronDeteccion -akhaliq/redshift-diffusion -drift-ai/emoji-tagging -drift-ai/emoji-predictor -kittyposter12/Dungeons-and-Diffusion -Karwasze/Whisper-ASR-youtube-subtitles -sabre-code/Flower-Classification -tvt/Real-CUGAN -shiwan10000/CodeFormer -cmudrc/microstructure-strain -MEKHANE/Deforum -farkmu45/instagram-clothes-psychology-streamlit -xfh/min-stable-diffusion-web -Zeng1/Predict_furniture_weight_by_apparent_features -ugursahin/MovieSuggest -robin0307/MMOCR -sasha/AI_Carbon -BuBBLe1q/anything-v3.0 -victor/dreambooth-training -daayros/anything-v3.0 -weidacn/deepdanbooru -akhaliq/anything-v3.0-1 -amirhnikzad/MLSG_01 -fightglory/YoloV4-Webcam -bumsika/Redshift-Diffusion-Demo -Rowanchav/anything-v3.0 -Lwight/Ghibli-Diffusion -greendra/ultsd -israelgonzalezb/stable-diffusion -fadhilsadeli/Muhammad_Fadhil_Sadeli_HCK002 -bobsingh149/chestxray-classification-streamlit-demo -os1187/docquery -nadiaoktiarsy/deployment -sleepyml/colorizer -akhaliq/stable-diffusion-2 -pierretassel/JobShopCPRL -akhaliq/vox2 -Rain-2008730/TXT_GENERATOR_69420 -Ipkc/text_generator -Tinki/text_generator -Matthew567/text_generator -tenslai/mianhuatang -charly/text-to-speech 
-jerpint/babelfish -akhaliq/knollingcase -ORI-Muchim/MarinTTS -mbarnig/Mol_mer_e_DALL-E2_Bild -eskayML/object_detection_system -sachit-menon/classification_via_description -snoop2head/privacy-filtering-ner -eskayML/IMAGE_CAPTIONING -MirageML/lowpoly-town -MirageML/lowpoly-landscape -MirageML/lowpoly-game-building -calebaryee321/Whisper2Image -nightfury/Neural_Style_Transfer -vladocar/3dfood -os1187/pii-anonymizer -Hellisotherpeople/DebateKG -getrajeev03/text2sql -Xhaheen/GPT-JT-sallu -fjenett/GPT-JT -knkarthick/Meeting-Use-Cases -tomsoderlund/swedish-entity-recognition -yulet1de/StableDiffusion2 -akhaliq/AltDiffusion-m9 -Pfs2021Funny/Text-to-Music-ExtendedVersion -svjack/Question-Generator -amankishore/sjc -awacke1/AICodeFly -BLACKHOST/timer -BLACKHOST/Date -akdeniz27/zero-shot-text-classification-with-multilingual-t5 -vutuka/nllb-vutuka-translation -Samood/whos_dat_doggo -NicolasVana/image-captioning -cmudrc/microstructure-data-explorer -akhaliq/gigafractal2-diffusion -AshtonIsNotHere/xlmr-longformer_comparison -shripadbhat/whisper-demo -scikit-learn/pickle-to-skops -trysem/Vector-diFusion -tillyu/Emojimotion -alex42t/EssayChecker -mrfarazi/hairnet2-online -AlexMo/audio_summarizer -AndySAnker/DeepStruc -akhaliq/papercutcraft-v1 -Patt/demo_eng_ara_translate -segestic/COVIDPrediction -Ali-Maq/Calorie_Calculator -pragnakalp/bert_based_ner -pip64/generator-oskov -shivkumarganesh/whisper-demo-hi -antreyes/stabilityai-stable-diffusion-2 -4eJIoBek/Stable_Diffusion_1.4_openvino -freddyaboulton/openai-whisper-large -VaneM/Stable-Difussion-basic-app -profnecrya/T9_But_Bad -YuhangDeng123/Whisper-offline -liorda/chatGPT -razielpanic/CompVis-stable-diffusion-v1-4 -zhukovsky/Awais-Audio_Source_Separation -patsypatsy/gyijhmjm -osanseviero/livebook -Datasculptor/DescriptionGPT -PushkarA07/image-colorizer -pierreguillou/whisper-demo-french -Xhaheen/whisper-to-chatGPT -anaclaudia13ct/insect_detection -daveward/smaragd-hentaidiffusion -Malifex/flax-anything-v3.0 -ygangang/VToonify -vuu10/EnzRank -hetorol845/MiDaS -carlosalonso/Detection-video -kokuma/img-to-music -nbroad/openai-detector-base -IHaBiS/wd-v1-4-tags -HIT-TMG/dialogue-bart-large-chinese-DuSinc -pragnakalp/biobert_based_ner -tarteel-ai/demo-whisper-tiny-ar-quran -Heckeroo/Cyberpunk-Anime-Diffusion -Dogge/bigscience-bloomz-7b1 -JimmyTarbender/GPT2HistoryEvents -neuralmagic/nlp-text-classification -awacke1/DatasetAnalyzer1215 -bobathetheft/webui -SDbiaseval/find-my-butterfly -harish3110/document-parsing-demo -userzyzz/riffusion-riffusion-model-v1 -ThirdEyeData/ChangePointDetection -sugarbee/stanford-crfm-pubmedgpt -Xhaheen/Children_of_heaven -Lelliam/text_generator1 -Wootang01/text_generator_gpt3 -society-ethics/find-my-sea-slug -esencb/web -ameya123ch/FakeNewsDetector -pat229988/NLP-Audio-summarizer -ybelkada/blip-api -AriusXi/CodeGenerator -pragnakalp/Text_Summarization -alkzar90/rock-glacier-segmentation -EyeSeeThru/openjourney -Danielito/webui -ThirdEyeData/image_bluriness_prediction -AkashKhamkar/Job_Search_Engine -Hisjhsshh/dreamlike-art-dreamlike-diffusion-1.0 -zvam/hakurei-waifu-diffusion -facebook/Speech_Matrix_Demo_on_GPU -MountLiteraSwd/mount_ai_school -NickOrion21/stabilityai-stable-diffusion-2-1 -moscartong/LookingGlassRGBD -ramdane/search_jurist -tarteel-ai/whisper-base-demo-quran -hrishikeshagi/ImagetoText -BasToTheMax/TTS -Ariharasudhan/XAI_Class-Activation-Maps -cahya/websocket -PBJ/Toxic-Comment-Classification -AiiluoChen/webui -mrsteyk/mrsteyk-openchatgpt-neox-125m -johnslegers/custom-diffusion 
-ThirdEyeData/Occluded-House-Prediction -bigcode/santacoder-endpoint -ybelkada/cocoevaluate -tommy24/chatGPT2 -mrciolino/ppt_owl_vit -Rubens/recruiting -S0h9l/Coherent_Speech -Fuyuka29/Anime_Background_Remover -adpro/dpt-depth06 -IntelligenzaArtificiale/code-generation -syy404/whisper-webui -russellc/BLIP -RoAr777/fer -xelu3banh/AnimeGANv3_01 -Eduger/webui -Pudding/Anime-or-Real -GodParticle69/minor_demo -rifkat/Uz-NER -k2s0/prayer-generator -jgentes/demucs-gpu -nambiar4/DR-BERT -shubham1302/movie_recoomender_system -MLearningAI/AIart_sources_of_inspiration -pushkarraj/opt355m_paraphraser -SmartPy/ScisummNet -alexalmighty/dreamlike-art-dreamlike-diffusion-1.0 -marianna13/search-inside-a-video -natvill/stable-diffusion-webui -rifkat/UzGPT-uz -keremberke/football-object-detection -baffledexpert/roberta-base-openai-detector1 -keremberke/csgo-object-detection -sham-ml/crack_detection_classifier -om-app/chatGPT -keremberke/construction-safety-object-detection -imperialwool/funapi -keremberke/nfl-object-detection -awacke1/DockerImageRecognitionToText -harshasurampudi/car_or_truck -cjayic/soft-vc-widowmaker -daibs/bananafreshnessclass -vinayakdev/qa-generator -Shrikrishna/Which_Bollywood_Celebrity_Are_You -VISION23/V23ChatBot -nvshubhsharma/wav2lip_demo_test1 -keremberke/forklift-object-detection -Rmpmartinspro2/Waifu-Diffusers -Thabet/color-guided-wikiart-diffusion -ibvhim/Gradio-Apps -mrfshk/paint-diffusion -NikolaiB/Animal_Classifier -mrrandom123/Book_recommendation -ai4bharat/IndicNER -jlazoff/biblical-summarizer -masoodkhanpatel/twitter-trends-qatar -lion-ai/CBC-covid -Daniel947/stabilityai-stable-diffusion-2-1 -CC26011988/Opposition_Analysis -johnslegers/epic-diffusion-inference -keremberke/smoke-object-detection -ThirdEyeData/TagDiciphering -keremberke/aerial-sheep-object-detection -sarinam/speaker-anonymization-gan -nightfury/whisperAI -pianoweb/youtube-whisperer-pianoweb -QINGFNEG/White-box-Cartoonization -Eyeszik/webui -creative-ai/creative-demo -Kangarroar/streamlit-docker-example -lafi23333/aikomori -YuraM/Stable-Diffusion-Protogen-webui -rituthombre/QNim -jlmarrugom/voice_fixer_app -MINAMONI/anime-remove-background -jroust/darkstorm2150-Protogen_v2.2_Official_Release -ajcdp/Image-Segmentation-Gradio -juanpy/videoresumen -breadlicker45/Muse-gen -ussrcccp/Real-CUGAN -GT4SD/paccmann_gp -hf-hackathon-2023-01/Spotify -group2test/stable-diffusion-v1-5 -cynika/NFT_avatar -GT4SD/polymer_blocks -Nickhilearla135095/Google-Drive -Mackiemetal/dreamlike-photoreal-2.0 -yaklion/youtube -remzicam/XAI_privacy_intent -ShibaDeveloper/Text-To-Image -tomaseo2022/Youtube-Mp3 -TCheruy/SRGAN -peteralexandercharles/runwayml-stable-diffusion-v1-5 -awacke1/NLPContextQATransformersRobertaBaseSquad2 -Voicelab/vlT5-keywords-generation -kavi1025/Youtube-Whisperer -JUNGU/yolov8 -gpt3/travel -qisan/Depressed_sentimental_analysis -robosapiens/color-range-classifier -hakanwkwjbwbs/Linaqruf-anything-v3-better-vae -TheFellow42/webui -Lewdgirl89/Waifu-AI-WebUI -PhenixNova/Audio-VideoTranslator -zjrwtx/xiaoyi_drawing -amsterdamNLP/value-zeroing -mcbrs1/AskQ -ClassCat/wide-resnet-cifar10-classification -aliabid94/crossword -EDGAhab/Paimon-Talking -FKBaffour/Gradio_App_for_Sentiment_Analysis -Humbert/mmcls-retriever -bstrai/classification_report -TheWolf/DreamlikeArt-Diffusion-1.0 -FloydianSound/Redline_Diffusion_V1-5 -ClassCat/ViT-ImageNet-Classification -starlit7/KorPoliticsTTS -Yilin98/Stock_Prediction -teamnassim/Room-Occupancy-App -Sygil/INE-dataset-explorer -joonkim/bert-political-sentiment-analysis 
-Kamtera/persian-tts-mimic3 -kadirnar/yolor -rajistics/shiny-kmeans -ExperimentalAI/epic-diffusion -Fr33d0m21/Remodel_Dreamer -nyvrx/VoiceChat -Munderstand/sd-img-variations -Munderstand/whisper-to-chatGPT -Mileena/anything-v3.0 -eeyorestoned/midjourney-v5 -yukie/yukie-sovits3 -innnky/visinger2-nomidi -ItsJayQz/BreathOfTheWild_Diffusion -williamcfrancis/Deep-Blind-Motion-Deblurring -Jimmie/snake-species-identification -xiaomifan/anime-remove-background -society-ethics/ethical-charters -giustiniano/real_estate_classifier -CarlosMF/AI-ORUS-License-v1.0.0 -Alven/background-remover -JosePezantes/Violencia-politica-genero -nnaii/White-box-Cartoonization -eeyorestoned/maximum_diffusion -yuichi/pdf-ocr -trysem/coloria -trysem/visua -huang4414/White-box-Cartoonization -krrishD/Langchain_Code_QA_Bot -trysem/parrot-paraphraser -Zkins/Timmahw-SD2.1_Pokemon3D -Say123/Promting-Generative-Models -manish-pro/dL_avengers -awacke1/Try.Playing.Learning.Sharing.On.This -AEUPH/SENTIENCE_PROGRAMMING_LANGUAGE -BilalSardar/Like-Chatgpt-clone -andrewgleave/tokbot -yugan/summarize -saurav-sabu/QR-Code-Generator -tanav2202/captcha_solver -ThirdEyeData/Customer-Complaints-Categorization -thejagstudio/picxai -NeuroModern/MidJourney-SD-finetune -Duskfallcrew/prompthero-openjourney -Duskfallcrew/DreamlikeArt-PhotoReal-2.0 -deven367/yt-video-annotator-hf -mdj1412/movie_review_score_discriminator -Rbrq/DeticChatGPT -akashAD/yolov5-classify -Asahi402/Real-CUGAN -akhaliq/China-Chic-illustration -DataScienceGuild/WikipediaAIDataScience -curiousily/layoutlmv3-financial-document-classification -Duskfallcrew/lambdalabs-sd-pokemon-diffusers -Mixing/anime-remove-background -szk1ck/word_cloud -awacke1/NSFW_text_classifier -awacke1/google-flan-t5-base -awacke1/google-flan-t5-xl -awacke1/PubMed-Parrot-Paraphraser-on-T5 -ZilliaxOfficial/nyaru-svc-3.0 -mskov/whisper_fileStream -geloku/ai-academy -SpringAI/AiGenImg2Txt -Daniton/midjourney-singular -kohrisatou-infinity/KIP_01_beta -thoucentric/Shelf_Objects_Detection_Yolov7_Pytorch -adirik/efficientformer -pngwn/music-visualizer -blogclif/7Prompts -DataScienceGuild/AI-DataViz-Graphviz -DataScienceGuild/DataViz-Mermaid -DataScienceGuild/DataViz-Plotly -mariashay/DataViz-Graph -Ppranathi/chatty-chat -Froleptan/lambdalabs-dreambooth-avatar -Frederick/Clause_Segmentation_and_Classification -kadirnar/classifyhub -WAT-ai-AA/stable-diffused-adversarial-attacks -akhaliq/CarperAI-diff-codegen-350m-v2 -nanom/to_passive_voice -alsrbdni/remove-from-photo-background-removal -LiuZiyi/1-image-img2txt-easyocr -dhanushreddy29/comparing-captioning-models -hanjp/White-box-Cartoonization -awacke1/Google-Maps-Web-Service-Py -awacke1/Gradio-Maps-Latitude-Longitude -Amr453/Transcription -WhisperAI/WhisperAIWeb -LangChainHub-Prompts/langchain_submission -joacoetruu/telegram-bot-paraphraser -jannisborn/paccmann -123aa/pastel-mix -Datatrooper/boston_housing -asalhi85/DemoSmartathon -akshatsanghvi/spam-email-detection -nateraw/run-script-in-background -neuralmagic/image-classification -Stoa/budget_gpt -UmairMirza/Face-Attendance -dawood/audioldm-text-to-audio-generation -keneonyeachonam/Biomed-NER-AI-NLP-CT-Demo1 -awacke1/runwayml-stable-diffusion-v1-5 -rdp-studio/bili-nft-avatar -ismot/hel10 -active-learning/webhook -Lookimi/Interface -devashish07/food_vision_mini -suvash/usk-coffee-convnext-nano -BreadBytes1/SB-Dashboard -haoqi7/images -joshipunitram/crowd-counting-p2p -Marian013/PPCTA-FRONTEND -awacke1/DockerGoFlanT5 -jesherjoshua/faceai -satozen/openai-whisper-large-v2 
-mrm8488/santacoder-dockerfiles-completion -GiladtheFixer/image-variations -felixz/Flan-T5-experiment -ThirdEyeData/Semantic-Search -csuer/nsfw-classification -yonikremer/grouped-sampling-demo -Joyeux/andite-anything-v4.0 -multimodalart/TAV-poli-2 -LightChen2333/OpenSLU -shnippi/Email_Generai-tor -UserXTheUnknown/stablediffusion-infinity -lhkhiem28/A-recognition-system -Noobian/DuaGenerator -demo-org/doccano -awacke1/microsoft-BioGPT-Large-PubMedQA -kaisugi/academic-paraphraser -Reggie/utilities2 -victor/ChatUI -rasyidf/coffee-grader -merve/deprem-ocr-migrate-ner -imseldrith/ChatGPT-Detection -deprem-ml/deprem-ocr-test -devoworm-group/membrane_segmentation -AyushP/PolicyCompareBot -devoworm-group/Lineage_Population -cloud-sean/AOAI-Form-Recognizer -ThirdEyeData/Object_Detection -Fazzie/Pokemon-GAI -Jasonyoyo/CodeFormer -awacke1/PandasDataframeAutoFilterStreamlit -nikitalokhmachev-ai/corner-detection -AI-Naga/Vehicle_Damage_Detection -imseldrith/BookTODataset -Jeffsun/LSP-LearningandStrivePartner-Demo -bigcode/santacoder-tokens -deprem-ml/deprem_keras-satellite_semantic_mapping-challange -harley001/anime-remove-background -zishuqianyu001/img-to-music -curtpond/mle10-glg-demo -flash64/biogpt-testing -Duskfallcrew/photography-and-landscapes -Duskfallcrew/duskfall-s-general-digital-art-model -sujithvamshi/vehicle-color-recognition -Adr740/Hadith_AI_Explorer -skadio/Ner4Opt -seawolf2357/sd-prompt-gen -Duskfallcrew/duskfall-s-vaporwave-aesthetic -Duskfallcrew/duskfall-s-manga-aesthetic-model -yuan2023/Stable-Diffusion-Prompt-Generator_App -zjunlp/MKG_Analogy -seayao/lambdalabs-sd-pokemon-diffusers -gato001k1/maximum_diffusion0k -society-ethics/DiffusionFaceClustering -vincentclaes/pdf-ocr -gronkomatic/Image-Animation-using-Thin-Plate-Spline-Motion-Model -mindspore-ai/Wuhan-LuoJiaNET -AIFILMS/scene-edit-detection -AIFILMS/Image-Animation-using-Thin-Plate-Spline-Motion-Model -RamAnanth1/iclr2023 -hra/music-recommendation -sandy9808/EleutherAI-gpt-j-6B -ThirdEyeData/Complaints_Roberta -lfoppiano/grobid-superconductors-tools -Chloe0222/Chloe -Purple11/Grounded-Diffusion -awacke1/GradioContinualGenerator -bhautikj/sd_clip_bias -projekt-rising-ai/Expert-Answer-Demo -hra/ChatGPT-Keyword2Blog -Podtekatel/Avatar2VSK -gradio/bokeh_plots -slush0/petals-playground -xiaoxin1111/vits-uma-genshin-honkai -MedicalAILabo/Xp-age -JeffJing/ZookChatBot -zss2341/chatgpt_with_email_password_logging -jvcanavarro/traits-prediction -RaidedCluster/Sniffusion_PomerAInian -ismot/1802t1 -HarshulNanda/EngHindi -XlalalaX/VITS-Umamusume-voice-synthesizer -awacke1/Sankey-Snacks -awacke1/AIOutline -Reha2704/VToonify -awacke1/AI-RPG-Self-Play-RLML-Health-Battler-Game -Covert1107/sd-diffusers-webui -Uday-07/testing -achimoraites/Summarizer-flan-t5-base-samsum -Paulog731/SD-2.1-Img2Img -awacke1/StreamlitSuperPowerCheatSheet -Thafx/sdlomo -molok3/alea31415-onimai-characters -passaglia/yomikata-demo -tarjomeh/Norod78-sd2-cartoon-blip -decluster/airplane_yolov5 -kermitt2/softcite-software-mentions -LearnableAI/FinTextSummaryDemo -king007/table_extraction -awacke1/SMART-FHIR-Assessment-Observation-SDKs -Dao3/DreamlikeArt-Diffusion-1.0 -bprzy/orchestration -SRDdev/Scriptify -Robotanica/trashsort -SUPERSHANKY/ControlNet_Colab -Dao3/MagicPrompt-Stable-Diffusion -portal/Multidiffusion -Mattdoc99/CollisonChat2 -csuer/vits -spacerini/chat-noir -zhongkaifu/medical_qa_chs -portal/Control-Nets -AlexWang/lama -zeno-ml/langchain-qa -fredinrh2026/Video-Games -Thafx/sdpp -mosidi/fi-ber-detec-api -HenryRom/MovieReccomender 
-Mileena/claudfuen-photorealistic-fuen-v1 -awacke1/VizLib-TopLargeHospitalsMentalHealth -awacke1/StreamlitWikipediaChat -maodd/chatgpt-clone -ahishamm/Whisper_STT -podsni/twitter_sentiment_id -shibing624/asian-role -spacerini/code-search -awacke1/VizLib-KeywordExtraction-Clustering-Translation -qwertyuiee/AnimeBackgroundGAN -0xJustin/0xJustin-Dungeons-and-Diffusion -cass1337/sdcharactercreator -trysem/bukGPT -ArtificialArtist007/Rate-my-Aiart -B-patents/patent-bert -Dao3/OpenArt -Shad0ws/Ask-Questions-to-Data -DReAMy-lib/dream_II -Stanlito/Bird_species -Thafx/sddlpr2 -ewgewgewg/IndexingAlpha -ulysses115/vits-models -30Kanika/disease-classifier -trysem/vintager -anon9i9/finetuned_diffusion_test -ai-art/upscaling -StealYourGhost/Joeythemonster-anything-midjourney-v-4-1 -arpitr/end_to_end_ml_app -JunchuanYu/Tools -DavidWeiZhang/sd-dreambooth-library-avator-generator -shreydan/youtube-QandA -awacke1/Github-Create-Read-Update-Delete -EcoCy/LoRA-DreamBooth-Training-UI -gregojoh/layoutlmv3_document -awacke1/Sentiment-analysis-streamlit -awacke1/Machine-translation -awacke1/Sentiment-aware-chatbot -chasetank/owner-manual -awacke1/Topic-modeling -king007/biogpt-testing -sharmaanupam/eigenvectors -yiningmao/metaphor-detection-baseline -awacke1/GenAI-Generate-New-Data-Resembling-Example -awacke1/Creative-Potential-Music-Art-Lit -awacke1/Data-Synthesizer-Synthesize-From-Multiple-Sources -Alashazam/StoryGenerator -rogergou/facebook-tts_transformer-zh-cv7_css10 -GolDNenex/Super-Resolution-Anime-Diffusion -PirateXX/AI-Content-Detector-From-PDF -podsni/YouTube_Summarize_Hades -hhalim/streamlit_ChatGPT_Peer -awacke1/Daredevil-Text-Generation -mirzaburanali/project-caption-generation -AIFILMS/ControlNet-Video -RlxDrk/huggingtweets-dolceragazza26-femdomfusion-mistressleiaa -visjia/ChatGPTAPI -IDKiro/DehazeFormer_Demo -desenmeng/ChatGPT -luodian/LoRA-DreamBooth-Training-UI -SoftChinchilla/Guizmus-SouthParkStyle -awacke1/EB-StableDiffusion-1.5-ImageGeneration -nateraw/text-generation -ThirdEyeData/Health-Insurance-Cross-Sell-Prediction -lzghades/skybox -Detomo/Chatgpt_with_awesome_prompt -CobaltZvc/Hyper_Bot -awacke1/HTML5-BabylonJS-Javascript-3DAnimation -awacke1/HTML5-Aframe-Framework -awacke1/HTML5-Aframe-Augmented-Reality-Model-Viewer -akshatsanghvi/Rice-Disease-Classifier -awacke1/Mental-Health-ICD10-to-DSM -Kevin676/SmartAI -mginoben/tagalog-profanity-classification -pierreguillou/Inference-APP-Document-Understanding-at-linelevel-v2 -Armored-Atom/Image-To-Motion -liuxiaopai/chatgpt-demo -shigel/ailol -gilbertb/ChatGPTwithAPI -JunchuanYu/Sydney-AI -Spico/writing-comrade -Ainterface/compare-gpt-models -OgiKazus/vits-uma-genshin-honkai -cscan/vocal_remover -akshayvkt/talk-To-SteveJobs -taishi-i/awesome-japanese-nlp-resources-search -louis030195/lsd-pt -ParisNeo/FaceRecognition -ThirdEyeData/Semantic-Search-Transformer -thomasjeon/runwayml-stable-diffusion-v1-5 -Qosmo/music-search-demo -pavelwong/Aitrial -yuenkayi/textgenerator -dorischeng/textgenerator -HUIYI/huiyili -priyam314/Neural_Style_Texture -Mileena/nitrosocke-Arcane-Diffusion -awacke1/Text-to-Image-stabilityai-stable-diffusion-2-1 -GanymedeNil/text2vec -ReFenter/img-to-music -pjjuplo/runwayml-stable-diffusion-v1-5 -yukkzer/google-flan-ul2 -ysharma/bokeh_plot_diffusers -enoreyes/rembg_remove_bg -ixciel/img-to-music -buggyhuggy/Fictiverse-Stable_Diffusion_Microscopic_model -Lianglan/Demo_Gpt3.5-turbo_model -victor/tata -pelinbalci/easyocr -ronig/protein_binding_search -EnigmaOfTheWorld/sherlocks_phoeniks -jonigata/PoseTweak 
-hra/stable-diffusion-tee-shirt -JeremyK/JewelryVision -zetabyte/text-to-voice2 -huggingface/minichain -TBF/AutomaticDatavisualization -abrar-lohia/text-2-character-anim -harsh0706/research-summarizer -victor/models-inference -NoCrypt/promptinspector-abuser -RamV/ChatRobo_II -awacke1/Joke-Book-AI-Jokes -BilalSardar/Black-N-White-To-Color -Adr740/CV_XPLORER_POC -awacke1/HTML5-Javascript-3D-Breakout-Game -Ragnov/STT-Grammar-Checker -etahamad/new-plant-disease-detection -Jack7510/trychatgpt -FER-Universe/FER-Benchmarking -jsr90/laMoinsChere -Mendel192/SAN-Demo -spicysouvlaki/streamlit-shell -mano96/Content_Generator -AashishKumar/Restaurant_voice_chatbot -christhegamechanger/background_swapping -keras-dreambooth/marvin_paranoid_android -donnyb/FalconVis -S4NX/NSFWGPT -srush/minichain -xiazi/anime-remove-background -p1atdev/ZoeSeg -ysharma/visual_chatgpt_dummy -test1444/Pose_Video -baixing/hackathon_chatbot_baixing_api -basit123796/basit -deepakmangla/krystv-hestyle-diffusion -ceckenrode/AI-Dashboard-03142023 -MacYang/Diamond-Sutra -Yan233th/so-vits-svc-models -yorkliang/my_first_chatbot -AI-Dashboards/AI.Dashboard.HEDIS.Terms.Vocabulary -SanchezVFX/dis -AIFILMS/StyleGANEX -ilhamstoked/Classification-Skin-Cancer -gfhayworth/sales_qa2 -HMS1997/RepoGPT -hv68/sample_tool_1 -AI-ZeroToHero-031523/README -evi0mo/vits-fastapi-server -GuXiaoBei/wechat-chatbot -keras-dreambooth/voyager -NeuralInternet/Text-Generation_Playground -white7354/anime-remove-background -Shrey-Patel/background-remover -Dao3/Text-To-image-AllModels -amarzana/Drop_image_to_short_story -DrGabrielLopez/BERTopic -radames/Detecting-Photoshopped-Faces-FALdetector -fadyabila/Heart-Failure-Death-Prediction -bedrock123/chatroom -qinzhu/moe-tts-tech -Rifd/Face-Real-ESRGAN -Zwicky18/Stable-difussion -keras-dreambooth/living_room_dreambooth_diffusion_model -victor/website-designer -zhen86/fashion_mnist_homework -Sapiensia/MakerDiffusion -keras-dreambooth/nuthatch-bird-demo -PushkarA07/Cover-Gen-audio2image -Web3Daily/WebGPT3 -ypchang/European_call_option-volatility-gradio -NotSarah/GoldRushJohn -Ilean/pdfGPTv2 -mmkuznecov/faceblur -Elegbede/Time_Series_Prediction -LittleLirow/fearflixai -azizalto/sqlify -OedoSoldier/chatglm_int4_demo -MuhammedAyman29/mm -Akira12312/admruul-anything-v3.0 -ai-create/re-generic -fgbwyude/ChuanhuChatGPT -AIGC-Audio/Make_An_Audio_inpaint -shibing624/ChatGPT-API-server -jefftko/Stable-Diffusion-prompt-generator -a7med146235/Ahmed -Vgi/andite-anything-v4.0 -gradio/default -gradio/base -cloudqi/CQI_Texto_para_imagem_PT_v0 -ahmedghani/Editing-Tools -awacke1/BERTopic-Topic-Modeler-NLP-ML -rimeAI/rimeui -onursavas/document-layout-analysis -edoz1986/johnslegers-epic-diffusion -Notmodern/andite-anything-v4.0 -zekewilliams/ControlNet -saifytechnologies/ai-text-to-video-generation-saify-technologies -awacke1/THREEJS-ChatGPT-ASR-Wikipedia-Twitter-Sentiment-FactChecker-VoiceClone -xl2533/FinDoc -rwizard/Chatbot-AI -keras-dreambooth/dreambooth-bioshock -Saturdays/ClassificationPeripheralBloodCell -mikaelbhai/GPTBhai_text_history -jkompalli/plant_disease_detection -Mrleo/MyChatGPT -SQSora/VITS-Umamusume-voice-synthesizer -derek-thomas/disc-golf-simulator -thelou1s/MidJourney -Dao3/ChatGLM-6B -AI-ZTH-03-23/2.Streamlit.GraphViz.Dynamic.Architecture.Diagram -AI-ZTH-03-23/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5 -haakohu/deep_privacy2_face -Lippppxy/AiAnimeVoice -Heathlia/modelscope-text-to-video-synthesis -awacke1/RLHF.Knowledge.Graph.GraphViz.Dynamic.Architecture.Diagram -raghu8096/Medical-Image-Classification 
-all-things-vits/CLIPGroundingExplainability -lharr345/alecsharpie-codegen_350m_html -cariai/somos-alpaca-es -souljoy/Pokemon-Stable-Diffusion-Chinese -pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v2 -Ronit28/ChatGPT4 -oshita-n/ImageQuestionAnswerring -hackathon-somos-nlp-2023/discriminacion_gitana -d8aai/finance-dashboard -Michelangiolo/startup-finder -maitri-vv/Hrishikesh332-autotrain-meme-classification-42897109437 -xingzhehe/AutoLink -zanyPhi/cats_vs_dogs -ndshal/interior-decor -elitecode/ChatGLM-6B-ChatBot -Re1e9/DoodleDecoder -szk1ck/image-collage -FoxMeo/fire-detector -gptjx/02 -xcgc/SD-webui-controlnet-docker -huolongguo10/HlgBot -vjain/SemanticPlaigarismChekcer -MGLDZM/chgpt -felix-weiland/llama_index_demo -ClementBM/connectfour -Laihiujin/OneFormer -maxcembalest/ask-arthur -aksj/Sea_Shanty -kyleledbetter/responsibleGPT -RamAnanth1/Pix2Struct -kaushikdatta/generate-webslides -ReganMayer/ChatGPT44 -WhyLIM/ChatGPT-academic -Shahrukh2016/Netflix_Recommender_System -hackengine/Paraformer-for-Chinese-Podcast -awacke1/Flan-Upvote-Downvote-Human-Feedback -kirch/Text2Video-Zero -exnav29/Real_Estate_Bot -Mahendra-Mk65/Midjourney-Online -freddyaboulton/test-blue -maykcaldas/MAPI_LLM -cinika/andite-anything-v4.0 -keras-dreambooth/dreambooth_dosa -demongaara/Gaara-pokemon-stable-diffusion -lujkis/ChatGPT4 -asd123Xiao/kafuu_chino_sovits4.0 -majweldon/AIScribe -hersia/youtube-video-transcription-with-whisper -kukr3207/forex_demo -QinBingFeng/ChatGPT -Muennighoff/code_eval_octopack -Thafx/sdp -simpie28/VITS-Umamusume-voice-synthesizer -YenLai/Superhuman -kastan/ai-teaching-assistant-beta -sanjayw/GPT4All -Kevin676/ChatGPT-with-Speech-Enhancement -sklkd93/CodeFormer -firefighter/TransDis-CreativityAutoAssessment -coldlarry/lr_pdf -pierreguillou/Inference-APP-Document-Understanding-at-paragraphlevel-v3 -jiaqingj/ConZIC -king007/Stable-Diffusion-ControlNet-WebUI -AlexWortega/AlexWortega-instruct_rugptlarge -heliosbrahma/voice-assistant -varunrayen/banana-dev-GPTrillion -artemkramov/f-coref-ua -JohnTan38/ChatGPT_LangChain -fastx/Lisa-Chatbot -Ajaxon6255/Emerald_Isle -ayaderaghul/photo2monet -AUST001/HDTV -DD0101/Disfluency-base -Izaias/Joeythemonster-anything-midjourney-v-4-1 -tanvirsingh01/jokesapart -Syrinx/WebtoonPlotGenerator -Rakot2223/faster-whisper-webui -Kevin676/ChatGPT-with-Voice-Conversion -jonathang/RapGPT -Aaaaaaaabdualh/poetry2023 -Amon1/ChatGPTForAcadamic -Sapiensia/diffuse-the-rest -tekkonetes/Chatbots -stanciu/declare-lab-flan-alpaca-xl -stanciu/declare-lab-flan-gpt4all-xl -radames/openplayground -ieuniversity/flirtify -helenai/openvino_transformers_streaming -sklearn-docs/Visualizing_the_stock_market_structure -FourthBrainGenAI/FourthBrainGenAI-ProductSnapAI -Olivernyu/sentiment_analysis_app -stanciu/anon8231489123-vicuna-13b-GPTQ-4bit-128g -Kevin676/Real-Time-Voice-Cloning -haohoo/Azure-OpenAI-QuickDemo -Tobalog/Simplified_Chinese_to_Traditional_Chinese -EnigmaOfTheWorld/ChanakyaNeeti -SouthCity/ShuruiXu -EveryPizza/Cartoony-Gradio-Theme -Norod78/distilgpt2_TextIteratorStreamer -mostro3000/AlekseyKorshuk-vicuna-7b -awacke1/Docker.VSCode.Integration.HF -ghlee94/MEDIAR -dodoya1/youtube_transcript -arthurdias/Webui-Cpu-ExtensionV2-Publictest-WithCivitaiHelper -flowerpixel/tashachan28-ranma_diffusion -Kevin676/Speechbrain-Speech-enhancement -ShotaA/TalkTuner -pchuri/slack-summary-bot -Vijish/Image_generator -Soumahara/Ojimi-anime-kawai-diffusion-demo -nateevo/memero -sunnyzhifei/ChatGPTOnline -milex-info/rave-inf -jmourad/TXT2IMG-MJ-Desc 
-Kevin676/Alpaca-LoRA-with-Voice-Cloning -franever/Pix2Pix-Video -Mecca/whisper-webui -Usually3/multilingual_vcloning -jhj0517/Segment-Anything-Layer-Divider -dhavala/KrishiGPT -jdinh/freeze-detection -jordonpeter01/dreamlike-photoreal-2.0 -younus93/pdfgpt -joshen/gpt-academic -IAMTFRMZA/DreamlikeArt-Diffusion-1.0 -sklearn-docs/k-means-initialization-evaluation -musadac/VilanOCR-Urdu-English-Chinese -MashiroSA/sovits-emu-voice-transform -helliun/gpt4-associative-memory -sklearn-docs/voting-classifier-decision-surface -vg055/demo_analisis_de_sentimientos_textos_turisticos_mx_tipo -sklearn-docs/Incremental-PCA -sklearn-docs/Univariate-feature-selection -teamnassim/Fictionista -cfwef/gpt -Priyanka-Kumavat/Supply-Chain -NicolasvonRotz/Lego-Bricks-AI -SRankChatGpt/Presentation-Assistant -jax-diffusers-event/canny_coyo1m -Software-System/De-Anios-a-Meses -Tbryan2/AssistantGM -kazuk/youtube-whisper-11 -kazuk/youtube-whisper-16 -sklearn-docs/Compressive_sensing_Tomography_reconstruction_with_L1_prior_Lasso -tomemojo/customerservice -sklearn-docs/ward-hierarchical-clustering -sailormars18/Yelp-reviews-usingGPT2 -rzzgate/Stable-Diffusion-ControlNet-WebUI -briankchan/grammar -sklearn-docs/Inductive_clustering -bamitsmanas/breast-cancer-detection -wallezen/so-vits-svc -openpecha/chatbot_tibetan -SoulAbi/whisper-audio-text-speaker-recognition -YUANAI/DiffspeechResearch -UndueTarget/youtube-whisper -luckli/anon8231489123-gpt4-x-alpaca-13b-native-4bit-128g -charanhu/GPT-4 -Brofu/Joeythemonster-anything-midjourney-v-4-1 -weanalyze/analyze_url -ysr/quran-semantic-search -prithvihehe/TheBotFather -doevent/kd -lizhen30/LangChainGo -sklearn-docs/text-feature-extraction-evaluation -jonathang/EBookGPT -AI-Dashboards/ScrabbleSolverWordThesaurus -momegas/megas-bot -awacke1/Transcript-AI-Learner-From-Youtube -mair-lab/mapl -j-min/IterInpaint-CLEVR -alx-ai/Real-ESRGAN-Demo -vorstcavry/visualstudiocode -bert9946/frame-interpolation -Pranjal-666/Heart_Disease -AlhitawiMohammed22/CER_Hu-Evaluation-Metrics -zeno-ml/audio-transcription -edenehuyh/Demo_RealESRGAN -KunalSinha2024/cledgeEssayIdeationTool -realambuj/Text-Summarization_using_Bert -HuseynG/ECS7022P-WGAN-GP -parseny/youtube_comment_generation -vorstcavry/vits-models-1 -long1111/langchain-chatglm -xianbao/sd-to-diffusers -Synthia/ChatGal -Yeshwant123/mcc -AB-TW/team-ai -mehdidc/text_to_image_ddgan -keneonyeachonam/Memory-Chat-Story-Generator-ChatGPT-041723 -Minoumimi/WaifuMakinTime -Vasanthgx/demo_minima_vasanth -segments/panoptic-segment-anything-api -thinh-researcher/cord-v2 -linfanluntan/Grounded-SAM -cheetah003/HMMC_t2v_search -mthsk/sovits-100orangejuice -wangrongsheng/ChatCitation -charlesai/CLIP -Kabriske/Multilingual_Video_Subtitler -perezcatriel/data_world_jobs -Sky5408er/vits-uma-genshin-honkai -ychenNLP/easyproject -simonduerr/molstar-gradio -Cicooo/vits-uma-genshin-honkai -szzzzz/chatbot -knkarthick/chat-llm-streaming -syedusama5556/Image-Animation-using-Thin-Plate-Spline-Motion-Model -zhone/stabilityai-stablelm-base-alpha-7b -mahati/GFPGAN1 -ztudy/prototype -Kevin676/AutoGPT -Chirag1994/Melanoma_Skin_Cancer_Detection_App -Tej3/DepthEstimation -fashion-demo-organization/fashion_demo -Zeebra/chatGPT_whisper_AI_voice_assistant -awacke1/Wikipedia-Twitter-ChatGPT-Memory-Chat -biglab/webui-screenrecognition -gstaff/articulator -darthPanda/chatpdf -blueeyiz702/flax-midjourney-v4-diffusion -rizmyabdulla/Medicine_predictor -sklearn-docs/sklearn-spectral-clustering -1yukikaze/img-to-music -JasonData/MathGenerator -shireenchand/depth-map -luckwill/chiakicc 
-weidexu/ChatGPT-with-Voice-Cloning-for-All -yukiarimo/Uta-AI -iamkhadke/GeneralChatBot -hemanth-thaluru/sdm-image-colorization-prj -frostymelonade/roberta-small-pun-identification -Monosmarinos/Pix2Pix-Video -Loke-60000/mio-amadeus -aodianyun/panoptic-segment-anything -a-v-bely/russian-task-generator -edenehuyh/BLIQ_ImageCaptioning -hkayabilisim/LIME -Kyan14/Mood_Based_Generative_Art -matthoffner/gguf-maker -TechWithAnirudh/langchain-chat-with-pdf -chaocai/superbot -lmalta/PDF_Doc_Search -ysharma/Gradio_Client_Chains -pablovela5620/grounding-sam -ericsali/language_translator -wetey/Headline-Content-Generator -Nicholaspei/LangChain-ChatLLM -daydayup1225/Chat-web -ZJunTvT/ZJunChat -ChandraMohanNayal/AutoGPT -prerna9811/musicapp -mrloler/oai-claude -Ikaros521/so-vits-svc-4.0-ikaros2 -sooolee/summarize-transcripts-gradio -moha222/gpt2-wikipedia -sander-wood/tunesformer -better57/CHATGPT -ErtugrulDemir/TextSummarizing -ErtugrulDemir/SpeechEmotionRecognition -ondrejbiza/isa -Yati05/TF-CodeT5-base -amitjamadagni/qs-benchmarks -Shashashasha/so-vits-fork-yoshi -moplat90/Chart2Data -lincquiQcaudo/Top-20-Diffusion -DhanushPrabhuS/pothole_yolov8_nano -wadhwani-ai/KKMS-Smart-Search-Demo -Harshveer/Finetuned_Diffusion_Max -Arijit-hazra/my-image-captioner -Davidsamuel101/PPTGenerator -nihalbaig/layoutlmv3_official_document -Serg4451D/DALLE -cihyFjudo/fairness-paper-search -mira-causality/counterfactuals -Tj/langchain-chat-with-pdf -lamini/README -recenWmenso/ChatGPT-with-Voice-Cloning-for-All -Ryukijano/it-happened-one-frame-2 -ymcmy/highlighter_demo -duchaba/sd_prompt_helper -maurypb/Donald-trump-chatbot -ferdmartin/GradApplicationDocsApp -nomnomnonono/Sound-Effect-Search -Saiteja/leaf-ViT-classifier -1pelhydcardo/ChatGPT-prompt-generator -groupeonepoint/WritingAssistant -SAMControlNet/SyntheticDataSAM -matthh/joyous_poetry_generator -Zhenhong/text-to-image-Stable-Diffusion-demo -JFoz/CoherentControl -gojiteji/SDTextTransmitter -IkechukwuAbuah/PDF_GPT -feregVcuzo/sanity-test-midi -awacke1/Generative-AI-Writers-Dashboard -AlexKoff88/stable_diffusion -Saturdays/chatbot_refugiados -IdaLee/DrawEasy -SmartPoint7/TwitterPRO -jxu124/vits-genshin -Uvini/Hotel-Reviews -florim/MedGPT -SuCicada/Lain-TTS -JKLUCY99/voice-cloning -BetterAPI/BetterChat -Duskfallcrew/Free-Illustration-Mix -textToSQL/talk_to_NP -awacke1/AI-Standard-Operating-Procedures -jacinthes/PubMed-fact-checker -bastiendechamps/geoguessr-bot -huggingface-tools/image-transformation -Kaludi/VirtualBrainGPT -hacksberg/plant -gbharti/stable-riffusion-walk -OswaldDev/Image-enhancement -glitch0011/MendoBERT_NER -OswaldDev/webuih -trhacknon/webui -johnsu6616/prompt-generator -fkhuggingme/gpt-academic -RichardMB1217/blip2 -alitrack/ChatPDF -Longtong/foodvision_mini_video -Sarfraz/NousResearch-gpt4-x-vicuna-13b -MirageML/shap-e -megamined/voice-gpt -Arielliu/just_talk -Milancheeks/AI_Music_Team -ben-epstein/ner-spans-to-tokens-tags -TeamMlx/MagicPrompt-Stable-Diffusion -ArdaSaygan/PollGeneratorApp -ELEVEN-001/ChatToFiles -Littlehongman/CLIPGPT-ImageCaptioner -DaFujaTyping/second-webui-docker -nirali/microsoft-trocr-large-handwritten -mav735/mri-assistent -iremkrc/chatbot-demo -taesiri/ViTPose -Tj/LangChain-ChatGPT-plugins -PranomVignesh/Detecting-unauthorized-person-with-firearms -jayparmr/CyberRealistic -elpsycongroo19/simple_chatbot -wasimmadha/entity-extraction -abbbbbbbbbbbbbb/AraPoet -abbbbbbbbbbbbbb/poetry2023 -asifhugs/InfiniteGPT -felix-weiland/appstore-search -Alcedo/yunmedia -AI-Dashboards/Streamlit-Plotly_Graph-Objects 
-sklearn-docs/Factor-Analysis-with-rotation -dhuynh95/HuberChat -abbbbbbbbbbbbbb/Arabic_poem_classifier -eddie5389/Object-Detection-With-DETR-and-YOLOS -artqwu/gradio-demo -ALSv/midjourney-v4-1 -glrh11/object-detection -woshixuhao/Rf_prediction -awacke1/Gradio-Gallery-Iceland -eswardivi/ChatwithPdf -kevinwang676/Bark-UI-with-Voice-Cloning-2 -LecJackS/wolfram-alpha-query -LuxOAI/ChatGpt-Web -Crossbro/succinctly-text2image-prompt-generator -Jouaoutch/Gradio -AI-Dashboards/Streamlit-Markdown-ChatGPT-CCD -SUSTech/llm-evaluate -chrisbodhi/explo -danielpedriniportfolio/AutoDA -remilia/Ghostly -hbui/RegBot-Chat-with-Docs -VeryYouQ/dis-background-removal -Xh3liumX/PDFGPT_increasedSiz -chaowei100/ChatGPT_Taiyi-Stable-Diffusion -gradio-client-demos/text-to-image -banana-projects/datasets-card-creator -Chris4K/german-sentiment-bert -sklearn-docs/Manifold-Learning-methods-on-a-severed-sphere -matthoffner/ggml-llm-cuda -rubberboy/stable-diffusion-webui -RuijiaTan/MultiPrincipalElementAlloyPropertyPredictor -Godrose0728/Aisound02 -shravanrevanna/hdfc-bank-statement -Shubham89/Meshwork-chatbot -omi0k/LoRA-DreamBooth-Training-UI -Zenne/chatbot_self_query -neuralworm/vinyl_sound_generator -kasun/comparing-captioning-models -SlowBette/ChatBot_gpt3.5 -dassum/Face-Id-Recognition -momegas/wowonen -niuzhiwei/stabilityai-stable-diffusion-2-1 -KKMobile/MagicPrompt-Stable-Diffusion -Writer/token-counter -DEBO-PROJECT/DEBO-V1 -shamaayan/Wisi -awinml/api_vicuna-AlekseyKorshuk-7B-GPTQ-4bit-128g-GGML -Rebskii/rvc-models-test -omb23/pettrainingmodel -kiroiineko/rvc-models-tragamundos -Has-ai/text-speech -ogawa0071/cyberagent-open-calm-small -bingbing520/ChatGPT -yangliuyi601/rvc-models -mrungta8/CitationalAmnesia -m-a-p/MERT-Music-Genre-Tagging-Prediction -vanderbilt-dsi/grant-writing-assistant -eaedk/Agri-Tech -FYP-23-S1-21/Refineverse_Plugin -Najaf-Zawar/Image-Super-Resolution -Najaf-Zawar/Old_Image-Restoration -Mozira/voice-models -beomi/KoRWKV-1.5B -muheiroiro/youtube_comments_chat -DonDoesStuff/openjourney-v4-demo -ennov8ion/stablediffusion-models -loveu-tgve/loveu-tgve-leaderboard -nijatzeynalov/AzVoiceSent -DHEIVER/Alzheimer -xdstone1/ai-bot-demo -Bonosa2/movies -zhicheng127/White-box-Cartoonization -ewave/Image-Animation-using-Thin-Plate-Spline-Motion-Model -caliex/Comparison-of-Manifold-Learning-methods -nontGcob/T2E_Vocabulary_Exam_Generator -augmented-surveys/retrodict -lewtun/donut-docvqa -Q-b1t/Dog_Emotions_Vision_Classifier -YenJung/ECG_MAC -calihyper/choosa_txt_to_img -Sagar48/claudfuen-photorealistic-fuen-v1 -ThirdEyeData/Image-Blur-Prediction -Saba99/GPT4ALL -robyramos/teste_memoria-chat -nlp-waseda/Kanbun-LM -gundruke/ua-thesis-absa -Agusbs98/automatic-ecg-diagnosis -timdettmers/guanaco-65b-4bit -Bonosa2/dall-e_image-generation -caltex1/streamlit_pdf_gpt -hlydecker/ImageBind_zeroshot_demo -Toaster496/openaccess-ai-collective-manticore-13b -lordvader31/text-matching -sohojoe/project_charles -touchscale/img-to-music -matthoffner/local-llm-doc-chat -youkaiai/gpt -hkayabilisim/hdmr -MesutUnutur/text_to_image_generationn -sihar/Online_Payment_Fraud_Detection -xiangdy/chatGPT -vilsonrodrigues/youtube-retrieval-qa -hlydecker/langchain-chat-with-pdf-openai -sklearn-docs/Kernel-Density-Estimation -Annotation-AI/fast-segment-everything-with-text-prompt -naman7415963/next-word-prediction -sklearn-docs/Gaussian-Mixture-Model-Initialization-Methods -Deepsheka/newdemo-app -mindtube/maximum_multiplier_places -mokashaa/Movies-Recommendation-System -Ritvik19/VidScripter -giswqs/solara -Mansib/Allure 
-step-3-profit/Midnight-Deep -mindtube/protogen-models -willhill/stabilityai-stable-diffusion-2-1 -Daniton/facebook-blenderbot-3Byx -Luelll/ChuanhuChatGPT -zhuowen999/vits_chinese -EinsteinCoder/sf-voicebot -cyberspyde/chatbot-team4 -muttalib1326/YOLOv8-Industrial-Equipments-safety-Detection -awacke1/Streamlit-ChatGPT -whocars123/yea -sweepai/anthropic-tokenizer -ulysses115/Nogizaka46-so -swufewyd/xyz-nlp-XuanYuan2.0 -Menna2211/Text-Image -sanjayw/tts -ericjohnson97/gpt_mavplot -Ankita0512ghosh/Weather_bot -Kimata/multimodal-deepfakes -Hugorowan/BardJukebox -deepthiaj/Electro_oneAPI -ealbinu/automatic-speech-recognition -FourthBrainGenAI/DeepLearningAIDemoChatBot -animeartstudio/AnimeArtmodels2 -julien-c/duckdb-full-text-search -django-ochain/AI-market-researcher -Q4234/a1 -TeamMlx/ehartford-Wizard-Vicuna-30B-Uncensored -SagarDa/voice-to-image-generation -onereal/rvc-models-convertvoice -animeartstudio/AnimeModels -animeartstudio/ArtModels -JoanGiner/DataDoc_Analyzer -matthoffner/chatbot-mini -izumi-lab/stormy-7b-10ep -rootvisionai/few_shot_sam -SMD00/Image_Colorization -rgres/Seg2Sat -EllieSiegel/Falcon-40B -tomzhang1019/ChatGPT -Retinalogic/pastel-mix -gersh/OpenAssistant-falcon-40b-sft-top1-560 -BlitzenPrancer/TheBloke-guanaco-65B-HF -piusanalytics/Personal_Prompt_Engineer -psychpsych/emilianJR-CyberRealistic_V3 -akbojda/aquarium-object-detection -danieldux/isco-gpt -LuxOAI/HUXTT -RisticksAI/ProfNet3-Snapy-support-chatbot -chungsarit/ytdownload -arshian/linearepitopemodels -dragonSwing/annotate-anything -rfrossard/Image-and-3D-Model-Creator -mikeee/multilingual-dokugpt -amaanadeen/ChurnCustomer -kmfoda/bittensor_lmeh_evaluations -IoMa/diffusers-gallery -danielsteinigen/NLP-Legal-Texts -emc348/faces-through-time -ammansik/youtube_summarizer -anshu-man853/webscrapping -DarkyMan/URPM -Panel-Org/panel-template -SpacesExamples/Gradio-Docker-Template -grisiemjahand/Image-and-3D-Model-Creator -remyxai/image-directory-to-video-tool -yjw5344/Bard_API -kausmos/clothsy -vbzvibin/Text2SQL -sdeeas/ChuanhuChatGPT -KaraAgroAI/CADI-AI -ttt246/brain -duchaba/yml_humana -bilby/bilby-retrievalqa -Silence1412/Stable_Diffusion_Cpu -derinsu/Background_Generator -raseel-zymr/LangChain-Youtube-Script-Generator -Malmika/Osana-WEB-GPT -Manzoor22/ptx0-pseudo-journey-v2 -jsu27/decomp-diffusion -wong26/faster-whisper-webui -akshatjain1004/deepfake-detector-with-explainability -gtome/NousResearch-Nous-Hermes-13b -yfor/Bili-Insight -yrvelez/ggml_chat -hanstyle/tts -JUNGU/Talk2Carnegie -awacke1/ChatGPTStreamlit11 -omartine/prompt-generator -bright1/Sepsis-Prediction-API -mpatel57/ConceptBed -DHEIVER/Anomalias_no_Trato_Gastrointestinal -alirezamsh/rquge -Brasd99/AnswerMate -marcusj83/MusicGenbruh -allandclive/Uganda_MMS -NHNDQ/KoTAN -sharathraju/489 -Rehman1603/Video-To-Text -0xHacked/zkProver -kasun/blip-large -Azurro/APT-1B-Base -SujanMidatani/resume_details_to_questions -aidealab/interior-ai -leonelhs/deoldify -Neelanjan/MoodMelody -HuggingFaceH4/reward-modeling-chat-ui -robinhad/kruk -RegalHyperus/rvc-anime-game -faizhalas/coconut -Blackroot/Fancy-Audiogen -ml-energy/leaderboard -theodotus/pythia-uk -kitrak-rev/AI-Clone -upthrustinc/seoAnalyzerGPT -Malmika/Physics-AI -Amrrs/QR-code-AI-art-generator -OptimalScale/Robin-33b -onursavas/Chat_with_PDF -FabioZe/WizardLM-WizardCoder-15B-V1.0 -odettecantswim/rvc-mlbb -Illumotion/Koboldcpp -tanminggang/Norod78-sd15-caricature-portraits-blip-captions -allknowingroger/New-Image-Models-Testing -studiobrn/SplitTrack -amoldwalunj/resume_matching_app -JoshMe1/YTYT 
-Tinny-Robot/tinny-bot -jpfearnworks/ai_agents -pip64/geston1 -akhaliq/openlm-research-open_llama_13b -sardor97/Classification_demo -biodatlab/NBDT-Recommendation-Engine -RahulSinghPundir/Sentiment-Analysis -Nixic/ffmo -pyresearch/pyresearch -Yesmyboi/Yes -RickyMartin-dev/Text_to_Image_Diffusion -renumics/cifar10-embeddings -arju10/traditional_cloth_recognizer -Xeraphinite/Coursera-GPT -gwang-kim/DATID-3D -PYTHONOPTIC/FOCUSGUMMY -awacke1/QRCodeAIWriterReaderImaging -verkaDerkaDerk/face-image-to-face-obj -simonduerr/pyvisdemo -fuqiang/txt2pic -autopilot-ai/Indic_sentence_completion -jbilcke-hf/template-node-ctransformers-express -BasToTheMax/openai-whisper-large-v2 -awacke1/ChatGPTStreamlit7-Private2 -DAOGEN/README -jackcao2023/THUDM-WebGLM -PineSearch/generatorImage -Tinny-Robot/Tinny-Robot-NCAIR-ChatBot -Antoine245/bot -FauziNL/Voice_anime2 -raphaelmerx/MMS-transcription -hayas-tohoku-workshop-2023/comparing-VQA-models -Ma5onic/MVSEP-MDX23-music-separation-model -pcuenq/irc -MattyWhite/ChatGPT-ImageCaptioner2 -ops-gaurav/tts -alanchan808/Ask_Tennis_Coach_Rick_Macci -rosebe/EcoSmart -leonelhs/rembg -Yunoposter/H377 -Jaehan/Translation-Korean2English-2 -bg6293/neuralmind-bert-base-portuguese-cased -angelhimi/anime-remove-background -awacke1/Voice-ChatGPT-Streamlit-12 -arixiii/open-reverse-proxy -JohnnyFromOhio/openai-jukebox-1b-lyrics -allknowingroger/Image-Models-Test9 -PeepDaSlan9/whisper-web -smatty662/TheBloke-Wizard-Vicuna-30B-Uncensored-fp16 -RavenBloody/Prototype03 -f2api/gpt-academic -shigel/recipe_0626 -ckul/Real-ESRGAN -zxc314/vits-uma-genshin-honkai -jbilcke-hf/webapp-factory-llama-node -Tekknoman/SG161222-Realistic_Vision_V1.4 -kingabzpro/falcon-1b-ChatBot -coreml-community/converter -DonDoesStuff/Free-GPT3.5 -NingKanae/anime-voice-generator -guymorlan/Arabic2Taatik -Warlord-K/TryOn -awinml/falcon-7b-instruct-api -propilot/transcribe-speech-to-text -SAUL19/imagen-audio -Superlang/ImageComposition -Duino/multy_tts -duchaba/ct_bactrian -dfurman/chat-all-in -balaramas/s2t_translator -awacke1/MemoryEmbeddingsChatGPT-1 -ayoolaolafenwa/ChatLM -ysharma/chatglm2-6b-4bit -splendid/image-generate -finding-fossils/metaextractor-data-review-tool -tsi-org/zeroscope -Mediocreatmybest/PipelineImageCaption -thesven/blog-content-writer -Youssef-Okeil/ArchitectureClassifier -allknowingroger/text-generation-webui-space-1 -arianaira/movie-recommender -felipekitamura/face_deid_ct -peb-peb/shravan -Nekomaru180/rvc-model -stamps-labs/swp-ui -btlee215/openchat-openchat -awacke1/VoiceGPT15 -crlandsc/tiny-audio-diffusion -SIH/geodata-harvester-app -Mandy234/Mandy234-myQAmodel -allknowingroger/Image-Models-Test18 -sujr/sujr-pix2struct-base -rbarman/Audio_Separation_Spleeter -librarian-bots/hub-analysis -amasad/sahil2801-replit-code-instruct-glaive -AirtistDesign/stablediffusionapi-rev-animated -HawkingChen/LangFlow -Cpp4App/Cpp4App -zeykz/rvc-mlbb-v2zey -bodah/RVC-Models-bo -sirfindcent/skimlit -nahue-passano/librispeech-corpus-generator -allknowingroger/New-Image-Models-Testing-2 -ivntl/MMS -miwaniza/ZoomVideoComposer -banana-projects/convai -giswqs/solara-template -Chen-Beer/LLMing -Mobin-Nesari/MM-Movie-Recommender -nomic-ai/MBZUAI_LaMini-instruction -nomic-ai/allenai_soda -nomic-ai/liuhaotian_LLaVA-Instruct-150K -nomic-ai/cnn_dailymail -nomic-ai/fnlp_moss-002-sft-data -nomic-ai/google_MusicCaps -nomic-ai/ceval_ceval-exam -nomic-ai/timdettmers_openassistant-guanaco -nomic-ai/succinctly_midjourney-prompts -nomic-ai/sahil2801_CodeAlpaca-20k -nomic-ai/ehartford_wizard_vicuna_70k_unfiltered 
-nomic-ai/wikisql -nomic-ai/IlyaGusev_ru_turbo_alpaca -turhancan97/yolov8-orientation -sub314xxl/StyleGAN-XL -savakholin/esm-2 -allknowingroger/Image-Models-Test23 -AliHaider0343/implicit-and-explicit-aspects-Extraction-in-Restaurant-Reviews-Domain -AliHaider0343/Restaurant-Domain-Sentence-Categories-Classification -allknowingroger/Image-Models-Test24 -SinaAhmadi/ScriptNormalization -dfurman/chat-gpt-3.5-turbo -allknowingroger/Image-Models-Test26 -lllqqq/so-vits-svc-models-pcr -DiamondYin/AnewGame -Nultx/stable-diffusion-webui-cpu -at2507/at2507_zeroshot_finetuned_sentiment -SarthakSidhant/Go-Cattle -navervision/MLSD -allknowingroger/Image-Models-Test29 -KT07/Speech_Analytics -huggingface-course/audio-course-u7-assessment -allknowingroger/Image-Models-Test30 -Ank0X0/Image-Upscaling-Playground -rdyzakya/IndoLEGO-ABSA -AchyuthGamer/OpenGPT-v1 -balaramas/indic_s2s -angelasnpang/segment-anything-ui -justest/embeddings-api -Wauplin/gradio-oauth-demo -Vinnybustacap/WizardLM-WizardLM-7B-V1.0 -MariaK/Audio-Course-Certification -Sandiago21/text-to-speech-italian -jjumper/Jump -Kajise/GPT4ALL-Falcon -ysharma/RedPajama-ChatInterface -Sandiago21/speech-to-speech-translation-italian -hysts/Kandinsky-2-1 -badmonk/model -Yabo/ControlVideo -daarumadx/bot -MWilinski/bot -karol99/Envvi-Inkpunk-Diffusion -Sandiago21/text-to-speech-spanish -wykonos/movie-recommender -3mrology/Chameleon_Text2Img_Generation_Demo -Endercat126/anything-v5-testing -CloseEric/CloseEric -DiamondYin/Voice-ChatGPT-Streamlit-12 -allknowingroger/Image-Models-Test37 -fffiloni/sd-wip-cinematic-mobile-adapt -songdaooi/ketsueki -Atom007/SDXL-base-9-CPU -jingwora/language-sentence-similarity -Melyoooo/test -sakuramoon/Blossom -codedog-ai/edu-assistant -TNR-5/lib -Aman30577/imageTool1 -Dagfinn1962/Dreamlikeart-Anime-1.0 -TNR-5/libt -CofAI/picgen -ai-maker-space/ArxivChainLitDemo -Sai004/ArticleAPI -OkamiFeng/Bark-with-Voice-Cloning -superdup95/openai_api_key_status -Binguii/Venus_Proxy -Abdullah-Habib/Text_to_Speech_Urdu -PrinceDeven78/Dreamlike-Webui-CPU -Dorado607/ChuanhuChatGPT -Hmjz100/YouTube-to-MT3 -TNR-5/netlist.v1 -CofAI/netlist -openbmb/viscpm-chat -Faridmaruf/rvc-genshin-v2 -Lewislou/Lewislou-cell-seg-sribd -tnt2011/dog_cat_classifier -lilucheng/sourcedetection -GenXDad/logo-wizard-logo-diffusion-checkpoint -naveed92/web_qa -jbilcke-hf/zeroscope-server-1 -Atualli/node-media-server -barunsaha/poem2pic -TNR-5/Search -lewispons/GrammarGuru -fatimaejaz/email_spame_classfier13 -CofAI/viewq -whoisterencelee/stabilityai-FreeWilly2 -umm-maybe/unitary-toxic-bert -ShreyaRao/SummarizeEasy -allknowingroger/Image-Models-Test45 -allknowingroger/Image-Models-Test -ZX9966/LOGO-Approximate-Computing-Technology -Xenova/llama2.c -luisotorres/gender-recognition-app -chongjie/MCC_slim -Harsh239/ChatBot -allknowingroger/Image-Models-Test46 -ashwin3005/first-space -camilosegura/traductor-multilenguaje -awacke1/HTML5Interactivity -vaishanthr/Hand-Detection-and-Segmentation -kat33/llama.cpp -freddyaboulton/echo-chatbot-gradio-discord-bot -sonali-tamhankar/WA-Hospital-Regulations-Chatbot -s3nh/GOAT-7B-COMMUNITY-CHAT -allknowingroger/Image-Models-Test48 -echometerain/whos-that-pokemon -Greenlight-AI/README -ZeroTwo3/one_shot_talking_face_from_text -b1sheng/kg_llm_leaderboard_test -allknowingroger/Image-Models-Test49 -ichelp/AUTOMATIC1111-stable-diffusion-webui -pe-nlp/mt-bench -Monster/Llama-2-7B-chat -miculpionier/Visual-Question-Answering -psalama/UT_Hackathon -ljrmary/UT_Hackathon2 -alonardo/Career_Companion -mehedihassan/ai-stable-diffusion-Text-to-Image 
-WinterGYC/Baichuan-13B-Chat-Int8-Docker -GroveStreet/GTA_SOVITS -AbandonedMuse/UnlimitedMusicGen -lukeslp/tts -xAbdoAT/kandinsky-community-kandinsky-2-2-decoder -vishnun/SnapCode -mikeee/llama-2-70b-guanaco-qlora-ggml -Anni123/AuRoRA -sub314xxl/SDXL-1.0-CPU -sub314xxl/SDXL-1.0-Img2Img-CPU -prospectai/email-checker -sub314xxl/stable-diffusion-img2img -billusanda007/MNIST -allknowingroger/Image-Models-Test53 -mikeee/chinese-llama-2-7b-ggml-q4 -thenethi1603/mygenAIChatbot -Geraldine/simple_contextual_chatbot -hemanthbylupudi/mygenAI -billusanda007/Resume-Ranker -mikeee/gradio-chatinterface -ShieldX/Llama2CSV -allknowingroger/Image-Models-Test54 -irvay/RVC_IR -billusanda007/DeepRank -mtyrrell/cpv_poc -rushankg/test-streamlit -Toinean/huggingfashion -awacke1/facebook-fastspeech2-en-ljspeech-0731 -sampath02061982/MyGenAi -awen666/web-ui -Dagfinn1962/stablediffusion-members -SDXL-ME/stabilityai-stable-diffusion-xl-base-1.0 -aurora10/GPT4ALL_CHATBOT -billusanda007/HireGPT -Branon/TurboKeys -allknowingroger/Image-Models-Test55 -model-man/speech-to-speech-translation -LaxmanOfficial/GenerativeAI -xiaolv/claude2_xiaolv -pikto/Elite-Scifi-Models -seanwendlandt/Video_TO_AnimatedGIF -gptishard/gpt-newbing -codedog-ai/codedog-demo -YumingYuan/Latex_OCR -WordLift/entity-linking -pvanand/RASA_moodbot -tanishqvashisht/sharingan -Bala2-03-2003/AIBALA -rakesh092/Voice_cloning -elsamueldev/gpt4all -Sentdex/StableBeluga2-70B-Chat -shaheerxd99/ml_bookquery_electrical -mumiao/BingAI -GAIR/Factool -fjyczcr/bingai -matthoffner/open-codetree -Sandiago21/automatic-speech-recognition-spanish -allknowingroger/Image-Models-Test58 -allknowingroger/Image-Models-Test61 -tanishqvashisht/horseToZebra -Binettebob22/fast_diffusion2 -omdena-lc/omdena-ng-lagos-chatbot-model -cxylz1/newbing -manutej/imagedemo1 -drift-ai/recruiter-assistant-jbfxrs -Sloth-Alchemist/tortoise-tts-webui -CofAI/chat -VinayHajare/Marathi-Audio-Transcriber-and-Translator -ifire/Architext_deployed -hoshilumine/combined-GI-RVC-models -DeveloperAkhil/Personal-Chatbot -allknowingroger/Image-Models-Test63 -tanishqvashisht/comicInator -LucasCodeBreak/MusicGen -Markjr/monadical-labs-minecraft-skin-generator -myway1990/text2video -akashdhiman79830/MyGenAIAvatar -jbilcke-hf/audio-server-1 -Galax/schafter_x_billy -pamixsun/glaucoma_screening -mikeee/wizardlm-1.0-uncensored-llama2-13b-ggmlv3 -allknowingroger/Image-Models-Test66 -Sakil/LLM_Question_Answering_ChatBot -0xSynapse/LlamaGPT -PeepDaSlan9/Universal-NER-UniNER-7B-definition -Stevross/Astrid-1B-UI -renumics/cifar100-sliceguard-demo -allknowingroger/Image-Models-Test68 -mkotan/mafese_feature_selection -masterzer0456/Ai1 -Sparticle/Llama2_7b_chat_Japanese_Lora -Sparticle/Llama2_13b_chat_Japanese_Lora -billusanda007/Enhancer -awacke1/MemeGenerator -thewise/Chat-W-Git -n0rwegiancoder/WizardLM-WizardLM-70B-V1.0 -AbelKidane/headdetector -allknowingroger/Image-Models-Test70 -allknowingroger/Image-Models-Test73 -bhavyagiri/retrieving-memes -rodevel1978/llama-2-13b-chat.ggmlv3.q4_K_S -shayakh/sdrv51 -harisansarkhan/CatFaceLandmarks -terapyon/gh-issue-search -Smotto/Vocal-Isolator -kevinwang676/VoiceChangers -allknowingroger/Image-Models-Test74 -allknowingroger/Image-Models-Test75 -rahgadda/bark-voice-generator -foduucom/thermal_image_object_detection -syx948/ChatPDF -x6/BingAi -imageomics/dashboard-prototype -BG5/midjourney -imageomics/dev-dashboard -JesseDuku/Hackathon_on_Plastic-free_rivers -jotap12/enso -PeepDaSlan9/Gryphe-MythoMax-L2-13b -PeepDaSlan9/Language-Learn-Idea 
-Justin-Choo/Multi-Diffusers_WEB_UI_CLEANED -nola-ai/Recipe_Meal_Planner -VinayHajare/Speech-To-Speech-Translation-For-Marathi-To-English -Justin-Choo/Anzu-mix_WEB_UI -allknowingroger/Image-Models-Test78 -allknowingroger/Image-Models-Test80 -mygyasir/remove-photo-object -Dagfinn1962/prodia2 -Hina4867/bingo -Alex132/togethercomputer-LLaMA-2-7B-32K -Bannermore/BingChat -allknowingroger/Image-Models-Test81 -allknowingroger/Image-Models-Test82 -c1ybaby/bingAI -Justin-Choo/QuickGen-Photo -shatrunjai/FutureMeMotivator -Supedsa/rvc-models -harisansarkhan/DogBreedClassification -najimino/video -PeepDaSlan9/rvc-models -shibing624/ChatPDF -Chitranshu/Dashboard-Uber -AIConsultant/MusicGen -qskaa/213 -TheProjectsGuy/AnyLoc -LamaAlQarni/Fire-Smoke-Detector -raul-padua/Image-Caption -drdevinhopkins/llSourcell-medllama2_7b -mygyasir/Real-Time-Voice-Cloning -cccc-c/bingo -allknowingroger/Image-Models-Test86 -sanwuchengqun/bingai -101-5/gpt4free -rektKnight/stable-diffusion-webui-cpu_dupli -analist/upscaler -mygyasir/ExperAI_Simulations -GTR-32X/uboa -ranchaya/AI-audio-generator -viait/stable-diffusion-license -ghuron/artist -allknowingroger/Image-Models-Test88 -allknowingroger/Image-Models-Test91 -sukiru/BlueArchiveTTS -Rfilippelli/Deci-DeciCoder-1b -JUNGU/Image-to-Story-Ko -PeepDaSlan9/animated-audio-visualizer -abouuuud/poetry -praveenku32k/SimpleConversationalApp -allknowingroger/Image-Models-Test94 -allknowingroger/Image-Models-Test96 -PeepDaSlan9/segmind-portrait-finetuned -Xuan2060320350/ChatSydney -Kunal7/Gradio-Squats -Xuan2060320350/ChatSydney-1 -JUNGU/Image-to-Story-Ko-multiplot -ehristoforu/Hackchat -crystalai/stabilityai-stable-diffusion-xl-refiner-1.0 -ashu3984/Dialogue_summarization -themanas021/Sentiment_Analysis -shanechin/Linaqruf-pastel-anime-xl-lora -aaaaaabbbbbbbdddddddduuuuulllll/poetry2023 -Amitontheweb/InstaoffyzFreeParaphraser -allknowingroger/Image-Models-Test97 -Sambhavnoobcoder/stable-diffusion-inpainting -CognitiveLabs/Research-Assistant -mygyasir/Fictiverse-Voxel_XL_Lora -viait/vscode -srisakthi2821/UcenAiBot -allknowingroger/Image-Models-Test101 -walterclozet/coffeeee-nsfw-story-generator2 -hekbobo/bingo -dolphinprojects/ProxySearch -WangJexi/panel_trial -callmesan/sai-bot-alpha -Ayushnangia/Whispercpp_yt -mygyasir/Stable-Diffusion-Fast -BBrother/Pandora -aupfe08/image_transform_with_AnimeGAN -openskyml/README -heroku/fse -tengqf/resumeGPT -HuggingFaceM4/IDEFICS_Data_Measurement_Tool -allknowingroger/Image-Models-Test105 -allknowingroger/Image-Models-Test107 -NEXAS/NEXAS-stable_diff_custom -fluffyfluff/multiple-pdf-chat -FathomNet/fathomnet2023-comp-baseline -romero61/hendata -allknowingroger/Image-Models-Test108 -allknowingroger/Image-Models-Test109 -harisansarkhan/Image-Classification-with-CIFAR-10 -podsysai/podsys -Iqbalzz/hololive-rvc-models -mygyasir/stablediffusionapi-epicrealism-epinikio -FedeFT/Head_Pose_Estimation_and_LAEO_computation -sandrocalzada/emotions_faceswap -allknowingroger/Image-Models-Test112 -allknowingroger/Image-Models-Test113 -mangiucugna/difficult-conversations-bot -matanmichaely/image_to_audio_story -WangQvQ/BEiT_Gradio -mygyasir/Stable-Diffusion-Fast111 -toiram/artificialguybr-LogoRedmond-LogoLoraForSDXL -toiram/goofyai-Leonardo_Ai_Style_Illustration -LDJA/hotdog_ld -Gabesantos1007/Dall-e -jhonparra18/ocr-LLM-image-summarizer -chansung/hf-inference-endpoint -ReyDev/Claude-Space -allknowingroger/Image-Models-Test118 -Sarfraz/ehartford-Samantha-1.11-CodeLlama-34b -deepghs/character_splitter -adasddas/dsaaaaaaaa2 -AchyuthGamer/NeonAI-Chat-UI 
-datastx/csv-analysis -Abhimurthy/Phind-Phind-CodeLlama-34B-v1 -Rehman1603/YouTubeToTextInVariousLanguage -logier/QQsign -AnimaLab/bias-test-gpt-pairs -allknowingroger/Image-Models-Test121 -Lngo/paragon-AI-blip2-image-to-text -allknowingroger/Image-Models-Test123 -DeeKayG/COCO-Google -CodingBillionaire/bark-voice-cloning -Justin-Choo/epiCRealism-Natural_Sin_RC1_VAE-WEB-UI -heath1989/prompt-r-gen-sd -sub314xxl/voicechange -Justin-Choo/AWPortrait-WEBUI-CPU -assemblyai/Conformer2-Demo -hardon-server/space-diffusion-txt2img-1-5 -hardon-server/prompthero-openjourney -hardon-server/dalle-mini -XEGAN/movie-recommendation-system -AEUPH/CosmosTV -askarov/I2VGen-XL -DaweiZ/toy-gpt -StaticalizaAI/GPT-4 -Kajise/Demucs_v4-FT_4s -Kajise/Demucs_v4-FT_2s -Yntec/Image-Models-Test -wffcyrus/SD-WebUI -veidlink/find_my_movie_hf -Samlund56/blip-image-captioning-large -giseldo/story_point_estimator_metrics -awacke1/acw-dr-llama-7b-chat -Kurkur99/Sentiment_analysis -alesa/conceptofmind-Yarn-Llama-2-13b-128k -doevent/vc -airsat/dalle-mini -osmanriver/Alist -sky24h/Controllable_Multi-domain_Semantic_Artwork_Synthesis -gpecile/encrypted-image-recognition -NoCrypt/sd_out_gallery -iknow-lab/ko-flan-zero -Billet/WizardLM-WizardMath-70B-V1.033 -nuttella/supa -PixelistStudio/3dart-Models -robinmia/speecht5-tts-demo -skavya/youtube_transcript_summarizer -Abdllh/AraPoet -Abdllh/topic2poem -Abdllh/poetry2023 -Abdllh/poetry -nsarrazin/agent-chat -Anindya/Marketing_Campaign_LLM -Abdllh/poetry202 -Venafi/Vikram-Explorer -turing-motors/heron_chat_git -allknowingroger/Image-Models-Test127 -ivuxy/somnium -dongyi/MMFS -kevinwang676/Bark-Coqui -ysharma/testing_gradio_wheels -allknowingroger/Image-Models-Test129 -allknowingroger/Image-Models-Test130 -Abdllh/Arabic_Poems_Generator -hardon-server/img2txt-server -nagauta/mediapipe-hair-segmentation -Rishabh055/Movie_recommendation_System -hardon-server/image2image-stable-diffusion -neosonics/Awais-Audio_Source_Separation -X1A/UniPoll -Kirihasan/rvc-jjjo -adhirk/ARKs_Contextual_Chronicle -allknowingroger/Image-Models-Test132 -deepaksarika01/youtube-video-qa-lamini -Thafx/sdrvxl1 -Ashrafb/Tesseract-OCR -Jeff2323/ai-comic-factory -diffusers/pipeline_stats -allknowingroger/Image-Models-Test133 -r3gm/vscode -allknowingroger/Image-Models-Test137 -fspecii/midi-composer -chemouda/arome_ai -Samarth991/Youtube-Video-ChatBot -Alfasign/remove-background-on-image -allknowingroger/Image-Models-Test139 -freeCS-dot-org/phi-1_5 -thecherub/welovekaban -jacktown/codefuse-ai-CodeFuse-CodeLlama-34B -limcheekin/CodeLlama-13B-oasst-sft-v10-GGUF -PVIT/pvit -wang2246478872/facebook-m2m100_1.2B -MercuryLeafer/img-to-music -allknowingroger/Image-Models-Test140 -allknowingroger/Image-Models-Test141 -Akash473/FunkoHairBeard -huggingface-projects/MusicGen-bot -Suniilkumaar/SwapMukham -hlydecker/RA-document-QAchat -Ashrafb/codellama-34b -mhenrichsen/DanskGPT -kiyer/pathfinder -KAIST-Geometric-AI-Lab/syncdiffusion-demo -Edisonymy/buy-or-rent -jpwahle/paraphrase-type-tasks -harpreetsahota/chat-with-website -AchyuthGamer/ImMagician-Image-Generator -allknowingroger/Image-Models-Test144 -allknowingroger/Image-Models-Test145 -skhanuja/zeno-winoground -allknowingroger/Image-Models-Test147 -allknowingroger/Image-Models-Test148 -floriankrempl/mtg_rules_bot -HoangHa/IELTS_Speaking_GPT -guardiancc/fast-stable-diffusion -digitalxingtong/Taffy-Bert-VITS2 -eaglelandsonce/UploadaDocAskaQuestion -opencompass/MMBench -openMUSE/parti-prompts-leaderboard -allknowingroger/Image-Models-Test150 -allknowingroger/Image-Models-Test151 
-flocolombari/COLOMBARI_VIGNES-FERRINO_DERNIAUX_NIYONKURU -jskalbg/ChatDev01 -get-foundation/getdemo -mya-mya/SentenceMixer -allknowingroger/Image-Models-Test152 -ayush5710/Codellama-13b-integratable-chatbot -Artples/Chat-with-Llama-2-70b -giswqs/geospatial-dataviz -digitalxingtong/Nanami-Bert-VITS2 -valeriylo/rag_demo -ayush5710/palm-chatbot -digitalxingtong/Jiaran-Bert-VITS2 -openMUSE/MUSE-vs-SD.1.5 -allknowingroger/Image-Models-Test155 -allknowingroger/Image-Models-Test156 -hezhaoqia/vits-simple-api -FIT2125/stable-diffusion-webui-cpu -ayush5710/wizard-coder-34b-coding-chatbot -SeyedAli/Persian-Speech-Transcription -allknowingroger/Image-Models-Test159 -huggingface-projects/deepfloydif-bot -arborvitae/AI_Legal_documentation_assistant -digitalxingtong/Xingtong-Read-Bert-VITS2 -allknowingroger/Image-Models-Test160 -allknowingroger/Image-Models-Test161 -hf4all/bingo-api -Coweed/BadTrip -AchyuthGamer/ImMagician-Gradio -allknowingroger/Image-Models-Test164 -huggingface-projects/wuerstchen-bot -tube1925/sydney_new2.0 -benjaminzuckermanbasisscottsdale/Cardiovascular_Disease_Prediction_Service -Karan123penguin234/georgesung-llama2_7b_chat_uncensored -AngoHF/ANGO-Leaderboard -librarian-bots/tutorials -allknowingroger/Image-Models-Test168 -dongsiqie/lobe-chat -SeyedAli/Persian-Visual-Question-Answering-1 -AFischer1985/wizardlm-13b-v1-2-q4-0-gguf -PirateHFH/IllusionDiffusion -Mysterykey/todd -Detomo/CuteRobot -XzJosh/nine2-Bert-VITS2 -airesai/Mistral-7B-v0.1-Demo -onemriganka/palm2-pdf -Tonic/greenblast -javakhangnguyen/Object-Remove -allknowingroger/Image-Models-Test175 -TogetherAI/remove-background-on-image -awacke1/USMLE-Medical-License-Exam-EDA -Tonic/cybermints -KVNAditya/Personal_News_Summarization_Assistant -Mysterykey/Admin -MultiTransformer/snake_by_princepspolycap -digitalxingtong/Nailv-Bert-Vits2 -Mahiruoshi/MyGO_VIts-bert -AIQuest/lungCancerVgg19 -AlexMaoMao/ostris-ikea-instructions-lora-sdxl -Gigabot/ostris-ikea-instructions-lora-sdxl -mixcard/prompthero-openjourney-v4 -sporg/Ongo -Hexamind/GDOC -Keyven/Multimodal-Vision-Insight -allknowingroger/Image-Models-Test183 -allknowingroger/Image-Models-Test184 -vorstcavry/ComfyUI-XL-Vae-Public -greymatter72/goofyai-3d_render_style_xl -meraGPT/meraKB -ahmadawais/Mistral-Chat -allknowingroger/Image-Models-Test186 -allknowingroger/Image-Models-Test187 -k-kotetsu/upscaling-server-test-1 -RMXK/RVC_HFF -Tonic/BibleScriptures -Tonic/QuranInUrdu -RdnUser77/SpacIO_v1 -Hushh/Generative_QNA -ShawnLJW/image2coloringbook -allknowingroger/Image-Models-Test188 -snowcoin/bing -lewisliuX123/wechatglm_demo -mediaparty2023/test-autotrain -Hmjz100/ChatGPT4 -TIGER-Lab/TIGERScore -reonjy/sdxl -Ayush113/cricket_matchups -donimes977/roblox -allknowingroger/Image-Models-Test192 -silk-road/Luotuo-Fighter -teralomaniac/clewd -Weyaxi/open-llm-leaderboard-renamer -PhilSpiel/storyville -XzJosh/Ava-Bert-VITS2 -XzJosh/Ava2-Bert-VITS2 -AchyuthGamer/OpenGPT-Chat-UI -AFischer1985/AI-Interface -Betacuckgpt/ehartford-Wizard-Vicuna-30B-Uncensored123 -ura-hcmut/ura-llama-evaluation -allknowingroger/Image-Models-Test199 -roshithindia/text_summarization -NicoGargano/stroke -Audiogen/vector-search-demo -XzJosh/Jiaran-Bert-VITS2 -allknowingroger/Image-Models-Test204 -KOFTRFU204/AICoverGen -kobakhit/speech-to-chat -Mosharof/Women_with_Hijab_Detector -mipbkhn/SmartGPTpublic -XzJosh/Aatrox-Bert-VITS2 -manivannan7gp/Words2Image -tkelley353/acid -ML610/Mistral-7b-instruct-GGUF -innat/VideoSwin -AFlac199/openai-reverse-proxy -tsi-org/LLaVA -Harsh502s/Autonomous_Text_Tagging_App 
-nsaintsever/music-generation -lewisliuX123/wechatgpt3 -SAAZIZI/SummarizeAV -TPM-28/Real-ESRGAN_Demo -tsi-org/tts -hf4all/bingo-async-task -CoderMayhem/repello -XzJosh/ShanBao-Bert-VITS2 -mounikakadimi28/ml_salary_prediction -SakshiRathi77/SakshiRathi77-Wishper-Hi-Kagglex -CoPoBio/skin_cancer_risk_prediction -gheng/belanjawan-2024-chatbot -zomehwh/bert_vits2 -KonradSzafer/HF-QA-Demo -brightswitch/EleutherAI-llemma_34b -gstaff/mp4-converter -vih-v/Image_Face_Upscale_Restoration-GFPGAN -dwancin/inpaint -devisionx/auto-annotation-segmentation -vorstcavry/Vorst-Cavry-stablediffusion -deppfellow/steam-recsys -XS-1/BW_IMAGE_VIDEO_COLORIZER -library-samples/image-captioning-with-blip -VetriVendhan26/sentiment-analysis -Prasanna18/AnatomyBOT -jiaxianustc/mbp -THEGAMECHANGER/LandscapeColorizer -EngAbod/Liveness_Detection -SFP/ImCap -kevinwang676/ControlNet-with-GPT-4 -artfan123/AI-generated-art-classifier -olanigan/YoutubeAssistant -lfoppiano/document-qa -CikeyQI/Yunzai -aukaru/claude-wangy -StiveDudov/Image_Face_Upscale_Restoration-GFPGAN -sunxyz/Auto-keep-online -digitalxingtong/Bufeiyan-a-Bert-VITS2 -chendelong/citation-tool -datajuicer/overview_scan -Bazedgul/YoutubeVideo-Transcript-Summarization -AchyuthGamer/Free-Accounts-Generator -westy412/flowise -awacke1/MixtureOfMedicalExperts -DAMO-NLP-SG/CLEX-Chat -dingliyu/skillmix -LaynzKunz/Aesthetic_RVC_Inference_HF -mymiss/ComfyUI-ave -waheedwaqar/Toyota_Youtube_Chatbot -freddyaboulton/gradio_folium -pseudolab/medical-chatbot -Abhi5ingh/fashionsd -twizy/Linaqruf-animagine-xl -malay-91418/image-info -Aadi1149/Arkenbrien-text-to-image-Arkenbrien -TIMBOVILL/RVC-Noobie -manjunathshiva/BibleGPT -SeyedAli/Audio-Diffusion-style_transfer -degirum/yolov8 -Ferion/image-matting-app -innat/Video-FocalNet -cybergpt/bing-chat -xuyingliKepler/KET -TheStinger/Ilaria_TTS -geokanaan/arabeasy -ngoctuanai/gpt4en -JSP/ar -Niansuh/bingai -YeYeYes/QQsign -xuyingliKepler/autogenchat -normster/llm_rules -NiansuhAI/chat -rahul999r/Rahul_Kannada_TTS -multimodalart/LoraTheExplorer4 -mayura25/handwritten_digit_recognition -Clementapa/orang-outan-image-video-detection -locmaymo/Reverse-Proxy -bishu3011/hf-xample -openskyml/starchat-playground -openskyml/HuggingDiffusion -pseudolab/Finetune-Model -jonathanjordan21/ads-video-generator -eddiebee/image_to_black_and_white -xuyingliKepler/matt_scrpt_gen -phyloforfun/VoucherVision -AliSaria/MilitarEye -pseudolab/autotrain-Nuclear_Fusion_Falcon-0 -silk-road/ChatHaruhi-Needy -Saketh-Reddy/webhook_space -Intel/NeuralChat-ICX-INT4 -TeamTonic/hallucination-test -RolandZ/bing-image-creator -limcheekin/Yarn-Mistral-7B-128k-GGUF -nasa-cisto-data-science-group/satvision-base-demo -nafisehNik/girt-space -CognitiveLabs/GPT-4-Vision-Chat -ARTeLab/ARTeLab-SummIT -AUBMC-AIM/MammoGANesis -Abhilashvj/planogram-compliance -adorkin/BilingualEmojiPredictor -adorkin/ZeroShotClassificationEnRu -AlekseyKorshuk/instagram-filter-removal -AlekseyKorshuk/rugpt3 -AlexN/pull_up -AlgoveraAI/algovera_squad_active_passive_model -AmmarHuggingFaces/intro-to-hugging-face -Amrrs/github-star-tracking -Amrrs/numerizerlit -Amrrs/portfolio-github -Amrrs/portfolio -Anon4review/HIPTDemo -Anthos23/hummus -BigSalmon/Bart -BigSalmon/GPT2_Most_Probable -BigSalmon/MaskSeveralAtOnce -Burcin/ExtractiveSummarizer -Dabs/Floyd-Steinberg-Dithering -Dabs/UlamSpiral -Dabs/wordcloud -Danil/AnyNameHack -Davis/twitter_scraper -Devika/Briefly -Dref360/spectral-metric -EfkTur/nutriscore_app -Emclaniyi/music-recommendation-system-spotify -Endre/SemanticSearch-HU -Feynlee/Receipt_Parser 
-Gladiator/Sartorius-Cell-Segmentation -Hellisotherpeople/HF-KeyBERT -Hitmanny/GPT2-story-generation -HugoLaurencon/text-data-filtering-2 -Ignahugging/Image_filtering -Ignahugging/Sentiment-Analysis -IndicNLP/Demo -JadAssaf/STPI -JadAssaf/STPIzeimer -Jesuscriss301/prueba -Jimmie/similar-books -Jipski/Flos_gpt-2 -Jipski/MegStuart_gpt-2 -Joeri/fabry-perot -JonatanGk/catalonia-independence-detector -JonathanLehner/Chatbot_small_demo -JuliaKon/nlp12 -MKaan/multilingual-cpv-sector-classifier -Modfiededition/tweet_sentiment_extractor -MonkeyDBoa/AvengersDetector -Mradul/mlrc-bana -Muedgar/WeatherPrediction -Nalla/PDF_tables_to_CSV_output -Narrativa/poc -Narsil/gradiofold -Narsil/myspace -NbAiLab/maken-clip-text -PaddlePaddle/MiDaS_Small -ParthRangarajan/Centauri_Pilot -PrathamDesai/fastai_bear_classifier -Sakil/A_cover_letter_generator_for_jobs -Sakil/question_answering_app -SaulLu/test-demo -ShadyV/pcm-percent-calculator -SophieTr/TextSummarizationDemo -Souranil/VAE -Stanford-CS236g/example-pokemon-gan -Sultannn/Text_summarization_with-MT5 -hunkim/echo -hunkim/kakaogpt -Theivaprakasham/facedetect -Vasanth/QuestionAnswering -WaterKnight/neural-style-transfer -Wootang01/grammar_corrector -Wootang01/grammar_corrector_two -Wootang01/question_generator_two -Zahraebrahimi/IQA -Zakia/DIARC -abidlabs/english_to_spanish -abidlabs/image-identity -abidlabs/quickdraw2 -abidlabs/speech-translation -aditi2222/Title_generation -aditi2222/gradio_t5 -aditi2222/paragus_paraphrase_demo -aditi2222/sdffvb -aditi2222/updated_t5 -afcruzs/perceiver-image-classification-spanish -agungbesti/produksi -ajitrajasekharan/Qualitative-pretrained-model-evaluation -ajitrajasekharan/self-supervised-ner-biomedical -akhaliq/BLIP -akhaliq/DETR -akhaliq/Deit -akhaliq/Detectron2 -akhaliq/DialoGPT-small -akhaliq/Scientific_Title_Generator -akhaliq/hubert-xlarge-ls960-ft -akhaliq/longformer-scico -akhaliq/wav2vec2-large-robust-ft-libri-960h -algomuffin/jojo_fork -aliabd/new-chatbot-interface -aliabd/wav2lip -allisonye/sketchpad_multiplecharsmodel -alperbayram/Duygu_Analizi -amazon/README -anirbans403/wikisummarizer -anmol007/anmol-sentiment-analysis -anuragshas/Hindi_ASR -any0019/text-style-transfer-demo -ashishabraham22/WATCHA-READIN -astoken/weather_checker -avichr/HebEMO_demo -avorozhko/funbot -awfawfgehgewhfg/frawfafwafa -bespin-global/Bespin-QuestionAnswering -biu-nlp/AlephBERT -bubbletea98/Neo4J_Integration -cbensimon/streamlit-query-params -cbensimon/streamlit-ui-gallery -cdleong/random_emoji -chinhon/frequent_word_counter -cointegrated/toxic-classifier-ru -coolzude/Landmark-Detection -cpnepo/Harry-Potter-Q-A -crabz/sk-ner -cubbycarlson/karl -curt-tigges/anime-image-labeller -cvr/3classifier -danurahul/pop-music -davidcftang/LT -davidefiocco/zeroshotcat -dbdmg/robust-asr-it -dev114/sentiment-analysis -digitalWestie/huggingface-space -dnth/icevision_fridge_tutorial -dnth/rice-disease-classifier -dnth/testalgae -docs-demos/albert-base-v2 -docs-demos/dpr-question_encoder-bert-base-multilingual -docs-demos/electra_large_discriminator_squad2_512 -docs-demos/flaubert_small_cased -docs-demos/prophetnet-large-uncased -docs-demos/t5-base -docs-demos/xlm-roberta-base -docs-demos/xprophetnet-large-wiki100-cased-xglue-ntg -dpc/textgencompare -dreji18/Semantic-Search-using-DistilBert -dreji18/Text-Classification-App -dt/chatbot-es -dt/dt-demo -dt/ner_spanish -dyguay/object-detection-api -edemgold/QA-App -edemgold/generator -edugp/clip-spanish-demo-gradio -edugp/clip-spanish-demo -elaldana/shouldidrive -ethzanalytics/dialog-China 
-farukozderim/a -farukozderim/bug_test_1 -farukozderim/space-building-space-25 -farukozderim/space-building-space-30 -flax-community/GPT2-korean-demo -flax-community/SinhalaLanguageDemos -flax-community/netherformer -flax-community/spanish-image-captioning -g8a9/vit-gpt-italian-captioning -gagan3012/T5-Summarization -gagan3012/streamlit-tags -gagan3012/summarization -geekyrakshit/enhance-me -gingerale/Gnomespace -gorkemgoknar/metayazar -gradio/longformer -gulabpatel/Question-Answering_roberta -gulabpatel/chatbot_GPTNeo -gv/space_demo -harsh7251/cvFoodWebApp -hi9/Core-4-with-QA-on-UC -huggingface/Carbon-Compare -huggingface/README -azizalto/simple_forecast -ibombonato/silence-demo -ichsanprmn/papersumm -impyadav/Hindi-Song-Generation-GPT2 -imthanhlv/dual-encoder -inaccel/inception_v1_tf -inaccel/resnet50 -inaccel/yolov3_adas_pruned_0_9 -indonesian-nlp/luganda-asr -inigosarralde/mushroom_edibility_classifier -isabel/climate-change-project -jacklinquan/make24 -jason9693/SoongsilBERT-BEEP -jcmachicao/dialogatexto -jeang/ernie_demo_toy -jfarray/TFM_SimilitudSemantica_Textos -jgerbscheid/dpa-example -jitesh/storytelling -johnowhitaker/twitter_viz -jrichez/digit_recognizer -jshu/baeroml-hackathon2021 -jsxyhelu/skyseg -jueri/clean_bibtex -julien-c/hello-world -juliensimon/imdb-demo-space -karolmajek/YOLOR -kdemertzis/Earthquakes -keras-io/TF-GB-Forest -keras-io/bidirectional_lstm_imdb -keras-io/char-lstm-seq2seq -keras-io/integrated_gradients -keras-io/randaugment -keras-io/semi-supervised-classification -khizon/emotion-classifier-demo -kinensake/quanquan -kingfisher/similarity-heatmap -kingfisher/smart-search -kleinay/qanom-end-to-end-demo -kleinay/qanom-seq2seq-demo -korona777/HDB_Resale_Price_Prediction -kurone/cp_tags_prediction -learningfromemojis/TwitterEmojis -leoneat/comments_refiner -leopoldmaillard/ImageRetrieval -leung/test-01 -maher13/arabic-asr -makanaan/paraphrase -marcelcastrobr/zero-shot-classification-norsk-bert -mariagrandury/bertin-sqac -markscrivo/odddson -marshmellow77/rouge-scorer -mayerantoine/disaster-damage-classifier -maze/FastStyleTransfer -merve/french-story-gen -merve/gr-blocks -miccull/clip-rgb-interpolation -micole66/electra -micole66/mdeberta -micole66/momomo -micole66/zero-shot-deberta -mikeee/ultimatumbee -milamir/gradioSentimentAnalysis -mmcquade11/codex-reuters-summarization -mmcquade11/codex-text-summarizer -mnemlaghi/beauparleur -moflo/keras_stylegan -mohitmayank/EmojiFinder -moumeneb1/asr_model -msarmi9/multi30k -msulemannkhan/sentiment-classification-gradio -muhtasham/germanquad -nahidalam/meow -nata0801/ASR_Transformers_EnRuFr -nata0801/Question_Answering_App -nateraw/gradio-demo -nateraw/test-space-lfs -nedwards01/Gradient-Descent-Visualizer -nfel/Thermostat -nlpconnect/live-wikipedia-dpr -osanseviero/DINO_VIDEO -osanseviero/bidaf-elmo -osanseviero/biggan -osanseviero/demo-live -osanseviero/hugging-pic -osanseviero/test -paulbricman/conceptarium -paulbricman/lexiscore -paultay/image_generator -peter2000/E-Coicop-food-classifier -phucpd53/DocVQA_LayoutLMV2 -piecurus/speech_to_text -pierrefdz/ssl_watermarking -pngwn/nextjs -pytorch/Densenet -pytorch/EfficientNet -pytorch/Inception_v3 -pytorch/MobileNet_v2 -pytorch/RoBERTa -pytorch/SlowFast -pytorch/SqueezeNet -pytorch/Transformer_NMT -pytorch/Wide_Resnet -pytorch/open-unmix -pytorch/transformers -rahulb517/diffusion -rajesh1729/interactive-tweet-sentiment-visualization-dashboard -ravijoe/emotion_classifier -raynardj/x-language-search-ancient-with-modern-words 
-realrastayouth/knowledge-discovery-final-project-demo -reshinthadith/code-representation-learning -rexoscare/Text_summarization_app -rickystanley76/streamlit-hans-rosling -rubensmau/teste2 -samt/soteria-ml -samueldomdey/SentimentAnalysisSingle -sandrocalzada/DemoHF -sdutta28/AggDetectApp -seanbethard/whatsapp -seki/sk -sentencebird/image-color-vectorization -seyia92coding/Popular_Spotify_Albums -seyia92coding/Simple-Text-based-Gaming-Recommender -shahp7575/gpt-horoscopes -shamikbose89/title-generator-from-abstract -shaneavh/ada -shashankanand13/game-automation-webapp -shawon100/english-to-bangla-translation -shelby/scan_rotation_app -skylord/surubhi -sohomghosh/FinRead -sonoisa/qiita_title_generator -spacy/README -springml111/Pegasus_Paraphrase_demo -srishtiganguly/maskrcnn -kernelmachine/gpt3-quality-filter -stmnk/pygen -suguuuu/monodepth -suxiaomi/MT3 -tanaydeshmukh/gradio-sentiment-web-app -thebestteamever/fire_detection_project -tidy/styleflow -tobiascz/demotime -training-transformers-together/calc -tsereno/SportsTrainer -twinpiks/tst -tyang/simcse-mpnet-fuzz-tfidf -ucalyptus/PTI -victor/tailwind-static-space -winnielin/mySecretBox -winwithakash/Flight-Fare-Price-Prediction -wolfrage89/chaii_spaces -wolfrage89/finance_domain_translation_marianMT -xiaoshi/test -yabramuvdi/wfh-app-v2 -yseop/Finance -yu3ufff/quiz-bowl-qa -zeke/hello-spaces-gradio -zhenwusw/JoJoGAN -zhiqwang/assets -zyj1022/codeffe -senger/AI-TextGenerator -SebastianEnger/AI-TextGenerator -tensorflow/yamnet -osanseviero/mix_match_gradio -edbeeching/atari_live_model -osanseviero/draw123 -Reeve/Ohayou_Face -Sukhyun/course_recommender -MohamedSherif/Skin_Cancer_detection -alkzar90/streamlit-demo-example -Sukhyun/MBTI_translator -snakeeyes021/id-the-seas -kevinszuchet/waste-classification -davidmd/lane_detection_UNet_Model -qqaatw/realm-demo -onnx/ResNet -onnx/AlexNet -LamaAl/chatbot -templates/streamlit -Senayfre/CropHealth -yassTrad/extractiveSum -Wootang01/chatbot_three -Zeel/HeteroscedasticGP -MaximeTut/Emploi2021 -course-demos/generate-tone -XAI/VisualCorrespondenceHumanStudy -temp-late/rhyme-ai -jdposa/medical_ner_spanish -joheras/OpticDiskDetection -onnx/MNIST-Handwritten-Digit-Recognition -course-demos/Rick_and_Morty_QA -onnx/sub_pixel_cnn_2016 -akhaliq/beit -ahmedJaafari/AnnarabicRecord -onnx/BERT-Squad -onnx/BiDAF -nostalgebraist/frank-diffusion-streamlit -Sultannn/YOLOX_DEMO-Webcam -EricaCorral/Chinese-Tools-Advanced -AjulorC/question_answering_bot_deployed_with_Gradio -Heriot-WattUniversity/generate-tone -akdeniz27/spacy-turkish-demo -PaddlePaddle/pnasnet_imagenet -course-demos/marian-finetuned-kde4-en-to-fr -RobinWZQ/CCLAP -AlowaSawsan/Third-Molar-Segmentation -adityapathakk/crop-health -Time-travelRephotography/Time-travel_Rephotography -csuhan/opendet2 -imkaushalpatel/YOLOv3 -PaddlePaddle/resnet_v2_34_imagenet -Borda90/Titanic_Esp -IPN/Demo -osanseviero/flask_test -hackathon-pln-es/demo_flask -Kaldra/PollutionClassifier -SorbonneUniversity/tone -BigSalmon/GPT2Mask -onnx/yolov4 -osanseviero/accuracy_metric -imkaushalpatel/GoogleNet -huggan/pix2pix-facades -Deep1994/t5-paraphrase -arkmartov/arkmartov -datasith/image-classification-cast-parts -yash161101/deepwords -lcipolina/Print_Gallery -smajumdar/nemo_conformer_rnnt_large -d0r1h/LegSum -pog/Depression-Detector -r2d2/decision-triptych -pierrefdz/semantle -wrapper228/arxiv_classifier -teach/README -badongtakla/ithaca -tskolm/YouTube_comments_generation -BigSalmon/BackTranslation2 -BrianL/CoE197-Fil-DialectTranslator -ThirdIringan/Speech_Equation_Solver 
-AleksBlacky/Arxiv_paper_classifier -tallwhitestck/asl-fingerspelling-recognition -kotstantinovskii/YSDA_arxiv_classification -danielHora/Object_Detection_for_Self-Checkout_Stores -godot-demo/godot-2d-threads -abidlabs/full-context-asr -almostagi/QTL -utec/SpaceKonnor-tts_transformer-es-css10 -utec/my-first-space -utec/Spacelmaj -CristianGonzalez281098/Cheto -Rodrigo21/space1 -unlisboa/pokemon-image-classifier -Saturdays/Cardiosight -coco-gelamay/missing-items -balamurugan/search-10k-filings -AlgoveraAI/medical-image-classification -tmabraham/horse2zebra_cyclegan -Ifan/instant-ngp -ronvolutional/http-server -RugNlpFlashcards/Speech_Language_Processing_Jurafsky_Martin -GastonMazzei/escher-inpaint-project -IPN/streamlit_demo -Techis/resume-screening-tool -osanseviero/llama-classifiers -Harveenchadha/Vakyansh-Tamil-TTS -BramVanroy/spacey_conll -Aymene/FakeNewsDetector -mustdo12/U-Net_Segmentation -IPN/FirstSpaceTEST_Gradio -IPN/demo-sdamian -IPN/helloooooo -IPN/demo_ -IPN/demo_2_omar -IPN/demoipn -IPN/DM_pb -mojians/E2E-QA-mining -anuragshas/en-hi-transliteration -hysts/projected_gan -AdityaMahimkar/ParaPhraser -PaddlePaddle/ghostnet_x1_3_imagenet -givkashi/seam-carving -hitomi-team/README -pyodide-demo/self-hosted -awacke1/PersistState -awacke1/PersistURL -PierreCugnet/airline-sentiment-analysis -AICopilot/Dropbox -Saturdays/Tomatelo_a_pecho -Saturdays/mamamIA -ITESM/streamlit_graphs -Saturdays/desertIAragon -sil-ai/aqua-comprehensibility -tomofi/GOCR -Wootang01/image_classifier_four -tsantos/Hierarchical-Classification-System-for-Breast-Cancer -swcrazyfan/Kingify-2Way -tensorflow/efficientnetv2-s -webis-huggingface-workshop/omar_demo -webis-huggingface-workshop/ferdi_demo -webis-huggingface-workshop/sebastian_sentiments_demo -webis-huggingface-workshop/guldeniz-first-space -yesdeepakmittal/fake-news-classifier -hysts/ibug-emotion_recognition -manmeetkaurbaxi/YouTube-Video-Summarizer -Hdiopalma/anime-face-detector -Fah/gradio-prediction-conversionrate -awacke1/Memory-Streamlit -Tlaloc/Aerial_Unet -mgfrantz/reading_practice -Guldeniz/aerial-to-map -yale-CPSC-577/musical-tone-123 -SIB/Smart_Resume -Vijish/PoPd-PoPArT -ales/wav2vec2-cv-be-lm -helliun/antetoki -SRVM-kandregula/Resume_Enhancement -hysts/TADNE-image-viewer -adimmer/semi-supervised-wrappers -panik/Facial-Expression -bioniclelee/BoatDetectionCW -birdortyedi/cifr-pytorch -docs-demos/hubert-large-superb-er -docs-demos/paraphrase-xlm-r-multilingual-v1 -beihai/Image-Compression-with-SVD -haryoaw/id-recigen -NeuML/txtsql -Epitech/LinguaExpressus -Chris1/real2sim -Epitech/IA_NLP -hylee/apdrawing -tomofi/NEologd -eetn/Hellenic_AI_Society -BramVanroy/opus-mt -paulbricman/velma -Saturdays/FER -choiyk0103/TrOCR_app -vebie91/spaces-image-classification-demo -aziz7751/lan2lan -bohmian/simple_streamlit_app -abidlabs/call-sentiment-blocks-2 -andersab/QuijoBERT -Epitech/AIoT -AdWeeb/SuMmeet -Qiwei97/Airbnb_tool -Eddevs/README -chiulori/bertopic-reviews -BIASLab/sars-cov-2-classification-fcgr -xiaogang/res2net -lounguyen/MangoDetectionApp -igrab666/polish_text_summarization -dtrejopizzo/webcam -Meena/table-question-answering-space -m-newhauser/political-tweets -martinlmedina/tf_hub_Fast_Style_Transfer_for_Arbitrary_Styles_v2 -Cyril666/my_abi -osanseviero/ray_serve -akhaliq/mGPT -Saturdays/Focus_on_driving -mjaramillo/SpiceIcaroTP -JbIPS/DogBreed -merve/data-leak -merve/hidden-bias -merve/measuring-fairness -IIITT/SumMeet -awacke1/CSV2ClassifyVisualization -Saturdays/retinal-disease -akhaliq/arcanegannewtheme -Sacpapa/Zoidberg 
-Cropinky/hana_hanak_houses -ds21/Q-TicTacToe -wgpubs/fastai_2022_session1_is_marvel_character -atharvat80/Wikipedia2Vec-NED -rishirajacharya/picspeaks-hindi -probing-vits/class-attention-map -yuhe6/final_project -probing-vits/class-saliency -Epitech/userbank -Epitech/IOT_temperature -beingpraveen/streamlit_text_to_sql -davidrd123/Art_Movement -OOlajide/nyc-crimes -Orcun2/ToxicCommentClassifier -hylee/arcanegan -radames/Speech-Recognition-Example -jph00/minima -jamesnzeex/resale_HDB_price_prediction_model -tcapelle/spacy_wandb -EdBianchi/Social_Toximeter -Aristo/trafficsign -Saturdays/Student_Experience -calvin/MuseGAN -dev-andres/Caracola-app -CVMX-jaca-tonos/Spanish-Audio-Transcription-to-Quechua-Translation -LunchWithaLens/whichraptor -NasirKhalid24/Dalle2-Diffusion-Prior -Gradio-Blocks/README -awacke1/GraphViz-Demo -vestacasino/README -zeeba/minima -gerardo/elon_or_not -LamaAl/arabic-empathetic -deydebasmita91/Twitter_Live -AkshayDev/Lazy-Film-Reviews -akhaliq/dalle-flow -Zakia/cat_or_dog_predictor -awacke1/VideoPlayer -Zakia/chest_x_ray_pneumonia_predictor -sijunhe/poet -thepurplingpoet/superman -akhaliq/CaptchaCracker -bananabot/ThisMollywoodMovieDoesNotExist.com -Matonice/gradio-insurance-policy-summarizer -skalyan91/font_classifier -cesar/autotexto -spencer/socm -ganesh3/superheroclassifier -ialhashim/Colorizer -IsaacK/streamlit-test -luvarona/Practica1 -azaninello/ailai -hlopez/Twitter-Positivity-Analyzer -musfiqdehan/bangla-pos-tagger -Zengyf-CVer/Gradio_YOLOv5_Det_v2_2 -aibc/object-detection-demo -tlkh/textdiff -awacke1/ParallelSummaryModel -Saturdays/WomanLife -Zengyf-CVer/Gradio_YOLOv5_Det_v3 -Giedrius/mood_detector -Slava917/pronunciation-trainer -ashishraics/FillTheBlanks -kandysh/NER_Tagger -patent/demo3 -xiaogang/image_emotion -Epitech/alzheimer -azizalto/us_patent_kaggle -Chujinze/Res2Net -biubiubiiu/EFDM -rushic24/Priyanka-Chopra-TTS -johnnyfivefingers/summarymachine -valurank/Headline_generator -vinni1484/text-summarizer -YSU/aspram-realtime -freddyaboulton/ts-lags -Epitech/MLOps -vinni1484/text-keywords -mikachou/stackoverflow -fangyuan/lfqa_discourse -WillieCubed/song-to-sheet -Aravindan/BreedClassification -evaluate-metric/roc_auc -evaluate-metric/pearsonr -evaluate-metric/competition_math -evaluate-metric/recall -evaluate-metric/coval -evaluate-metric/ter -evaluate-metric/indic_glue -evaluate-metric/glue -evaluate-comparison/mcnemar -priyankasharma5882/Breed_Classification -simulate-tests/RiggedSimple -simulate-tests/BoxTextured -michaelgira23/debiasing-lms -marksverdhei/saved-you-a-click -seduerr/ethical_data -pierreguillou/duplicate-an-existing-space -pourmand1376/PrePars -Sa-m/Brand-Logo-Classification -farukozderim/comparison-space2 -yeqingmei123/face-test -messiah2305/duplicate-space -LuciaCw/greet -kandysh/clause_segmentation -ironbar/aprender_a_leer -dpv/Stage1Recycling -GroNLP/divemt_explorer -HFUniversity2022/final-project-abubakar -HarryLee/TextTopicModeling -nagolinc/safetyWaifu -rajistics/News_Topic_Clustering -awacke1/StreamlitStatefulSingleton -zhang0209/ImageDownloader -Hamda/AraJARIR -deepparag/Aeona-Chatbot -fbadine/uk_ireland_accent_classification -chaninder/ds3-ml-model -Jerimee/HelloWorld -rajistics/biobert_ner_demo -sub44/reddit-video-downloader11 -awacke1/SaveAndReloadDataset -mynti/plainly -abdulmatinomotoso/Plant_leaf_disease_classificaton -bookbot/Grad-TTS-Weildan-Playground -awacke1/TimeSeries -fmegahed/tavr_project -Xhaheen/facebook_OPT_350m_Language_model -evaluate-metric/README -zswwsz/Dissertation_txt_to_img -daniel-dona/tfg-demo 
-keras-io/siamese-contrastive -sriramelango/CV_Social_Classification -seduerr/communicaite -skydust/textsum -awacke1/Text2SpeechSentimentSave -clementgyj/FNLP_D_HD -comodoro/Coqui-STT-transcription -aritheanalyst/legalsummarizer -doevent/ArcaneGAN -iankur/img2tex -joaomaia/football_probs -Cyril666/ContourNet-ABI -kandysh/clause_segmentation_benepar -HaHaBill/LandShapes-Antarctica -keras-io/addition-lstm -GiordanoB/sumarizacao-abstrativa-portugues -neeraj-aditi/AIVOT-AI -awacke1/NLPAutoAI -theAIguy/triplet_margin_loss -Ritvik19/SudokuNet -awacke1/Emoji-Short-Codes -pplonski/dashboard -isabel/testing-streamlit -sriramelango/Social_Classification_Public -awacke1/DigitalCity -awacke1/MLOpsStreamlit -Ani1712full/Estimacion_tasa_morosidad -isabel/testing-blocks -keras-io/structured-data-classification -keras-io/CutMix_Data_Augmentation_for_Image_Classification -Avator/gradio-hugging-face -Sebasur90/observatorio_noticias -awacke1/SpeechStoryReadAloud -chainyo/optimum-text-classification -Narrativa/semantic_news_search -chlab/interactive_kinematic_planet_detector -SoArizonaAI/README -keras-io/conv_autoencoder -Abdul09/bingo_demo -nbroad/voice-queries-clinical-trials -naver/PUMP -wlf/dall-e -awacke1/QiskitQuantumNeuralNet -Kieranm/britishmus_plate_material_classifier_space -keras-io/WGAN-GP -Heisenberg08/Text2SQL -abdabbas/abd -lvwerra/bary_score -SLU-CSCI4750/Demo8_RegressionGradientDecentCompare -kaggle/amex -rajistics/cars -bigscience-data/filter_values_distributions -goarnaiz/Proyecto -HGZeon/test_model_2 -paochoa/DeOldification -AlgoveraAI/web3-wallet-streamlit -keras-io/image_classification_using_conv_mixer -keras-io/Image_Classification_using_Consistency_Training -keras-io/english-speaker-accent-recognition-using-transfer-learning -HiImJavivi/Practica2 -davidmasip/glaucoma-gr -berkeozd/AppReviewClassifiers -njgroene/age-gender-profilepic -BFH/BKMotionsAI -SIVAPRASATH/tamil-translator -2-2/blockchain.ai -rhuang/RL -jamoncj/entregable3 -abdabbas/skincancer-iraq -krislynn/krislynn -mlnotes/borrador_constitucion_chile -Firefly777a/summarization-demo-v1 -keras-io/ProbabilisticBayesianNetwork -douwekiela/dadc -sugo/v6yu7bgn -khanguyen/voice-password-app -keras-io/cct -DemocracyStudio/generate_nft_content -awacke1/GradioBlocksChangeEvent -ValarMorghulis/BudgetAllocation -Gavnoed/Kaloed -MaksMaib/PetGradioStyleTransf -awacke1/Transformers-StoryWriting -Saturdays/HUMANDS -denisp1/Transformers-StoryWriting -keras-io/ctc_asr -jharrison27/VR-DEMO -denisp1/AR-VR-IOT-DEMO -jmcob/AR-VR-IOT-Demo -pmuvval1/ChemistryMoleculeModelerTest -jharrison27/moleculemodeler -jmcob/ChemistryModelerSMILES -jbitel/dalle -awacke1/ContextQuestionAnswerNLP -EuroPython2022/README -keras-io/adamatch-domain-adaption -TIMAX/Logic-Translator -mwaseemrandhawa/sentiment_analysis -RaulS/D-Pose -denisp1/GraphViz-Demo -denisp1/AI-Quantum -arshy/medicalspecialty -Heisenberg08/Ai_Portrait_Mode -jkim1238/predictive_analysis -mindwrapped/gpt2-lotr-fellowship -Devaholic/fruit-demo -dennis-fast/Talk2Elon -azaninello/gpt2-general-english -langfab/movie-plot-genre-predictor -Wootang01/sentiment_analyzer_1 -ouiame/text -phmota/disarter_model -shaheer/mysent -LayBraid/SpaceVector_v0 -shaheer/textgeneration -nicole-ocampo/digimap-mp -MB311/Wordle_Performance_Checker -tonne/pycaret -egesko/DCGAN -keras-io/VQ-VAE -UdayPrasad/fashion-mnist -awacke1/MultiRhymeLyricSmith -ARTeLab/DTM_Estimation_SRandD -shouzen/canada-goose-v4 -logasja/Fawkes -SoundreameR/craiyon-exploration -Sreenivas98/FashionMIST_Classification -oussama/LayoutLMv1 
-seanbenhur/tamilatis -aico/TrOCR-digit -davidfischer/ea-classifier -Conner/IAPdemo -kkawamu1/huggingface_multi_inference_rank_eval -speechbrain/README -awacke1/NLPImageUnderstanding -gangviolence/giftmediscordnitro -Polo45/README -osanseviero/tips -blastd/LimoneSorrentin -ullasmrnva/LawBerta -Margaret/mazzuma-sentiment-engine -yzha/ctc_eval -SaulLu/bloom-generations-viewer -suds/blah -aplejandro/HeartDisease -dineshreddy/WALT -djsull/aha-summarisation -UdayPrasad/mnist_classification -yairVag/Image_Captioning -samroni/gpt2_demo_gradioUI -Liviox24/LoanEligibilityPrediction -codenamewei/speech-to-text -matteopilotto/emotion_in_tweets -chali12/skill_extraction -PaulHilders/IEAI_CLIPGroundingExplainability -kamalkraj/Mega-Dalle -awacke1/ChatBotPersonalities -bhvsh/stroke-prediction -Nomanalvi/PDF_Convertor -tcapelle/calculadora_impuestos -gestiodinamica/recon_caras -chrisjay/simple-mnist-classification -jmaller/rnn-amywinehouse -awacke1/MusicLyricsAndAlbums -EuroPython2022/Leaderboard -Cub/README -atomiclabs/text_generation -datien228/text-summarizer -gestiodinamica/gdmk_genbase -sanchanhart/Warehouse_Apparel_Detection -oussamamatar/yolo-mediapipe -EuroPython2022/example-hello -ceyda/kornia-augmentations-tester -shivambhosale/spacenet3-unet-1024-1024 -jmaller/rnn-leonard_cohen -NAACL2022/README -hirsuitedevil/demo -NAACL2022/Spaces-Leaderboard -awacke1/CSVSentiment -awacke1/Gradio-Blocks-Demo-2 -awacke1/HFSpaceStreamlitHeatmap -Geethanjali/YouTube_Transcript_Summarizer -Moran/Aviv_Moran_Summarization -hf-task-exploration/ExploreACMnaacl -rushi29/AIP_pdf -sourav11295/Model_Recommendation -UzNutq/README -mfumanelli/geometric_mean -awacke1/GradioTranslation -awacke1/GradioTextToSpeechOrImages -awacke1/GradioDoubleChatbotTasteTest -jorge-henao/historias-conflicto-col -keras-io/conv_Mixer -keras-io/token_learner -Msp/Funsd_Layoutlm_V3_Pretrained -lvwerra/license -lvwerra/license-static -rajeshradhakrishnan/malayalam-tamil -Sa-m/YOLO-V7-Custom-Model-Pot-Hole-Detection -osanseviero/live_europython -dalexanderch/SweetNet -rycont/Biblify -Zengyf-CVer/Streamlit_YOLOv5_Model2x -EuroPython2022/excitingModel -EuroPython2022/Paddy_Disease_Classification -awacke1/VisionImageClassifierGradio -greco/survey_analytics_spaces -PaulEdwards/StarWords -freddyaboulton/blocks_inputs -AINLPRoundTable/README -jasmeet1001/jasmeetmoviebox -cannlytics/README -Dusan/clickbaitonator -senfu/tiny_gaze -ysharma/testing_blocks_inference -simonschoe/Call2Vec -AlexWortega/t5_predict_activity -awacke1/GroupSimilarDataCluster -cosmicdream/Image_Variations -Giuliano/image_classification -big-kek/NeuroSkeptic -ManjariSingh/evalml_forecast -EuroPython2022/viciu -EuroPython2022/batangkali -EuroPython2022/cloudspace -EuroPython2022/machinetestspace -EuroPython2022/Warehouse_Apparel_Detection -platzi/platzi-curso-gradio-clasificacion-imagenes -mishtert/tracer -Jimmie/identify_this_insect -jonas/sdg-policy-tracing -Ifeanyi/classify-images -EuroPython2022/pyro-vision -eliolio/yelp-reviews -cstimson/SentenceSimilarityHeatmapAndClustering -cstimson/ImageToOCR -awacke1/ImageOCRMultilingual -z-uo/HTS-Audio-Transformer -Saurav21/Blog-Generation -politweet-sh/politweet -platzi/platzi-curso-gradio-tf-clasificacion-imagenes -platzi/platzi-curso-gradio-asr -Amrrs/hubble-jwst-compare -smjain/zeroshotclassifier -smjain/gpt2_text_gen -bulentsofttech/gradio_s1000_veri_toplama_modeli -ubermenchh/dog-breed-classifier -awacke1/AskMeAnythingSemanticSearch -awacke1/BioMedContextHighlighter -nyx-ai/stylegan2-flax-tpu 
-freddyaboulton/sentiment-classification-interpretation-tabs -Swth/Hi -ICML2022/distilgpt2-finetuned-wikitext103 -Chirag4579/prakalpa-image-comparator -evaluate-metric/poseval -awacke1/HFSpaceStreamlitHeatmapNLP -nkatraga/7.22.first.hfstreamlitHeatmap -Myrna/VideoSummary2 -sidsriv/VideoSummaryfromYoutubeVideo -santoshsindham/VideoSummary -nkatraga/7.22.VideoSummary2 -uparasha/ASRtoTexttoStorytoImagestoVideo -akashagarwal/ASRGenerateStory -uparasha/AnimationUsingLottie -awacke1/AnimationUsingLottie -niksyad/CarePlanQnAWithContext -awacke1/CarePlanQnAWithContext2 -awacke1/Speeech2Text2Story2Images2Video -rajatus231/Speeech2Text2Story2Images2Video -NiiCole/FireExtinguishers -awacke1/BiomedCaseContextHighlight -williambr/CarePlanSOTAQnA -awacke1/StreamlitHeatmapAndCluster -vnemala/StreamlitHeatmapAndCluster -williambr/VideoSummaryGenerator -MateusA/StoryGenerator -ocordes/GradioSpeechToTextToMedia -awacke1/GradioSpeech2Text2Story2Images2Video -mm2593/Gradiospeech2Text2Story2Video -awacke1/PhysicsRacingDemoWith3DARVR -sdande11/HFSpaceStreamlitHeatmapNLP -sdande11/CarePlanQnAWithContext2 -awacke1/GraphVis3 -widged/bloom_demo -Ishayy/space_1 -imbikramsaha/cat-breed-classifier -ceyda/fashion_classification -rkingery/dumb-language-model -MadhuV28/VideoSumamry -timothepearce/mnist-classification -dia2diab/hackme_space -satani/bird_classifier -Juancho/forest_fire_detector -imagescientist/zebrafishtest1 -astroweb/README -smjain/insecure_code_detector -smjain/unixshell_command_gen -aiEDUcurriculum/introtoAI-clubs-project -Jai12345/App -riteshsingh/flower -sebastianM/CarDetectionAndModernity -ganning/asl-gloss -manjuvallayil/te-reo -evaluate-measurement/label_distribution -madara-uchiha/MovieMakerAI -jmcob/StreamlitGraphViz -awacke1/StreamlitGraphViz -denisp1/Streamlit-GraphViz-Demo -espejelomar/dientes -awacke1/WebAssemblyStreamlitLite-stlite -poooja2012/ethio_hydro -Anuj-Panthri/imdb_review_sentiment -osanseviero/shiny -dblitzz21/food-spoonycal -ekosetiawan/flowers_classifier -Xhaheen/regex_by_bloom -ali-ghamdan/image-colors-corrector -mosses/constructMaker -ner4archives/ner4archives-NEL-vizualizer-app -keras-io/shiftvit -elinteerie/NigeriaFoodAI -Santarabantoosoo/Sentiments_topic_modeling_ITALIAN -omlab/vlchecklist_demo -RubenAMtz/pothole_detector -elena-k/OmdenaTriesteLongCovid -kwangjong/food-classifier-MobileNetV3 -srini047/text-based-sentiment-analyzer -manan/fruit-classifier -windmaple/lit -kvignesh17/YoutubeVideoSummarization -harishrb/Translate-To-Spanish -mikeee/convbot -rsatish1110/VideoSummaryGenerator -harishrb/TraveLingo -georeactor/code-probability-of-injection -Mostafa92/detecting_plant_leaf_diseases -mbarnig/lb-de-en-fr-pt-COQUI-STT -munichnlp/README -MadSid/Fast-L2 -AyameYODAYO/xijinpingx -osanseviero/gradio_auth -Aabdelhamidaz/animals -qmjnh/FLowerCLassification -mihyun/may1 -0x7194633/mbrat-ru-sum -hangjoni/food_classifier -deelight-del/minima -Jour/Translate -Yuqi/Gender_Classifier -esumitra/superheroes -awacke1/StreamlitHeatmapKMeansCluster -Cambino/dog-classifier-gradio -freddyaboulton/EDSR-freddy -suddu21/garbage-classification -Dinoking/Flower-Classification-v1 -Dinoking/Garbage-Classifier-V2 -SaffalPoosh/faceRecognition -Plashkar/test-gradio-sdk -versus666/uplift_lab -jaleesahmed/employee-experience -vcasadei/banana-defect-detection -djsull/aha-curse-class -SagarPatel/YouMatter -kitkeat/effective_argumentative_writing_prediction -Plashkar/diabetes-predict -usingh49/us1 -Dinoking/Garbage-Classifier-V3 -nakamura196/yolov5-ndl-layout -Hackathon2022/BigColumnDiabetes 
-jaleesahmed/correlation-and-visualization -jaleesahmed/data-description -jaleesahmed/model-development -madoss/gdiy -Aravindan/butterfly_classification -nivalk/dermAI -pycs/aircraft -disham993/anime_protagonist_classifier -ethanmb/monkeypox-model -Ali-Omrani/CCR -hugginglearners/llama_or_alpaca -vbzvibin/gavs-hackathon_v1 -Dinoking/Garbage-Classifier-V4 -dbmdz/detectron2-model-demo -irJERAD/tahiti-or-hawaii -apat27/pox-classifier -iannn/TheDiscussionChat -Xhaheen/Regex_by_OpenAI -rajistics/interpet_transformers -victorialslocum/reciparse_visualizer -Daniel-Saeedi/sent-debias -icon-it-tdtu/mt-vi-en-optimum -pouchedfox/SP -PatrickTyBrown/LoanDocumentClassifier -Xhaheen/tasweer -Daniel-Saeedi/auto-debias -Parthjain9925/DigitRecognizer -sofmi/MegaDetector_DLClive -Dinoking/Garbage-Classifier-V6 -jamesbradbury333/fastai-week-2 -nerusskyhigh/drawingstyle -sasa25/1 -chidojawbreaker/ct-i-rad -sandeepmajumdar/nlp-sorcery -dawood/Plot -instantnoodle/Fruits-classifier -rsandadi/BearDetector -idsedykh/codebleu2 -awacke1/Hackathon2022 -chuoguejiofor/CatBreedClassifier -KNDLR/trash-ai -smjain/smjainvoice -Dinoking/Guccio-AI-Designer -jspr/tweet-ab -LightAI/README -captchaboy/fastest-8kun-captchas-solver -feng2022/styleganhuman_copy -pinecone/gif-search -pinecone/yt-search -freddyaboulton/3.1.4.9-all-demos -Qilex/ColorpAI -offside/offsidespace -AlirezaSM/bear_classifier -Rekanice/hf_minimal_sushi -qile0317/Bacteria-Classification -BigSalmon/TestAnyGPTModel -TF2SA/template_generator -marioboy/neil-breen -metroidmen/face-restoration-Tencent -dentadelta123/GuardrailDetection -jonathanmg96/TFG-YOLOP -owaiskha9654/Yolo-v7 -aronvandepol/KGPT -Eitan177/mutation_profiler -LawalAfeez/science-lab -Shivam29rathore/shorter-finbert -AIZeroToHero/README -dquisi/StoryGenerator -michael-p/mi-vi-be -AIZeroToHero/02-Transformers-Sentence2Paragraph -jracca/00-learning-space -dmccreary/AaronsClass -dmccreary/Art-From-Text-And-Images -jracca/01-learning-space -jracca/02-learning-space -bdp-AI/03-ImageSearchSimilar -AIZeroToHero/05-RealtimeStreamlitASR -jracca/05-learning-space -jonswain/pka_classifier -freddyaboulton/blocks-js-methods -leo-step/imagenet-demo -sajjadking86/appbot -Paarth/ForgeT5 -vladisov/fn -Gorilla115/shakespeareify -teamtom/flower_classifier -Artificio/AdversarialArt -mtulow/geospatial_deep_learning_app -codesue/dystopedia -joaquinu/merluzo -chidojawbreaker/UTI -Supsies/CodingandMore -FredMagick/Stable-diffusion-Bias-test -kios/Natural_Disaster_Classification -awacke1/AI-Atari-Live-Streamlit -bookbot/Wikipedia-Scraper -Msp/docVQA_donut -MadhuV28/Image_Background_Sidebar_Lottie_Animation -chidojawbreaker/transformer-health -arngpt/Summarizer-Trax -mbarnig/translation-lb-en-with-3-models -nmenezes0/fast-ai-example -torfasonc/Accord_or_Civic -darragh/bloom_demo_long -awacke1/StreamlitClipboardInteraction -freddyaboulton/timeseries-forecasting-with-prophet -phenolicat/hobbitese_id -neek05/NLP-AMLO -sandeepmajumdar/Generate_Image_From_Text -johnson906/recipedia -Jack-Ahan/fruit-vegetable-classifier -pinecone/movie-recommender -sbroy10/01-NLP-Sentence2Paragraph -locust/01-NLP-Sentence2Paragraph -awacke1/2-NLP-Seq2SeqQAGenerator -sbroy10/02-NLP-Seq2SeqQAGenerator -locust/02-NLP-Seq2SeqQAGenerator -sbroy10/03-NLP-SOTA-MedEntity -AIZeroToHero/03-NLP-MLM-SOTA-MedEntity -AIZeroToHero/3-NLP-MLM-MaskedLanguageModel -locust/03-NLP-MLM-MaskedLanguageModel -locust/04-NLP-KE-WordCloud -sbroy10/05-NLP-CPVisGraph -rogman/Flamingo-Gradio-ImageDescribe -gngpostalsrvc/Hyderabad_India_AI_Soft_skills 
-Imran1/Flower-image-classification -Dana19/biden_or_clinton -MrSinan/LFW-MaskedRecogntion -AnnasBlackHat/Image-Downloader -SalmanHabeeb/Blatt -Winterflower/question-generator -ThankGod/image-classifier -Ali-C137/Motivation-Letter-Generator -BilalSardar/QuestionAndAnswer -Clatonh/moth_or_butterfly -ccaglieri/convnext_diabetic -EuroSciPy2022/classification -edthecoder/chicken_breeds -EdBianchi/ThemeParksAccidents_RDF-SPARQL -Rida/Semantic-Segmentation -archietram/Medical_Image_Classifier -torfasonc/indianfoodclassifier -VishnuTransformer/TrOCR_Handwritten -gradio/translation -EstebanDC/UCS_JG -1nferno/Single_Digit_Detection -ysharma/test_diffusion -yusufani/TrCLIP -selld/bag_classifier -yakubashsd/oim_images -Aadhithya/Binance-Crypto-Tracker -ysharma/testing_stablediff -abdabbas/breast_cancer -ryancahildebrandt/all_in_one_sentence_embeddings -hallochen/firstspace -gradio/sentiment_analysis -autonomous019/Story_Generator_v2 -power2/JoJoGan-powerhow2 -williambr/StreamlitMapPractice -mm2593/AIDrivenUI-Maps -thelou1s/yamnet -Sangamesh/Cat_Dog_Classifier -mmaguero/Auto-Complete_Semantic -wasay/FaceRecogTUKL -Rick93/image_to_story_naive -dumitrescustefan/romanian-text-generation -iSpr/ksic_ai_coding_census2015 -leaner9988/Myspace -awacke1/TrapFlamenco -qgrantq/Girl_gradio -mya-mya/SengaFiller -charlesnchr/VSR-SIM -mxs2019/nba-player-classifer -Shredder/CONBERT -marksverdhei/word_definition -zzzzzz/text2image -captchaboy/FAST-ABINet-OCR -zhoucr/ai-koni -SmartPy/chaii-qa-task -Funbi/Chat2 -toasty-tobi/movie-recommender-deployed -Will-Wade/AnimeOrDisney -DorisB/streamlit-app -TM9450/Income_prediction -Intae/deepfake -DiViorg/categories_error_analysis -gilmar/health_insurance_app -Ammar-alhaj-ali/LayoutLMv3-Invoice -baaastien/Spleeter_and_ASR -gradio/image_classification -BilalQ/Stable_Difussion -amirDev/crowd-counting-p2p -rahulmallah/first-app -LeahLv/image-captioning-v4 -Avatarize/ECON -jaimin/Paraphrase -thinh-huynh-re/webrtc -Deepak107/Bottle_images -Sacso/FlowerDi -Armandoliv/t5-summarize-app-scitldr -Taoheed-O/spam_detector_app -iaanimashaun/glaucomanet -KhrystynaKolba/lviv_temp -Ariharasudhan/Kenya_food_classification -gradio/leaderboard -isyslab/NeuroPred-PLM -shayantabasian/shayantip -sadafpy/Malaria-Infected-Cell-Predictor -NotFungibleIO/Conversational-CSV -ccolas/EmotionPlaylist -mxxtnn/Predict_the_cost_of_medical_bills -Filimize/English_To_French -mxxtnn/Predict_medical_expenses -Mayanand/emotion-recognition -laurabarreda/genre_prediction -beau-badilla/faker-clf -Riakzu/parkinson_detection -slone/myv-translation-2022-demo -MarioWasTaken/BackroomsIG -pksx01/Audio-MNIST -bzd4576/sovits-sin -GAITOR/MLMondayDemo-Week1 -BohdanPytaichuk/art-video-generation -ESG-TFM-UV/ESG_API_BATCH -haseena97/malaysian_dessert -LoveAsAConstruct/Stable_Diffusion -Dana19/ImageRecognition_FaceCount -captchaboy/sendmespecs -merve/gradio-analysis-dashboard-minimal -BABASA/README -Taoheed-O/Titanic -Aashiue/speech_to_text -PaddlePaddle/jieba_paddle -PaddlePaddle/transformer_zh-en -3bdo7ss/Neutron_Chatbot -ner4archives/NER4Archives-analytics -anthonygaltier/text_2_price__real_estate -professorbrat/melanoma_classification -gradio/outbreak_forecast -Prodramp/multitabbedinterface -nightcap79/nightspace -HarryLee/Key2Text -scikit-learn/tabular-playground -y0himba/SDWEBUI -jjjonathan14/model-assist-labeling -XPMaster/Covid19_ICU_prediction -pablo1n7/iberianGAN -simonduerr/smilesdrawer -awacke1/Git-GPG-Git-Actions-01-GraphViz -awacke1/GithubAction02 -Joabutt/waifugeneration -Jack000/glid-3-xl-stable-classifier 
-codebox/diffuse-flood -domenicrosati/scite-qa-demo -jvahala/dummy -sneedium/pixelplanetocr -sneedium/captcha_pixelplanet -gradio/blocks_outputs -gradio/hello_blocks -gradio/generate_tone -gradio/audio_debugger -gradio/blocks_joined -gradio/hello_world_3 -gradio/image_classifier_interface_load -gradio/calculator -gradio/blocks_essay_update -gradio/streaming_stt -gradio/hello_login -gradio/kitchen_sink -gradio/zip_files -gradio/interface_parallel_load -gradio/reversible_flow -gradio/video_identity -gradio/concurrency_with_queue -gradio/stream_frames -gradio/sepia_filter -gradio/stock_forecast -gradio/blocks_style -gradio/zip_to_json -gradio/reverse_audio -gradio/ner_pipeline -johngoad/stock_forecast -kornia/image-registration-with-kornia -annt/mrc_uit_squadv2 -gigant/slideshow_extraction -operance/revit-id-to-guid -XPMaster/KSA_Weather_Prediction -ForBo7/FloodDetector -stogaja/xpathfinder -marcderbauer/vice-headlines -cgunadi/CDSS_Demo -Zayn/Image_Captioning_Using_Vision_Transformer_and_GPT-2 -yetoneful/README -soyasis/how-to-generator -tancnle/recycling-ai -mbarnig/Mol_mer_e_chineesescht_Bild -sinian/nihao -tamirshlomi/pets -freddyaboulton/saymyname -binarycache/medical_imaging -rkrstacic/Chatbot-integration-built-on-processes -Chenyuwen/playground -MS19/TestSpaceFastAI -dansome/Document_Summarization -tru2610/ImageClassification -chcomet/cholec80-position-encoder -pustozerov/poc-handwriting-ocr -aaronstaclara/towards-financial-inclusion -jphwang/architectural_styles -Kok4444/meme_kok -williambr/SteamlitMapPractice2 -scite/README -Xhaheen/ASR_Whisper_OpenAI -theodotus/buffered-asr-uk -cupkake14/bean_vit_classifier -AIZ2H/03-Streamlit-Video-ASR-NLP -salaz055/leafclassification -AIZ2H/07-GraphViz-PyDeck-Map-AIUIUX-Demo -AIZ2H/08-Search-Streamlit-Session-State-QueryParameters -AIZ2H/Gradio-Multilingual-ImageToOCR -raees/Riot-Detector -suresh-subramanian/bean-classification -betterme/mestreamlit -Westwing/Seasonal_classifier -mun-ahmd/HairType -simulate-tests/unity-test -awacke1/3D-Models-GLB-Animation-Gradio -rehanuddin/01-3DModel-GradioDemo -cadige/01-3DModel-GradioDemo -leilaglewis/01-3dModel-GradioDemo -Jonni/01-3DModel_Gradio -texantech/01-3DModel-GradioDemo -awacke1/02-Gradio-Art-From-Text-And-Images -leilaglewis/02-Gradio-Art-From-Text-And-Images -Jonni/02-Gradio-ArtFromText -rbalacha/02-Gradio-Art-From-Text-And-Images -rehanuddin/02-GradioArt-From-Text-And-Images -cadige/02-Gradio-Art-From-Text-and-Images -rbalacha/03-Streamlit-Video -awacke1/03StreamlitVideoASRNLP -rehanuddin/03StreamlitVideoASRNLP -djgoettel/02-Gradio-Art-From-Text-And-Images -rajkumar1611/01-3DModel-GradioDemo -awacke1/04-Gradio-SOTA -rbalacha/04-Gradio-SOTA-Seq2Seq -rehanuddin/04-Gradio-SOTA -cadige/04-Gradio-SOTA -leilaglewis/04-Gradio-SOTA -Jonni/04-Gradio_SOTA -Jonni/05-QandA-from-textfile -rajkumar1611/02-Gradio-Art-From-Text-And-Images -manishjaiswal/11-Gradio-Text-Sequence-Few-Shot-Generative-NLP-Images-Demo -daffyshaci/bert-keyword-extraction -sneedium/dvatch_captcha_sneedium_old -mlkorra/YT_Captions_Generator -msc/artrash -diegoakel/kitchenorbedroom -rkp74/MCQ-Generation -cmudrc/cite-diversely -Vishwas1/BloomDemo2 -Tianze/play -Greencapabara/OpenAI-whisper-with-upload.no-time-limit -tadeyina/Bean_Leaves -aldrinjenson/harry-potter-character-classifier -alexbakr/aircraft-detection -Anonymous-123/ImageNet-Editing -GitHunter0/100_prisoners_problem_app -DarthVaderAI/Diffusion-Art -iqbalc/Speech-to-text-demo -sloppyjoe/doodoodetective -freddyaboulton/chicago-bike-share-dashboard -elexxuyafei/chart927 
-nightfury/Stable_Diffusion -eswardivi/Bark_Texture_Images_Classification -qwebeck/echo-net-dynamic-segmentations -FahadAlam/Question-Generator -awacke1/ChatbotBlenderBotStreamlit -p208p2002/chinese-sentence-checking -santrox/phcspmedpredic -awacke1/PyGame2D -awacke1/AIArtReviewStreamlit -jie1/jie_test4 -awacke1/PerceiverEmotionClassifier -paragon-analytics/Employee-Turnover -moadams/rainbowRainClassificationAPP -nightfury/SD-Inpaint-Touch -tomaseo2022/Enlace-Youtube-a-Texto -bkhalaf/testapp -bryantmedical/oral_cancer -salashvijay/audiototxttosentiment -RachAmm/Wav2vec-vs-Whisper -hvtham/text_mining_21C11027 -gradio/NYC-Airbnb-Map -PKaushik/humandetect -AI-Zero-to-Hero/02-H5-AR-VR-IOT -AI-Zero-to-Hero/03-GR-AI-Text2ArtGenerator -AI-Zero-to-Hero/04-GR-Seq-2-Seq-QA-Auto-Gen -AI-Zero-to-Hero/07-SL-Chatbot-Blenderbot -AI-Zero-to-Hero/08-GR-Chatbot-Blenderbot -Damstra/safety-hazard-classifier -AI-Zero-to-Hero/10-GR-AI-Wikipedia-Search -tsaditya/GPT-Kalki -NimaKL/spamd -awacke1/CB-SL-Chatbot-Blenderbot -peekaboo/Chatbot_Streamlit -jharrison27/gradio-blenderbot -SriniJalasuthram/SJ-01-H5-Play-Canvas-Sim-Physics -venz/AW-01-H5-Play-Canvas-Sim-Physics -sparswan/AW-01-H5-Play-Canvas-Sim-Physics -SShaik/SS-01-H5-Play-Canvas-Sim-Physics -raghung/Play-Canvas-Sim -awacke1/AW-02-H5-AR-VR-IOT -SriniJalasuthram/SJ-02-H5-AR-VR-IOT -skaur20/AW-02-H5_AR-VR-IOT -SantoshKumar/SD-H5-AR-VR-IOT -dlenzen/AW-02-H5-AR-VR-IOT -sparswan/AW-02-H5-AR-VR-IOT -SShaik/SS-02-H5-AR-VR-IOT -starbotica/llamaoalpaca -awacke1/AW-03-GR-AI-Text2ArtGenerator -SantoshKumar/03-SD-GR-AI-Text2ArtGenerator -venz/AW-03-GR-AI-Text2ArtGenerator -dlenzen/AW-03-GR-AI-Text2ArtGenerator -SShaik/SS-03-GR-AI-Text2ArtGenerator -sparswan/SP-03-GR-AI-Text2ArtGenerator -vijv/AW-03-GR-AI-Text2ArtGenerator -awacke1/AW-04-GR-Seq-2-Seq-QA-Auto-Gen -sparswan/SP-04-GR-Seq-2-Seq-QA-Auto-Gen -vijv/VV-04-GR-Seq-2-Seq-QA-Auto-Gen -sparswan/SP-05-GR-NLP-Image2Text-Multilingual-OCR -SriniJalasuthram/SJ-05-GR-NLP-Image2Text-Multilingual-OCR -awacke1/AW-05-GR-NLP-Image2Text-Multilingual-OCR -purdue780/SS-05-GR-NLP-Image2Text-Multilingual-OCR -vijv/VV-05-GR-NLP-Image2Text-Multilingual-OCR -dlenzen/AW-05-GR-NLP-Image2Text-Multilingual-OCR -skura/sk-05-GR-NLP-Image2Text-Multilingual-OCR -SShaik/SS-05-GR-NLP-Image2Text-Multilingual-OCR -SriniJalasuthram/SJ-06-SL-AI-Image-Music-Video-UI-UX-URL -SShaik/SS-06-SL-AI-Image-Music-Video-UI-UX-URL -dlenzen/AW-06-SL-AI-Image-Music-Video-UI-UX-URL -awacke1/AW-06-SL-AI-Image-Music-Video-UI-UX-URL -sparswan/SP-06-SL-AI-Image-Music-Video-UI-UX-URL -vijv/VV-06-SL-AI-Image-Music-Video-UI-UX-URL -mchopra/VV-05-GR-NLP-Image2Text-Multilingual-OCR -gradio/queue-benchmark -richds/openai_whispercxd -krrishD/vasudevgupta_bigbird-roberta-natural-questions -krrishD/Helsinki-NLP_opus-mt-zh-en -krrishD/Helsinki-NLP_opus-mt-de-en -krrishD/google_pegasus-cnn_dailymail -esc-bench/ESC -awacke1/CardGame -Wootang01/stable_diffuser_1 -Tabaxi3K/FrankenFlic -vonewman/mon-application-de-traduction-de-text -Sharccc92/streamlit_in_web -Varadgundap/mov-rec-sys -gstdl/streamlit-startup-campus -mgama1/fresh_rotten_fruit -anubhavmaity/minima -andresgtn/sidewalk-semantic-segmentation -fedahumada/speech-to-text -firatozdemir/OAGen_Linear -felenitaribeiro/WhatArtStyleIsThis -shriarul5273/Kenyan_Food_Classification_Gradio -Tanaanan/ATK_OCR_Classification_FastAI -FahadAlam/Speaker-Diarization -Chatop/Lab10 -yiw/text -binhnase04854/Invoice-VQA -nichaphat/text_generation -Kelas/translation -jeffhaines/Ethical_Judgment_Generator -azizbarank/Turkish-Sentiment-Analysis 
-dfm42/orangeloaf -com48com/corndog -TheFriendlyNPC/French_Translation_Audio -Cam-Brazy/BearTest -tarun52/sentiment -awacke1/MindfulStoryMemoryMaker -hexenbiest/OceanApp -krisnadwipaj/interactive-dashboard -awacke1/NLPStoryWriterWithMemory -freddyaboulton/xgboost-income-prediction-with-explainability -aswinkvj/image_captioning -nickmuchi/FaceId-Corise-Project -Dana19/animal_classifier -andresgtn/face-id -micole66/mpk2 -ElAnon/emsai -anubhavmaity/bike-classification -mehzhats/dogbreedidentifier -Aomsin/Lab10_630510654 -brendenc/Keras-Reshape-Layers -cymic/Waifu_Diffusion_Webui -ElAnon/6btest -amydeng2000/hotpots -datasciencedojo/Chatbot -datasciencedojo/Hand-Keypoint-Detection-Realtime -datasciencedojo/Handpose -ElAnon/nsumr -ZiLaiJuan/GRADIO -zoheb/segformer_demo -nexhi1/Homework4_Fashion_MNIST_dataset -aaronbi/hw04 -Hexii/FoodVision -arkiitkgp/stablediff-demo -Gaurav261/medical_image_classification -albertvillanova/datasets-report -AISuperheroes/README -masoodkhanpatel/food21 -sneedium/endchan_captcha_solver -MEKHANE/3D_Ken_Burns -sourav11295/Movie_Recommendation -nikesh66/gramamrly -datalayer/README -ThankGod/face-id -mdnestor/YouTube-to-MT3 -Sanjar/airi_text_classification -SpindoxLabs/companies_NER -Sanjar/kun_uz_test -saas18/minidellayeni -alexeikud/identidog -datasciencedojo/Face-Mesh -awacke1/ExplainableAIForGovernance -datasciencedojo/Finger-Counting-Right-Hand -pyimagesearch/nmt-transformer -jie1/succ1 -Abuzariii/Text-Generation-with-GPT-2 -Funbi/Textgen -masdar/MedImage_Processing -datasciencedojo/AmericanSignLanguage-Detection -lexlms/README -rbarman/Openvino_Text_Detection -Wootang01/text_generator_three -deesea/safe_or_not -chadpanda/PEPE-Semantics -SalML/3dMoleculeViz -Ivanrs/harris-corner-detector -rafayqayyum/IdentifyDogBreed -ddiddi/bhasha.dev -anzorq/zedzek -ddiddi/LibreTranslateEN -jeffhaines/rice-disease-identifier -ReneGuo/cat_or_dog -ShkShahid/Auto-encoder_For_Image_Reconstruction -Ynot-ML/bird_recogniser -awacke1/CSVDatasetAnalyzer -ThankGod/movie-poster-diffusion -ishaal007/gadgets_classifier -furiosa-ai/ocr -taishi-i/nagisa_bert-fill_mask -nightfury/SD_Text-2-Image -ruiite/car_parts_detection -awacke1/AIZTH-CSVDataAnalyzer -Miya1337/NovelAI -Joom/Xtramrks -XGBooster/WhisperingDiffusion -AndrewRWilliams/video-whisper -javiermontesinos/whisper -Arnaudding001/FrenchTranslationAI -Anustup/NS_AI_LABS -segadeds/simpsons -DarrenK196/catvsdog -uragankatrrin/MHN-React -anisub/movie-poster-generator -andresgtn/find-the-next-james-bond -suresh-subramanian/crowdsourced-movieposter-demo -krrishD/stacktrace-QA -JavierIA/gccopen -Epitech/hand-sign-detection -paj/dubharv -zoheb/yolos_demo -Cvandi/remake -nloc2578/QAG_Pegasus -shweta44/IndianFoodClassification -sylphinford/imgxnr -archietram/Predict_Age_and_BMI_from_Images -johnslegers/bilingual_stable_diffusion -zbellay/job-automation -Rahmat/Phishing-Detect -mboth/klassifizierungDatenpunkte -EdwardHiscoke/piggie_or_potatoe -Epitech/UpscaleAI -Kavindu99/movie-poster -Ellight/Steady-state-heat-conduction-GANs-Vision-Transformer -archietram/Multiple_Object_Detector_PASCAL_2007 -maisarah1109/stock_prediction -Komeng/Stock_Prediction -GrantC/learning_goals_bloom -oscars47/Thinking_Parrot_Reading_Club -micole66/weird_normal -lubin1997/removebackground -masjc/agc -craigchen/alime-qa-a2q-generator -Maharani/stock_prediction -awacke1/RealTimeLiveSentimentAnalyzer -awacke1/RealTimeLiveSentimentGradio -awacke1/SNOMED-LOINC-eCQM -Epitech/Money-Recognition -ltomczak1/lungcancer_subclassifier -wesleygalvao/image_filtering -aziz28/hash_app 
-nikoirsyad44/hash-app -Sasidhar/information-extraction-demo -AISuperheroes/01ST-CSV-Dataset-Analyzer -AISuperheroes/02GR-ASR-Memory -AISuperheroes/03GR-Chatbot-Memory -AISuperheroes/05GR-Image-To-Multilingual-OCR -AI-Dashboards/Graph.NLP.Sentence.Similarity.Heatmap.KMeansCluster -AISuperheroes/07GR-NLP-Seq2Seq-AutoQA -AISuperheroes/08GR-KitchenSink-AIUIUX -AISuperheroes/10SL-RealTimeDSDashboard-Live-AIUIUX -Sunshine123/hezhendejiqiren -wiraindrak/summary-of-summarizer -Mojobones/speech-seperator-fixed -yms9654/translate -a5656789/ganqx -NirmalKumarC/CSV_Dataset_Analyzer_Copied -cadige/01ST-CSV-Dataset-Analyzer -cugiahuy/CB-GR-Chatbot-Blenderbot-AW03 -awacke1/03-AW-ChatbotBlenderbot -cadige/03GR-Chatbot-Memory -LandonBurlingham/04GR-StoryGen-Memory -awacke1/04-AW-StorywriterwMem -LandonBurlingham/05AW-OCR-Multilingual -Sudhansu/05GR-Image-To-Multilingual-OCR -avatar2k/image-ocr-ex5-multi-lingual -Sudhansu/07GR-NLP-Seq2Seq-AutoQA -LandonBurlingham/07-Seq2Seq -awacke1/08-KitchenSink -awacke1/09-AI-ImageMusicVideo -ahmedriad1/vehicle-identifier -xyha/sd -awacke1/WikipediaProfilerTestforDatasets -tomaseo2022/Traductor-Voz-de-Video -Ivanrs/image-matching-sift-orb -vs4vijay/playground -Akmyradov/chatbot_testing -kargaranamir/Hengam -guney/photo-with-code -michuS/overwatchClassificator -danupurnomo/fifa-2022-rating-prediction -qwe3107231/Real-CUGAN -shahp7575/what_coffee_machine -harmdevries/transformer_inference -awacke1/PrivateRealTimeDashboard -tdaslex/README -maraoz/trail-camera -iakarshu/lilt -shionhonda/sushi-diffusion -select-case/Can_You_Hug_the_Bear -cmudrc/wecnet -Yukki-Yui/White-box-Cartoonization -Norod78/PumpkinHeads -chansung/segformer-training-pipeline -SWHL/PaperEdgeDemo -marcusphantom/01-3DmodelDemo -topdeck-embeds/README -yfzhoucs/TinyLanguageRobots -salsasteve/catdog -AzizR/FaceRecognitionGradio -aziz28/fernet-app -aziz28/rsa-app -Kay2048/IKay -xiaye/Real-CUGAN -xiaoyi233/xiaoyi -pplonski/NLP-SpaCy-Mercury -yangtommy6/Computer_Vision_Project -HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo -AyakuraMei/Real-CUGAN -dingjian/luckpainting -Vishwas1/GPTStoryWriter -leslyarun/grammar_correction -gradio/blocks_flashcards_main -gradio/main_note_main -gradio/model3d_component_main -gradio/chatbot_component_main -gradio/hello_login_main -gradio/pictionary_main -gradio/leaderboard_main -gradio/sentence_builder_main -gradio/musical_instrument_identification_main -gradio/video_identity_main -gradio/neon-tts-plugin-coqui_main -Solis/Solis -android16/facial-recognition -GV05/text-emotion-detector -Danielsun888/pocSearch -easyh/NerDH_Visualisierer -epsilonator/euclidean_distance -syedislamuddin/base_editors -AkashKhamkar/QnA-generator -uRmario/arin -unb-lamfo-nlp-mcti/README -jknero/ppggpt -jknero/rembackkk -Avkash/Satellite_Segmentation_Prediction -uranus0516/uranus -tumuyan/wavlm-speaker-verification -Deepak107/NSFW-Detection -alaka/tinder-data-explorer -leslyarun/fbeta_score -thliang01/Dogs-V-Cats-Classifier -rajesh1729/mercury-jupyternotebooks -matteopilotto/foodvision_mini -giulio98/codebleu -konol/konmol -AkiKagura/Marco-Generation -PICOF/YusamiAlchemy -FathomNet/UWROV_Deepsea_Detector -DimaKoshman/MovieRecommender -0xcyborg/minter_latest -gradio/sine_curve -mirodil/bird-classifier-with-resnet18 -on1onmangoes/mango1 -ysharma/text_to_joke -billsar1912/stock-prediction -AkiKagura/Marco-Generation-Img2img -Egrt/GCycleGAN -huggingface-projects/README -Omdena-Milan/milan-chapter-agrifoods -bharathraj-v/audio-content-analysis -MarcusAGray/demo -ikram9820/sd_dreambooth-20im -Ideon/Samay 
-DataNerd2021/song_recommendation_app -elonmuskceo/shiny-cpu-info -consciousAI/question_answering -Dana19/outfit_color_guide -brooksjordan/pet-classifier-tutorial-fastai -humblepenguin/mental-health-chatbot -VishalF5/Text_Similarity -terrierteam/retrieve -terrierteam/monot5 -consciousAI/question_generation -Ishaan1510/deep_learn -freddyaboulton/inference-endpoint-dashboard -superdatas/LICENSE -lakshmi324/complaintBox -gradio/dashboard_main -ronvolutional/sk-node -Swan608/Spaceair -zebahgr/Credit__app -planet10/semantic-search -Msninmx/shamzam -Rongjiehuang/GenerSpeech -jonathang/dob_breed -weiren119/AudiogramDigitization -gradio/multiple-api-name-test -SLAYEROFALL3050/AudioGenerator -niallguerin/iris -joheras/glove-relations -lakshmi324/BankOcr -Kr1n3/Fashion-Items-Classification -Catmeow/Count_objects_in_picture -einanao/cobra -AIZero2Hero4Health/1-ASRLiveSpeechRecognition-GR -AIZero2Hero4Health/2-BiomedEntityRecognition-GR -AIZero2Hero4Health/3-ChatbotBlenderbot-GR -AIZero2Hero4Health/4-ImageSimilaritySearch-SL -AIZero2Hero4Health/5-ImageToLineDrawing-GR -AIZero2Hero4Health/8-NLPSimilarityHeatmapCluster-SL -AIZero2Hero4Health/9-Seq2SeqQAGenerator-GR -AIZero2Hero4Health/7-ClinicalTerminologyUIUX-GR -AIZero2Hero4Health/5-QuantumStreamlitAIDashboard-SL -Kunal7/squats-analysis -Brij1808/Blog_Generator -FarziBuilder/Last -jamesjohnson763/ASRLiveSpeechRecognition-GR -jamessteele/ChatbotBlenderbot-GR -apratap5/Z-3-ChatbotBlenderbot-GR -vslasor/VLS3-ChatbotBlenderbot-GR -ashishgargcse/ClinicalTerminologyUIUX-GR -Robo2000/ClinicalTerminologyUIUX-GR -jamesjohnson763/ClinicalTerminologyUIUX-GR -apratap5/Abhay-ASRLiveSpeechRecognition-ZR -FarziBuilder/WORK -apratap5/Abhay-2-BiomedEntityRecognition-GR -apratap5/Abhay-3-ChatbotBlenderbot-GR -vslasor/VLS7-ClinicalTerminologyUIUX-GR -vslasor/VLS10-VideoAudioSummarizer-GR -vslasor/VLS1-ASRLiveSpeechRecognition-GR -rexwang8/qilin -Soumen/image_to_text -johnslegers/ImageProcessService -simonwalo/Histwords-Webapp -divano/test -Ayemos/highlight_text_based_on_surprisals -rondel/summarizer_app_test -abidlabs/stable-diffusion-v1-5 -KayO/cats_vs_dogs -motionsh/BioMAT -jonathang/dog_breed_v2 -Classly/README -Joabutt/test -eradhea/chat_voice_spanish -Hexii/Cat-Breed-Classifier -Alexxggs/ggvpnewen -mgonnzz/retinoblastoma-classification-app -fhatje/glomseg -pankajsthr/test-stable -DiegoLigtenberg/realtimespeech -ishaal007/CarDamageDetection -hizkifw/clipbooru -silvesterjk/Talking_Yak_STT -ML-Demo-Challenge/test -Tipbs/wikipedia_summary -Frorozcol/mariposas -camilacorreamelo/medicalDetection -JayKen/propertySearch -paragon-analytics/ResText -dejinlee/art -haung/clear -victor/spaces-collection -wangyanbing1989/text2image -zestyoreo/vtryon -HarshulNanda/HARM_ML_web_app -j10sanders/rubber-duck -HarshulNanda/HARM_ML -renatotn7/EspacoTeste -evaluate-metric/mase -mabusdogma/facerecognition -renatotn7/teste2 -elplaguister/Yuuka_TTS -awinml/dl-optimizers -leftbyte/sweetOrSavory -Ivanrs/test -evansdianga/malaria -silvesterjk/stt-sematic-measure -dammasimbung/Cardiovascular-Detecting-App -mattclifford1/IQM-VIS -shasaurabh/bird_forest -andrewburns/flat-icons-v1 -Svis/3d_image_generator -HarshulNanda/HARM_ML_App_ludwig -kabita-choudhary/summary -Ngadou/NLP -Ngadou/Social_Engineering_Detection -Jack003/PixelDayAvatoon -nurrahmawati3/deployment-hck2 -fadhilsadeli/deploy-hck2 -EMS-TU-Ilmenau/deepest-demo -abidlabs/en2fr -shravankumar147/cat_or_dog -arnavkartikeya/SCRIPture-final -shravankumar147/IsCat -awacke1/ClinicalTerminologyAISearch -CM-15/NLP-demo 
-anonymousauthorsanonymous/spurious -clement13430/lab1_iris -taniaa/visual -Armaliltril/qbee -STEM-academie/Kennismaking_AI_Foto_Herkennen -Plaban81/English_To_hindi_Language_Translator -EvanMarie/cats_n_dogs -EvanMarie/faces_three -twoeyedraven/COVID-Fake-News-Detection -ianpan/diabetic-retinopathy -guostonline/FDV-dashboard -EvanMarie/hot_or_not -sangamsingh21/EDA_usaccidents -tumuyan/speaker-verification -yvonnekr/parkingdetector -DrGabrielLopez/fractal-generator -ayaanzaveri/detr -csaguiar/stable-diffusion-pt -Daffa/image-classification -andr290606/HD-test-run -micole66/ugly-or-sexy -sanderland/recipe-gen -saisriteja/signlangauge -Omar7Hany/Conv_Kickstart -windowcleaningtoronto/README -Awesimo/jojogan -zxw/clueai_demo -AlexZou/SCUTAUTO210b -lvwerra/in-the-stack -Tartan-Ishan/Expression_Classifier -langdonholmes/piilo -freddyaboulton/fastapi-request -Laughify/Among_Us_Logic_AI_Generator -forklift-app/forklift-images -EstebanDC/EP_settlement -Pranjal2041/SemSup-XC -lgabrielb/fruit_classifier -grofte/zero-shot-labse -Testys/diabetes-app -abidlabs/Voice-Cloning -arbml/whisper-tiny-ar -leoberniga/Write-Stories-Using-Bloom -Datasculptor/sd-prism -senger/AI-Text-Generator -GIanlucaRub/Titanic -AlexZou/Deploy_Restoration -Xixeo/Text-to-Music -AIZerotoHero-Health4All/01-Speech2Text2Speech -AIZerotoHero-Health4All/03-BiomedNER-1117-Gradio -Robo2000/ClinicalTerminologyAISearch-GR -alecmueller/12-ChatBotBlenderbot-GR -kael558/Interpolation -multimodalart/xformers-here-we-go-again -issam9/yt-transcribe-and-search -weijiang2009/AlgmonTTSService -akhaliq/space-that-creates-model-demo-space -Cyntexa/README -incolor/facial_expression_classifier -cdgranadillo/summaries_mT5_multilingual -bigslime/stablediffusion-infinity -RamAnanth1/whisper_biomed_ner -stratussox/yolov5_inference -Xiaohan/NLP -erbanku/lama -vincent1bt/Line_Art_Colorization -motyar/openjourney -Jh137/Jh137-ai-painting -AnnonSubmission/xai-cl -pranked03/amazon-product-comparer -akhaliq/Nitro-Diffusion2 -rscolati/titanic -vaibhavsharda/semantic_clustering -theMonkeyGuy/monkeyclassifier -Copy233/copy -lohitkavuru14/anpr-yolov7 -santoshtyss/QuickAd -taichi/pizza-net -Rohith33/BearClassifiyer -AdithyaSNair/Diabetes_analysis -aslasdlkj/Podfusion -AhmedTambal/malaria -yongjae/whisper-webui -newsteam/stable-diffusion-img2img -vladocar/openjourney -Rutakate21/anything-v3.0 -hxu296/Texify-Youtube -chinmaysharma1020/malware_classification -karay/diar_speech -bumsika/ai-bros-diffusion -victor/test-docker -datasciencemmw/README -glyszt/vt -TheHouseOfAI/ActionRecognition -Froleptan/stablediffusion-infinity -gabortoth74/openjourney -Senpaisora6/dreambooth-training -AJRFan/dreambooth-training -bsenst/keras-image-classifier -Eightone3D/anything-v3.0 -alaaawad/image-to-text-app -kyotoyx/medical-diagnosis -breadlicker45/galactica-1.3b-contrastive-sampling -TheThanos/anything-v3.0_krn -EricA1/openjourney -USERNAME0/abcdefghi -Wootang01/text_generator_four -Wootang01/text_generator_five -Wootang01/text_generator_six -AlexKozachuk/anything-v3.0 -vntonie/anything-v3.0 -oronird/sign_translate -huai/chinese_stable_diffusion -PeterQUB/Berries -catontheturntable/Ghibli-Diffusion -akhaliq/dreambooth-training -breadlicker45/TextGen -clem/dreambooth-training_v2 -bino-ocle/audio-intelligence-dash -datasciencemmw/ContextXLA-demo -jimr1603/galactica-base-api -datasciencemmw/ContextXLA-beta-demo -elijahcilfone/dreambooth-training -HimeFuji/How_to_laugh -Xhaheen/Face-Real-ESRGAN -abdullah/Voice-Cloning -Peter1/AnimeGANv3 -haya44433/anything-v3.0 -Aleqsd/openjourney 
-evoss/NLP_text_analyzer -darkCat/Anime-image-classification -marktrovinger/whisper-translate -dbredvick/whisper-webui -ecuador123456789/ejemplo1 -eatsleepeat/FastHelloWorld -CassBunny/anything-v3.0 -Aphrodite/AIChatBot-SL-Chatbot-Blenderbot -ss123wq/demucs -anzahabi/MuhammadGarinAnzahabi_HCK002 -vonbarnekowa/stable-diffusion -OmegaYuti/anything-v3.0 -intelliarts/Car_damage_detection -segestic/paraphraseArticle -empy-ai/Token-classification -Xhaheen/stable-diffusion-21 -Mayanand/Image-Captioning -omidreza/speechtopictogram -DonnyChuang/test_generator -ELam/text_generator -jaklin/text_generator -whale-shark/text_generateor -dipperpines/text_generator -MarcyWu/text_generator -P1ne4ppl/Text_generator -Matthew1917/text_generator -EllaTsoi/text_generator -Swying/text_generator -Tommyyyyyy-20/text_generator -billyyyyy/text_generator -Andy0409/text_generator -Katyyy/text_generator -blossom618/text_generator -12Venusssss/text_generator -HANOGHTIC/text_generator -juntsu/Text_generator1 -Kavinloll/text_generator -guohuiyuan/Real-CUGAN -fkunn1326/Image-search-using-CLIP -zlpnvrtnk/dvatch_captcha_sneedium_fork2 -NickyGenN1/ImageClassification -aliabd/non-interactive-dataframe -gabrielgmendonca/chilton -cmudrc/truss-data-explorer -eskayML/Salty-Conversational-Bot -eskayML/English-to-French-Translation -krithiksai/weather_based_on_tree_photos -ConvLab/README -joushe/moe-tts -sklearn-docs/hierarchical-clustering-linkage -tiedaar/economics_summary_grader -tdros/zoafind -cmudrc/3d-printed-or-not -VivianShi/Coconet-Pytorch -yellowdolphin/happywhale-demo -Kartik2192/Abcd -SarmadBashir/REFSQ2023_ReqORNot_demo_app -Superintelligence1130/Recursive_self-improvement_system -raphael0202/logo-clip-demo -cuiltheory/stable-diffusion-2-base -MarcCote/TextWorldExpress -YBiryukov/AncientEgyptianHieroglyphsRecognition -DRAGSclub/README -mowang/mowang -carisackc/Clinical -Foremost/NER -jatinshah/hn-search -lvkaokao/dreambooth-training -Abeer123/Pokemon_Digimon -ecody726/stable-diffusion -dovanquyet/PsyPlus -os1187/contract-review -os1187/code-explainer -cmudrc/kaboom -os1187/news-summarizer -MirageML/lowpoly-office -MirageML/fantasy-sword -MirageML/fantasy-scene -MirageML/lowpoly-cyberpunk -ericjuliantooo/paraphrase -lakshmi324/Vehicle_Damage_Detector -moro23/sentiment-anlysis-app -jonaskaszian/boardgame-recognizer -olyolik/book_genre -eskayML/AUTOMATIC_SPEECH_RECOGNITION -ysharma/GPT-JT-copy -Arcader7171/positive -vialibre/edia_lmodels_en -UMich-siads699-fa22-spotamood/spotamood -lavanyakumaran31/resume_parser_app -santhosh97/gretel-image-generation-demo -nightfury/CLIP_Interrogator_for_SD2_Img2Prompt -awacke1/LionImageSearch -Chrysoula/voice_to_text_swedish -belgrano91/SentenceRecognizer -YeaHi/woman-diffusion -abidlabs/middle-ages-islamic-art -Frorozcol/dreambooth-training -wzsxb233/ALTESOL_Language-Technology-ResearchGroup_Faceia-Peter-Shamini -bryanmildort/stockpricepredict -cmudrc/wecnet-api -sbavery/pseudometer -MarcNg/fastspeech2-vi-infore -Hydrangea/myProject -Mikey211/Project -nurrahmawati3/churn -muhammadjulz/frontend-telco-churn -garasense/P2ML1_Telco_Customer_Churn -vovahimself/jukwi-vqvae -mandar100/chatbot_godel_large -Campfireman/whisper_lab2 -akhaliq/tpkify-v1 -sasha/Draw-Me-An-Insect -dhruvshettty/dutch-whisperer -gradio/altair_plot_main -ieftimov/confusingflags -ToniDan/DanToniGPT2FormalInformal -arjunpatel/best-selling-video-games -LeeroyVonJenkins/cat-dog-classifier -songallery/my -TYH71/gradio-ml-skeleton -AIGuardians/SummarizeWikipediaDocument -lakshmi324/Fake_airpods_Detector 
-Nathanotal/GuessTheTranscription -osanseviero/whisper_demo_builder -danielbellon/ml-techniques-project -osanseviero/whisper-medium -svjack/Entity-Property-Extractor-zh -MLT-2022/Project -svjack/Translate-Chinese-to-English -Inthv/NER -BLACKHOST/Banner -supermy/speech-to-image -arnavkundalia/AppleScabDetection -wldmr/punct-tube-gr -carterw/evolutionary-playlist-builder -akhaliq/paint-by-example -Envyyyy/vehicle_detection -Anilegna/Colour-Personallity -starship006/mini_shakespeare -A666sxr/Genshin_TTS -brcprado/AutoML_MODEL_TRAINING -Abdulkader/HumanMotionsDetector -BrunoHempel775/Byzu -ML701G7/taim-gan -Mohammednabil/Control_The_world -whispy/Whisper-Ita-V2 -ieftimov/pasta-everywhere -delmaksym/Huggy -oscars47/thinking_parrot_reading_club_redux -os1187/gpt2-chatbot -brcprado/removeBG -ritwikbiswas/incoder-complete -Yuras/CorpusBy -etweedy/pet_breeds -MiloSobral/PortiloopDemo -jojoanne/cuisinerecommendation -LAKSJAKLCNDWNVWHEFKJH/asdfghjkl -datainsight1/Medical_Prescriptions -WRH/wrhwang_foodvision_mini -pip64/zaglyt-api -segestic/CovidPredictiongr -osanseviero/ChatGPT_MANY_LANGS -NeoonN/Video_whisper -privatewins/nitrosocke-redshift-diffusion -hamza50/rhymethyme -Wenjing2/ChatGPT_HF -sasaro/webui -timmostone/stabilityai-stable-diffusion-2 -parkermini/general -YeOldHermit/Linaqruf-anything-v3.0 -Hxxx/finding_friends -Patt/demo_gradio -Shivraj8615/Huggy -Ruilmon/hakurei-waifu-diffusion -TornikeO/dreambooth-training -Markfm/webui2 -RunningYou/mediapipe_inpainting -Aleistair/anything5 -VaneM/ChatBot-Text-to-Speach-es -Akseluhr/whisper-sv-SE-auhr -AIZerotoHero-Health4All/02-ClinicalTerminology -AIZerotoHero-Health4All/03-Datasets -VaneM/text-to-image-es -muhtasham/whisper-demo-tj -Yilin98/Whisper-Small-Swedish -harry18456/TestChatGPT -shihabulislamarnob/AI-Image-Enlarger -Shokunin/runwayml-stable-diffusion-v1-5 -Txandim/runwayml-stable-diffusion-v1-5 -YuhangDeng123/Whisper-online -spark-ds549/fal2022-videoanalysis-v2 -Txandim/stabilityai-stable-diffusion-2-1-base -tayislost/lambdalabs-sd-image-variations-diffusers -Txandim/mrm8488-bloom-560m-finetuned-sd-prompts -zhukovsky/JorisCos-DCCRNet_Libri1Mix_enhsingle_16k -hs1l/Date -Enderfga/mtCNN_sysu -Patt/demo_hf -Datasculptor/stabilityai-stable-diffusion-2-1 -yeonn/text_generator -JennyS/text_generator -wootang03/text_generator -Badaleeloveashley/badaleeloveashley -Kellyasrfuhioj/stydbdcg -SasunNN/SASN -4H17Joycelyn/text_generater -xfbhsdfndjndghz/Ultraman -MoonMoonMoonMoon/text_generator -nicole1214/text_generator -Aaaad/Dddde -4F22/text_generator -4f20/text_generator -bunnyg20081061/world2 -seecuecue/text_generator -CosmicSage/Linaqruf-anything-v3.0 -jlondonobo/whisper-pt-demo -pierreguillou/whisper-demo-portuguese -rizam/rakeeb_text-classification -CosmicSage/Linaqruf-anything-v3.0pruned -humeur/Swedish-Whisper-from-Youtube -hedronstone/whisper-large-v2-demo-sw -rizam/literature-research-tool -dawggydawg/stabilityai-stable-diffusion-2-1-rpg -amir0900/s -almino/WhisperYoutube -TornikeO/dreambooth -Txandim/nitrosocke-Arcane-Diffusion -rishikesh/twitterEngagementPredictor -zeynepgulhan/whisper-medium-cv-tr-demo -geninhu/whisper-vietnamese -etweedy/Find_objects -nbiish/ghostDance -Anish13/fruit -DeividasM/whisper-medium-lt -lingdufreedom/IDEA-CCNL-Taiyi-Stable-Diffusion-1B-Chinese-v0.1 -rpa45/ai_hands_classifier -Toraong/color_textual_inversion -prosiaczek/webui -kaidorespy/CompVis-stable-diffusion-v1-4 -marcoruizrueda/flax-midjourney-v4-diffusion -bradarrML/diffuse-the-rest -bradarrML/Diffusion_Space -softcatala/whisper-demo-catalan 
-bradarrML/magic-diffusion -bradarrML/runwayml-stable-diffusion-v1-5 -antinous/dreambooth-training -alaaawad/CLIPSeg_x_SD -Javtor/Biomedical-topic-categorization -YESO/YESOdreambooth -User1342/Bubble-Check-In -HewDew/Linaqruf-anything-v3.0 -cfr26575/webui -nightfury/dreamlike-art-dreamlike-diffusion-1.0 -ktonggg/webui -SaintPepe/google-ddpm-church-256 -JUNGU/emotion-ko-state -BasalGanglia/stabilityai-stable-diffusion-2 -esafwan/esencb-text-image -gorkemgoknar/movie_chat_gpt_yourtts -Javtor/Biomedical-topic-categorization-2022only -Taha07/pneumonia-detection-WebApp -kazumak/sdspace -reganagam/TB-Project -kazumak/webui -koby-Jason/Music_recommend -shriarul5273/Yolov7 -zncook/chatGPT -jirufengyu/face_recognition -Jour/Translation-to-small -Jour/Translate-bloomz -Kontrol/plasmo-food-crit -ygangang/deoldify -van4oo/eimiss-EimisAnimeDiffusion_1.0v -gigant/romanian-whisper -gorkemgoknar/gptChatYourTTS -gorkemgoknar/movie_chat_gpt_yourtts_fileinput -JUNGU/remove-bg-edit -Yuyang2022/Translation_yue_to_any -MaksTim/FirstTimi -oluyemitosin/Honda_or_Mercedes -deepdml/whisper-demo-mix-es -segestic/ArticlePara -bradarrML/EleutherAI-gpt-j-6B -akhaliq/sd2-dreambooth-ClaymationXmas -tiagones/nitrosocke-spider-verse-diffusion -Vavavoom/stable-diffusion-depth2img -anuragshas/whisper-large-v2-demo-hi -emre/garanti-mybankconcept-img-gen -weijiang2009/AlgmonOCRService -breadlicker45/badapple -rrichaz/TTS-STT-Blocks -Umarpreet/argument_gate -victor/autotrain-victormautotraindreambooth-FS8JGUBRYX-2450175922 -mimimibimimimi/ACertainModel -kaleidophon/almost_stochastic_order -scikit-learn/blog-example -dimaseo/dalle-mini -arpagon/whisper-demo-large-v2-es -Foti/webui -Wootang01/chinese_generator_translator -Wootang01/chinese_translator_generator -herberthe/nitrosocke-Ghibli-Diffusion -PrismaticAI/MangaMaker -LongBeattz/runwayml-stable-diffusion-v1-5 -Wootang01/text_augmenter1 -Wootang02/text_generator1 -GadaiEngin-GBOX/GadaiEngineNeo-A -AlekseyCalvin/dreambooth-training3 -hoang1007/wav2vec2 -RobinZ2021/remove_background -SpacesExamples/test-docker-go -shripadbhat/whisper-bulgarian-demo -AnnaPalatkina/fine_grained_SA -Heckeroo/waifu-diffusion -richardblythman/stabilityai-stable-diffusion-2-1 -RRVSS/SVS -project-ori/README -erty9/webui -AlekseyCalvin/Make-Putin-Queer -danielcodex/first-prod -bhn4477/Car_orientation -Duckymalone/dreamlike-art-dreamlike-diffusion-1.0 -simonl0909/whisper-cantonese-demo -AlekseyCalvin/Make_Putin_Queer_Please-use-trp-token -srivarshan/argumentation-quality-analyzer -carisackc/ClinicalNoteDemo -Apk/anything-v3.0 -Nortrom8844/summarize-long-text -Abdulkader/Abdulkader-T5-MedRepAnalyzer -awacke1/BigCodeStackSearch1215 -awacke1/Clinical.Terminology.Search.LOINC.Panels.SNOMED.ICD.OMS -rrichaz/DataAnalyzer -Robo2000/DatasetAnalyzer-GR -kabita-choudhary/audio -misza222/extractframe -qwieug123467/Linaqruf-anything-v3.0 -sgangireddy/whisper-largeV2-mls-spanish-demo -vulkano/yulet1de-hentaidiffusion -Sphila/Sphila-Diffusion -Shypanties22/FantasyMe -xkhaloda/Envvi-Inkpunk-Diffusion -cm107/agv-demo -neuralmagic/cv-yolo -power2/powerswp -PrajwalS/GODEL-Demo-nxt -Yuelili/RealNagrse -pragnakalp/BERT_based_QnA -ben91/Mush_recognition -salmanmapkar/youtube-audio-video-diarizer-and-transcriber -Shiry/whisper-demo-hebrew-large -pierreguillou/extracao_das_palavras_frases_chave_em_portugues -tom-beer/birds-israel -datasciencedojo/Brain_Stroke_Prediction -JLD/docker-hello-world -machinelearnear/dreambooth-quino -Innoglean/README -stale2000/DnDItem -nakas/ChessGPT_Stockfish -KbL19/invokeAI 
-Sathrukan/Bird_classification -Huniu/niuniu -awacke1/SKLearnSkopsTabularEditor -nateevo/docu-searcher -akhaliq/Marvel_WhatIf_Diffusion -fkunn1326/Kokohachi-NoAI-Diffusion -yipinggan/Predict_progressive_collapse_resistance_with_DCN -belectron/Seen-Zan5 -codesue/streamlit-tfx -EyeSeeThru/anything-v3.0 -TangibleAI/mathtext -Buatong/Computing -benjaminperkins/yulet1de-hentaidiffusion.peoplegenerator -MMars/whisper-small-ar-demo -jacobbeckerman/Youtube-Whisperer -stasimus/p350-fastapi -ybelkada/blip-vqa-space -Xuechan/clothing_classifier -HugoSchtr/DataCat_Yolov5 -zfj41/webui -trysem/remini-free -camenduru-com/riffusion-api -macaodha/batdetect2 -yikaizhou/my-anything-v3 -DDD2222/webui -ItsJayQz/Classic_Telltale_Diffusion -ItsJayQz/Civilizations_6_Diffusion -freddiezhang/honor -MountLiteraSwd/stabilityai-stable-diffusion-2 -Guknadereve/stabilityai-stable-diffusion-2-1 -MountLiteraSwd/mount_ai_school1 -MountLiteraSwd/Linaqruf-anything-v3.0 -abidlabs/min-dalle-later -lterriel/YOLOv5_medieval_register -arbml/whisper-small-ar -arbml/whisper-small-cv-ar -Monteg/anything-v3.0 -hrishikeshagi/chatbot -hareshhecker/dreamlike-art-dreamlike-diffusion-1.0 -hrishikeshagi/MusicGenerator -maxspad/nlp-qual-space -sandeepsign/catordog -LiminalDiffusion/README -yo2266911/uma_voice -6shen7/Linaqruf-anything-v3.0 -ThomasSimonini/Deep-Reinforcement-Learning-Leaderboard -MountLiteraSwd/stabilityai-stable-diffusion-7 -HungHN/appsgenz-openjourney -darkroonn/hakurei-waifu-diffusion -unilux/ASR_for_Luxembourgish_w2v -swhyuni/Digital-Financial-Advisory-for-Mutual-Funds -ikun12/ikun -ayapoooooo123/Balloon_Diffusion -bryanlincoln/bryan-sd1.5-v2 -rwitz2/lambdalabs-dreambooth-avatar -Joeythemonster/MGX-Midjourney-v4 -DrHakase/word2img -Joeythemonster/magic-diffusion -Joeythemonster/finetuned_diffusion -DrishtiSharma/Whisper-Serbian-Transcriber -niks-salodkar/Age-Prediction-Demo -amitkot/he2en -YE01/saya-vits -xelu3banh/dpt-depth01 -xelu3banh/dpt-depth02 -redpeacock78/anything-v3.0 -Adam111/stable-diffusion-webui -ladiyusuph/potato_disease_classifier -LukeLB/shocking_guiness -schoemann/vanGogh_in_Kaiserswerth -XaSkeL/dreambooth -adpro/dpt-depth03 -adpro/dpt-depth07 -adpro/dpt-depth13 -adpro/dpt-depth15 -aaditkapoorbionlp/clinical_trial_match -Mikey211/computing -Hydrangea/computing -Mikey211/computing2 -oscars47/Thinking_Parrot_1.0.1 -Red54/convert-sd-ckpt -Banjoo/What_The_Bun -Gopal101/Netflix-Data-Analytics -amoghv/Fast-food-classifier -k2s0/talk-to-god -k2s0/ask-theologian -Rubens/semantic_similarity -oscars47/Thinking_Parrot_1.1.0 -neulab/tldr_eval -ayapoooooo123/openai-jukebox-1b-lyrics -716this/review-star-prediction-app -ChongCJ/fish -XPMaster/Motor_Vehicle_Collisions_NY -abc123desygn/Marvel_WhatIf_Diffusion -XPMaster/premium_insurance_prediction -Maaz66/GPT3-SPANISH-CHATBOT-PUBLIC -Anmol12385/chat123 -Blockinger/OVAChatGPT -nooji/GenieOnHuggingFaceSpaces -dream-textures/README -emre/emre-whisper-medium-turkish-2 -rzimmerdev/lenet_mnist -agiats/text_highlight_bccwj -azamat/twitter_geocoder -GIanlucaRub/DoubleResolution-Monitor -biodasturchi/esmfold_bio -verence333/InfoAnalyzer -ShreyashS/NLP-Sentiment_Analysis -rifkat/Uz-Text-Summarization -KaburaJ/binary_image_classification_app -Yusin/docker_test -remzicam/ted_talks_summarizer -HusseinHE/webui_blank -Datasculptor/AIart_sources_of_inspiration -yuanpei/robotinder-dev -awacke1/ArtNovelComicBookComposer -surendra962/ranking -BilalSardar/AutoML-Model-Training -lunarfish/furrydiffusion -MohamedRashad/Diffusion4Fashion -breadlicker45/breadlicker45-MusePy 
-VietVuiVe/PhanLoaiTraiCay -ysharma/test-flufflemarkednoser-cat -binery/Paddle_OCR -TheDustOfTimes/webui -sky1/sky -247Readings/README -hongtu/DeepDanbooru_string -Linann/DeepDanbooru_string -tommy24/test -abdalrahmanshahrour/ImageGeneration -Danky/dreamlike-art-dreamlike-diffusion-1.0 -awacke1/MadLibs -Ottermad/pet-classifier -rishikesh/365DataScience -uin-malang/README -MAli7319/Comment_Analysis -kweyamba/gradio-sentiment-analyzer -om-app/dmini -AIDHD/GrammarCorrector -qianyexingchen/Real-CUGAN -sky009/Qiliang-bart-large-cnn-samsum-ChatGPT_v3 -Bong15/Rewrite -sowas/stabilityai-stable-diffusion-2-1 -sushimashi/webui -Stearns/soar-d-rules-knowledge-inspector -Djdjeuu/MGX-Midjourney-v4 -Stearns/crl-demo -grey1227/experiment_terminator -Ninjagolover69/text_generator1 -Luna-Crestt/How_is_it_ze -genomics-england/anonymise_this -keremberke/clash-of-clans-object-detection -0x1337/vector-inference -NeoonN/Aurora -Stearns/Soar -esraa-abdelmaksoud/Dominant-Ad-Colors-Detection -ajashari/ajashari-ari-color -everm1nd/musika -ybbat/raven-or-crow -harshasurampudi/which_avenger -medici/dreambooth-training -thibobo78/stabilityai-stable-diffusion-2-1 -awacke1/AI-EIO-Editor -Neprox/like-it-or-not -irene-glez/whatsapp_chat_analyzer_streamlit -BreetheRun/mitchtech-vulcan-diffusion -pstan/webui1 -QINGFNEG/Real-CUGAN -phanstudio/webui -ChihChiu29/mychatbot -xiaozhong/Real-CUGAN -Pfs2021Funny/Basunat-Cinematic-Diffusion_demo -shengzi/uer-gpt2-chinese-cluecorpussmall -Pfs2021Funny/The-CG-Diffusion -shengzi/shibing624-gpt2-dialogbot-base-chinese -Pushpak77/fastspeech2-TTS -Candeloro/DeepDanbooru_string -PsykoNOT/hakurei-waifu-diffusion -DCXGAO/DeepDanbooru_string -Xhaheen/stable-diffusion-depth2img-test -Rmpmartinspro2/EimisAnimeDiffusion_1.0v -Datasculptor/car-data -Neovega/ogkalu-Comic-Diffusion -xiao2023/DeepDanbooru_string -catasaurus/text2int -labonny/facial-expression -MysticTony/webui -CourserLi/classify -hrishikeshagi/NewChatbot -sushmitxo/galactica2_6.7b -robertoberagnoli/whisper -hyuan5040/Speech-ChatGPT-Speech -cshallah/qna-ancient-1 -Roxza/vintedois -Freiburg-AI-Research/dermoscopic_image_generation -hyuan5040/ChatWithSpeech -indy256/protogen_v2.2 -xcocogoatx/WaifuCreatorAi -kanokon/GUI -Andreean/Sentiment-Analysis-Bitcoin -akhaliq/cool-japan-diffusion-2-1-0 -niks-salodkar/Fashion-Prediction-Demo -hugface33/dream -Rohith33/facedetector -frnka/football -awacke1/AI.Dashboard.Mermaid.Model.HTML5 -peteralexandercharles/wav2vec2-uk-demo -ADobrovsky/Plant_Disease_Classification_Project -MaplePanda/PandaG-diffusion-2-1 -koyomimi/Real-CUGAN -MaplePanda/Gstable-diffusion-2-1 -hdaifeh93/README -saltacc/RandomPrompt-v1 -arnepeine/monaspeech -mukish45/potato-disease-classification -zswvivi/ChineseMedicalQA -Tritkoman/Bloom -kilog/dreamlike-art-dreamlike-diffusion-1.0 -mithril-security/NonSuspiciousImageDecoder -subrota2k2/mt_en-de -VIOD/Real-CUGAN -VIOD/anime-ai-detect -awacke1/BiasMitigatorForFairEquityData -thinkersloop/finetuned-dl-cord-v2 -iricardoxd/chat_spanish -hhalim/hadi_first_day_in_HF -niaoquan/anime-remove-background -datasciencedojo/Twitter-Trends-Analyzer -Shad0ws/Chatbot_OpenAI -zhuwx/Real-CUGAN -adpro/Stable-Diffusion-Side-by-Side01 -ke666/anime-ai-detect -Avin1221/darkstorm2150-Protogen_x3.4_Official_Release -tomar79/webcam -RedBaron5/PatentSolver -xuyaxiong/HandwrittenDigits -Stanlito/Foodvision_mini -santhosh/NLLB-Translator -windoge/anime-ai-detect -neigui/White-box-Cartoonization -mcqueenfu/johnslegers-epic-diffusion -hallll/text_image_forgery_detection -waiwaiwai/Real-CUGAN -shui45/Real-CUGAN 
-keremberke/garbage-object-detection -billsar1912/YOLOv5x6-marine-vessels-detection -oskarvanderwal/MT-bias-demo -konerusudhir/mp_art_search_1_1 -piuba-bigdata/discurso-de-odio -huhlim/cg2all -iben/syntetic-text-detector -jroust/rooster -lindeberg/whisper-webui -Hexequin/Linaqruf-anything-v3.0 -harshhpareek/bertscore -kesally/anime-remove-background -Andy1621/UniFormerV2_mit_demo -cenji1109285052/anime-ai-detect -lemon7/White-box-Cartoonization -uisjqo/DeepDanbooru_string -lyf46/point-e -LZY123ai/anime-remove-background -GouDiya/anime-remove-background -rerdscf/webui -TEL123/Real-CUGAN -om-app/remove-background -chuyin/anime-ai-detect -OverSky/mio-amadeus -aaronW/PaddlePaddle-plato-mini -modjunkie/MGX-Midjourney-v4 -safebuster2/sudoku -aziki/anime-remove-background -QianFeng/White-box-Cartoonization2308 -anzoutian/White-box-Cartoonization -chansung/textual-inversion-pipeline -109peko/anime-remove-background -109peko/DeepDanbooru_string -MMars/Question_Answering_DistilBert_Finetuned_on_SQuAD -mmfuente95/Basic_EN_FR_Translator -BwayKC/prompthero-openjourney-v2 -Lawlieties/dreamlike-art-dreamlike-photoreal-2.0 -GT4SD/moler -Jour/Bloom-Translation -nightfury/Riffusion_real-time_image-to-music_generation -mohamadsadeghrio/Aplod -BhaskarKapri/Animal -micole66/zero-shot-4 -Ali36Ahmad/MagicPrompt-Stable-Diffusion -Ali36Ahmad/magic-diffusion -gggh/anime-remove-background -johnslegers/Epic-Diffusion-webui -salmanmapkar/whisper-to-chatGPT -MINAMONI/White-box-Cartoonization -Felixogunwale/Imagedeblurr -peteralexandercharles/automatic-speech-recognition-with-next-gen-kaldi -vargha/facebook-wmt19-en-de-gradio -BwayKC/darkstorm2150-Protogen_v2.2_Official_Release -stjiris/README -hareshhecker/prompthero-openjourney-v2v3 -ussrcccp/White-box-Cartoonization -AQaTaHaGoD/GoD -yuanmochu/Real-CUGAN -GT4SD/paccmann_rl -jjie/DeepDanbooru_string -group2test/sd-space-creator -Violette/Protogen_x3.4_Official_Release -clem/comparing-captioning-models -pngwn/huguru -GT4SD/advanced_manufacturing -group2test/stable-diffusion-2-1-base -ismot/8testi1 -Nathanotal/stockholmHousingValuation -micole66/Zero-Shot-Classification-Pretrained -ravisingh15/ligand_distance -GT4SD/geodiff -GT4SD/hf-transformers -camenduru-com/RabbitMQ -Candeloro/anime-remove-background -Shad0ws/crowdcounting -zcodery/anime-remove-background -maureenmugo/projects -Arvi/Performance_predictor_and_feedback_generator -subhendupsingh/dis-background-removal -lognat0704/chatGPT -thiagohersan/maskformer-coco-vegetation-gradio -nightfury/Magic_Text_to_prompt_to_art_Diffusion -hyxhb/anime-remove-background -prof-freakenstein/anurag-bit-Ai-avatar-Generator -pawelklimkowski/tylko-dreams -coutant/multilingual-sentence-similarity -Luna-Crestt/Da-ze -peteralexandercharles/Voice-Cloning -gstdl/screener-saham-demo -OPM-TECH/CompVis-stable-diffusion-v1-4 -w2106856508/DeepDanbooru_string -xinhai/Spleeter -rossflynn/health -awacke1/AGameForThat -peteralexandercharles/whisper-restore-punctuation -Ibtehaj10/cheating-detection -test12356/SUI-svc-3.0 -mhmdrza/stabilityai-stable-diffusion-2 -zea10/ogkalu-Comic-Diffusion -nbortych/sentiment -jreji/RestNet -Malifex/cocoa-diffusion -KeyDev/NOC-classification -JUNGU/face-swap -camenduru-com/inspector -Yttrin/prompthero-openjourney -ClassCat/mnist-classification -ferrarrinicky/sd1.5.NSFW -NachtYoru/Linaqruf-anything-v3-better-vae -hstrejoluna/dreambooth-training -Ibtehaj10/cheating-detection-FYP -kornia/Face-Detection -bayoubastard/KoboldAI-fairseq-dense-13B-Shinen -kerria/finetuned_diffusion -tilos/Real_Time_Traffic_Prediction 
-SidneyChen/mbti_prediction -ClassCat/mnist-classification-ja -RealKintaro/Offensive-Speech-Detection-From-Arabic-Dialects -DavidLijun/FI -piuba-bigdata/README -eldobbins/coral-spawning-detector -renatotn7/question-answering-portuguese-with-BetterTransformer -Joshua1808/PaginaWeb -elcom/README -posicube/mean_reciprocal_rank -LobsterQQQ/Nail-Set-Art -pietrocagnasso/paper-title-generation -LobsterQQQ/Text-Image-3D_Model -LobsterQQQ/text2img -rti-international/rota-app -torileatherman/news_headline_sentiment -hakanwkwjbwbs/stabilityai-stable-diffusion-2-base -shivalk/myfirst -Munderstand/CLIP-Interrogator-3 -dtrejopizzo/texto-a-imagenes-intel -EDGAhab/Aatrox-Talking -Mikey211/GUI2 -rajistics/shiny-test -LudvigDoeser/TSLA_stock_predictions -yuan1615/EmpathyTTS -tommyL99/Stock_Market_Prediction -Artbogdanov/monet-manet -abdalrahmanshahrour/ArabicQuestionAnswering -Monan/webui -svjack/bloom-daliy-dialogue-english -mw00/chess-classification -jolucas/llm_lab -leonel1122/Analog-Diffusion -georgescutelnicu/neural-style-transfer -marclelarge/knn_encoder_decoder -Angelaangie/personal-chat-gpt -Smithjohny376/andite-anything-v4.0 -LouieDellavega/dreamlike-photoreal-2.0 -hgd/kk -whz20041223/anime-remove-background -Taper5749/yolov8-2ndspace -YourGodAmaterasu/GPTChatBot -spock74/whisper-webui -Simbals/TextRetrieval -SalmanHabeeb/MaskDetector -tommy24/this-is-indeed-cool -clement13430/RIOT_GAME -tommy24/image -tigersinz/Linaqruf-anything-v3-better-vae -jeanmidev/marvel_snap_related_items_recsys -soldni/viz_summaries -ziguo/Real-ESRGAN -adyjay/andite-anything-v4.0 -MrMoans/stabilityai-stable-diffusion-2-1 -vkganesan/AdaIN -ryankkien/LOLDodgeTool -MRiwu/Collection -PascalLiu/FNeVR_demo -awacke1/Science-NER-Spacy-Streamlit -Ame42/rwms -Fr33d0m21/stabilityai-stable-diffusion-2-1 -TheOrangeJacketBrigade/GenerateOngCodeAI -Mahmoud7/mobile_price_prediction -peteralexandercharles/space-that-creates-model-demo-space -coutant/yolov8-detection -ahnafsamin/GroTTS-Tacotron2-24mins -GT4SD/keyword_bert -awacke1/Webcam-Stream-Mesh-Landmark-AI -eengel7/news_headline_sentiment -abcde1234www/personal-chat-gpt -Fr33d0m21/Text_image_3d -Fr33d0m21/chatbot_dialogpt -eeyorestoned/Nitro-Diffusion -tmtsmrsl/twitter-sentiment -abcde1234www/aibot -celery22/gradio_plant_classify_app -Kyllano/ShrimpClassifier -7eu7d7/anime-ai-detect-fucker -miyu0609/gsdf-Counterfeit-V2.0 -abhishek/scikit-learn-tabular-playground -smartinezbragado/reddit-topic-modelling -amulyaprasanth/car_price_prediction -AdithyaSNair/Medical_price_prediction -raudabaugh/rsna-breast-cancer-detection -Antonpy/stable-diffusion-license -Shenhe/anime-ai-detect -Rimi98/InsectRecognizer -lvwerra/show-pdf -shoukaku/face-emotion-recognizer -ivanokhotnikov/longformer-base-health-fact -freddyaboulton/whisper-to-stable-diffusion -pepereeee/DreamlikeArt-PhotoReal-2.0 -Daroach/anime-remove-background -Mayer21/text_to_image2 -akhaliq/scikit-learn-tabular-playground -lavrtishakov/EleutherAI-gpt-j-6B -THEMUNCHERCRUNCHER/teachif -nnaii/anime-remove-background -nnaii/anime-ai-detect -MSHS-Neurosurgery-Research/TQP-atEDH -Tao0000/stabilityai-stable-diffusion-2-1 -voidKandy/WW1_Poet_Bot -selldone/README -iamtahiralvi/stabilityai-stable-diffusion-2-1 -etweedy/dreambooth-tessa -huathedev/findsong -iamtahiralvi/yanekyuk-bert-uncased-keyword-extractor -tengxiu/img-to-music -adrian065105/andite-anything-v4.0 -CguCsie/README -hanithar/Trees -marcogallen/emotion_classifier -Lycorisdeve/DeepDanbooru_string -boda/arabic-names-generator -awacke1/Web-URL-HTTP-Parameters-Get-Set 
-NeilRokad/dreambooth-training -CODEACON/README -trysem/confusion -huang4414/DeepDanbooru_string -sheikhDeep/car-recognizer -Hc123/anime-remove-background -abdulsamod/crop_yield -emilycrinaldi/AirBNB -fozouni123/linkeddata -wuuthradd/prompthero-openjourney -isaacjeffersonlee/Legal-Grammar-Error-Corrector -HumanDesignHub/Ra-Diffusion_v.1 -kllmagn/sberbank-ai-rugpt3large_based_on_gpt2 -mehdidc/ae_gen -stevechong/cny-goodluck-detector -Crackedids/README -WayneLinn/Singapore_Air_Quality_Prediction -aiden09/plasmo-woolitize -neigui/img-to-music -trysem/DreamShaper-3.3 -fariyan/gif_studio -ma3ter3ky/test -abrar-adnan/vehicle-recognizer -airus/img-to-music -leonel1122/maximum_diffusion_no_pulp -lorenzoscottb/phrase-entailment -dawood/chatbot-guide -awacke1/NLPDemo1 -hhalim/NLPContextQATransformersRobertaBaseSquad2 -allieannez/NLPContextQASquad2Demo -imseldrith/BotX -sanjayw/nlpDemo1 -abhilashb/NLP-Test -AdamGoyer/is_it_fly -clevrpwn/CompVis-stable-diffusion-v1-4 -lRoz/j-hartmann-emotion-english-distilroberta-base -Ame42/UBTH -drdata/ArtNovelComicBookComposer -gradio/examples_component_main -awacke1/SpaceBuggyPlaycanvasHTML5 -ClassCat/Spleen-3D-segmentation-with-MONAI -sblumenf/read_it_later -geniusguy777/Face_Recognition -wuhuqifeidekun/White-box-Cartoonization -HaiTang/DeepDanbooru_string -Jamos1/AI_gamer89-insta -Ayaka-daisuki/anime-remove-background -faressayadi/n-gpt -Disguised/anime_character_recognizer -bugbounted/Whisper-Auto-Subtitled-Video-Generator -Fbr55555/hassanblend-HassanBlend1.5.1.2 -saurav-sabu/Car-Price-Prediction -ma3ter3ky/FruitClassifierModel -sanaghani12/Gradio-Huggingface -andzhk/PGNInfo-test -jsdt/lol-predictor -dawood/chatbot-guide-multimodal -Temptingchina/Real-CUGAN -oyjp1234/andite-anything-v4.0 -rang1/White-box-Cartoonization -kwinten/attrition -chachkey/anime-remove-background -pranavbapte/Car_type_detection -ahuss/pet -aegrif/spell_generation -ethansmith2000/image-mixer-demo -SumDimDimSum/yulet1de-hentaidiffusion -nyaasaT/Nyanator -agamthind/foodvision_mini -camenduru-com/chisel -Rinox06/webui -davanstrien/qdrant_test -Joabutt/furry-diffusion -BilalSardar/facrec -3i2irg/first-app -Duskfallcrew/anything-v3.0 -redpeacock78/anything-v4.0 -vialibre/edia_full_es -Duskfallcrew/MagicDreamlike -achajon/prompthero-openjourney-v2 -zonglin03/White-box-Cartoonization -aadit2697/movie_recommender -TorsteinAE/YoutubeSummarizer -luciancotolan/Fraud_ExpertSystem -awacke1/ChatbotWithFilePersistence -OtmanSarrhini/foodvision_mini -Ayya/anime-remove-background -hjs8/text-to-3D -mukish45/Hindi-Audio-To-Text -Asahi402/White-box-Cartoonization -DataScienceGuild/WikipediaAIWithDataframeMemory -Asahi402/anime-remove-background -AnshuK23/Customer-review-analysis -kukuhtw/VToonify -trysem/dfr -awacke1/ASRSpeechRecognition1 -hhalim/WikipediaAIDataScience -radames/hello-pytesseract -procat22/minimal -giorgiolatour/aqiprediction -abc123desygn/timeless-diffusion -ussarata/storygen -awacke1/bigscience-data-sgpt-bloom-1b7-nli -DanielCL/try-out-openai-text-summarizer -Jerkinjankins/ogkalu-Comic-Diffusion -Duskfallcrew/darkstorm2150-Protogen_x5.8_Official_Release -Duskfallcrew/shindi-realistic-skin-style -Unggi/title_extraction_bart_logical -rrighart/product-defects -lengxi/White-box-Cartoonization -Tirendaz/pytorch_cat_vs_dog -Lycorisdeve/White-box-Cartoonization -dieselprof/stabilityai-stable-diffusion-2 -SamKenX-Hub-Community/README -awacke1/google-pegasus-pubmed -awacke1/google-bigbird-pegasus-large-pubmed -awacke1/microsoft-BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext 
-awacke1/Stancld-longt5-tglobal-large-16384-pubmed-3k_steps -awacke1/bigscience-T0_3B -oldplayer1871/anime-remove-background -nehuggingface/cant -Daimon/translation_demo -lris/anime-remove-background -lris/DeepDanbooru_string -Faboor/README -oyyy/TeleGPT -sr5434/QuoteGeneration -axuint/OpenNiji -codebender/gpt-2-rumblings -AppleQAQ/anime-remove-background -Metal079/Sonic_Character_tagger -AndreLie95/Diabetes_Risk_Prediction -Chukwuka/FoodVision-Model -bhasker412/IDD-YOLO-Tracking -luxiya/anime-remove-backgrou -kaesb/fastaicourse -SmokingBrisket321/rocky_or_rambo -ZeroCool94/sygil-diffusion -Daniton/superjourney -eeemef/demo-cats-vs-dogs -Boadiwaa/Recipes -zonglin03/Real-CUGAN -awacke1/gpt2-demo -MrSashkaman/StyleTransfer -Nickhilearla135095/webui -victorbahlangene/Star-wars-app -SHULGIN/MiDaS -masapasa/biogpt -mshkdm/VToonify -awacke1/CodeParrot-Copilot-Alternative -awacke1/NovelAI-genji-python-6B -awacke1/EleutherAI-gpt-j-6B -awacke1/facebook-incoder-6B -awacke1/Salesforce-codegen-16B-multi -jspr/paperchat -Eveltana/eveltana -AI-Chatbot-Master/Chatbots -Duskfallcrew/duskfalltest -umm-maybe/mitsua-diffusion-cc0 -Zubia/clipdemo -rzuruan/DeepDanbooru_string -Duskfallcrew/Duskfallcrew-duskfallai -Goblin-of-Games/README -playgrdstar/ancient-chinese-calligraphy -hhalim/DAvaViz-graph -hhalim/dataViz-mermaid -hhalim/datavis-plotly -michelecafagna26/High-Level-Dataset-explorer -voroninvisuals/lama -sanjayw/mask2former-demo -akhaliq/pastel-mix -Duskfallcrew/duskfallai_webui -LEBEI/00002 -avysotsky/asklethain -Stereo0001/Model3D -edvanger/White-box-Cartoonization -Falpx/DeepDanbooru_string -Lucifer741/emoji-predictor -hush1/anime-remove-background -Xikless/instructpix2pix -andreishagin/Class_modify -lotrlol/Spotify-Recommendation-System -stable-bias/diffusion-faces -nateevo/asesor-transito -hush1/White-box-Cartoonization -Kaixuanliu/textual-inversion-training -miruchigawa/hakurei-waifu-diffusion -thejagstudio/procom -Mayanand/Automatic-Number-Plate-Recognition -santiviquez/noisy_human -superprpogresor/Bringing-Old-Photos-Back-to-Life -tumuyan/realsr-docker -huazhao/DeepDanbooru_string -huazhao/anime-remove-background -emirhannnn32/car_prediction -style0427/anime-remove-background -yunyue/anime-remove-background -Gyuyu/andite-anything-v4.0 -georgesX/finetuned_diffusion -alphahg/academic-paper-translate-summary -OFA-Sys/small-stable-diffusion-v0 -adpro/avinev3_04 -Sibanjan/Email -ulysses115/diffsvc_test -vakosta/Code2Comment -wybxc/of-diffusion-demo -mengmeng02/DeepDanbooru_string -Sevenlee/bert-Chinese -Trancoso/README -jarvisx17/En_ASR_wave2vec2 -cxrhr/anime-remove-background -xqq/Real-CUGAN -LeeHotmen/webui-docker -sanshi-thirty/anime-remove-background -yame/Real-CUGAN -OnurKerimoglu/Classifymoods -dascruz/pets -SRDdev/HingMaskedLM -JaeSwift/GTA5_Artwork_Diffusion -User1342/RUNIC -SDbiaseval/identities-knn -EmanAbelwhab/foodvision_mini -Larvuz/instruct-pix2pix -Yukiiiiii/color_transformation -tanaysk/stockpricepred -ArtificialWF/Voice-Recognition -chumeng/anime-ai-detect -manhdo/head_pose_estimation_tracking_app -Jacob209/AUTOMATIC-promptgen-lexart -hiraltalsaniya/YOLOv7_face_mask -krushna/text_in_image -Zulqrnain/NewsSummarizer -msy666/White-box-Cartoonization -ebgoldstein/FRF_Heavies -osbm/streamlit-helloworld -MikeyAulin/stabilityai-stable-diffusion-2-1 -jharrison27/NPI-maps -hhalim/EleutherAI-gpt-j-6B -deepghs/gchar_online -krushna/url-or-text_summarizer_or_caption_generator -yasserofff/runwayml-stable-diffusion-v1-5 -nikravan/Text2Sql -renatotn7/unicamp-dl-translation-en-pt-t5 
-arrayxhunter/bearish -avin1103/SLAM -pplonski/mercury-hugging-face -bgk/lodosalberttr1 -YFHAki/DeepDanbooru_string -osanseviero/argilla-template-space -fattest/stabilityai-stable-diffusion-2-1 -SashaKerbel/HandwritingClassifier -awacke1/Biomed-NER-AI-NLP-CT-Demo1 -ceckenrode/Biomed-NER-AI-NLP-CT-Demo1 -awacke1/Bloom.Human.Feedback.File.Ops -awacke1/stabilityai-stable-diffusion-2-1 -awacke1/andite-pastel-mix -evanpierce/3D_Photo_Inpainting2 -harshasurampudi/Which_Planet -Kaludi/CSGO-Weapon-Classification_App -BaiyuS/Real-CUGAN-YZ -GuardianUI/ui-refexp-click -sritang/hack_qa2 -Kaludi/Food-Category-Classification_App -xyz-labs/openjourney -peteralexandercharles/streamlit_1.15 -jayyd/fashion-collect -TexR6/AttentionMaps -kdrkdrkdr/LisaTTS -furqankassa/d4data-biomedical-ner-all02032023 -keneonyeachonam/d4data-biomedical-ner-all-020323 -ceckenrode/d4data-biomedical-ner-all232023 -ahmedxeno/brain_tumor_vs_normal_classification -keneonyeachonam/punctuation-Token-Classification -furqankassa/Punctuation-token -ceckenrode/PunctuationTokenClassification -keneonyeachonam/NER-Ontonodes -ceckenrode/NEROntoNotes -furqankassa/flair-ner-english-ontonotes-large -venkatks515/VenkatASR -ahmedxeno/kidney_disease_classification_CT_scan -nosson/code-classifier -datasciencedojo/Article-Scraping -subhc/Guess-What-Moves -EnD-Diffusers/Photography-Test -awacke1/Requests-Interpret -Sim1604/Twitter_Sentiment_Analysis -awacke1/Embedded_Space_Test -aquaaaaaaaaaaaa/AI-minato_aqua -a1455/DeepDanbooru_string -yuan2023/img-to-music -Lookimi/TuberTranscript -rghdrizzle/fox_dog_wolf_identifier -zfz/img-to-music -JUNGU/pixera_gen -EAraid12/LoRA-DreamBooth-Training-UI -adba/Real-CUGAN -oldfart/removaltool -UltraMarkoBR/SoftHunter -haoqi7/research -ivy-1911/vits-uma-genshin-honkai -kemao/anime-remove-background -phongtruong/gsdf-Counterfeit-V2.5 -Detomo/generate_wifi_qrcode -king007/table_questions -aleloved02/Salesforce-codet5-large -mukish45/Coconut_Grade_Classification -daresay/employee-handbook-chat -stonking-com/stonking -Chukwuka/Dog_Breed_ImageWoof -oms12/dfgan -Lalo42/hassanblend-HassanBlend1.5.1.2 -EliotLacroix/Fine-tuned_Resnet_Face_Segmentation -pepereeee/prompthero-funko-diffusion -oliveiracwb/MBP -BigBoyBranding/README -Tristan/static-rlhf-interface -BreadBytes1/CC-Dashboard -vvd2003/Animals -camenduru-com/lsmith -Carlosito16/HXM-summarization -hhalim/google-flan-t5-large -PrussianBlue/White-box-Cartoonization -TwoCH4/White-box-Cartoonization -keneonyeachonam/NPR_AI_NER_020623 -ceckenrode/Biomed-NLP-AI-Clinical-Terminology -lalasmrc/facebook-blenderbot-400M-distill -kitkatchoco/openjourn -zjxchina/vits_seki -BridgeTower/bridgetower-video-search -muyi12314/anime-remove-background -firasggg/andite-anything-v4.0 -shainis/Art_Generation_with_Neural_Style_Transfer -sayakpaul/demo-custom-css -derek-thomas/sentence_diagrams -king007/CoCa -Faryne/yulet1de-hentaidiffusion -untovvn/Hello-SimpleAI-chatgpt-detector-roberta -nlphuji/whoops-explorer-analysis -mattritchey/geocoder_gradio -deprem-ml/README -hcapp/sd-dreambooth-library-herge-style -KnowingFly/Linaqruf-anything-v3.0 -PeepDaSlan9/CompVis-stable-diffusion-v1-4 -y-boy/Deforum -kalebu/LangChain_heyooBot -awacke1/DockerTensorRTTerminal -awacke1/AutoStableDiffusionTxt2ImgImg2Img -dennydotio/fastai -yapzanan/testNLLB -walisonhs/stabilityai-stable-diffusion-2 -PeepDaSlan9/facebook-wav2vec2-large-960h-lv60-self -xiaohuajiejie/styletransfor -wolfgangmeyers/stable-diffusion-inpainting-vae -elitecode/Detect_Emotions -aaronW/chat-robot -lwchen/CodeFormer -Josh98/nl2bash_m 
-dgnk007/dgnk007-heat -king007/docquery -SiddharthK/dslim-bert-large-NER -krushna/Auto_Insta_Post-V2 -SegevC/bf_predictor -huggingface/uie -GC6848/alpha_stocks_screener -BucketHeadP65/confusion_matrix -mano96/plagiarism -mattritchey/QuickAddresses -gaouzief/b -bccearth35660/machinelearning -ashkanforootan/af_chatGPT -cvegvg/Lama-Cleaner-clean -mattritchey/HRRR_animate -rune-m/age_guesser -jacobduncan00/Hosioka-AniReal -AyushP/PolicyChatBot -pedi611/gradio-whisper-to-stable.diffusion -devoworm-group/nucleus_segmentor -keneonyeachonam/Visualization-Plotly-Sunbursts-Treemaps-and-WebGL-020823 -cmudrc/AddLat2D -kabita-choudhary/speaker_Diarization -hfmax/SpeciesChecker -PeepDaSlan9/B2B-APG -alsrbdni/MagicPrompt-Stable-Diffusion -spock74/whisper-speaker-diarization -bejaeger/filled-stacks-search -maj34/Eye-Handicapped-Service -LiuZiyi/1-video-video2txt-whisper-yt -ManDag004/animals -LLLLLLLyc/anime-remove-background -awacke1/PandasDataframeAutoFilter -SrRaptor/Imagy -Hazzzardous/RWKV-Instruct-1B5 -awacke1/GradioAutoPlotFromCSV -awacke1/GradioAutoCSVLoaderToPlotly -NNDM/img-to-music -Boops88/gsdf-Counterfeit-V2.5 -DonaSmix/anime-remove-background -Aer0xander/sd-to-diffusers -Dipl0/Dipl0-pepe-diffuser-bot -ericsc/Korakoe-OpenNiji -magnadox/nlpconnect-vit-gpt2-image-captioning -Hero0963/sentiment_analysis_demo_01 -awacke1/StreamlitCSVFiletoPlotlyExpress -CirnoW/anime-ai-detect -Kaludi/Food-Category-Classification-And-Recipes-Recommender_App -awacke1/GenerativeAI-ChatInStreamlitWithTCPIP -awacke1/StreamlitEmotionWheelSunburst -awacke1/DungeonCrawlerWithReinforcementLearningMonster -awacke1/PlayableMovingLottieAnimationStreamlit -awacke1/StreamlitDealOrNoDeal -awacke1/QuoteOfTheDayStreamlit -awacke1/QuoteOfTheDayWithSearch -fahmiaziz/auto_meringkas -awacke1/TwoPlayerDiceGameCraps -awacke1/StreamlitMapBoxCityNames -aichina/Pix2Pix-Video -Jojohickman21/IvyLeague_Logo_Classifier -BreadBytes1/PL-Dashboard -awacke1/ZorkHF -awacke1/StreamlitCalendar -ravithejads/videoques -Noobian/How-To-Generator -elitecode/Captioner -pbsszoomA19/pbsszoomA19 -awacke1/StreamlitMultiplayerTicTacToe -awacke1/StreamlitMIDIPlayer -awacke1/AutoMLPandasProfilingSunburst -FrancXPT/stabilityai-stable-diffusion-2-1 -AFischer1985/German-Flan-T5 -victorbahlangene/NLP-News-Scraping-Summarization-Sentiment-App -opengl/Stable-Diffusion-Protogen-x3.4-webui -guymorlan/English2ShamiDialect -AlStable/Duke -princeml/emotion_streamlite_app -keneonyeachonam/AutoML_UsingStreamlit_Plotly_020923 -Iqbaljanitra/brandshoesprediction_nike_converse_adidas -NMEX/vits-uma-genshin-honkai -fhipol/deeplearning -vinayakporwal/remove-bg -vinayakporwal/ImageCreator -unstructuredio/chat-your-data-isw -awacke1/STEM-MathExercise -Nexxt/MagicPrompt-Stable-Diffusion -Daniton/prompthero-openjourney-lora -zanderchase/chat-your-data-chef -awacke1/StreamlitSTEMDataScienceEngineerDash -spuun/AI-image-detector -frncscp/Patacotron -awacke1/DnD-Character-Sheet -awacke1/AdventureGame -nmaina/gpt2chat -imabhi/book_Reader -Shine1916/MyChat -lijk20/ClueAI-ChatYuan-large-v1 -ethanrom/pcb_det -as-god/gsdf-Counterfeit-V2.5 -deprem-ml/deprem_satellite_semantic_whu -yash-srivastava19/TRINIT_EzDub_ML01 -xxixx/DeepDanbooru_string -ravinmizia/Twitter_Depression_Sentiment -cymic/Talking_Head_Anime_3 -end000/yandex-RuLeanALBERT -TRaw/darkstorm2150-Protogen_x3.4_Official_Release -LeeroyVonJenkins/hard-hat-detection -johiny/gsdf-Counterfeit-V2.5 -awacke1/ClickableImages -monkeyboss/xiaolxl-GuoFeng3 -PeepDaSlan9/EleutherAI-gpt-j-6B -Bokanovskii/Image-to-music 
-Duskfallcrew/isometric-dreams-sd-1-5 -mohaktnbt/openai-whisper-large -DataScienceEngineering/README -DataScienceEngineering/2-GradioLiveASR -DataScienceEngineering/4-Seq2SeqQAT5 -DataScienceEngineering/4-GeneratorCalcPipe -DataScienceEngineering/6-TreemapAndSunburst -gestiodinamica/giz_visualizacion -mazenAI/livine-demo -rbuell/iepassist_app -Hallucinate/demo -Kaludi/OpenAI-Chatbot_App -palondomus/fastapi -peteralexandercharles/WhisperAnything -oliveiracwb/MBP2 -dajuzi/img-to-music -manu-codes/dysperse -Detomo/naomi-app-api -paascorb/question_answering_TFM -enoreyes/langchain-gsp-demo -cahya/indochat -yongchang111/Real-CUGAN -CrabApple/prompthero-openjourney-v2 -pietrocagnasso/paper-highlights-extraction -shivansh123/Gradio -awacke1/1-SimPhysics -jpoptum/1-SimPhysics -danielcwang-optum/1_SimPhysics -awacke1/2-LiveASR -Rdceo26Rmrdceo26/README -awacke1/4-GeneratorCalcPipe -danielcwang-optum/4-GeneratorCalcPipe -Mahathi/4-GeneratorCalcPipe -awacke1/4-Seq2SeqQAT5 -danielcwang-optum/6-TreemapAndSunburst -jpoptum/6-TreemapAndSunburst -mkhan328/TreemapAndSunburst -awacke1/6-TreemapSunburst -julyThree/anime-remove-background -nateraw/dockerplayground -sarat2hf/table_in_image_to_csv_app -Lianglan/NLLB200-Translate-Distill-600 -AUST001/video -jayyd/Guess_famous_personalities_using_GPT-3 -Damnbro/andite-anything-v4.0 -tianpanyu/ChatYuan-Demo -AIFILMS/image-to-sound-fx -cloixai/dalle-minii -AIFILMS/generate_human_motion -AIFILMS/riffusion-playground -xusheng/anime-remove-background -xusheng/anime-ai-detect -AIFILMS/audioldm-text-to-audio-generation -blogclif/CF25 -wdnmd12/Real-CUGAN -AI-Naga/Parking_Space_Counter -Yuichiroh/ACL2Vec -GenerationsAI/GenAi-Pix2Pix-Video -gfhayworth/chat_qa_demo2 -suyuxicheng/anime-remove-background -ashhadahsan/summarizer-space -ehugfaces/stabilityai-stable-diffusion-2-1 -AI-Naga/Roof_Element_Identification -ceckenrode/sileod-deberta-v3-base-tasksource-nli -keneonyeachonam/sileod-deberta-v3-base-tasksource-nli-021423 -Gaborandi/PubMed_Downloader -deprem-ml/intent-leaderboard-v13 -nothinglabs/minima -hsinyuuuuuuu/cat -hjie3185/cat.identification -CornSnakeID/CornSnakeMorphID -Sevenlee/text_Image_stable-diffusion -zwitshr/justinpinkney-pokemon-stable-diffusion -oyl344531959/White-box-Cartoonization -EngrZiaQazi/Chat-GPT -Roixy/hakurei-waifu-diffusion -RyanJiang/anime-remove-background -mahmuod/CLIP-Interrogator -CyStorm/instruct-pix2pix -AE-NV/sentiment-productreview -marccgrau/whisper-asr-diarization -tridragonevo/chat-gpt-voice-stream -iambuoyant/vscode -adolfont/livebook-hf-test -aheskandani/FilesTools -iamrobotbear/gradio-auth-new -teeessstt/ytukjykuyutyku -zhongkaifu/mt_enu_chs -catasaurus/sound-distance -awacke1/BigScienceBloomRootsMemory -MMYang/microsoft-BioGPT-Large -Hskbqn/DeepDanbooru_string -zhuzhao/background-remover -chilge/taoli -wootang04/text_generator -Jasmine0725/text_generator -Eunice0120/text_generator -Yoyo1123/text_generator -GigiWasThere/Text -L1211/New_space1 -MelodyKwok/text_generator -LarissaHung/text_generator -Kittylo/text_generator -VioletWLT/Lucylol_wan -YoHoCo0o0/Gradio -lucylol/mirrorsai1 -Bianca0930/Bianca -GloryGranger80888/Gradio -SophiaGaogao/sophia -Destinycy/Destiny_LOL -alimeituan/gpt2 -mutonyilewis/Pothole_detection -KatieChau/text-generator -NatalieIp/test-generator -awacke1/GradioVoicetoTexttoSentiment -awacke1/GradioUpdateUI -colossalturtle4/andite-pastel-mix -cfj108/CompVis-stable-diffusion-v1-4 -baotoan2002/Chatbot-OpenAI -pojitha/sinhala_hate_speech -lunadebruyne/EmotioNL -Elbhnasy/Eye-Tracking-Diagnosis 
-Mississippiexhib/theintuitiveye-HARDblend -biantao/anime-remove-background -seanshahkarami/clip-explorer -afdqf2bs/CompVis-stable-diffusion-v1-4 -keneonyeachonam/Docker-FlanT5-TextGeneratorTranslator-021623 -mmk7/stock_trader -Datasculptor/3D-Room-Layout-Estimation_LGT-Net -ceckenrode/Docker-FlanT5-TextGeneratorTranslator -gauravahuja/nlpconnect-vit-gpt2-image-captioning -UchihaZY/White-box-Cartoonization -impulsewu/Real-CUGAN -YuFuji/CalqTalk -mfuentesmagid/Video_AI_Capabilities -spacerini/miracl-chinese -ismot/1702t1 -wqferan/chatgpt_webui -Mattdoc99/ElonYTsearch -qkorbit/AltDiffusion -itskiller/aiimage -alsrbdni/copy-ai.com -awacke1/StreamlitPydeckMapVisualViewStateForLatitudeLongitude -imabhi/Book_Translator -cloixai/webui -rachana219/MODT2 -dukecsxu/hotdogclassifier -RealTimeLiveAIForHealth/VoicetoTexttoSentiment -DShrimp/PoseMaker -jbraun19/Webcam-Object-Recognition-Yolo-n-Coco -RealTimeLiveAIForHealth/ASR-High-Accuracy-Test -spacerini/miracl-french -furqankassa/Docker-FlanT5-TextGeneratorTranslator -awacke1/GradioFlanT5BloomAndTaskSource -alc15492/MSemoji850NEW -LabelStudio/README -keneonyeachonam/DockerImageRecognitionToText021723 -Mileena/CLIP -awacke1/StreamlitChooseYourOwnAdventure -T-1000/runwayml-stable-diffusion-v1-5 -sajjade/hassanblend-hassanblend1.4 -cleanmaster/akagi-sovits3 -shreydan/khaanaGPT -abdullah040/TextBook -Andres99/Tune-A-Video-Training-UI -zahadneokurkycz/sd-img-generator -hunkim/es-gpt -luoshang/Real-CUGAN -cleanmaster/so-vits-svc-akagi -awacke1/Streamlit-Pyplot-Math-Dice-Game -habeebb5/biogpt-demo -awacke1/Hexagon-Dice-Fractal-Math-Game -awacke1/PlantFractalsMathGameWithJuliaSetnStrangeAttractors -awacke1/Dice-Roll-Fractals-STEM-Math -Tritkoman/Tritkoman-EnglishtoChurchSlavonicV2 -awacke1/Emojitrition-Fun-and-Easy-Nutrition -spacerini/xsum-search -navaaesarosh/navaaesarosh-saqi_v0 -micole66/photo-chooser -JacobLinCool/captcha-recognizer -pasinic/White-box-Cartoon -sadgaj/3demo -Paulog731/runwayml-stable-diffusion-v1-5 -awacke1/ActingGameMechanicsForSocialIntelligence -nickloughren/Robot-or-Not -wldmr/gradio_default -awacke1/Engineering-Magic-Picture-Dice-Vocabulary-Game -awacke1/Engineering-or-Magic-Q-A-IO -awacke1/Pandas-Gamification-Mechanics -nri1600/AI-bot -Mileena/WebUIDx -zengwj/GPT2-chitchat-training-huggingface -cfj108/prompthero-openjourney -skyxinsun/Gustavosta-MagicPrompt-Stable-Diffusion -glfpes/stabilityai-stable-diffusion-2-1 -Kluuking/google-vit-base -longlh/longlh-agree-disagree-neutral-classifier -b7evc/stabilityai-stable-diffusion-2-1 -Irnkvezz/SIC98-GPT2-python-code-generator -GipAdonimus/openai-jukebox-1b-lyrics -Karumoon/test007 -guopx/Real-CUGAN -lingye/anime-ai-detect -AUST001/Translation -rolisz/sentence_transformers_canonical -samakarov/Lama-Cleaner -tumuyan/vnc -KyanChen/FunSR -AlexMason/anime-remove-background -chrisbodhi/minima -Tinsae/CoWork -blueslmj/anime-remove-background -Haokko/AronaTTS -Rolajim/proyecto -awacke1/Assessment-By-Organs -Rimi98/NegativeCommentClassifier -awacke1/CardGameMechanics -awacke1/SMART-FHIR-Assessment-Blood-Pressure -awacke1/Assessment.SMART.FHIR.Exercise.Panel -achimoraites/Page-Summary -dgottfrid/clipcluster -awacke1/Dice-Roll-Treemap-Plotly -awacke1/SpeechRecognitionwithWav2Vec2 -jman1991/google-flan-t5-xxl -awacke1/VisualLibraryofTop20LibsForDataScienceandAI -awacke1/VizLib-BeautifulSoup -Jhoeel/rfmAutoV3 -JeffTao/anime-remove-background -sohamagarwal00/chatgpt_implementation -svjack/prompt-extend-gpt-chinese -loocake/anime-remove-background -aichina/youtube-whisper-09 
-vaibhavarduino/chatGPT-Wrapper -cenji1109285052/img-to-music -Gifted030/movie_reviews_prediction -rolisz/ner_comparation -SuSung-boy/LoRA-DreamBooth-Training-UI -Pennywise881/wiki-chat -awacke1/VizLib-Keras-n-Plotly -king007/OCR-Invoice-LayoutLMv3 -awacke1/VizLib-Mahotas -awacke1/VizLib-Matplotlib -awacke1/VizLib-Numpy -shaun-in-3d/stabilityai-stable-diffusion-2 -sundar7D0/semantic-chat-demo -trysem/TableIMG2-CSV -freddyaboulton/git-large-coco -JUNGU/cartoonizer-demo-onnx-sota -tcvieira/bm25-information-retrieval -awacke1/SMART-FHIR-Assessment-BMI -awacke1/VizLib-Altair -wesliewish/anime-remove-background -joaogabriellima/Real-Time-Voice-Cloning -awacke1/VizLib-PyDeck -LeeroyVonJenkins/construction-safety-object-detection -zhicheng127/Real-CUGAN -Mattdoc99/CollisonGPTChat -systash/hashtag_and_named_entity_generator -kamakepar/sberbank-ai-rugpt3large_based_on_gpt2 -kamakepar/sberbank-ai-rugpt3large -Marioseq/openai-whisper-tiny.en -imabhi/multilingual_image_translator -xxx1/vqa_blip_large -BMukhtar/facemaskDetector -touchscale/DeepDanbooru_string -Haitangtangtangtang/AnimeBackgroundGAN -pierreguillou/bloomz-english -MRroboto/Loacker_app -nonya21/hakurei-lit-6B -Abbasghanbari/Abo -awacke1/SMART-FHIR-Kits-SDC-HL7 -ahmedghani/Inference-Endpoint-Deployment -Pennywise881/wiki-chat-v2 -xiaojidan1314/anime-remove-background -svjack/English-Comet-Atomic -YashGb/HelpMeTalk -sanjaykamath/BLIP2 -Sequence63/anime-ai-detect -Sequence63/Real-CUGAN -FriendlyUser/YoutubeDownloaderSubber -sanchit-gandhi/whisper-language-id -keneonyeachonam/SMART-FHIR-Streamlit-1-022223 -chatFAQs/Gradio -Cristiants/captiongeneration -HARISH246/3D -touchscale/White-box-Cartoonization -awacke1/CardGameActivity -niansong1996/lever-demo -zhongkaifu/mt_jpnkor_chs -awacke1/CardGameActivity-GraphViz -awacke1/CardGameActivity-TwoPlayerAndAI -Qrstud/gpt -Ayaka2022/anime-aesthetic-predict -azapi/img-to-music -zhongkaifu/mt_chs_enu -nivere/Pix2Pix-Video -nivere/ControlNet-Video -sandm/anime-aesthetic-predict -sandm/anime-remove-background1 -sandm/anime-ai-detect -meraGPT/write-with-vcGPT -kingz/nlpconnect-vit-gpt2-image-captioning -lizhongping2713/StableDiffusion-WebUI -liyating/3d -saad-abdullah/knn-for-gdp-to-happiness-predictor -co42/scatterplot_component_main -awacke1/VizLib-TopLargeHospitalsMinnesota -RomanCast/inspect_mlm -xiaoguolizi/anime-ai-detect -awacke1/ClinicalTerminologyNER-Refactored -Sailors/What-National-Park-Should-You-Visit -sieferan2023/Music_Recommendation -christse2026/WinterActivities -hasselhe2023/SoccerPosition2.0 -liudao/andite-anything-v4.0 -Qrstud/ChatGPT-prompt-generator -awacke1/VizLib-GraphViz-SwimLanes-Digraph-ForMLLifecycle -CZ5624/anime-remove-background -rubend18/parafrasis_espanol_t5 -awacke1/VizLib-GraphViz-Folium-MapTopLargeHospitalsinWI -overlordx/starlight -Woodsja2023/Basketball -bspSHU/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator -wldmr/deeppunct-gr -edad/bigscience-bloom -232labs/VToonify -rueckstiess/english-to-mql -sdpetrides/MNIST-Generator -awacke1/VizLib-SVGWrite-Streamlit -artblack01/Pix2Pix-Video -awacke1/StreamlitSharedChatToFiles -MatzeFix/openai-whisper-large-v2 -Duskfallcrew/wd-v1-4-tags -dnth/edgenext-paddy-disease-classifier -fclong/summary -anusurabhi/girl_race_detector -Dalun/andite-anything-v4.0 -alysa/vieTTS -RobertoJ07/IARJ -thelou1s/stabilityai-stable-diffusion-2 -arjun2364/SEBIS-code_trans_t5_large_source_code_summarization_csharp_multitask -jungwoonshin/deepfake_detection_reimplementation -Ilkin/semantic-search-demo-3 -xiaoweigo/White-box-Cartoonization 
-dsmai/dogorcat -Heber/google-flan-t5-xl -barnga/DL -BigChungux/Pet_Survey -breadlicker45/gpt-youtuben-gen -awacke1/VizLib-TopLargeHospitalsNewJersey -botmaster/generate-mother-2 -TabooAM/What-game-you-should-play -rktraz/art_style_classifier -awacke1/ZeroShotClassifiers-Facebook-bart-large-mnli -dog/expressjs-hello-world -zeno-ml/translation-critique -erinak/test1 -Limuru/DeepDanbooru_string -qq12122211/Real-CUGAN -JONER20/EleutherAI-gpt-neo-1.3B -Lippmann/White-box-Cartoonization -Lippmann/DeepDanbooru_string -davila7/semantic-search -siviltoplumtech/metadata -ysharma/dummy_phtogrd_blocks -CarlDennis/HYTTS -awacke1/CardCrafter-CraftCustomCards -Smiling333/speechbrain-soundchoice-g2p -awacke1/CardEvolution-LevelUpCards -jetwill/IDEA-CCNL-Taiyi-Stable-Diffusion-1B-Chinese-v0.11 -overlordx/elonmusk -rajivmehtapy/knowledgefactoryapi -pashas/openai-whisper-large-v2 -Inia2567/anime-ai-detect -awacke1/CardEvolution-BoardLayout -awacke1/CardEvolution-PlayingBoard -teamtom/RockPaperScissors -awacke1/Image-Recognition-Multiplayer-Chat-Game -Nyari/Super-Resolution-Anime-Diffusion -augmentedimaginationhackathon/paperstocode -wuhao2222/WarriorMama777-OrangeMixs -rbarman/resnet50-example -AIhackrOrg/README -mushroomsolutions/Medical-Image-Classification -Xsciss/hakurei-waifu-diffusion -wangguanlin/vits_Kazari -Duskfallcrew/Animated_Dreams -Duskfallcrew/duskfall-alters-portrait-plus -lolikme/gsdf-Counterfeit-V2.0 -aijack/jojo -snowr3/hakurei-waifu-diffusion -kaizen97/bear-classifier -awacke1/MultiplayerImageRecognition -awacke1/MultiplayerImageRecognition-Gradio -fsqhn/anime-remove-background -king007/google-flan-t5-test -king007/parrot-t5-test -EricKK/gsdf-Counterfeit-V2.5 -saicharantej/article-cortex -Rominn/vits-uma-genshin-honkai -Hasan777/IlluminatiAI-Illuminati_Diffusion_v1.0 -aijack/object -aj-data/AP2223_P1 -koustubhavachat/Ghibli-Diffusion -aijack/seg -Dikshant09/disease-prediction-api -aijack/Track -K3sco/Linaqruf-anything-v3.0 -pendragon107/firstmodel -suryabbrj/ContentModX -Arnasltlt/KlauskKnygos -mushroomsolutions/Gallery -DataForGood/bechdelai-demo -Armored-Atom/DiFuse_Your_Thoughts -Lightxr/sd-diffusers-webui -Kaludi/AI-Assistant-revChatGPT_App -Night-Ling/anime-remove-background -joe-aquino/keras_pretty_face -Spyhack225/second-brain -nihalbaig/BD-Vehicle-Detection -TWV87/LDA_Vis -smy503/EfficientNet -usamakenway/Stable-diffusion-prompt-generator-1m-examples -Kytrascript/lambdalabs-sd-pokemon-diffusers -ashuonnet/skillrecommender -naqibhakimi/sk -ntcwai/prompt-engine -nateraw/real-esrgan -k0ntra/WHISPER_FA -Nepmods/kawaiiAI -racdroid/Salesforce-blip-image-captioning-base -yinting/Salesforce-codegen-16B-mono -xt0r3/AI-Hype-Monitor -cropdusting/starcraft2-races -vtk51/Lama-Cleaner-lama -lianglv/microsoft-resnet-50 -hg2001/age-classifier -jingxiangmo/Azza -Duskfallcrew/Gambit_and_Rogue -awacke1/Github-Streamlit -bergum/commerce-demo -jvcanavarro/emotion-recognition -Armored-Atom/gpt2 -deborabmfreitas/churn-prediction-deploy -bruvvy/nitrosocke-Nitro-Diffusion -jackli888/stable-diffusion-webui -srikanthsrnvs/togethercomputer-GPT-JT-6B-v1 -abidismail/22h-vintedois-diffusion-v0-1 -xxx1/VQA_CAP_GPT -pranavbup/Commercial-aircraft-classification -elun15/image-regression -FebryanS/Wakaranai -TheresaQWQ/timpal0l-mdeberta-v3-base-squad2 -mayuri120/anime-remove-background -Lanerdog/22h-vintedois-diffusion-v0-1 -michellehbn/I-Love-HuggingFace -MiguelVGP/bearclassifier -lraqi/alii -ad2/youtube-whisper -wendys-llc/roboflow2huggingface -andrew3279/Bloom_test -awacke1/Named-entity-resolution 
-awacke1/Text-summarization -awacke1/Question-answering -awacke1/Text-classification -awacke1/Text-generation -synissalty/andite-anything-v4.0 -Metatron/IlluminatiAI-Illuminati_Diffusion_v1.0 -hanhanbeea/anime-aesthetic-predict -raphael0202/category-classifier -CanIpleas/gpt2 -shuvojitkoley007/mrs-shuvojit-koley -Emmawang/audio_summarizer -futureagi/CheckGPT -kiin/andite-anything-v4.0 -awacke1/Data-Augmentation -BigChungux/Pet_Survey2 -Sardor-Odil/StableDiffusion -takanabe/space-demo-andite-anything-v4.0 -rwcuffney/PlayingCardPrediction -tykimos/TarotGPT -awacke1/Domain-Transfer-Learning-Pandas-Profiling -elkraken/Video-Object-Detection -jpoptum/Daredevil-Text_generation -hhalim/streamlit_bed_hospital -cakiki/bokeh_plots -DarwinAnim8or/NoSleep-Story-Generator -GranataDizzyDive/dizzydive -ifrit98/terenceGPT -qianwj/yehiaserag-anime-pencil-diffusion -portal/Top-20 -notsq/diffuse-the-rest -charanhu/GPT-J-6B -1toTree/lora_test -Datasculptor/OpenAI-Chatbot_App -17TheWord/vits-models -LiuZiyi/2-image-img2sketch-opencv -YotamNitzan/domain-expansion -CognitiveAIForHealth/README -dog/fastapi-document-qa -hamidr-bd1/v3 -lwdragon/token_classfication -zhc134/chatgpt-streamlit -awacke1/Streamlit-Data-Synthesis-Example -almn-uhc/Streamlit-Data-Synthesis-Example -awacke1/Examples-Of-AI-0302 -Biswa13/Examples-Of-AI-2023 -almn-uhc/Examples-of-AI -richardyoung/Examples-of-AI-2023 -asistaoptum/examples-AI-020323 -light22/Real-CUGAN -awacke1/d4data-biomedical-ner-all-0302 -almn-uhc/Sentiment-Analysis-Streamlit -BasToTheMax/22h-vintedois-diffusion-v0-1 -FEIMENG/andite-anything-v4.0 -Biliovo/anime-remove-background -jatinbittu13/selfie-nonselfie -ahsansbaig/instructor_dashboard -mariosmsk/epyt-viewer -gtx4010661/dandelin-vilt-b32-finetuned-vqa -najimino/aicv -welp234rt/rabiawerqayyum-autotrain-mental-health-analysis-752423172 -wuxi/Real-CUGAN -pripishchik/clip-image -Gallifraid/prompthero-openjourney-v2 -awacke1/Assessment.Health.Conditions.By.Cost -awacke1/Games-In-Python -kumahiyo/line-bot-stable-diffusion -Hurtle/DeepDanbooru_string -rinsora/White-box-Cartoonization -mabrotha/ChatGPT-prompt-generator -Swindu/ProsusAI-finbert -ivanmeyer/Finetuned_Diffusion_Max -Shuhul/New_Flix -NPU/hallucination_in_image_captioning_demo -Neo-Salvatore/GPTBase -ivanmeyer/dreamlike-photoreal-2.0 -yoimiya/White-box-Cartoonization -xxx1/chatgpt -apsys/hetfit -Hanseul/Salesforce-codegen-6B-multi -hololee/dreambooth-training -pysunny/gradio-pysunny -Quake24/thepainter -awacke1/HTML5-ThreeJS -awacke1/HTML5-ThreeJS-3D -awacke1/HTML5-BabylonJS-Javascript-LSystems -awacke1/HTML5-DNA-Sequence -awacke1/HTML5-Aframe-Lsystems -awacke1/HTML5-Aframe-3D-Maps -awacke1/HTML5-3D-Map-Hospitals -morinop/BetterSelfie -awacke1/Feature-Extraction-microsoft-codebert-base -awacke1/Image-to-Text-nlpconnect-vit-gpt2-image-captioning -awacke1/Token-Classification-NER-dslim-bert-base-NER -awacke1/Zero-Shot-Classification-valhalla-distilbart-mnli-12-1 -awacke1/Zero-shot-classification-facebook-bart-large-mnli -MiguelVGP/redfruits -pedrogengo/style_loss_showdown -awacke1/GPU-Memory-Detector -awacke1/GPU-Memory-Detector-HTML5 -faisalhr1997/Salesforce-blip2-opt-2.7b -silvaKenpachi/bearClassifierInference -hectorjelly/SoccerTwos-Challenge-Analytics-Extra -fernfromecuador/dallinmackay-Tron-Legacy-diffusion -awacke1/HealthConditionsTest -Kevin676/TalktoAI -zetabyte/stable-diffusion -GeekTony/Examples-Of-AI -awacke1/Health-Care-AI-and-Datasets -GeekTony/Gradio-Ontology -Whatcoldwind/csgo_investment -nikhil567/Turkey-Syria-Earthquake -slumgods/chatgpt-slumgods 
-LeeroyVonJenkins/OCR-Invoice-LayoutLMv3 -awacke1/DnD-Character-Sheet2 -xu1998hz/sescore_english_mt -xu1998hz/sescore_german_mt -xu1998hz/sescore_english_coco -xu1998hz/sescore_english_webnlg -Soybean01/White-box-Cartoonization -wangrongsheng/ChatGPT -Soybean01/anime-ai-detect -rmazarei/mann-e-mann-e_4_rev-1-3 -blessingmwiti/openai -pro15671/anime-remove-background -kaguraaya/anime-remove-background -qiuyue1/White-box-Cartoonization -awacke1/GPU-Memory-Detector-Aframe -Dai1123/CalqChat -Xhaheen/Hyper_Bot_ben -Langame/explorer -samusander/Snore.Ai -Bigshot/RSA-v0.1.2 -Vegecken/sovits4dzl -Ebo010/hot-dog -xiaoguaiguai/playground2 -shibinashraf36/drugrecommendationsystem -Nadaal/dost5 -Nadaal/chatgpt-demo -drdata/kohbanye-pixel-art-style -helenai/openvino-stable-diffusion -ahiruguagua/aiemo -cscan/demucs -James1208/Salesforce-codegen-350M-mono -pkiage/time_series_autocorrelation_demo -pkiage/time_series_decomposition_demo -KevlarVK/content_summarizer -YuanMio/vits-uma-genshin-honkai -Snb-ai/vuia -Tiju1996/resume-parser -awacke1/Self-Modifying-Graph-Visualization -differentai/infinite-memory-chatgpt -awacke1/Health-Condition-Actions-For-Health-and-Savings -pytholic/streamlit-image-classification-demo -awacke1/Spending-Simulation -michaelgartner/CompVis-stable-diffusion-v1-4 -sigit/permadi -pkiage/credit_risk_modeling_demo -D008/space-from-a-model -thomasjeon/stabilityai-stable-diffusion-2-1 -veb-101/driver-drowsiness-detection -AGITM/ToneCorrectionRecognition -owenchak/testgenerator -EricLam/yamatohome -Paco1112/Super-writing-tool -RonHoHo/Ronhohohhohoho05 -Wootang02/textgenerator -tomdeng/textgenerator -NicholasKwok/textgenerator -tomcheng/textgeneration -Felix0810/textgenerator -Averyng/averyng -anumkn/Anuradha -221091lstwcm/textgenerator -221090Lstwcm/textgenerator -Christyyu/textgenerator -yuszeying/textgenerator -generalHolmogorets/README -LittleYuan/My-Real-Bot -smallyu/img-to-music -Gato582/runwayml-stable-diffusion-v1-5 -TornikeO/dis-background-removal -awacke1/Games-Phaser-3-HTML5 -king007/remove-background -bluesky314/LangChain_gpt_indexBot -Mohit-321/WhatsappchatAnalyzer -GLTdd/ChatgptBot -noofa/wowsers -sessex/CLIPSeg2 -InsertUserHere9999/MGX-Midjourney-v4 -hhalim/google-flan-t5-large-test -FKBaffour/Expresso_Customer_Churn_Prediction -HuskyTho/EleutherAI-gpt-neo-1.3B -awacke1/Text-to-Speech-facebook-fastspeech2-en-ljspeech -awacke1/ASR-openai-whisper-base -awacke1/ASR-openai-whisper-large -awacke1/Audio-Sentiment-harshit345-xlsr-wav2vec-speech-emotion-recognition -awacke1/Audio-Sentiment-superb-hubert-base-superb-er -awacke1/CodeGen-Salesforce-codegen-350M-mono -BeeMon/dreambooth-training -qym/ChatGPT-prompt-generator -ishanam/xray-classification -Toor1989/Toor1989 -ReFenter/DeepDanbooru_string -hero-intelligent/MT3 -jmyungjoon/cartoon -tyao/CompVis-stable-diffusion-v1-4 -xp3857/ph-oj-2 -kriss-ml/Boston-House-Price -awacke1/Survey-Assess-Plan-UI -Namit2111/ChatGpt_Detector -xp3857/ds-pg-5-8 -Ridzuan/random_name_selector -thiagolira/ChatMaquiavel -dreamreyansan/hakurei-waifu-diffusion -Soumahara/hakurei-waifu-diffusion -smakubi/flowers -mushroomsolutions/Image_Annotation -awacke1/GPU-RTX-Nvidia-Nsight-Starter-AI-Kit -BL00DY-257/dolle-mini-lol -thelou1s/chatgpt-demo -w0rd-driven/livebook -awacke1/Top-Ten-Board-Games-Map-Making-Strategy -radames/hello-huggingface.js -Kimata/Sanskrit-TTS -Songj/DotaHeroClassifier -tribe-ai/document-qa-comparator -Alashazam/Harmony -gaurxvreddy/Xtinguish -younker/chatgpt-turbo -LanQian/ChatGPT -pragmaticslab/bary_score -pragmaticslab/depth_score 
-adwod/Streamlite_ViT_2000 -hugging-fellows/img-to-music -Boilin/URetinex-Net -breadlicker45/story-gen -KaguraNana/XiaokunChatGPT -cristalcorp/CompVis-stable-diffusion-v1-4 -EveryPizza/stabilityai-stable-diffusion-2 -Shakeb100/GroomingGenie_AI -suryabbrj/vit-gpt-caption-model-CMX -shidokan/ai.Life -Savethecats/README -bedrock123/andite-anything-v4.0 -abrar-adnan/speech-analyzer -AONYLMR/anime-ai-detect -AONYLMR/anime-remove-background -AONYLMR/White-box-Cartoonization -chasetank/manual_assistant -Neo-Salvatore/translate-locale -RamV/ChatRobo -eeshawn11/naruto_hand_seals -ChillyFaze/runwayml-stable-diffusion-v1-5 -jpjpjpjpjp/HylandDocumentVisualQA -de3sec/Image-Upscaling-Playground -hojumoney/WarriorMama777-OrangeMixs -fffiloni/live-ml5-handpose-p5js -awacke1/HTML5-AFrame-VR -ceckenrode/HTML5-Aframe-3D-Maps -Cboudreau/AI_ZeroToHero -awacke1/VizLib-TopLargeHospitalsNewJersey-03-09-2023 -ankushsethi02/VizLib-TopLargeHospitalsNewJersey-03-09-2023 -simplomatic/ChatGPT-prompt-generator -freshield/ChatGPT-gradio -sgvkamalakar/Water_Potability_Prediciton_app -XingHe0127/Chatbot -rcajegas/HTML5-Aframe-3DMAP-FLIGHT -ygtrfed/pp-web-ui -imju/flower_detector -ceckenrode/Cognitive-AI-Episodic-Semantic-Memory-Demo -awacke1/sileod-deberta-v3-base-tasksource-nli-2 -rcajegas/WHO_1 -leesooleon/xiaolxl-GuoFeng3 -PeepDaSlan9/andite-anything-v4.0-b2b -fariyan/image-to-text -shed219/ChuanhuChatGPT -dgongor/WhisperDemo -Otega99/minima -omarelsayeed/test -willianmcs/visual-chatgpt -lost123/DeepDanbooru_string -domro11/data_dynamos -suancaixianyu/Real-CUGAN -chriscelaya/merve-chatgpt-prompts-bart-long -3druga/ae-6 -bortle/astrophotography-object-classifier -davila7/llm-vs-llm -Feraxin/chatGPT -gradio/code_main -omarelsayeed/A7ades-Similarity-Quran-v2 -awacke1/HL-V2.x-Transformer-Parser -eele0011/Nlp -awacke1/Clinical-Terminology-FHIR-Assessment -jacob-petterle/cloudtop-deployer -productizationlabs/MyChatGPTDavinci -stunner007/old-car-price-predictor -awacke1/Gamification-Grabble -dccif/Real-CUGAN -xiaoti/Real-CUGAN -awacke1/Gamification-AI-Boggle -kanden/vits-uma-genshin-honkai -zhuce/vits -WZUN666/vits-uma-genshin-honkai -Kaludi/Virtual-AI-Career-Coach_App -Rimi98/Reptile-Museum -klcqy/anime-ai-detect -klcqy/DeepDanbooru_string -liuyuchen777/DanDanGPT -cat630/ChuanhuChatGPT -xiaorong/fork2-so-vits -AI-Edify/demo-gpt3.5-turbo -Surendra/chatbot -AntiUser/DeepDanbooru_string -king007/anime-anything-promptgen-v2-test -snjyor/ChatGPT_demo -awacke1/Gamification-Word-Search -QinQiuFox/get_ppt -janewu/hualao -awacke1/Torch-Git-Markdown-NLP -zee2221/Hyper_Bot -Jeffreylex/bigscience-bloom -awacke1/Streamlit-ALBERT-Transformers-Sequence-Classify-Visualize -awacke1/Joke-Book-No-Pun-Intended -farandclose/AudioChatGPT -awacke1/Word-Search-AI-To-Teach-AI -Hermit591/anime-remove-background -awacke1/Twitter-Sentiment-Live-Realtime -awacke1/Finite-State-Machine-Demo -awacke1/3d-Breakout-Game-Three.JS -awacke1/Three.JS-TheCube-Game -awacke1/HTML5-Tower-Building-3D-Game -SI2252/README -ParisNeo/Blip_QA -leftcoastkidd/runwayml-stable-diffusion-v1-5 -qtp/README -Dao3/openai-translator -Stereo0001/MagicPrompt-Stable-Diffusion -productizationlabs/ContentModeration -LHL3341/Hand-Write-Number-Recognization -JohnTan38/GODEL-v1_1-large-seq2seq -CoffeeBrewer/CompVis-stable-diffusion-v1-4 -AdVisual/MaskCut -de3sec/rembg_remove_bg -de3sec/Front-end-code-generation-from-images -ai-art/magic-diffusion-generator -khan994/sketch -roxas010394/parts-of-cars -Xlinelabs/togethercomputer-GPT-NeoXT-Chat-Base-20B -awacke1/Bird-Species-Migration-Month-Map 
-charbaaz356/Chat-GPT-LangChain-R -hersia/V_Admin_Bot -wilbertpariguana/Demo-Bot -xxx1/zh-clip -AndrewMetaBlock/emilyalsentzer-Bio_ClinicalBERT -qqqwt/chatgptpaper -yunyunyun/DGSpitzer-Cyberpunk-Anime-Diffusion -bookbot/SpeechLine -PhotoPranab/Joeythemonster-anything-midjourney-v-4-1 -misteca/ChatGPT -xp3857/aa-pr-2 -ricezilla/video_tampering_detection -WMisingo/license-plate-number-recognition-app -PKaushik/HumanCounter -DinoPiteko/youtube-whisper-04 -Dineshkumars/Text-Summarization -gradio/chatbot_dialogpt_main -weanalyze/stock_predictor -keneonyeachonam/FHIR-Streamlit-ChatGPT-031323 -AhmedKhairullah/dmo -weanalyze/twitter_scraper -jslin09/legal_document_drafting -AnandSoni2001/StockMarket -Sloth-Alchemist/Test.xyz -productizationlabs/IBCFProductRecommendations -owsgfwnlgjuz/bsrgan -villageideate/TrenBot -Jamphus/G -awacke1/Pandas-Profiling-CSV-XLSX-XLS -awacke1/Embedding-Iframe-HTML5-to-Gradio -awacke1/Media-Pipe-Facial-Mesh-Matching-3D -gradio/chatbot_simple -gradio/gallery_selections -awacke1/AI.Dashboard.Wiki.Chat.Cognitive.HTML5 -AI-Dashboards/README -shivangibithel/Text2ImageRetrieval -longht/vietnamese-disfluency-detection -CobaltZvc/sherlocks_pheonix -qwerrsc/vits-uma-genshin-honkai -vibhorvats/Joeythemonster-anything-midjourney-v-4-1 -rabiyulfahim/text-to-image -victor/victor-autotrain-satellite-image-classification-40975105875 -sagu7/sagu7-dating-avatar-model -rabiyulfahim/dalle-mini -Ordenador/classify-text-with-bert-hate-speech -prabhu46/registerandlogin -awacke1/AI.Dashboard.Gradio.Streamlit.HTML5 -furqankassa/AI-Dashboard-0134 -keneonyeachonam/MermaidModelHTML5Demo-031423 -keneonyeachonam/AI-Dashboard-031423 -lpnguyen/calculator -Vorkrath/CarperAI-diff-codegen-6b-v2 -nikitothkakad/runwayml-stable-diffusion-v1-5 -Dochee/Chatbot_Dialog_Bot -Neomyst/gertrude-model -Daniton/streaming_chat_with_gpt-3.5-turbo_using_langchain_sorta1234 -Gilvan/XRaySwinGen -akalin/DeepDanbooru_string -sudhir2016/Emotion -Rami/validate_chat_utd -awacke1/HEDIS.Roster.Dash.Component.Service -awacke1/HEDIS.Roster.Dash.Component.SDOH -awacke1/HEDIS.Dash.Component.Top.Clinical.Terminology.Vocabulary -henryz/streaming_chat_with_gpt-3.5-turbo_using_langchain_sorta -vinic1999/foodvisionbig -kajalag/Whatsapp_Chat_Analyzer -smruthi49/makeup -AriaMei/TTSdemo -ceckenrode/AI.Dashboard.HEDIS.Terminology.Vocabulary.Codes -AI-Dashboards/HEDIS.Assessment.PHQ9.GADD7.SDoH -keyu-tian/SparK -protoxx91/webui-docker -awacke1/Assessments.Clinical.Terminology.FHIR.PHQ.GAD.SDOH -ashishtanwer/RAD -protoxx91/stable-diffusion-webui-controlnet-docker -kingabzpro/Loan_Classifier -lpnguyen/continuous-discrete-time -aryan1107/ChatGPT-prompt-generator -awacke1/visual_chatgpt -awacke1/chatgpt-demo -awacke1/chatGPT -zihanch/zihan -kahnchana/clippy -yuan1615/EmpathyVC -PeepDaSlan9/EleutherAI-gpt-j-6B-B2BMGMT -najimino/pdf2gpt -chjun/movie_rating_bot -aodianyun/ChatGLM-6B -2gauravc/search_summary_chatgpt -yuyuyu-skst/White-box-Cartoonization -sepal/MeetingTranscriber -asafAdge/Detic -awacke1/Model-Easy-Button1-ZeroShotImageClassifier-Openai-clip-vit-large-patch14 -awacke1/Easy-Button-Zero-Shot-Text-Classifier-facebook-bart-large-mnli -chenglu/chenglu-my_awesome_model -jamatas/anime-ai-detect -Daniton/Midjourney-Disney -awacke1/EasyButton-openai-clip-vit-large-patch14 -JerEpoch/Button-openai-clip-vit-large-patch14 -srikotha/facebook-bart-large-mnli -ceckenrode/Easy-Button-Zero-Shot-Text-Classifier-facebook-bart-large-mnli -srikotha/bigscience-bloom -ceckenrode/runwayml-stable-diffusion-v1-5 -ceckenrode/bigscience-bloom 
-awacke1/EasyButton-runwayml-stable-diffusion-v1-5 -JSanchez79/js-test-facebook-bart-large-mnli -srikotha/runwayml-stable-diffusion-v1-5 -Dao3/DaJuZi_OrangeCatTheGreat -snjyor/You_Say_I_Draw -ceckenrode/AI-Dashboard-Zero-Shot-Text-Image-Models -Gradio-Themes/README -ywqisok/ysyy -Libra7578/Promt-to-Image-diffusions -donalda/Gustavosta-MagicPrompt-Stable-Diffusion -hslu-di/Reust_Yannic -zcxhcrjvkbnpnm/gpt4-demo -thiagolira/ChatPequenoPrincipe -naveed92/topic_segmentation -awacke1/RealTime-MediaPipe-AI-From-Video-On-Any-Device -Duskfallcrew/flowers-2-1-768 -rholtwo/Easy-Button-Zero-Shot-Text-Classifier-facebook-bart-large-mnli -rholtwo/Easy_button_runwayml-stable-diffusion-v1-5 -LanQian/ChatChuanHu -falcondai/code-as-policies -DemoLou/moe-tts -baby123/sd -C6AI/HDRL -mixshare/hackathon_chatbot_openai_api -Betacuckgpt/togethercomputer-GPT-JT-Moderation-6B -Dao3/SuperChatGPT -Rifd/Gxtaucok -Saturdays/CardioSight_dup -golda/gagal-jantung-2023 -apsys/normflows -3i2irg/SF-model -hkanumilli/DigitClassifier -dma123/gpt-js -AymanKUMA/Speech-Bubbles-detector -Basit12345/basit123 -Shrey-Patel/Image-Searcher -tddschn/yaml-parser -asalhi85/ArabiToolsDialecRecognition -parsi-ai-nlpclass/F22-Adversarial-QA -pengtony/hackathon_chatbot_openai_api -darkknightxi/mangoes -Ayakasuki/anime-ai-detect -ulysses115/PP-OCRv3-ch2 -akuysal/demo-app-streamlit -akuysal/demo-app-gradio -Dao3/MBTI_Test -cxm1207/ChatBOT -awacke1/Northern.Lights.Map.Streamlit.Folium -GorroRojo/nitrosocke-Ghibli-Diffusion -awacke1/AI.Dashboard.Maps -Raghvender/VideoCaptionWhisper -akuysal/SMS-spam-Turkish-sklearn -muratcelik/Image_Inpainting_w_context-encoder -akuysal/SMS-spam-English-sklearn -daikooo/DialoGPT-finetune-mental-health-chatbot -zoeozone/mrm8488-Alpacoom -helliun/beism -mustapha/chatAlpaca -chasetank/Visual-GPT-3.5-Turbo -ypchang/Variance_Reduction-European_call_option-volatility -awacke1/MN.Map.Hospitals.Top.Five -ypchang/Variance_Reduction-European_call_option-volatility_K-3D -awacke1/NVidiaRaytraceMirrorAframeThreeJS -cc1799/vits-uma-genshin-honkai -awacke1/NVidia.Raytrace.Mirror.HTML5.ThreeJS -onglaoxiteen/LoRa -Cherrycreamco/webui -awacke1/Thor.Odin.Baldur.Sleipnir.Myths -alalalyuqing/White-box-Cartoonization -nikhil5678/turkey-syria-earthquake-tweets -BENE2007/runwayml-stable-diffusion-v1-5 -nikolaiii/CompVis-stable-diffusion-v1-4 -karynaur/mnist-cloned -Vgi/nu-dialogue-sfc2022-stable-diffusion -tj5miniop/distilgpt2 -Sortoite/Simple-OpenAI-Chatbot -amoldwalunj/image_to_text -jimschat/VITS-Umamusume-voice-synthesizer -A1draw-12196y/DeepDanbooru_string -A1draw-12196y/anime-ai-detect -luncnymph/ChatGPT4 -OdinStef/Chatapp -dwolfe66/text-generation-webui-space -Sortoite/pdfGPT -amj/Voice-Cloning -Jimpa666/AI-PadelCoach -kedarnathdev/AQIprediction -Jishnnu/Emotion-Detection -Soumahara/Falah-iraqi-cafes -MuhammedAyman29/Fruits -abhishek-kumar/ChatGPT4 -NJCIT-Nie/README -masbejo99/modelscope-text-to-video-synthesis -saga24/nitrozen-gpt -szzzzz/sentiment_classification -PirateXX/ChatGPT-Detector -ap66/Real-CUGAN -songwy/VITS-Umamusume-voice-synthesizer -biingshanak/vits-uma-genshin-honkai -sysf/textspeech -actboy/ChatGLM-6B -UncleX/CompVis-stable-diffusion-v1-4 -sdpkjc/ChatPaper -pasha006/Environment -mordechaih/theintuitiveye-HARDblend -Datasculptor/StyleGAN-NADA -jefftko/DreamShaper-webui -AI-Dashboards/AI.Dashboard.Streamlit.Index.For.Assessments -radames/SPIGA-face-alignment-headpose-estimator -falconpranav/testgpt -Vgi/darkstorm2150-Protogen_x3.4_Official_Release -radames/Gradio-demo-video-image-webcam-upload 
-awacke1/Topic-Wizard-SKlearn -cloudqi/CQI_Fala_para_Texto_PT_V0 -awacke1/Streamlit.Data.Editor -360macky/first-space -rajistics/call-sentiment-demo2 -ruangguru/rg-ds-chatbot-gradio -Fakermiya/Nsfw-Sfw_Classifier -CC123123/blip2_t -mvnhat/gpt-qa-demo -fsqhn/anime-remove-background2 -DeepLearning101/Speech-Quality-Inspection_Meta-Denoiser -felicco/andite-pastel-mix -art3mis011/plantdiseasedetection -qipchip/facebook-blenderbot-3B -qipchip/allenai-cosmo-xl -erbanku/stabilityai-stable-diffusion-2-1 -bradley6597/gdrive-illustration-search -Samuelcr8/EVA -Samuelcr8/Chatbot -qingdiziqing/anime-remove-background -keneonyeachonam/NLPGraphOMSandLOCUS-032123 -ILyaz03/My_Personal_Teacher -kingli999/riffusion-riffusion-model-v12 -studentofplato/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator -itacaiunas/Ghibli-Diffusion -cjwzfczr12398/DeepDanbooru_string -Notmodern/hakurei-waifu-diffusion -rajistics/h2o_wave_transformers -awacke1/Markdown-Analyzer -ryansilk/quantycs -awacke1/StreamlitDotEdgeGraphViz-Images-SVG -everythingfades/Math-Stats-AP -supun9/face-verification -srepalli3/Demo01_GC_Content -ChristopherMarais/Andrew_Alpha -ShaunWithGPT/ChuanhuChatGPT -Nahidabyer/img-to-music -awacke1/Streamlit.GraphViz.Dynamic.Architecture.Diagram -raghu8096/PDF-QA -oshita-n/ControlNet -Jimmie/Urban8K-mini -suhailidrees/dogs_cats -Linkthat/IntentClassification -lewisrxliu/1 -ADUPA/README -Write2Learn/Transcriber -preechanon/Cutto -pythainlp/pythainlp-thainer-corpus-v2-base-model -fizban/simiandb -Ridwanz/sdrv1_4 -hannanrozal/stable-diffusion-image-variations -Robooze/transcription_loud -AI-ZTH-03-23/README -svummidi/pulseDemo -awacke1/Streamlit-Azure-IDR-Diagram -divilis/chatgpt -wanghaha13/ChuanhuChatGPT -lexi1343/Hi -annchen2010/ChatGPT -weishao2019/ChuanhuChatGPT -stchakman/Fridge2Dish -jarvis1997/fr_demo1 -Bakuman/Real-CUGAN -Detomo/Aisatsu-robot -maminghui/ChatGPT -geniius/ogkalu-Comic-Diffusion -SmonF/Dialogue_summarizer -czovoa/cbbb -AI-ZTH-03-23/3.HTML5-Aframe-3dMap-Flight -AI-ZTH-03-23/4.RealTime-MediaPipe-AI-From-Video-On-Any-Device -AI-ZTH-03-23/5.StreamlitWikipediaChat -AI-ZTH-03-23/8.Datasets-NER-Biomed-ClinicalTerms -BillBojangeles2000/WikiGPT -wz758727829/ChuanhuChatGPT -IAMTFRMZA/image-recognition-demo -xiaohuolong/ChuanhuChatGPT -MichaelT8093/AnimeGANv3 -awacke1/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline -merler/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline -AISloth/1.ChatGPT-HuggingFace-Spaces-NLP-Transformers-Pipeline -JohnC26/ChatGPTwithAPI -JohnC26/2.Streamlit.GraphViz.Dynamic.Architecture.Diagram -awacke1/HTML5-Dashboard -JennBiggs/HTML5-Dashboard -Anar0140/4.RealTime-MediaPipe-AI-From-Video-On-Any-Device -JohnC26/AI.Dashboard.Wiki.Chat.Cognitive.HTML5 -Anar0140/6.AI.Dashboard.Wiki.Chat.Cognitive.HTML5 -JohnC26/AI.Dashboard.Gradio.Streamlit.HTML5 -JohnC26/7-NER-Biomed-ClinicalTerms -JohnC26/MN.Map.Hospitals.Top.Five -JohnC26/StreamlitWikipediaChat -JohnC26/Gradio-Maps-Latitude-Longitude -Nattiman/chatsummarizercapstoneproject -sophatvathana/my-research-llama-65b-hf -williamzhou2023/GPT2 -vonewman/demo-app-streamlit -Kaludi/QR-Code-Generator-Streamlit_App -ejbejaranos/spanishAlpaca -ejbejaranos/somos-alpaca-es -NoamSiegel/gpt-workouts -lc202301/ChuanhuChatGPT -posak/Tune-A-Video-Training-UI -mzltest/gpt2-chinese-composition -charles0519/ChuanhuChatGPT -hahahehe99340/chatgpt -heine123/heine123-promotion1 -andreslu/orion -OttoYu/Tree-ConditionHK -Nithesh-101/Satellite_Image_Segmentation -YuDou/ChuanhuChatGPT -yiguid/ChatGPT -Datasculptor/LoRA-DreamBooth-Training-UI 
-d8aai/image-search -konstantinG/text2image -awacke1/InContextLearning-PromptTargeting -rajaatif786/VirBert2 -vivsmouret/Dipl0-pepe-diffuser -senquan/ChuanhuChatGPT -neveu/img-to-music -fiyen/YangyangChatGPT -suqionglin/White-box-Cartoonization -suqionglin/anime-ai-detect -kaushikdatta/new-car-inventory -CAPTY222/runwayml-stable-diffusion-v1-5 -roveliu/ChatGPT4 -MohammedMaaz/PDF-TEXT-BASED-QA -cstorm125/foodydudy_for_lesson1 -awacke1/TopTenAIGeneratedSolutionsAnyoneCouldUse -awacke1/StreamlitSolution-To-Your-Problem-Generator -awacke1/Tank.Moves.Tank.Fires.Tank.AvoidsObstacles.Combat -Detomo/aisatsu-api -straka/poison_ivy -fuxin123zz/ChuanhuChatGPT -rishabh062/donutCordImgToCsv -Sunil90/ChatGPT4 -thelou1s/ehcalabres-wav2vec2-lg-xlsr-en-speech-emotion-recognition -Dryash/ChatGPT4 -pchuri/image2text -monisazeem/ChatGPT4 -Gasi/White-box-Cartoonization -vedet9/ipl -Detomo/aisatsu-app-api -iqsoft/README -mikaelbhai/GPTBhai_TextToImage -smfry010/text-to-image -SilenWang/ReviewGPT -Ajaymaurya1008/meme-identifier -awacke1/Wikipedia.Chat.Multiplayer -awacke1/Streamlit.ChatWikiwriter.Multiplayer -Eroggen/ChatGPT4 -heiyuan/ChatGPT -frankio/goatheadrecordschatbot -MBA98/DiabeticRetinopathyDetection -Zwicky18/vits-models -Crow34/Comicdraw -awacke1/Word.Search.Experiments -grvgl/ChatGPT4 -Sohag1/Handwritten-text-Recognition-Using-TrOCR -cc00/THUDM-chatglm-6b-int4-qe -laxmikant/ChatGPT4 -joeli88/astrologer -goliathaiconsulting/ecommerce-platform -9752isme/ChatGPT4 -siddh4rth/narrify -Vipul-Chauhan/20newsgroup_QA -cc1234/stashtag -awacke1/GLB.Loader.HTML5 -Aloento/9Nine-VITS -lychees/Stable-Diffusion-ControlNet-WebUI -Aniquel/WizApp -pwilczewski/banking_crisis_dashboard -Kai-GL/ChatGPT4 -Vipitis/ShaderEval -DrBenjamin/AI_Demo -deadash/BelleGroup-BELLE-7B-gptq -manjuvallayil/video_text -pxovela/ball-classifier -Darkk88/medium-GPT4 -liushilei/hackathon_chatbot_baixing_api -meraGPT/chat-with-myGPT -pinots/ChatGPT4 -tracinginsights/F1_API -Aloento/9Nine-PITS -Benebene/Chat-question-answering -Abrish-Aadi/Chest-Xray-anomaly-detection -cchuang2009/News-Forum -NeuralInternet/Alpaca-LoRA-Serve -gunti/ChatGPT4 -JoshuaWS3/hakurei-waifu-diffusion -Rimi98/Relax-Teacher -facat/alpaca-lora-cn -RGBD-SOD/depth2rgb-dpt -AlexReverie/ImageSonification -jmartinezot/find_plane_pointcloud -mserras/somos-alpaca-es -gngpostalsrvc/COHeN_demo -iceburg/ChatGPT4 -awacke1/RLHF.Evals -Highway/infrastructure-cost-data-classifier -dcsjsuml/README -awacke1/RLHF.Reinforce.Learn.With.Human.Feedback -peterpull/MediatorBot -gaochangyun/bert-base-chinese -coolprakashjj/Bradley-Siderograph-Public -xiaoxicc/susu -zzz666/ChuanhuChatGPT -Wayben/ChatGPT -chenxx/ChuanhuChatGPT -shideqin/test -thelou1s/TensorflowHubSpice -tracinginsights/api -TSjB/QM_RU_translator -HMinions/new-Bing-with_your_cookies -startway/whisper -akhilkalwakurthy/AxisGPTv3 -manhngolibo/manhngo -MajinBog/ItsJayQz-GTA5_Artwork_Diffusion -muchuam/anime-remove-background -ayaanzaveri/whisper-webui -Snb-ai/gpt2 -wxiaofei/vits-uma-genshin-honkai -rachittshah/doc-qa -achimoraites/TextClassification-roberta-base_ag_news -Pepsr/Chatbot -xxie92/proteinml-demo-dssp-duplicate -zivpollak/EyeCareXV002 -asbeabi/PoCs -AHzizi/WaifuVoiceGen -ianlianetlai/talk -bedrock123/nlp-vit-gpt2-image-captioning -jinmao/2 -BHD/google-pix2struct-screen2words-base -ruanchaves/portuguese-question-answering -awacke1/RLHF.Evals.Intake.Upvote.Downvote -molinsp/codegen_exploration -JohnTan38/calculator -Fcou/ChatGPT3.5 -Snowling/White-box-Cartoonization -jarvisbot/ChatImprovement -zekewilliams/video 
-niv-al/peshperima -abidlabs/Lime -jroust/prompthero-openjourney -starlit7/USPoliticsTTS -aliabid94/new-theme -radames/face-landmarks-gradio -geeek/text-moderation-score -Mikan1103/anime-remove-background -tanish2502/ChatGPT-AI-Assistant-App -baruga/gpt4-sandbox -Cosmo-Hug/Cosmo-Hug-FeverDream -Fazen/ask-youtube -awacke1/Markdown.Streamlit.Teaching.Colleges -deedax/TLDR-the-TnC -ParisNeo/MBart50Translator -awacke1/Team.Click.Battle.Multiplayer -jdczlx/ChatGPT-chuanhu -xhd456/anime-remove-background -Mochine/hackathon_chatbot_openai_api -Shularp/marian_translation_test_th_ar_en -caojiachen1/ChatGPT -ruanchaves/hashtag-segmentation -andikalfauzi/Churn-Prediction -Iruc/weirdcore-diffusion -greenlights/gitapp -Li6699/myChat -vedalken/text2Pokemon -sirmews/supabase-bookmarks -awacke1/Write-Stories-Using-Bloom -duanzhihua/AI-ChatGPT -sidhusmart/prompthero-openjourney-v4 -vkdhiman93/cerebras-Cerebras-GPT-1.3B -JonysArcanjo/App_predict_House_price -dperales/Fraud_Detection_Pycaret -Hanyin/anime-remove-background -diy2023/databricks-dolly-v1-6b -vrajeshbhatt/Automated-Ticket-Management-System -goliathaiconsulting/airbnb-search-engine -gradio/theme_builder_main -darthPanda/romeo_and_juliet_chatbot_with_gptIndex -AlekseyKorshuk/michellejieli-NSFW_text_classifier -xillegas/duolingo-bot -laitkor/remove_background -awacke1/Vesuvius.Challenge -kieranberton23/plantdx -Kokuen/oobabooga-windows -jennysun/jwsun-multisubject-render-model -comet-team/kangas-direct -awacke1/Intrinsic.Bias.Analyzer -awacke1/Bloom.QA.Translation.LLM.AI -EyanAn/vits-uma-genshin-honkai -Navneet574/algerian-forest-fire-prediction -sharjeel1477/Brain -golda/Churn_pred -cahodk/live-ml5-facemesh-p5js -Devic1/LinearRegression -JoeStrout/simple-llama-finetuner -aliabd/whisper -Aniquel/WizApp_Code_Generator -EnigmaOfTheWorld/MemeWorld -thelou1s/food_calories -ThirdEyeData/Rogue_Component_Prediction -ysharma/llamas -johnyang/ChatPaper111 -pscpeng/ChuanhuChatGPT -wzq10314/VITS-Umamusume-voice-synthesizer1 -upGradGPT/GPT_Interview_beta -nyaridori/charactercreator -ThirdEyeData/Maximum_Repair_Prediction -jaymie/Virtus -chaozi/anime-remove-background -jinonet/digital-agency-website -segestic/HealthBlock -victor/autotrain-advanced-dreambooth -amagastya/JOY -aayushrawat/recommender-model -keras-dreambooth/Dreambooth-mandelbulb-flower -LEL-A/german-alpaca-test -dawood/PDFChatGpt -konfuzio-com/PP-OCRv3-ch -NimaKL/FireWatch5k -Kevin676/ChatGPT-with-Voice-Cloning -keras-dreambooth/dreambooth-markhor -sf-checkin/checkin -alexpaul/microsoft-codebert-base -ThirdEyeData/Component_Repair_Time_Prediction -smdcn/stabilityai-stable-diffusion-2-1 -smdcn/stabilityai-stable-diffusion-2-1-base -awacke1/Lunar.Lander.Asteroids.Continual.Self.Play -rohan13/coursera-qa-bot -lanbogao/ytdlp-whisper -ori1026/OriChatGPT -izumo092/TestSecret888 -hongweii/anime-ai-detect -awacke1/Emoji.Enumerator.Menu -VCPirlo/CatCat -xxxxxxianYu/vits-xxxxxxxxxxxxxxxxxx -weide/OpenChatKit -sai22/vits-models -sirmews/url-summarizer-playground -awacke1/HTML5.3D.Flight.with.Gravity -awacke1/HTML5.Aframe.Frogger.Test -awacke1/HTML5.Wordle.Solver -awacke1/Azure.Streamlit.Github.Actions.Azure.Container.Registry.Docker.AKS -awacke1/Markdown.Streamlit.EDA.Generic.Loader.Presenter.Memory -awacke1/Streamlit.Azure.SDK.Terraform -kzachos/PDF-chatbot -zhanpj/ChatGPT -michael135/dontalk -knotdgaf/gradiotest -Detomo/AI-Galary -awacke1/Positive.Reframing.Organization.Culture -Harsh12/Rossmann_Sales_Prediction -Keyurmistry/Joeythemonster-anything-midjourney-v-4-1 
-Kevin676/ChatGPT-with-Voice-Cloning-in-Chinese -bradley6597/Spell-Bee-Solver -ppsantiago/chatGPT -awacke1/Amygdala.Hijacking.Using.Graph.Model -awacke1/Graph.Model.Feedback -Kevin676/ChatGPT-with-Smooth-Voice -VishyVish/Face-ID-duplicated -awacke1/Streamlit.Funny.Feedback.Upvote.Downvote -khanrc/tcl -gstaff/guessing-game -Kevin676/ChatGPT-with-Smooth-Voice-1.0 -ThirdEyeData/Next_Failure_Prediction -SnJForever/GPT-LANG -amongusrickroll68/openai-jukebox-5b-lyrics -Dantra1/CeliaSensei -FlippFuzz/whisper-webui -xfambi/zapi -RikyXDZ/NesiaChan -q846392920/vits-uma-genshin-honkai -GipAdonimus/PAIR-text2video-zero-controlnet-canny-gta5 -Alesteba/NeRF_ficus-pxl -raoyang111/img-to-music -stistko/CzechPunctuation -arattinger/emoji-diffusion -kepajide/keyiwei -shahzaibelbert/CHATGPT-Detector -Kevin676/Demucs_v4 -luciancotolan/R50-deforestation -Ariharasudhan/YoloV5 -kingsotn/tt-ai -phildunphy/Reverse_Asset_Allocation -savhascelik/FLAN-T5 -Rian000/Sayashi -cathrineee/CLIP-image-search -nomnomnonono/Siri-via-Whisper-ChatGPT -kerls/is-this-food-photo-instagram-worthy -Arthur678/vits-uma-genshin-honkai -amarax/cowtopia -CanKorkut/turkish-hatespeech-detection -osbm/token_merger_demo -Treav/DICOMDeidentify2 -awacke1/Assess.LOINC.Panel.Extractor -josh59999/webui -Kevin676/s3prl-vc-vcc2020 -WhyLIM/GWAS -tanvirsingh01/projectFeeder -ypx123/vits-uma-genshin-honkai -Ezi/Licences_check -PeepDaSlan9/carecoach-gpt-neo-1.3B-finetune-v2-B2BMGMT -VK123/ChatGPT4 -Letheoricien/demo -sefaozalpadl/LabelStudio -panchajanya1999/chatgenius -Letheoricien/MLPC2023_MumBot -michellaneous/Baymax -iamrobotbear/cosine-match -Cletrason/dalle2-dreamweddingbooth -wushuangBaOYWHA/chatpdf -xxbb/VITS-Umamusume-voice-synthesizer -Dobeuinc/README -MichaelT8093/ImageAnimation -SnailsLife/gsdf-Counterfeit-V2.5 -ken4005/Uhi-ChatGPT -Letheoricien/MLPC_2023_NATHEO -jishnupsamal/sports-sustainability -axart-software/simple-beat-generator -livinNector/TaNER -dsymbol/whisper-webui -jellyw/landscape-rendering -yiluxiangbei/baize-lora-7B -awacke1/Human.Feedback.Dynamic.JSONL.Fields -awacke1/Human.Feedback.Dynamic.JSONL.Dataset.Download -awacke1/Azure.Terraform.Github.Actions.Web.App.MLOps -Flux9665/Blizzard2023IMS -SameerR007/Movie_Recommendation_updated -Cletrason/Cletrason-toad-in-the-mario-movie -toraleap/chatbot -furqankassa/Human.Feedback.Dynamic.JSONL.Dataset.Download -dawood/PDFChatGpt-test -TerrificTerry/HAAO_AI -stanciu/DanielPinheiro-gpt4all -Devound/chavinlo-gpt4-x-alpaca -hpratapsingh/Movie_Recommendation_system -stanciu/andite-anything-v4.0 -panchajanya1999/spam-classifier -Kevin676/Clone-Your-Voice -awacke1/PoetandKnowIt -lyf/faster-whisper-webui -NowLoadY/ocr-gpt -BWQ/Chatgpt -ashzzf/vits-uma-genshin-honkai -tappyness1/error-analysis-cv-segmentations -glt3953/AIPaint -xl2533/MakeInstruction -skyler36237/vits-uma-genshin-honkai -tenhulek/prompthero-openjourney-v4 -itsjacksimon/runwayml-stable-diffusion-v1-5 -mikelix1970/ChatGPT4 -qdd319/ChuanhuChatGPT -xiaolongbaox/gpt2.0 -almontalvao/Sentiment_Analysis_Streamlit -cactusfriend/nightmareprompts -JohnPinto/Human_Activity_Recognition-HAR-Video_Classification-HMDB51-Dataset -srijan2024/SentimentAnalysis -daveckw/custom-chatgpt -Navpreet/rabbit3 -motroma/prompthero-openjourney -stanciu/decapoda-research-llama-13b-hf -milan2000/Milan_generativeAI_AD-test -jasonjones/Batman-AdMaker -4Taps/SadTalker -Msp/opensource_chat_assistants -Dimitre/stablediffusion-canarinho_pistola -xxie92/antibody_visulization -sikao/README -damilojohn/Playlist_Generator_For_Afrobeats -triggah61/li5 
-sklearn-docs/feature-importance-rf -Dao3/chatwithdocs -Metatron/LEO -svjack/ControlNet-Face-Chinese -IstvanPeter/openai-whisper-tiny -hiDenorIYamano/srt-translator -oliver2023/mm-react -sudthakur/yt_summary -Wanlau/sovits-4.0_datealive -ORI-Muchim/MinamiTTS -espnet/TTS -kiwifly/nicky007-stable-diffusion-logo-fine-tuned -seo-sean/andite-pastel-mix -smith2020/WhatsApp-chat-analysis-summary -runninghsus/lupe-bsoid -Atualli/yoloxTeste -Pritish100/AA0_LeLO_v_2.0 -PureNaCl/Toxic-Tweets-MS2 -pikapikaPikachu/chatbot -awacke1/QuickLearner -awacke1/InstructGPT -awacke1/Spinning.Model-1-10 -betterme/Nice -Tshackelton/IBMPlex-DenseReadable -almontalvao/GenAds-AI -nickprock/nickprock-bert-italian-finetuned-ner -CelesteChen/GPT-token -uchuukaizoku/CharacterClassifier -cyliawardana/Womens_Clothing_Sentiment_Analysis -7thHeaven/GPT2WordPress -awacke1/Docker.Jupyterlab.Integration.HF -miku-hutao/vits-uma-genshin-honkai -prikmmo9/finetuned_diffusion -mmDigital/therapy-bot -lout33/Youtube-Whisperer -phildunphy/SALT-curated-asset-allocation -thanhtvt/uetasr -Billius/VizLib-TopLargeHospitalsNewJersey-04-07-2023 -UtkMal/fresh-or-rotten-apple -pakooo/Text2Image -irprojectteamnith/IR-project-multilingual -Billius/runwayml-stable-diffusion-v1-5-04-07-2023 -MesonWarrior/vk -cloud-sean/csv-copilot -JunchuanYu/SegRS -Usually3/text-to-image -willdguo/fastai_l2 -beskrovnykh/danielsearch -wazhendeshiniya/White-box-Cartoonization -vivek-a666/Health_Forecast -altafalam3/Text-Summarizer -vrajeshbhatt/Job-Title-Prediction -GPTMonster/KBprototype_first -Qrstud/andite-anything-v4.0 -darkartsaibwd/Envvi-Inkpunk-Diffusion -bigyunicorn/sashimi_identifier -ashutosh1919/quantum-perceptron -Skyler123/TangGPT -lhkhiem28/A-segmentation-system -oliver2023/chatgpt-on-wechat -awacke1/Streamlit.Graphviz.Stories.JSONL -snehilsanyal/scikit-learn -Kevin676/Raven-with-Voice-Cloning -awacke1/Balloon.Club -jordonpeter01/stable-diffusion -jordonpeter01/SD-2.1-CPU -jordonpeter01/stabilityai-stable-diffusion-2-1-base -Cobalt337/lambdalabs-sd-pokemon-diffusers -WatchOutForMike/Character -Namit2111/id_verfiy -ivotai/VITS-Umamusume-voice-synthesizer -yiwangshangxian/anime-remove-background -Abubakari/Sales_Prediction -riyueyiming/gpt -globalmatt/catsanddogs -lawliet/CS224-knowledge-discovery -DevashishBhake/SERModel -shj7972/gradiospace -ccarr0807/HuggingGPT -20four60/Auto-GPT -samle/prompthero-openjourney-v4 -Worlandil/ChatGPT4 -yuntian-deng/Gradio-Popup-Confirmation-Demo -Falah/female -shiyi11/anime-ai-detect -AeroXi/english-ai -Pranjal-666/DL_bearTypeTest -divyahansg/text-generation-webui-space -emmaenglish/sentiment-analysis-of-text-app -Jeffgold/BackgroundEraser -ieuniversity/Pangea -Promptengineering/anon8231489123-vicuna-13b-GPTQ-4bit-128g -sadickam/Domestic-Building-Construction-Cost-Planning -Sarst/VITS-Umamusume-voice-synthesizer2 -shiguangshiwo/anime-remove-background -ochyai/ochyai_test -yancey001/Linaqruf-anything-v3.0 -chufeng09/Panel_PDF_QA -ieuniversity/News-Translator -adriansd12/Bible_Index -halek3550/thaimop -Navneet574/Kidney_Stone_Prediction -KalbeDigitalLab/pathology_nuclei_segmentation_classification -kenttate937/pelisplusss -xiaoyun235/White-box-Cartoonization -s1241003/translate_gpt -gradio/space-api-fetcher -RedYan/nitrosocke-Ghibli-Diffusion -Wing0820/Real-CUGAN -vg055/demo_analisis_de_sentimientos_textos_turisticos_mx_polarity -Jerseyborn/openai-whisper-large-v2 -hussain-shk/IndiSent -arcAman07/KanyeGEN -hackathon-somos-nlp-2023/ask2democracy 
-vg055/roberta-base-bne-finetuned-analisis-sentimiento-textos-turisticos-mx-pais -sklearn-docs/Hierarchical-clustering-dendrogram -dylanmcc/beaverdam -futuristicdude/andite-anything-v4.0 -CNXT/CHaTx -CNXT/GPTx -yuyijiong/quad_match_score -sklearn-docs/SGD-max-margin-seperation-hyperplane -smallyu/dalle-mini -jobcher/background-removal -sklearn-docs/Lasso_and_elasticnet_for_sparse_signals -dddmiku/vits-uma-genshin-honkai -Fox1997/vits-uma-genshin-honkai -bitcool/humarin-chatgpt_paraphraser_on_T5_base -megatron7/bert-base-chinese -yanli01/wrwj -qi3/White-box-Cartoonization -yeashwant/chatgpt-prompt-generator-v12 -prerna9811/Chord -johnsamuel/stabilityai-stable-diffusion-2-1 -Lasion/NCKH_2023 -IntelligenzaArtificiale/ChatGLM-6B-Int4-API-OpenAI-Compatible -gsrathoreniks/web_ui -poiiii/clefourrier-graphormer-base-pcqm4mv1 -xizhongluomu/Real-CUGAN -sairam9/ChatGPT4 -sklearn-docs/huber-vs-ridge-regression-for-outliers -ras0k/WhisperX-v2 -daveckw/prompt-2-sd -AlawnCN/webui-docker -UtkMal/Classifying-snake-breeds -Celestinian/Nora-Inference -SebastianSchramm/Cerebras-GPT-111M-instruction-playground -mrwenchen/stabilityai-stable-diffusion-2-1 -THEBOGLER/toxicman -BHO/URDtest -Kevin676/Gpt4All -Artples/Named-Entity-Recognition -kglema/lemitar.AI -Raaniel/Keyword_demo -kazuk/youtube-whisper-13 -awacke1/SelfCareDimensionsPositiveReframing -radwulf101/ChatGPT4 -sklearn-docs/Random_sample_consensus -Ikaros521/VITS-fast-fine-tuning_nymph -dyhzq/vits-uma-genshin-honkai -rires-kasai/whisper-transcription -Qiukai/gpt -Ryukijano/fastai_pet_classifier_resnet50 -cldelisle/test -Chintan-Donda/KKMS-KSSW-HF -paschalc/ImageRecognitionDemo -cpwan/RLOR-TSP -xangma/chat-pykg -NoFearNoDistractions/ChatGPT4 -pplonski/mr -Voicelab/vlT5-rfc-generation -AiBototicus/BucksAI-2 -awacke1/LLMMethodologyToImproveLearning -awacke1/AzureContainerAppsAIArchitecture -8star/DeepDanbooru_string -mb1te/PSII_FINAL -awacke1/Memory-Chat-Story-Generator-Bloom -AiBototicus/BucksAI-3 -doctorsafe/mychat -sklearn-docs/Comparison_K_Means_MiniBatchKMeans -AiBototicus/BucksAI-4 -birgermoell/syllables_app -haoyu/age_detection -bugbugbug/vits-uma-genshin-honkai -YangHao520/testShare -sklearn-docs/Plot-Ridge-Coefficients-as-A-Function-of-the-Regularization -Aitor/CVchat -andufkova/articles -radames/gradio-chatbot-read-query-param -sklearn-docs/mean-shift-clustering -Duskfallcrew/Osenayan_Mix -sklearn-docs/receiver-operating-characteristic-with-cross-validation -Duskfallcrew/Duskfallcrew-Osenayan_Mix -hololabs/bibleyouread -sklearn-docs/voting-classifier-plots -raomaya/COVID_travel_dashboard -changlisheng/shangChat -wanfeimsn/stabilityai-stable-diffusion-2-1 -sklearn-docs/affinity-propagation-clustering -dhfdh/stable-Diffusion-Inpainting-with-Segment-Anything -zhangbo2008/chainyo-alpaca-lora-7b -Anonumous/RuImageCaptioning -YukiKurosawaDev/ChatGLM -0xtanmoysamanta/espnet-kan-bayashi_ljspeech_vits -asafAdge/color_clustering -Zpwang-AI/InsultingLanguageDetection -Jacks2003/3D_Photo_Inpainting -yunzai123/anime-ai-detect -ceckenrode/SelfCareDimensionsPositiveReframing -ceckenrode/Memory-Chat-Story-Generator-Bloom -ceckenrode/Memory-Chat-Story-Generator-ChatGPT -B1360976/waste-management-system -katanaml-org/sparrow-ml -spenceryonce/gpt2 -lewisrxliu/3.3 -dorkai/singpt -sklearn-docs/Joint-feature-selection-with-multi-task-Lasso -cyanab/GlobalVoice1 -nateraw/jupyterlab-test2 -whilefalse/CLIP -nateraw/huggingface-user-stats -chkla/PromptCardsPlayground -Nikitowie/Lama-Cleaner-lama -Libra7578/Image-to-video -olivianuzum/TwitterTwin -dawood17/SayBot_Enchancer 
-Raaniel/Search_Engine2.0 -soodoku/ethnicolr -sklearn-docs/ridge-coefficients-vs-L2 -dfyinc/GeniusChat -kdb8756/Pip_Counter -cloudwp/prompt-machine -luckli/22h-vintedois-diffusion-v0-1 -luckli/chavinlo-gpt4-x-alpaca -KarmaCST/Dzongkha-To-English-Translation-NLLB-Fine-tuning -rainy3/chatgpt_academic -AiPalsDev/Translate_It -lambdasec/santafixer-demo -weide/ChuanhuChatGPT2 -xinchen0215/gradioTest -sredevops/README -Plsek/CADET -sklearn-docs/MNIST-Agglomerative-Clustering -MingGatsby/VoiceFixer -MohitGupta/Eng2Indic_Translitration -thieutrungkien/Hosioka-Baka-Diffusion -CoWork/dreambooth-training-public -Adr740/SmartHadithFR -srush/gradio_tools -PranayVerma/IRIS -srush/minichain-table -Xixeo/Face_Recognition -naxida/anime-remove-background -jleexp/Youtube-Whisperer -adamcasson/transformer-flops-calculator -SmallSpider/DeepDanbooru_string -Cosmopolitan/stabilityai-stable-diffusion-2-1 -lora-x/Backpack -UndueTarget/audioFILE_to_text -yueyouxin/runwayml-stable-diffusion-v1-5 -IcelandAI/Iceland-Top-Ten-Things-To-See -iamkhadke/chatbot -IcelandAI/AnimalsOfIceland -awacke1/Streamlit-Clipboard-Monitor-Javascript -hhhhardman/VITS-Umamusume-voice-synthesizer -hhhhardman/VITS -awacke1/File-Memory-Human-Feedback-Streamlit -IcelandAI/Foods-and-Drinks-of-Iceland -Pearx/ChatGPT-Assistant -itacaiunas/gerador-imagens -Proxdigestpills1/README -Faizanshaikh/runwayml-stable-diffusion-v1-5 -Cloudfeng/anime-remove-background -JediHustle/beartector -Lilflerkin/WellNexus -stanciu/eachadea-legacy-vicuna-13b -nikansh/hamyar_riazi -Duskfallcrew/EpicMix_Realism_WebUi -HESOAYM/ElviraMulti -Afnaan/chatbots -karelgideon/talent-fair-h8-karel -Greysuki/whisper-api-compress -humbe/comunico -itintelpro/MyCybersecHelper -iamkhadke/pix2struct_docvqa -fael33/NAWNIE-golden-hour-photography -sensho-lx/MubertTTM -cloudwp/Top-20-Diffusion -tanvirsingh01/YourMoodDiary -cloudwp/DreamShaper-webui -cloudwp/simpleGPT -giiift/expert_system -kony1337/frame-interpolation-fix -Natsha/mocap-ai -onuri/asst -rgergw/White-box-Cartoonization -varun500/flan-alpaca-base -BigChia/bird_classifier -Jmmianda/memo -blaziant/ysda_nlp_ops -ankitnag0/ChatGPT4 -Abduhoshim/speech_emotion_detection -NechkaP/arxiv-streamlit-lab -effluxriad/YouTube-comments-generator -jusancp99/imagenes_similares -diazcalvi/KIONAPI -ferdmartin/DogBreedsApp -realambuj/Image-Captioning-App-using-BLIP -AdamWEE80/VoiceTTS -timo1227/Image -Kevin676/Telephone-Interviewing_PpaddleSpeech-TTS -luotr123/myWeb -sklearn-docs/Feature-Transformations-with-Ensembles-of-Trees -v-nemala/similar-images -Cecil8352/vits-models -svjack/Question-Generator-on-Chinese-Doc -cactusAtSea/influencerGPT -6Eternal9/ChatGPT4 -Big-Web/MMSD -Vasanthgx/Pet_Classifier_vasanth -nkigumnov/banks-ethics-sentiment -SuCicada/Lain-vits -pkarthik15/docchat -Dute8788/anime -theholycityweb/HuggingGPT -55dgxxx558/anime-remove-background -aLIdAmIrI/math-help -kazgafa/ChatGPT4 -ThirdEyeData/Customer-Conversion-Prediction -yock116/ChuanhuChatGPT -theblocknoob/hugging-face-space -anjaymabskuy/Linaqruf-anything-v3.0 -datagpt/url2info -sklearn-docs/bayesian-ridge-regression -SkidPC/SweetLuna-Aurora -dfgnota/gpt-doc-mem -volhack/vits-uma-genshin-honkai -awacke1/Slot-Machine-HTML5 -awacke1/Slot-Machine-Animal-Safari -diaoren/OpenSetObstacleDetection -kumar989/Health_Vision_1 -csumbdante/fire-api -Mahiruoshi/lovelive-ShojoKageki-vits -anekcb/Bee4Med -suryabbrj/CollegeProjectV2 -alexrods/Smartcity-Traffic-Detection -duong11111/ChatGPT4.0 -PrathmeshZ/StoryTellGPTneo13 -victor/tesla -Vasanthgx/Cats_vs_Dogs_vasanth -cannlytics/skunkfx 
-xiang2811/ChatGPT -alicelouis/NSCLC_classification -galang123/test123test -shvuuuu/Credit_Card_Churn_Predictor -prasanna2003/ChatOPT -teragron/docuchat-webui -DmitriiKhizbullin/camel-data-explorer -ssreeramj/tiger-town-hall-chatbot -sklearn-docs/birch_vs_minibatchkmeans -CillySu/prompthero-openjourney-v4 -Immi007/ChatGPT4 -louiszhuang/pony -eunjae/LoRA-DreamBooth-Training-UI -theabdullahzeeshan/seven -Intoval/privateChatGPT -nebula/counting-anything -fb700/chat3 -manh-linh/Linh-Gradio -meaqua33/White-box-Cartoonization -Ekittl01/Endeavors -Li2024/chatai -teli168/human-centered-summarization-financial-summarization-pegasus -realambuj/Image_Classifier_using_RESNET50 -ho11laqe/nnUNet_calvingfront_detection -xiayi/anime-remove-background -ch1n3du/bird_or_forest -Gmq-x/gpt-academic -tyoung560/ai-assist -Ailexcoder/GPT4ALL1 -salamat/first_app -jerichosy/DIGIMAP-Colorization-Web-App -hrishikeshpai30/hrishikeshpai30-wavlm-libri-clean-100h-large -msawant/sample_assist -maxineattobrah/EmotionDetection -xiaoxiao140420/anime-remove-background -hectorduran/wavescomparing -Haleyok/stablelm-tuned-alpha-chat -tsailada/Emily -DuckyPolice/stabilityai-stable-diffusion-2-1 -tsailada/Chefsky -spring-chatbot/customer-service-assistant -zylj/MiniGPT-4 -aodianyun/whisper -TechShark20/handwespeak -jsscclr/CLIP-Interrogator -Robinn/WordSent -ledetele/KrystalPDF -cccccch/VITS-fast-fine-tuning-DingZhen -MasterThesisCBS/NorPaca_GPT -a245757/rebornrun -EagleLoveAI/ChatGPT_Application_Robot -EnigmaOfTheWorld/TechnoForge_Automotive -michael1943/geektime-ai-class -huanghun/yuyinkelongChatGPT-with-Voice-Cloning-for-All -2ndelement/voicevox -jordonpeter01/laudos -Jaggi/ImageGenration -HadiTajari/Penguins_pred_App -Ryukijano/Ryukijano-controlnet-fill-circle -PushkarA07/Sanskrit-Text-To-Speech -noman1408/speechToSpeechGPT -d3finit/AI -kalvjam/chgpt -Ifeanyi/tellme.ai -Jaffermirza17/ProjectPythonClass -testingcodehere/oai-proxy -hectorduran/wordsimilarity -Hashom132/stabilityai-stable-diffusion-2 -UVA-GCOM/Group_4 -blaziant/ysda_nlp_ops_update -jkubacki/pokedex -hesha/anime-remove-background -Kevin676/NLLB-Translator -JerryYou/ChatGPT-prompt-generator -ParagKesharDas360/MovieRecommadationApp -datagpt/pdf2gpt -sklearn-docs/Out-of-Bag-Random-Forest -rajeshradhakrishnan/english-malayalam -speeddemonau/OpenAssistant-stablelm-7b-sft-v7-epoch-3 -kernel982/Youtube-Transcriber -Feifei315/Joeythemonster-anything-midjourney-v-4-1 -Monster/alpaca-lora_13b_q -nsakki55/my-aim-demo -DEfiAnTH/SPSpace -Stephen2022/daxing -TD-jayadeera/Password_Strength_Prediction -snoopyv126/gpt -ShreyashNadage/InvestmentCopilot -HighCWu/Style2Paints-4-Gradio -Pluviophile/vits-uma-genshin-honkai -amasgari06/ChatGPT4 -ANLPRL/NER_On_Oral_Medicine -vishal2023/Pneumonia-detection -pplonski/my-notebooks -seblutzer/ChatGPT4 -trholding/SpeechCloning -Feifei315/flax-midjourney-v4-diffusion -JackBAI/MassageMateNLP -McClane-Lee/fnlp-moss-moon-003-base -ivanho92/training -typesdigital/TD-OpenWeatherMap-API -FrozenWolf/Neural-Style-Transfer -typesdigital/image-to-text-app-td -typesdigital/twitter-pro -huohguohbo/Chatbot_REQUIRES_OPENAI_KEY -Danuuo/GPTDocs -XyBr0/test -kenton-li/yolo_cell -Fareso/minima -jotarodadada/animeCf -radames/gradio_streaming_webcam_blocks -jmesikto/whisper-webui -UVA-GCOM/Shuran_Ivy_Anlin_Robin -matthoffner/baby-gorilla-agi -sahshd/ChuanhuChatGPT -Kedreamix/YoloGesture -Ntabukiraniro/Recipe -tharunk07/crop-prediction -HachiRe/Fusani -yusendai/fnlp-moss-moon-003-sft-plugin -huedaya/hf-openai-whisper-dev -fueny/git7fueny 
-NicolasGaudemet/WritingAssistant -caoyiming/vits-uma-genshin-honkai -zhengyu123/ighchatgpt -cryptoanonymous77/README -Sambhavnoobcoder/pneumonia-detector-v1 -FriendlyUser/bark -momegas/megabots -godspeedsystems/README -typesdigital/telegram-chatbot -Jarex/TwitterBot -Mcdimmy/Clothing-Identifier -rick200213/Text2speech -addiopattio/idkman -MathysL/pwa -Finnone/stabilityai-stablelm-tuned-alpha-7b -Crow34/Joi -zhuyuheng/IMossGPT -vorstcavry/VoCh-beta -Ananthap4/itineraryGenerator -asd998877/TsGpt -sagar-kris/harry-mack-bot -typesdigital/CryptoUpdate -typesdigital/Gpt4all -Sky5408er/anime-remove-background -yuhanbo/chat-gpt -zox47/succinctly-text2image-prompt-generator -Navneet574/Drug_Classification -Navneet574/Heart_Disease_Prediciton -msmilauer/AutoGPT-duplicated2 -yuukicammy/vit-gpt2-image-captioning -jibay/test -typesdigital/CodeX -tanmaysindia/vasista22-whisper-hindi-large-v2 -Rams901/flight-chat -Raghav001/Experiment -Shadow344/ogkalu-Comic-Diffusion -Akbartus/U2net-with-rgba -RKocielnik/bias-test-gpt -venkataseetharam/similaritysearchnew -yashzambre/EXCEL -Xule/ChuanhuChatGPT -dapaipai/ChatGPT4 -Bishnupada/Fine-tuning-using-Hugging-face-transformers -simplyjaga/movie_genius -harry991/geektime-ai-course-demo -Lenery/Dolly-v2 -uchuukaizoku/CharcaterClassifier1 -koalaYuan/gradio-demo -Alpaca233/LangchainPDF -Rakesh30/Sentence_Embedding-App -Tape/yoga -dhanushreddy29/microstructure-project -hdm1/mindtune -dorkai/dorkgpt -sinz2002/ChuanhuChatGPT -easrng/text-to-emoji -MikoProduction/PneumoniaDetector -LLxD/prompthero-openjourney-v4 -caslabs/sanity-test-midi -ApathyINC/CustomGPT -luckybender/ChatGPT4 -typesdigital/HealthBOT -srossitto79/RajuKandasamy-dolly-v2-3b-8bit -jvde/sovits-webui -ericmichael/openai-playground-utrgv -amgad59/Keras_cv_wedding_dress -bizvideoschool/ScriptWriterTest -typesdigital/CODEX-explore -arxnov/anotest -amarchheda/ChordDuplicate -Terminus0501/vits-uma-genshin-honkai -sklearn-docs/early_stopping_of_gradient_boosting -HaMerL/ChaosinChat -Hoodady/3DFuse -zhuge09/CompVis-stable-diffusion-v4 -pd4solutions/ATLChatbot -Renxd/devast -aarontanzb/Langchain_query_app -Sparkles-AI/design-look-a-likes -XAI/Cleaning-ImageNet-Hard -inesani/ner-log -cloudwp/place_of_Imagination -noahzev/bark -cryptoanonymous/02dlyaPerevoda3dVideoV2DAnime -Nour33/sci_summ -sheraznaseer/test_pdfqa_2304 -Revanth200218/Project -DaCuteRaccoon/dalle-mini -usamakenway/bark-Ai-audio -AhmedBadrDev/stomach -timpal0l/chat-ui -radames/gradio_audio_streaming_blocks -omkarmore83/t5-base -chaozn/fastai_dogs_vs_cats -Basil2k4/VPSnguyenmanh -1gistliPinn/ChatGPT4 -marcilioduarte/Credit-Worthiness-Risk-Classification -Ryukijano/Real-CUGAN -bhavyagiri/recyclopes -nanglo123/GTSRB-Deployment -arslvn/statuscertificate -bhaskartripathi/Text2Diagram -BertChristiaens/youtube-dl -wrldreform/TextImagine-1.0-March-2023 -wrldreform/Text2ImageStable2.1 -meetv25/ML -awacke1/REBEL-Knowledge-Graph-Generator -scedlatioru/img-to-music -echozf/dfsg -timqian/like-history -CNXT/PiX2TXT -mrLarry/image_variation -abtExp/source_separation -diacanFperku/AutoGPT -katebor/Taxonomy -rahimimiladofficial/fastai_pet_classifier -wonoqo/AlphaGPT -FourthBrainGenAI/MarketMail-AI-Space -giacomov/pdffigures2 -HuguesdeF/moulinette -tjeagle/Subaru -thealphhamerc/audio-to-text -timothynn/demo-space -XyBr0/DogBreedClassifier -tioseFevbu/cartoon-converter -merve/alpaca-tr-crowdsource -stomexserde/gpt4-ui -netiMophi/DreamlikeArt-Diffusion-1.0 -Next7years/CatHeiHei_v1 -tuan2010/DocumentGPT -ferdmartin/GradApplicationDocsApp2 -Yuankai/ChatReviewer 
-EnigmaOfTheWorld/GenZBot -llovantale/ChatGPT4 -Devap001/top-5_movies_recommendation -himanshu5111/sports_classifier -tokudai/GODEL-Demo -kashif/probabilistic-forecast -JiaoFa/bert-base-chinese -patrickvonplaten/ckpt-to-diffusers -caslabs/midi-autocompletion -B10915003/B10915003-autotrain-jimmy-test-face-identification-53251125423 -Narsil/graph_spectrum -chenman/Meina-MeinaMix -DevashishBhake/Face_Mask_Detection -zhangs2022/ChuanhuChatGPT -javihp/microsoft-speecht5_tts -himanshubhardwaz/nlpconnect-vit-gpt2-image-captioning -sklearn-docs/Pipeline-ANOVA-SVM -SharkGaming/VisualAI -GiladtheFixer/test_sentiment -Serg4451D/DALLE2STANDARD -philipalden/InvisibleCities -tomasonjo/chat-algobook -gotiQspiryo/whisper-ui -inamXcontru/PoeticTTS -Samuelxm/WeatherBot -rd13/Pix2Pix-Video -Ryukijano/canny_coyo1m -Serg4451D/PixelArtGenerator -burberg92/resume_summary -RustX/CSV-ChatBot -SkyYeXianer/vits-uma-genshin-honkai -trialanderror/HowMyZsh -Araloak/fz -ls291/ChatSQL -xzx0554/2222 -Sohaibahmad/AIdetector -apsys/HSSR -igtsolutions/igtsolutions -xiaoV28/GFPGAN -terfces0erbo/CollegeProjectV2 -shielamms/en-es-translator -arnikdehnavi/energy-consumption -Ryukijano/jax-diffusers-event-canny-coyo1m -bgk/sipariseng -groupeonepoint/french-email-generator -Phantom3306/AI-image-detector -gui-sparim/Calculadoras_DDA -kenton-li/ChatArxiv -kevinwang676/ChatGLM-int4-demo -RaIDooN/huggyllama-llama-13b -lucaspedrajas/IF -bhkkhjgkk/Voice -caiocdcs/sports-classifier -maxwelljgordon/whisper-speaker -vmoras/SAM_test -huolongguo10/huolongguo10-check_sec -rounak40/fast-whisper-large-v2 -aiotedu/aiotchat -kukuhtw/AutoGPT -givenvessel399/M.me -Singularity666/RadiXGPT_ -NicolasGaudemet/LongDocumentSummarizer -QiuLingYan/ChanYuan-large-v2 -nomnomnonono/Background-Image-Generation-for-Online-Meeting -datagpt/pdf2summary -fatiXbelha/sd -nwpuwolf/succinctly-text2image-prompt-generator -1phancelerku/anime-remove-background -simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735 -congsaPfin/Manga-OCR -ticomspire/turkey-syria-earthquake-tweets -sklearn-docs/feature_agglomeration -PeepDaSlan9/CarperAI-stable-vicuna-13b-delta -zmengaf/comp652_final_demo -usbethFlerru/sovits-modelsV2 -7thHeaven/ochyai_food -rorallitri/biomedical-language-models -miku8miku/Voice-Cloning-for-Bilibili -contluForse/HuggingGPT -deepozzzie/chatgpt -ZaidBAIDADADAD/runwayml-stable-diffusion-v1-5 -Shad0ws/STORYGPT -heegyu/gorani-v0 -weiyuanchen/stabilityai-stable-diffusion-2-1 -inreVtussa/clothingai -Natnael1234/SIL-ChatGPT-Training-Demo -quidiaMuxgu/Expedit-SAM -surmensipa/VITS-Umamusume-voice-synthesizer -awacke1/Tensorflow-AI-Driven-Personalization -niro-private/chatCSV -falterWliame/Face_Mask_Detection -Pranjal-666/COVID_classify_sequence -andaqu/ask-youtube-gpt -OLKGTOIP/Real-CUGAN -mmdrezamoraditabrizi/mmd -Learner/jax-diffuser-event-battlemaps -DeclK/pose -epexVfeibi/Imagedeblurr -Alesmikes/elvire01 -Alesmikes/Elvirespeak -bsenst/flask_inference_api -Daniton/THUDM-chatglm-6b-int4-qe -swarm-agents/swarm-agents -nurano/dsadsa -Isotonic/image-generator -avivdm1/AutoGPT -Isaoudata/WaltWhitman-GPT -THUDM/ImageReward -awacke1/Streamlit-AI-Letter-UI -yoinked/audio-diffusion -falcondai/stego-lm -Kwasiasomani/Streamlit-Sentimental-Analysis -NoorAzam/model4 -Pranjal-666/Potato-leaf-disease-detection -Iqbaljanitra/Face-Emotions-Prediction -ankitinter9/my-draw-self-journey -nandodeomkar/Project -irfan844108/pdfGPT -Hyeonseo/ChatGPT-ko-translation-prompt -gentlemanhu/succinctly-text2image-prompt-generator -Danielzero/GPT3.5 -cchuang2009/CO2 
-madoss/ask-datagen -Cognomen/CatCon-Controlnet-WD-1-5-b2 -123Kumar/vits-uma-genshin-honkai123 -abdelrahmantaha/ocr -MikeTrizna/amazonian_fish_classifier -DevashishBhake/Question_Generation -LouisSanna/reco_fish -henryu/Multimodal-GPT -Yarumo/prompthero-openjourney-v4 -adrabi-abderrahim/english-pronunciation-practice -panotedi/milestone3 -ericxlima/DogBreedClassifier -Cat125/text-generator-v2 -HarshulNanda/VV -stephenmccartney1234/astrobot2 -DiffusionArtco/AnimeTop50 -awacke1/Fiction-Generator -bobrooos/test -replit/README -GaenKoki/voicevox -lordvader31/almithal -keisuke-tada/gpt-playground -tialenAdioni/chat-gpt-api -mee-asukoht/flan-t5-small -ather23/NinedayWang-PolyCoder-2.7B -1acneusushi/gradio-2dmoleculeeditor -Gaeomg/Kaludi-chatgpt-gpt4-prompts-bart-large-cnn-samsum -Mk-ai/README -SoulAbi/text-prompt-to-audio-generation -raedeXanto/academic-chatgpt-beta -KrisLiao/NaturalLanguageVideoSearch -EstebanDC/Compression_Index -awacke1/Generative-AI-Procedure-Cost-Summary -awacke1/AI-ChatGPT-CPT-Body-Map-Cost -DiffusionArtco/scifi-art-creator -Logic06183/ML_Classifier_Hub -Ankit6396/100-Free-ChatGPT4 -sana123/Sinhala_Audio-to-Text -tera-td/whisper-gpt -groupeonepoint/LongDocumentQuestioner -vyurchenko/l3m -DiffusionArtco/Diffusion50 -oluyemitosin/YOLO -gkmike/ckip-joint-bloom-3b-zh -eaedk/Tuto_Sentiment_Analysis_App -MehdiAmirate/Botv2 -victor/test-autotrain -mrfakename/lmsys-fastchat-public -abhimanyuniga/chavinlo-gpt4-x-alpaca -Alealejandrooo/deathCertReader -awacke1/DogCatGraph -BetterAPI/BetterChat_new -sohamb23/informational-therapy-chatbot -BorisovMaksim/denoising -Aveygo/AstroSleuth -awacke1/Streamlit-Dog-Cat-Graph -devseek/accident_detection -at2507/SM_NLP_RecoSys -DiffusionArtco/Interior-design-models -Ubai/Space -tayyabali1/llama-65b-hf -Frilles/FoodVision_Big -thak123/Whisper-Konkani -Jarkchen/af1tang-personaGPT -Kedareeshwar/Dental-Caries-Diagnosis -Sk4372/stabilityai-stable-diffusion-2-base -awacke1/Generative-AI-SOP -multimodalart/redirectme -jigo/jobposting -erbanku/gpt-academic -osanseviero/discord_example -kpyuy/chat -Kathir0011/YouTube_Video_Assistant -Facepounder/gpt2-xl -serpdotai/mean-shift-clustering -liubing80386/succinctly-text2image-prompt-generator -Juno360219/Gg -SUPERpuper/Text-to-image-AI-3 -gsharma/url-summarizer -ruangguru/ds-chatbot-internal -Goya11/zimu -Derni/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator -anonderpling/repo_uploader -OhMondon/Walking-Assistant-for-the-Visually-Impaired -Y-T-G/Blur-Anything -awacke1/Generative-AI-EACN -balgot/text-to-stylegan3 -awacke1/AI-ChatGPT-EACN -textToSQL/mp3_transcribe_prompt -SSahas/caption_images -sklearn-docs/pcr_vs_pls_regression -NoriZC/vits-models -Shiro26/MendoBERT_RE -NadaKhater/SignLanguageClassification -straka/poison-ivy-detector -Raaniel/Support-and-resistance -jshong/crabGPT -Suhailshah/image-captioning-with-vit-gpt2 -marketono/MidJourney -johnsu6616/TXT2IMG-MJ-Desc -streamzer/runwayml-stable-diffusion-v1-5 -MuthuPalaniyappanOL/RentPricePrediction -trhacknon/youtube-video-to-text-generation -RichardMB1217/blip -colakin/video-generater -chats-bug/ai-image-captioning -megemini/shanshui -leezhongjun/chatgpt-free -awacke1/Generative-AI-Provider -awacke1/AI-ChatGPT-Provider -Jamkonams/AutoGPT -SanketJadhav/Plant-Disease-Classifier -LoveWaves/123 -cloudstack/CSV-ChatBot -reilnuud/polite -keminglu/instruction-following-open-world-information-extraction -Aniquel/bert-large-uncased-whole-word-masking -sparkyrider/OpenAI-SHAP-E -docpois/ask -A-Celsius/Caption-Generator -DaleChen/AutoGPT 
-aryan29/movie-recommender-system -sklearn-docs/multilabel_classification -niew/vits-uma-genshin-honka -MMMMQZ/MQZGPT -lunarflu/modbot -ms180/espnet_onnx_demo -chilge/Fushimi -s1591428/README -chenmgtea/cn_tts -HugoHE/monitoringObjectDetection -cloudwp/sd -FacundoSander/PdfQA -itsmohsinali/anpr1 -RobLi/ControlNet-v1-1 -yhevis/Real-CUGAN2 -krazyxki/V-1488abed -MingGatsby/Grounding_DINO_demo -dragonSwing/LangChain-ChatGPT-plugins -thu-coai/DA-Transformer -helkoo/hackDjellaba -EdwinC/edwin -tmnam20/code-summarization -DaFujaTyping/hf-Chat-ui -xcchen/vits-uma-genshin-honkai -chiye/background-remover -xcchen/xcchenvits-uma-genshin-honkai -MuskanMjn/Segmenting_greek_coins_using_Segmental_Clustering -rinme/vits-models -skf15963/summary -jbondy007/Video_Search_CLIP -Widium/Style-Recreation -Minty22120/DeepDanbooru_string -Jo0xFF/4xArText -PlanetHades361/Change-Your-Style -appy-agency/sprigs -prajwalkhairnar/facial_emotion_detection_multiclass -pikaduck/DungeonMaster -HEROBRINE7GAMER/belal-llm-streaming -kn14/STT_CNN -rootuserlinux/GPT4 -DhruvShek/chatlm -tchebagual/runwayml-stable-diffusion-v1-5 -Tj/starcoder-playground -jb30k/LegalENG -Dimentian/LLMs-Stable-Vicuna-13B -sklearn-docs/Precision-Recall -bibekyess/bgpt -SantiagoTesla/Self_Chatbot -dxcy/Real-CUGAN -camileLDJ/allenai-cosmo-xl -FFZG-cleopatra/latvian-twitter-sentiment-classifier -pierluigizagaria/crysis-voice-cloning -ramiin2/AutoGPT -p208p2002/Compute-Optimal-Model-Estimator -Anandhju-jayan/image-captioning-cloned -Manthanx/catsdogs -awacke1/PyVis-Knowledge-Graph-From-Markdown -Sriharsha6902/Chat-Analyser -lukesteuber/contechnical -Juno360219/lambdalabs-sd-image-variations-diffusers -vjain/AudioChat -hakanwkwjbwbs/stablediffusionapi-anime-diffusion -jb30k/LegalWW -Lamai/LAMAIGPT -DrewKarn/CarperAI-stable-vicuna-13b-delta -Laughify/Moon-Knight-Txt-2-Img -Dialogues/chat-ai-safety -soufiane3/ChatGPT4 -awacke1/Streamlit_Plotly_Graph_Objects -itbeard/CarperAI-stable-vicuna-13b-delta -UVA-MSBA/M4_Team8 -yishenzhen/LangChain-Zilliz -tvrsimhan/music-sep -sklearn-docs/Segmenting_greek_coins_using_Segmental_Clustering -abbbbbbbbbbbbbb/meter2poem-1 -abbbbbbbbbbbbbb/topic2poem -achyuth1344/stable-diffusion-web-ui -Yuzu22/rvc-models -dsaigc/trans_for_sd -Um124/Lung_Cancer_Prediction -realAshish/Calculator -ImagineAI-Real/MidJourney-Diffusion -fffiloni/Music_Source_Separation -zijia88/Sewer_Endoscopy_Risk_Identification -dorkai/dorkai-DALL-E -dsxailab/Lama-Cleaner-lama-12 -diffusers/latent-upscaler-tool -dorkai/pygmalion -Yossefahmed68/microsoft-BiomedNLP-PubMedBERT-base-uncased-abstract-fulltext -SmokeAndAsh/4bit-gpt4-x-alpaca-13b-roleplay-lora-4bit-v2 -kevinwang676/voice-conversion-yourtts -bcg-unet/demo -Not-Grim-Refer/GitHub-Tool -SatwikKambham/Image-Classifier -Dachus/Realfee -wasertech/French_Wav2Vec2_ASR -Ahmedmewloud/Depplearnig -Kynlo/google-flan-t5-xl -Alphts/Robot -predictive-singularity/Singularity -Forever003/VPN -Widium/Image-Recreation -bballaek17/ChatGPT4 -ZeroGPT/GPTZero -Winnie-Kay/Distbert-Sentiments -Rojban/LangFlow -sajinpgupta/Medicine_Prescription_Gen -cryddd/junelee-wizard-vicuna-13b -rumeysakara/ChatGPT4 -nicolaorsini/DICE -catundchat/tts_cn -jozzy/langchain -issenn/so-vits-svc-4.0-spaces-sample -aashay26/Next_Word_Prediction -jeevanb/ChatGPT4 -awacke1/Docker-Examples-Top-5-Demo -DarwinAnim8or/convert-to-safet -CNXT/TXT2PiX -mohsenfayyaz/DivarGPT -society-ethics/StableBiasGen -sanjayw/starchat-playground -johnsu6616/SD_Helper_01 -sanjayw/starcoder-playground -alpha99/alphak -andaqu/ask-reddit-gpt -emresvd/text_summarizer 
-gaviego/mnist -DanielSan7/judini-video -samalba/demo -nettsz/stabilityai-stable-diffusion-2 -starlit7/NewKorPoliticsTTS -ai-create/colab -mileslilly/City-classifier -Yarumo/whisper -allinaigc/internet_GPT_venice -euphi/smmry -Um124/Global_Warming_Analysis -innev/GPT2-large -zibb/frontalface-cascade -GookProxy/Gyul -davda54/chat-nort5 -Jamerrone/DreamShaperWebEmbed -abhi-pwr/underwater_trash_detection -dirge/voicevox -Tirendaz/Cancer-Detection -Bonosa2/parrot-chat-bot -AnimalEquality/chatbot -sandraw11031/virtual-staging -fffiloni/chatbot-media-test -chansung/tfx-vit-pipeline -simonraj/ThinkingRoutines -mscsasem3/CHAAT -NexusInstruments/offensive-hugging-face -WUXIAOMO/stabilityai-stable-diffusion-2-1-test-space -chow-q/cut-image -Technozam/mcqs -ty00369/IDEA-CCNL-Taiyi-BLIP-750M-Chinese -prathmeshrmadhu/odor-dino -xly66624/Brayton-cycle -Harsimran19/SegmentationGAN -Sojab/voice-recognition -ysharma/dummy99 -teven-projects/calculator -rizmyabdulla/tiny-Question-answering -AndyCer/TheBloke-stable-vicuna-13B-HF -banana-projects/talking-egg -kavyasree/hair_type -EinfachOlder/HuggingChat -almakedon/faster-whisper-webui -tweakdoor/stabilityai-stable-diffusion-2-1 -kcagle/AutoGPT -Cybsechuman/Consistency_analysis -PeepDaSlan9/togethercomputer-RedPajama-INCITE-Chat-3B-v1 -shencc/gpt -mohammadT/Arabic-Empathetic-Chatbot -jvictoria/LogicChecker -kapilmi/AI-lab -PantOfLuck/my_stable_diffusion_webui -Alfasign/HuggingGPT-Lite -AdithyaSNair/PCOS_Prediction -changkeyculing/chatgpt-detector-single -andyssj/entregable2 -Alichuan/VITS-Umamusume-voice-synthesizer -Drexx007/Drexx-Ai-Chat -a3en85/ChatGPT4 -sklearn-docs/Early-stopping-of-Stochastic-Gradient-Descent -sklearn-docs/SGD-Weighted-Samples -4RiZ4/stabilityai-stable-diffusion-2 -whitphx/gradio-static-test -Boranbruh/ehartford-WizardLM-7B-Uncensored -dejavusss/philschmid-flan-t5-base-samsum -Ingenious/README -Godrose0728/sound-link -CartelFi/README -sklearn-docs/Nearest_Neighbor_Regression -Ikaros521/moe-tts -uih-zyn/runwayml-stable-diffusion-v1-5 -presucc/anime-remove-background -Anthony7906/MengHuiMXD_GPT -Xinyoumeng233hu/SteganographywithGPT-2 -crashedice/signify -innovatorved/ImageColorizationUsingGAN -elitecode/logichecker -uohna/nlp-web-app -Lavanya30/hiddenhunger -Longtong/FoodVisionBig -maurol/lyrics-translator -Martlgap/LiveFaceID -Onekee/ehartford-Wizard-Vicuna-13B-Uncensored -lingbionlp/PhenoTagger_v1.2_Demo -kong003/first_demo -hahahafofo/vits-uma-genshin-honkai -Dacoolkid/Oba_-s -Harsimran19/DepthGAN -Raghav001/API -Shad0ws/ImageModelTestEnvironment -chauvet/stabilityai-stable-diffusion-2-1 -banana-projects/coref -dachenchen/real -daphshen/corgi-classifier -Juno360219/xlm-roberta-base -Fengbinbin/gpt-academic -Dacoolkid/Sleek -ali-ghamdan/deoldify -rafaelglima/ChatGPT4 -sssdtgvg/Sex -badrih21/ML_module -SameerR007/ImageCaptioning_streamlit -paulbauriegel/simple_whisper -captchaboy/pleroma_captcha_solver -dachenchen/HiWantJoin -nuwa/ehartford-WizardLM-13B-Uncensored -davila7/ConstitutionalAI -shawndimantha/hackaithon_generate_email -hungchiayu/CaptionFLAN-T5 -rchak007/BackTester -aditii09/hindi-asr -Cong723/gpt-academic-public -Bakar31/PotterQuest -thebetterindia/ai -JawadBIlal/Crack_Detection -Adesoji1/Panel_PDF_QA -Ramse/TTS_Hindi -weiwandaixu/ChatGPT3.5 -dorkai/SINGPT-Temporary -DkLead/facebook-tts_transformer-ru-cv7_css10 -ecaridade/albertina -RandomCatLover/thesis_finetuned_classifier -wangfowen/hackaithon_app -DarwinAnim8or/Pythia-Greentext-Playground -Soumahara/stablediffusionapi-anything-v5 -Soumahara/sakistriker-Anything_V5_Prt 
-jonanfu/demo_clase_platzi -Akshat231/super_space -AlanMars/QYL-AI-Space -MVV/3dTopDenoising -skyxx/skyxxChat -panda1835/leopard -sanchezNa/runwayml-stable-diffusion-v1-5 -analyticsinmotion/README -brhiza/EdisonChen-tts -hannahaa/MovieAI -chaozn/face_emotion_classifier -Xinxiang0820/nitrosocke-Ghibli-Diffusion -joshuadunlop/Epic-GPT4-App -Basav/openai-whisper-medium -russel0719/deepfake_detector -crazyjetsai/finetuneai -naisel/pegasus-with-samsum-dataset -huggingface-timeseries/probabilistic-forecast -Keenlol/Wood_Classification -JoeyFoursheds/ClonerHug -OpenMind-AI/starchat-playground -santiviquez/ai-act -omdenatopekachapter/left_ejection_fraction -nontGcob/T2E-demo -Phasmanta/Space2 -unidiffuser-testing/unidiffuser-testing -bigPear/digitalWDF -BongoCaat/ArtGenerator -MrTitanicus/rvc-models -abionchito/rvc-models -NeuralJunkie/HebLens -JCTN/stable-diffusion-webui-cpu -Bambicita/rvc-models -JosephTK/review-sentiment-analyzer -NEARHUb/video-transcoder -typesdigital/TTS -BlitzKriegM/argilla -hmtxy1212/README -Demi2809/rvc-models -Pixeled/dogcat -RinInori/Vicuna_ChatBot -vietvd/modnet -AIGE/A_B -grld26/Whisper-Swak-v4 -msafi04/abstractive_summarization -miaomiaoren/vits-uma-genshin-honkai -SERER/VITS-Umamusume-voice-synthesizer -randomarnab/Img_caption_project_using_ViT_GPT2 -januvojt/emotion-recognition -awinml/api_vicuna-openblas -course-demos/whisper-small -googlyeyes/question_generation_swayam -Tatusho/TTS -AICODER009/food_detection -williamstein/ehartford-WizardLM-7B-Uncensored -aliabid94/reverse_audio -giesAIexperiments/coursera-assistant-3d-printing-applications -BradAllgood/fastai_chapter2_new -dhmeltzer/qg_generation -bortle/moon-detector -Unachstudents/README -Tony1810/FootballPosition -Raksama/ChatToPdf -ramwar/ix-ask-your-books -dukujames/ML-Sentiment -arakimk/SakamataFontDCGAN -theodotus/asr-uk-punctuation-capitalization -ChengZ/DeepDanbooru_string0 -AICODER009/Food101_Detection -nikiandr/assym_sem_search -xinyu2/anime-remove-background -ennov8ion/Landscapes-models -lhg99/gradio-demo -BisratWorku/Bear_classifier -grisuji/min_dog_classifier -neongeckocom/streaming-llm -cmudrc/Interp_Imaging -RinInori/vicuna_finetuned_6_sentiments -kokofixcomputers/chat-ui -pakyenn/streamlit_datatool -resquared/sales-bot -0xSynapse/Image_captioner -sahillalani/chargerbot -awacke1/Face_Recognition_with_Sentiment -HSFamily/StoryMaker -aliabid94/tts -suhaaspk/PPAP -Not-Grim-Refer/Code-to-Detailed-English-Description -Not-Grim-Refer/Detailed-English-Description-to-Code -IwanK/heart_failuere -Nyashi/rvc-models-epic -wall-e-zz/anime-ai-detect -Ld75/pyannote-voice-activity-detection -Amitesh007/elevenlabs-stt -Nultx/VITS-TTS -Pranjal-666/User-Behaviour-Model -DataSage/Book_Recommend -hosst/hosst -hosst/HomeLLM -hosst/ApplianceLLM -hosst/ProfessionLLM -HOSSTOS/README -samehmamin/argillatest -WYF20618/Real-CUGAN -rubinmc/Image-Animation-using-Thin-Plate-Spline-Motion-Modeldfdfdddddddddddddddddddddd -tiedong/Goat -locknsw/nomic-ai-gpt4all-13b-snoozy -heliosbrahma/ai-youtube-assistant -JsonLite/gp -Cat125/text-generator-v3 -arnikdehnavi/citationPrediction -RandomCatLover/plants_disease -ishaan812/mediHelp -rohan13/grady -gabibi7am/rvc-models -shawndimantha/transcribesong1 -sklearn-docs/Comparison-of-Manifold-Learning-methods -kurianbenoy/Pallakku -frncscp/bullerengue -3laa2/Text2img -NovaSerial/anime-remove-background -AndyCer/TehVenom-MPT-7b-Chat-Instruct-LongCTX-Merge -duchaba/yml_hackathon_img_mindy -ucalyptus/DragGAN-unofficial -matthoffner/monacopilot -duchaba/yml_hackathon_img_maggie 
-duchaba/yml_hackathon_img_ardy -cifkao/context-probing -KGHL/img-to-music -voices/VCTK_British_English_Males -Nesip/Aeala-GPT4-x-AlpacaDente2-30b -codersgyan/espnet-kan-bayashi_ljspeech_vits -MAMADREZAMORADIam/Hgyukhfgtffftt -Martin1998/question_answering -Alcom/chaoyi-wu-PMC_LLAMA_7B -patti-j/omdena-mental-health -SamiAlghamdi/FirstEver -MUmairAB/BreastCancerDetector-app -Supawich/hololive_AI_fan_art_classifier -bgadaleta/mars -rahulmishra/transformerModel -awinml/alpaca-cpp -ahmed-masry/UniChart-Base -agutfraind/llmscanner -epochs-demos/MedicalImagingApp -safi842/FashionGen -Seogmin/NLP -fr1ll/sketch-to-1d-SRME -Jikiwi/sovits-models -bebetterfeng/CarperAI-stable-vicuna-13b-delta -xwsm/gpt -ShadowDominator/image-to-text-khmer-ocr -realAshish/SG161222-Realistic_Vision_V1.4 -hanaum/clip-test -rohan13/Roar -duchaba/yml_hackathon_prompt_monty -joey1895/tsspace01 -ShadowDominator/sentence-sentiment-analysis -ShadowDominator/paragraph-similarity -Quickturtle005/mothership_hca -voices/VCTK_American_English_Females -SantiagoTesla/image_generator -Epitech/Scarecrow -ludusc/latent-space-theories -BlueRey/MendoBERT_QA -KingBlaze1227/PC-PICKERS -tatate/trolltrade -helidem/Projet-L3-Image -SNKRWRLD/SNKR_WRLD_Shoe_Picker -victor/test-12342324 -siya02/Konakni-TTS -Josekutty/project_01 -cc38300/ConstructionGPT-SL -coding-alt/IF -Quickturtle005/profitability_tool -xercon/chat-with-docs -osiria/classifier-zero-shot-italian -talaa/Financial-sentiment-news-analysis -Andy1621/uniformer_light -ShadowDominator/extract-photos-from-pdf -sklearn-docs/Caching-Nearest-Neighbors -camillevanhoffelen/langchain-HuggingGPT -sklearn-docs/Density-Estimation-for-a-Gaussian-mixture -theonerichy/wd-v1-4-tags -sklearn-docs/Detection-Error-Tradeoff-Curve -perc1val/CaptchaSolver -hjzhp/cgpt-online -pplonski/Artificial_Calculus_Teacher -juanhuggingface/ChuanhuChatGPT_Beta -Aityz/Aityz_Model_Eli5 -hujike/mj-laf -orangepony4/stabilityai-stable-diffusion-2-1 -amanmibra/void-demo-aisf -jasonwu92/image-search-playground -utkuarslan5/yodazer -sh0kul/DTPDC-Deploy -rainbowemoji/etf-assistant -AutoGeneralAI/chatgpt-clone -dasanik2001/FYP_G15_RCCIIT -TILK/UrgencyBot -Akim/claudeAPI -rstallman/Beta.AI.Barrister -FreeHamish/Manaforge -nexuhs/ChatGPT4 -Wangchunshu/RecurrentGPT -ankush29/CheckGPT -Jellyfish042/punctuation_mark_prediction -Juliojuse/human_health_gradio -kamaldeep132/pdfGPT -Hahsgsgsy/teston -yuragoithf/mlg_image_classification -bonrix/text_detection_easyocr -bla/tranny -kalyas/dpt-depth-estimation -VinayDBhagat/GenerateCustomerInsights -jx-yang/deep-thinking -QinBingFeng/dalle-mini -GreenRaptor/MMS -hilmyblaze/WebUI-Counterfeit-V2.5 -Ironbasin/anime-ai-detect -Potato-ML/Spaceship_Titanic -mfkeles/Track-Anything -yixin6178/arXiv2Latex -hbestm/gpt-academic-play -raravena80/trulensplay -Addai/Breast_cancer_detection_with_deep_transfer_learning -FroggyQc/ehartford-WizardLM-7B-Uncensored -MichaelXin/openai-test -Silence1412/Text2img -MingGatsby/multi-query-sentiment -ccmusic-database/README -Choisuren/AnimeGANv3 -tiiuae/README -Ababababababbababa/Sha3bor_Aragpt2_Base -Ababababababbababa/Arabic_poetry_Sha3bor_mid -HReynaud/EchoDiffusionDemo -tusharust/darkstorm2150-Protogen_x5.8_Official_Release -hamedmohamed/microsoft-speecht5_tts -Pattr/DrumClassification -dorkai/ChatUIPro -technocenter/MUmairAB-Breast_Cancer_Detector -JosephTK/object-detection-count -truera/trulens -g0blas/cap-recognizer -abby-mcdonald/CardioPro -awacke1/API-Demo -divish/guanaco-playground-tgi-2 -aminghias/text_analytics_project 
-Thanhdotr/facebook-fastspeech2-en-ljspeech -sklearn-docs/SVM-Anova-SVM-with-univariate-feature-selection -sklearn-docs/KDE-of-Species-Distributions -kidcoconut/spcstm_omdenasaudi_liverhccxai -Annotation-AI/fast-segment-everything-with-drawing-prompt -jaseci/NERGPT -sklearn-docs/Test-with-permutations-the-significance-of-a-classification-score -sklearn-docs/Plotting-Cross-Validated-Predictions -sklearn-docs/Demonstration-of-multi-metric-evaluation-on-cross_val_score-and-GridSearchCV -sklearn-docs/Isotonic-Regression -sanaghani12/emotiondetection -sklearn-docs/Gaussian-Classification-on-XOR -seanghay/khmer-tts -ShoukanLabs/OpenNiji-Dataset-Viewer -DeepakJaiz/QA_evaluator -sklearn-docs/Gaussian-Classification-on-Iris -0xAnders/ama-bot -sklearn-docs/Normal-Ledoit-Wolf-and-OAS-Linear-Discriminant-Analysis-for-classification -sabirbagwan/Sip -MLIFY/Chatter -sklearn-docs/Gaussian-Mixture-Model-Ellipsoids -sklearn-docs/Gaussian-Mixture-Model-Covariance -utkuarslan5/persona -MLIFY/ehartford-WizardLM-30B-Uncensored -MLIFY/openaccess-ai-collective-manticore-13b -akashjeez/akashjeez -barani/ControlNet -smukerji/pdfBot -ImPavloh/voiceit -Annelisseishere/Streamlit_GPT -JPTHEGOAT/SG161222-Realistic_Vision_V1.4 -swaptr/image-captioning -jeycov/PIB-PAARCIAL-FIN -amanmibra/void-emb-demo -hosst/carers -besarismaili/fastai_pet_classifier -sysopo/impira-layoutlm-document-qa -rogera11/Art-Style-Classifier -rdecler/MySpace -freestok/corn-diseases -dermetfak/healthcare_ai_loop -umitgunduz/news-extractor -FunnyDannyG/VoiceFixer -micahCastillo/gpt-report-analysis -Oumar199/Fake-Real-Face-Detection -sddwt/guanaco -xiaobaiyuan/theme_land -skimai/DragGAN_Streamlit -linweiyt/aiwrite -darthPanda/chatpdf_app -inayet/inayet-autotrain-price-prediction-1331950922 -ozgur34/qb-Engine2 -Wings77/ChatGPT4 -twdac/BuChengFangYuan-ChineseJapaneseTranslation -olimpa/CVORG -hitty/Movie-Recommendation-System -hari31416/Style-Transfer -MINAMONI/img-to-music -WinWut/Lofi-music-style-transfer -justest/chatglm-6b-int4 -danushkhanna/Phishing_Domain_Detector -GiorgiSekhniashvili/geo-whisper -FineLong/stabilityai-stable-diffusion-2 -DataRaptor/ActionNet -samisnotmyname/Instagram-Carousel-Prompt-Generator -Hobis/bark-voice-cloning-polish-HuBERT-quantizer -davidanthony-ai/DIGITALIXSA -analyticsinmotion/word-error-rate -FranklinWillemen/TARS -hitty/Vegetable_Classifier -KingChronos/ChatGPT4 -middha/Torpedoes -typesdigital/BLOOMChat -MajdOD/gradio-Stroke-prediction -xYousha/AlphaGPT -Arikkod/FoodVisionMini -aulhan/microsoft-codereviewer -olimpa/Agenda-Inter -wiwide/40bqa -michaelwja/burn-detection -bhavyapandya/Next-Word-Prediction -SHIBATAATSUSHI/aioccupationaltherapist2 -FER-Universe/Face-Benchmarking -edisonlee55/hysts-anime-face-detector -lyimo/asrv2 -Menna2211/TxTimg -vinayakchuni/PayalVinayakClassifier -indikamk/MisconAI -arihantvyavhare/device_detector_img2txt -Menna2211/ImCaptioning -Rardilit/Rardilit-Panther_v1_test1 -crawly/White-box-Cartoonization -Mellow-ai/PhotoAI_Mellow -Ragio/endometrial_disease_prediction -robyramos/analise_perfil_v2 -spuun/blip-api -Hexamind/iPADS -roontoon/Demo-TTI-dandelin-vilt-b32-finetuned-vqa -Lwalid/Daam_Inpainting -LCaligari/deepsynthbody-deepfake_ecg -jganzabalseenka/NER-spanish -Abhishek92kumar/layoutlmv3-finetuned-cord_100 -AhmedRashwan369/ChatGPT4 -ari7thomas/bible.ai -apetulante/bert-emotion -Naszirs397/rvc-models -michaelwja/maskformer-satellite-trees-gradio -Tej3/ECG_Classification -AIKey/facetofacechat -AIKey/ai_date -camenduru-com/imdb -nameissakthi/Invoice-Extraction-1 -Amite5h/EuroSAT_ 
-Superying/vits-uma-genshin-honkai -AIKey/TestStatic -tigergoo/ai -sohoso/anime348756 -Suweeraya/Breast_Cancer_Ultrasound_Image_Segmentation -Abubakari/Sepsis-prediction-streamlit-app -totsunemario/minimal -SolenopsisCampo/Automatic1111_Stable_Diffusion -isaakkamau/Whisper-Video-Subtitles -olimpa/CalendarJs -surgelee/SG161222-Realistic_Vision_V1.4 -QINGCHE/TSA -Locomocool/MooseOrDeer -martingrados/gradio-google-sheet -PrabhuKiranKonda/Streamlit-PDF-Assistant-Docker -agunes/ChatGPT4 -0xeureka/ehartford-WizardLM-13B-Uncensored -jbyun/music-separation -LuxOAI/BGCGW -neuesql/sqlgptapp -olimpa/projectAlphaDB -gnakan/airtable-QA -Mohamedoz/chatmoh -aliabid94/golfy -Lanerdog/deepsynthbody-deepfake_ecg6666 -Annotation-AI/segment-similarthings -raaec/Pix2Pix-Video-prv -king007/pdfChatter -xxccc/gpt-academic -winglema/ChatGPT4 -animeartstudio/QuickGen-Photo -animeartstudio/QuickGen-Art -speakjan/EleutherAI-gpt-j-6b -pongping/converter -sinksmell/ChatPDF -sci4/AnimateYourDream -sudip1310/BANAO_Tiny_Shakespeare -SMOOTHY1962/redstonehero-realisian_v40 -threestoneyang/vits-uma-genshin-honkai -shifei/gradio -breehill1994/SG161222-Realistic_Vision_V1.4 -R34Koba/ClaudeProxyGaming -LuxOAI/ResumeBud -Dauzy/whisper-webui -LuxOAI/guanaco-playground-tgi -Q4234/a2 -eaedk/Sentiment_Analysis_App_Docker_deployed -remyxai/remyxai-classifier-labeler -apozzuoli98/shark-or-whale-classifier -liammcdevitt73/LoL-Support-Classifier -qiantong-xu/sambanovasystems-codegen-16B-mono-toolbench -ml595/myfirstspace -thegenerativegeneration/FNeVR_demo -Seetha/IMA-pipeline-streamlit -Juno360219/albert-base-v2 -Juno360219/cloudqi-cqi_text_to_image_pt_v0 -alibidaran/General_image_captioning -Juno360219/stabilityai-stable-diffusion-2-1 -LuxOAI/GPT4-30b -awacke1/PermutationsAndSequencesGPT -dolceschokolade/chatbot-mini -Ank0X0/text-to-3d-shap-e-webui -Sreekumar1608/langchain-chat-with-pdf-openai -sccstandardteam/ChuanhuChatGPT -Laurie/IDEA-CCNL-Ziya-LLaMA-13B-v1 -OzoneAsai/gptsan -abhi1280/QR_generator -neojex/LuxembourgishTextClassifier -UltimateAICourse/Prompt-Engineering -Hamish/openai_demo -Hakim571/Food-Classification -jeffrymahbuubi/bert-advanced-cnn-hate-speech-classification -welloff/ChatGPT-prompt-generator -PurtiSharma/toxic_comments -After-the-Dark/paragraph-similarity -kmirijan/NBA-Stats -pord123/model_demo -Frorozcol/financIA -osiria/distilbert-italian-cased-ner -Vishnu-sai-teja/Dog-vs-Cats-2 -tonwuaso/SentimentAnalysisModel -giswqs/solara-demo -AIOSML/README -FrancisLi/advance_autotrain -sebsigma/geodata-harvester-app -randt/stabilityai-stable-diffusion-2-1 -lsli/lab -manu1612/spamdet -menciusyue/stabilityai-stable-diffusion-2 -WangZeJun/bloom-820m-chat -Kuachi/ai-voice -Hexamind/swarms -dusanstanis/TheBloke-guanaco-65B-HF -cownclown/TehVenom-MPT-7b-WizardLM_Uncensored-Storywriter-Merge -sklearn-docs/Face-completion -Cloudy1225/stackoverflow-sentiment-analysis -MaxKazak/RuBert-base-russian-emotions-classifier-goEmotions -g0blas/chicken-breed-recognizer -sd9972/autotune -kolibril13/tldraw-solara-test -VuAI/VN98 -Taithrah/Minimal -Vikas01/Attendence_System -Woogiepark/stabilityai-stable-diffusion2 -prasanthntu/dog-vs-cat-classifier -osiria/bert-italian-cased-ner -dukai289/learning_streamlit -shoukosagiri/stable-diffusion-webui-cpu -vishnu23/web_scrap -mrrandom123/image_creative_caption_new -hands012/gpt-academic -g0urav-hustler/Image-Caption-Generator -dukai289/scripts -ludvigolsen/plot_confusion_matrix -sunilkumardash9/pdf-GPT -Lazyhope/RepoSnipy -ggwvits/vits-uma-genshin-honkai -simpx/tiiuae-falcon-7b 
-XuZhang999/ProArticles -Falah/stablediffusionDB -eatcosmos/hackaprompt -LENMON/ProxyGPT -saurshaz/HuggingGPT -Abubakari/Sepsis-fastapi-prediction-app -gersh/ehartford-based-30b -s3nh/acceptable-self-instructs -Queensly/FastAPI_in_Docker -Raghav001/PDF -amasad/Replit-v2-CodeInstruct-3b -prasanthntu/who-is-the-hero -mayajwilson76/insurance-stress-testing-demo -briancatmaster/Tropic-AI -lavan2012/free-fast-youtube-url-video-to-text-using-openai-whisper -abokbot/wikipedia-search-engine -FawnPythn/andite-anything-v4.0 -Akmyradov/TurkmenSpeechRecogntion -mikeee/docs-chat -Veera-Ruki/AutoPoem-Generator -camenduru-com/sl -ik/twi-ewe-mss-tss -LennardZuendorf/legalis -HariSathwik/OmdenaAI-Jordan -Kuachi/hololive -awinml/api-instructor-xl-1 -ixiangjin/GPT4ALL -rfrossard/ChatGPT-PPT-Generate -rfrossard/langchain-chat-with-pdf -BramVanroy/mateo-demo -sadjava/emotion-classification -ikoghoemmanuell/Sales-Prediction-App-Streamlit -suyash007/MRS-SUYASH -mengdeweide/VITS -Whalb/GPT4ALL -SurendraKumarDhaka/Shakespeare-AI -hlydecker/Augmented-Retrieval-qa-ChatGPT -prognosis/inference-bloom-doc-qa -pdjewell/sommeli_ai -1line/AutoGPT -MesutUnutur/germanToEnglishTextToImage -altndrr/vic -MesutUnutur/chatgptFinetune -ivn888/Twitter-dashboard -kyauy/ClinFly -ysharma/dummyy112233 -xujunhao/AudioLM -shuanglei/promptGenerator -NicoleGoh/Anime_Recommendation -cmseibold/cxas-demo -Cletrason/cloudqi-cqi_text_to_image_pt_v0 -awacke1/ChatGPTStreamlit3 -andfanilo/streamlit-drawable-canvas-demo -Harsh502s/Anime-Recommender -kbora/minerva-generate-docker -Panel-Org/panel-demo-image-classification -eswat/Image-and-3D-Model-Creator -awacke1/ChatGPTStreamlit4 -amanatid/ArxivGPT_Streamlit -rriverar75/dientes -alessveloz/lenssssw-roblox-clothing-ai-maker -jewellery/ChatGPT4 -NanoMachin/Free-Palestine -PeepDaSlan9/OpenAssistant-reward-model-deberta-v3-large-v2 -awacke1/ChatGPT-Streamlit-5 -hlydecker/falcon-chat -Jimmyfreelancer/Pix2Pix-Video -SIH/Augmented-Retrieval-qa-ChatGPT -Taocan/Chatty -Jokerkid/porntech-sex-position -Soyoung97/gec-korean-demo -vishnu0001/text2mesh -kaustubh35/tax -Akmyradov/TurkmenTTSweSTT -marlhex/test1 -zkunn/Alipay_Gradio_theme -uooogh/webui -nosdigitalmedia/dutch-youth-comment-classifier -Jerry0203/sentence_embedding -AlterM/Zaglyt2-transformer-test -Guilhh-kell0/Jennifer-Home -hk59775634/OpenAI-Manager -abidlabs/audioldm-text-to-audio-generation -Igor2004/newSpace -ArturStepanenko/digitsSpace -DexterSptizu/drug_interaction -victor/tesTETZTRZE -fisehara/openai-whisper-base -radames/Falcon-40b-Dockerfile -DailyBibleMotivation/README -bparks08/falcon-chat-40b-1 -dandan4272/hand_gesture_rec -myrad01/Inpaint-Anything -rhineJoke/test_faclon-7b -subwayman/btc-chat-bot -Srihari1611/Gender_Classification -alexyuyxj/emotion-classify -Starcodium/README -Superlang/remove_background -crazybber/docker-demo-t5-translation -BillBojangeles2000/bart-large-cnn-samsum -ShreyaRao/QuotesForU -DarkyMan/OrangeMixes -ky2k/Toxicity_Classifier_POC -alexyuyxj/zh-en-translation -liuzq/free-creation -rudayrude/free-fast-youtube-url-video-to-text-using-openai-whisper -friedrichor/friedrichor-stable-diffusion-2-1-realistic -Kelvinhjk/QnA_chatbot_for_Swinburne_cs_course -jonas/KaraAgro-Cadi-AI -mithril-security/Santacoder-demo -neko321/Voice-Changer1 -internetsignal/audioLDM -blmdsydm/faster-whisper-webui -LovnishVermaPRINCE/attendanceviaface -colutti/timpal0l-mdeberta-v3-base-squad2 -lekkalar/chatgpt-for-pdfs -SantiagoMoreno-UdeA/NER_RC -patvfb/worldofshares -CaliforniaHealthCollaborative/Emoji2KaktovicEncryptKey 
-CaliforniaHealthCollaborative/README -EnigmaOfTheWorld/Interior_home -petervavank/Advoice -cuixuhan/888 -drdoggo/Medical_Image_Understanding_with_VLMs -alitudil0/Sillyfinity -chopey/DhivehiTransliteration -nitinacap/chatgpt4all -Insuz/Mocha -lint/meetingsummary -Subhraj07/minio -danfsmithmsft/falcon-chat -NonnaRose/Image-Caption -awacke1/ChatGPTStreamlit6 -Th3BossC/TranscriptApi -varunkuntal/text2_img_text_demo -nicholasKluge/Aira-Demo -genevera/AudioToken -coyotte508/test-req -dantosxd/gorilla-llm-gorilla-mpt-7b-hf-v0 -vpivn/Cooling-Water-Thermal-Evolutions -maxomorphic/DogBreedIdentifier -rovargasc/calificacion -awacke1/ChatGPTStreamlit8 -Hexamind/QnA -Angello06/SoylaloGaming -BigSalmon/AbstractTwst -gerhug/dalle-mini -olive100/face_merge -nilaymodi/dandelin-vilt-b32-finetuned-vqa -DataWizard9742/LessonPlanGenerator -R1ckShi/funasr_app_clipvideo -CaliforniaHealthCollaborative/Mermaid.Md -victor/test213213123123 -Malmika/Osana-Chat-Friend -randt/redstonehero-RPG-v5-itr17_A10T -all-things-vits/class-attention-map -fernfromecuador/SG161222-Realistic_Vision_V1.4 -9prayer/ubiq-chat-cpu -victor/ahahahah12 -openlamm/LAMM -awacke1/ChatGPTStreamlit9 -danterivers/music-generation-samples -alamin655/Personas -Dukcar/Pix2Pix-Video -DavidHosp/Movie_Recommendation_System -cormerod/gaime -Woogiepark/nlpconnect-vit-gpt2-image-captioning -theadedolapo/Car_price_prediction -d8aai/simple-paper-qa -YaTharThShaRma999/Testtrial1 -hebert2099/MusicGen -Hakim571/Food-Recommendation -cmagganas/chainlit-arxiv -sachinrcz/isItCarOrPlaceOrBus -SujanMidatani/resume_details_extractor -JudgmentKazzy/JosefJilek-loliDiffusion -tappyness1/error_analysis_obj_det -martykan/SZZ -NickNYU/NickFriendsHouse -Wrathless/Dkrotzer-MusicalMagic -Gamero-xD/stabilityai-stable-diffusion-2-1 -cooelf/Retro-Reader -caldervf/maven-5 -Wrathless/pyannote-voice-activity-detection -GFXY/stabilityai-stable-diffusion-2-1-base -GFXY/stablediffusionapi-anything-v5 -GFXY/Maseshi-Anything-v3.0 -XPMaster/manafeth -Ama434/neutral-barlow -michaljunczyk/pl-asr-bigos-workspace -Izumazu/ProxyTest -jeffrymahbuubi/foodvision-mini -mblackman/kandinsky-blend -zhtet/RegBotBeta -WelcomeToTheClub/VMware-open-llama-7b-open-instruct -PeepDaSlan9/VMware-open-llama-7b-open-instruct -YaTharThShaRma999/ChatwithDolly -sheikyerbouti/pawelppppaolo-gpt4chan_model_float16 -all-things-vits/Attend-and-Excite -PunPk/AI_FallingAsleepDriving -Yntec/Single-Stable-Diffusion-Model-Test -mnauf/detect-bees -XuLiFeng/godxin-chinese_alpaca_plus_lora_7b -backway0412/A2 -geraldvillaran/dolly-chat -kangjian99/Panel_PDF_QA -liaokun/web -Katsuki098/test03 -Yiqin/ChatVID -TestingCompany/ChatPDF -gabrielyokai/reverse -RICHARDMENSAH/SEPSIS-PREDICTION-STATUS-APP -The13DvX/README -Paperboxiv/Dunhuang_GPT -tom-beer/hotel-recommender -flokabukie/Sepsis-status-prediction-fast-api -Haxan786/Tel -Juli08/janitorai -MarkMcCormack/NLP-EduTech-App -DonDoesStuff/streamusic -Boynn/AI -dakaiye/dky_xuexi -omdena/omdena-chatbot -mentalmao/nitrosocke-spider-verse-diffusion -MetaWabbit/Basic_Prompt_Generation_Tool -czczycz/QABot -natexcvi/trade-assistant-ui -1vash/demo-flask-docker-template -vruizext/transformers-xray-classification -newbietk/chatGPT-T1 -JAWEE/stablediffusionapi-majicmixrealistic -asyafiqe/pdfGPT-chat -tarunika-03/PersonalityPrediction_Psychology -abhaskumarsinha/MinimalGPT-Felis_Catus -TinkerFrank/AppleClassifier -dexrm/Weewee -kadirbalalan/text-summarizer -OllieWallie/Openai -marrocovin/OPENAI_KEY -Trickshotblaster/idk-bruh -keilaliz123/test05 -PeepDaSlan9/idk-bruh -ZGDD/chat-robot 
-PeggyWang/ehartford-WizardLM-Uncensored-Falcon-40b -hysts-samples/save-user-preferences -Miam97/Test02 -radames/gradio_get_video_metadata_timestamp -John1986/test -EzioArno/Goofy -kiskisbella/janitor -spexight/no.2 -2kaara/oreo -hieupt/image_style_transfer -eivind-n/P360-AI-Help -SujanMidatani/speechToText -eaedk/agri-tech-fastapi -tresdtres/TresDtres_AI -kaveh/wsi-generator -gameg/Docker -yuragoithf/mlg_personal_info_remover -Ricdeq/optimaldesign -dariusstone7/PFE -abhaskumarsinha/MinimalGPT-Ragdoll -MaxP/demo-document-qa -spillwaysofyoursoul/janitorai -tarfandoon/CryptoEN -fgibarra/fraud-prevention -khachapuri69/madoka -muttalib1326/Detecting-Objects-in-Images -anandaa/careerpal -propilot/propilot-calling-functions -McLovin171/runwayml-stable-diffusion-v1-5 -SpacesExamples/Gradio-Docker-Template-nvidia-cuda -dinnovos/english-teacher -woahtheremonkey/vzvsvs -NeptunoIA/neptuno-proxy -PeepDaSlan9/HuggingFaceH4-starchat-alpha -crystalai/constellation -zilderish/ngekzild -revstartups/salessimulator -RoyKwok/Gradio -tianyang/lemur-7B -thinkcol/chainlit-example -dietician/rewriteData -kasun/git-large -kasun/blip-base -osanchik/PicFinder -kusumakar/Image_Describer -hhhyrhe/vits-uma-genshin-honkai -daarumadx/xd -Aashir01/Live_Transcription -Pratick/CLAVIS -Tihsrah/Meetings -Sreeja123/memristor-based-neural-search-optimization-GUI -naliveli/myspace -St4arsp0laris/PPolar -maxmon/digital_double -Alisonbakers/Fml -CreBea/Test2 -olimpa/Celdas2celdas -w1zrd/MusicGen -umutozdemir/medicalai-ClinicalBERT -scaratootie/scarar -Femurbreaker/Femur -Motheatscrows/mmnsfww -qprinceqq/noise-greeter-demo -jeycov/Piel_cancer_prueba -jytole/hftesting -Candyraider/Proxy4 -SpaceNMagic/OPEN_AI -leonelhs/Zero-DCE -kusumakar/Text_to_image_using_Stable_diffusers -dvc890/go-chatgpt-api -teralomaniac/chatbing -koushik-org/Trading_QA_Bot -teddyhugzz/venus -RockmanYang/Demucs_v4_2s_HT -goodeatmen/Test -Savenly/hriddy -Inderdev07/Attendance-FaceRecognition -tarunika-03/personality-pred -Evanell/Venus -Rehman1603/SkinDisease -AlphaGPT/PaperSummary -awacke1/StreamlitComponentsStylingMarkdown -Detomo/detect_greeting_app -amitjainmldesign/amitapp -driller/pyconqa -samavi/openai-clip-vit-base-patch32 -Ironicsarcastic/Nse -parasmech/Image_captioning_nlpconnect -dinnovos/translator -JustMeJellybean/Jellybean -Ellabella1/ai-cover -typesdigital/WeatherIAPP -jaskugler/timdettmers-guanaco-65b-merged -DenniSciFi/IconAutomation -XIAOAssembly/Asrtrolobot -YangHao520/TestITP -Azai8915/ChubVenusTest -ThisThings/tdymndftbdfbvsgv -Lolicringw6969/Lol -lilholla/2099 -wlpzr/Test1 -Aaajdhdhdhahdbbaabs/Hshdhdhd -yukiiiwasneverhere/yuki -LINOlk/Akak -ardha27/rvc-hololive -kklol/lovelypan -mehnaazasad/give-me-a-title -Vincentim27/Plant_Nutrition_Prediction_ARIA -wikidere/crying -Amjadd/BookGPT -SuperSucklet/Sex -Hise/rvc-hololive-models -YONG627/456123 -fuckyoudeki/AutoGPT -fatmacankara/ASCARIS -eaedk/agri-tech-fastapi-with-GUI -rhineJoke/baichuan -cyberoleg/b2719240e190e2a649150d94db50be82838efeb0 -Giuvyz/rvc-genshin -Alfasign/Einfach.Stable_DiffPomrpter -openfoodfacts/ingredient-extraction -onliner/QR-generator -ElainaFanBoy/IRONY-Real-ESRGAN -VectorologyArt/prompthero-openjourney -VectorologyArt/Sygil-Sygil-Diffusion -RegalHyperus/rvc-lovelive-genshin -slyjay412/darkstorm2150-Protogen_x5.8_Official_Release -renumics/cifar100-outlier -renumics/mnist-outlier -renumics/beans-outlier -hensam92/YouTubeSummary -Weshden/Nsfw1 -sunmaiyyyy/combined-GI-RVC-model -itberrios/stable_edit -alandavidgrunberg/Cannes_Chatbot -Keay/Sae -Eieichicken/yyayyaya 
-HawkEye098432/DunnBC22-trocr-base-handwritten-OCR-handwriting_recognition_v2 -Hsft/VenusAi -fazni/Resume-filter-plus-QA-documents -Terma/Chat -ccwu0918/classify_image -Monelmo/Testing -syam417/rvc -soldguu/yumyum -NebulaVortex/falcon-chat -update0909/Manager_Promotion -sd-dreambooth-library/Baysa110 -omarelsayeed/AUDIO-ENHANCEMENT -sd-dreambooth-library/Baysaa1 -nick2655/Intelibotprivatedata -Keyradesu/Oka -hitoroooooo/hitohito -JoshMe1/UAS_MCL_FAREL -chengzl18/DeepTHULAC -huggingpaul/logo-wizard-logo-diffusion-checkpoint -csamuel/decapoda-research-llama-13b-hf -Alfasign/chat-llm-streaming -meluvsguaca/iluvguacastoo -meowmeow369/meow -Kyron2975/Linaqruf-anything-v3.0 -MoEternal/Hoshino -SappyInk/Ink -micooldra/bears -chuuyasleftlung/meowmeow -Username47337/key -ph0b0s122/Tex02 -RahulSinghPundir/MentalHealth -kaicheng/chatgpt_web -serhatderya/controlnet_v11_scribble_ui -Rii12/Test03 -JuanHaunted/humming_space -ltim/visual_chatgpt -Mo9/DionTimmer-controlnet_qrcode-control_v11p_sd21 -enesbol/case_dif -RajkNakka/NER-fine-tuning -gustavoespindola/SmartStay -Ayanoaisho/L -Luccadraw24/Amelia -Jialu/T2IAT -kasjkldjsalkj/fyodorahitevoy -Flyingpotato42/gpt4all-tweaked -rainslayer/rifles-classifier -Xyan-shuo2/Shoshoo -Eli-chan/Test03 -JCTN/stable-diffusion-webui-cjtn -ShermanAI/ChatSherman -ChrisCaviar/ControlNet-v1-1 -DpNaze/webui-docker -Sinestreaa/Test02 -qxllphl/qxllphl -allknowingroger/Image-Models-Test3 -pranked03/IssueFixerGPT -Gyjkkih/WizardLM-WizardCoder-15B-V1.0 -biranchi125/gpt2_experiment -qinzhu/Claude100K-API -PeepDaSlan9/bigscience-bloom -Usaki108/VoiceChange -Shawn37/UTR_LM -alexiserodriguez/whisper-transcription-app -robyramos/estimativa_historia -InnovTech/InnovTech.ProAI -asquirous/tv_desktop_classifier -vuvienweestword/godhelpmepttwo -Ajit025/Text_to_Image_conversion -ShahzadAhmed/DeepFaceApp -Ash58947/Jan -compasspathways/Sentiment2D -RecursosRegenerativos/README -jordonpeter01/Whisper-Auto-Subtitled-Video-Generator -gebebieve/gen -adorp/ControlNet-v1-1-duplicate -jordonpeter01/Whisper-Auto-Subtitled-Video-Generator-1-Public -ZhaoYoujia/ImageRecognition -snowcatcat/stable-diffusion-webui-cpu -youngtsai/Mandarin-TTS -YYar/Pr.O.A -nikhilba/donut-ocr -Nickwwww572/Test02 -BhagatSurya/convet_pdf_to_txt -shenfangqi/Retrieval-based-Voice-Conversion-WebUI -allknowingroger/Image-Models-Test4 -ThirdEyeData/Object-Detection-For-Electrical-Domain -juanpardo/gradio-GUI-FinalProject -notreallyintrested/Naseej-noon-7b -RenXXV/Test02 -miumiunana/miumiu02 -raphael-gl/ai-days-subtitles-demo -MikeTrizna/racemose_classifier -chinmayapani/LangFlow -sophiamyang/test-panel -jason137/text-to-sql -awacke1/StreamlitTestforSTEM -Kirihasan/rvc-holo -vincentmin/TalkToMe -danielritchie/yomomma -meowooooo/maybe -alkz/spacefast -ZettaFi/SeeFood -Snake12b/wizard-Vicuna-13B-Uncensored-HF -Situme/Wockabocka -awacke1/QRCodeAI -DanielGartop/SexAI -safora/myfirstspace -H2o6O2/Something -ec7719/Excel -Moses25/llama-7b-chatbot -alanchan808/Ask_Tennis_Coach_Patrick_Mouratoglou -mwahha/gwanh -wu981526092/Optimal_Cluster_Analysis_with_PCA_Visualization -mobu123456/venusai -jbilcke-hf/template-node-python-express -lqinyli/ali -Aoron/Test02 -youplala/StoreCopilot -leonelhs/carvekit -Protatoes/proxy_shit -Wanwan1215/Louisa -awacke1/runwayml-stable-diffusion-v1-5-06212023 -mpl8fjk/runwayml-stable-diffusion-v1-5 -awacke1/ChatGPTStreamlit7-Private -DeeeTeeee01/VODAFONE-CUSTOMER-CHURN-PREDICTION-APP -ammarnasr/Sem-GAN-Bird-Image-Generator -Gh-st/DUDUDU -rstallman/Mayfair-Partner-Music -rstallman/web-scraping 
-dinnovos/chatbot-shoe-store -GlimmeringStars/Testing -Giozh/openai-reverse-proxy -kai0226/hotdog-detection -joaocalista/insurance-premium-prediction -tomahawk24/roneneldan-TinyStories-33M -kyrontunstall/stablediffusionapi-waifu-journey-2 -hayas-tohoku-workshop-2023/sample-depth-estimation -SUSSYMANBI/Alex-diffusion-beta -hudsonhayes/Vodafone_CRM_Chatbot -MarcoLYH/Extractive-QA-Chatbot -XiNiu/XSpace -asciicorp/hotel-chat -aieye/named_entity_recognition_tutorial -osanseviero/nerfies-test -Mwebrania/clasma_database -anupam210/Flight_ATA_Class -SaltyFishAB/anime-ai-detect -SaltyFishAB/anime-aesthetic-predict -Joao77/Lolicombr -reach-vb/whisper_word_timestamps -PrabhuKiranKonda/fastapi-postgres-todo-api -Falah/object_detection -ankush37/phishingDetection -pedromsfaria/BTRUE_BOT -TuanScientist/BTCforecasting -snowc2023/ask_the_doc -JollyOmnivore/Fusion92_ChatGPT_Sandbox -Rohit001/emotion_detection -LeoDog896/yolov8n-asl -matthoffner/falcon-40b-instruct-ggml -MetaDans/AIBOT -terapyon/pyhackcon-qa2 -Qualinguis/Fraudulent_or_not -Vynock/rvc-wefu -Gregory-L/EleutherAI-gpt-neo-1.3B -JayceeAngel/openai-reverse-proxy -dinhminh20521597/OCR_DEMO -Priyanka-Kumavat/Customer-Complaint-Segmentation-Model -hudsonhayes/HudsonHayes-DocumentQA -Jarvis2301/Aku -anhalu/transformer-ocr -amish1729/LFUNet -ramonpzg/music-recsys-app -DeathRoad/PornagraphyIsGreat -abtech/README -UholoDala/Churn_Prediction -rstallman/Contract-AI -deeepsig/bear_classifier -rstallman/legisbot-text -chennaiai/hotdog -Brasd99/SquadDetective -CyberPeace-Institute/SecureBERT-NER-Space -penscola/customer_churn_rate -gradio/annotatedimage_component_main -SMD00/Image_Summarizer -MercurialAi/OncologyGPT -skylarx2x/openai-reverse-proxy -Sarath2002/Form_Understanding_using_LayoutLMV3 -ahuang11/name-chronicles -tanquangduong/ner-biomedical-abstract -Dalvo/Moxxie -Jaehan/Question-Answering-1 -FEFE2023/VENUSAIESPACIO1 -Jaehan/Translation-Korean2English-1 -aravindh-s/multiocr -taiwhis/Nhandien_nhom36 -Thumas/DogCat -sgonzalezsilot/TFM-DATCOM -allknowingroger/SatelliteSuperResolution -GIGACHAhoon/BasicNNYoutubeSentimentTop5CommentPrediction -qinzhu/diy-girlfriend-online -Kreaols/ChuanhuChatGPT -awacke1/CharacterZoo -Jaehan/Text-Summarization-1 -spuun/nsfw-det -Jaehan/zero-shot-classification-1 -halfdevil/demochat -Jaehan/zero-shot-classification-2 -Jaehan/Text-Generation-1 -Miko-opiko/openai-reverse-proxy -the-bucketless/where-to-shoot -Jaehan/Text-Generation-2 -Jaehan/Text-Generation-3 -Jaehan/Text-Generation-4 -Jaehan/Text-Generation-5 -projecte-aina/transcripcio-fonetica-catala -KuraiYuki/openai-reverse-proxy -BOXNYC/shirley -Jaehan/Text2Text-Question-Generation-1 -Jaehan/Text2Text-Text-Summarization -Jaehan/Text2Text-Sentiment-Analysis -Yram/Docker -anigaundar/intel_imgclf -Jaehan/Image-Classification-Using-a-Vision-Transformer-1 -Jorgerv97/Herramienta_interactiva_ensenyanza_tecnicas_aprendizaje_supervisado_salud -Tahnik/spreadsight-demo -SupawitMarayat/imgaug_img_microscope -777DUKE/Ballin -tech9/fashion1 -tappyness1/one_dash -jamesyoung999/whisper_word_timestamps -zadkiel04/rvc-yoshino -DCandE/rvc-models -chawiii/open-reverse-proxy -chanhi0603/Create_subtitles_for_videos_ChatGPT -ctcconstruc/README -dashues/frieda -penscola/sale_predictions -kellyxiaowei/OWL-ViT -pedromsfaria/Whisper_Diariazacao -JFN/gpt2 -Jaehan/ChatBot-1 -productdesigning/README -Jaehan/Code-Generator-1 -MrSalman/Image_captioning -RoryT0ishi/Meow -TohsakaSu/AQI-predictor -Parantonio/IA_voices -ankur-bohra/AliShaker-layoutlmv3-finetuned-wildreceipt -marker22/Bark-Voice-Cloning 
-PeepDaSlan9/Bark-Voice-Cloning -Vern0n/pls_work -anen/DentalGPT -StatsByZach/app -Abdullah-Habib/Rabbit_or_Hare -aitoala/huggingCuys -Lizzbitt/pi2 -Leozin11/openai-reverse-proxy -vincentliaw/runwayml-stable-diffusion-v1-5 -MinzChan/ChatGPT-PPT-Generate-With-Azure-OpenAI-API -yaful/DeepfakeTextDetect -sxunwashere/rvc-voice -Noahfinncee/Test02 -AUST001/True-GPT4 -Walterchamy/Kiitec_virtual_assistant -binly/ChatGPT4 -stevengrove/GPT4News -tappyness1/spaced_repetition_footwork -deepakchawla-cb/ai-interviewer -ckul/image-quality-assessment -huggingface-projects/Leaderboard-Restart -kartik016/aadharORPanClassifier -felixrosberg/FaceAnonymization -owen10086/lala -Zeltoria/Anime -wisnuarys15/rvc-wisnu5 -TheFunniestValentine/rp -Guochun/THUDM-chatglm2-6b -Zeltoria/anime-voice-generator -k4black/codebleu -Glasscupps/Hello -benkabod/README -DonDoesStuff/GPT3.5-voice -MajinSonic/EarthnDusk-EpicMix6_Realism -Torcat/torcat-test -Basil2k4/botbasil203 -kingabzpro/AI-ChatBot -justest/vicuna-v1.3-ggml -thabangndhlovu/ConstiChat -bimal590/Text_Classify -hamzakashif/kandinsky-2.1 -SumanthKarnati/SumanthKarnati-Image2Ingredients -MercurialAi/OncologyGPT_Temperature_Control -lmangani/chdb -davidscmx/fire_detector -1ucii/Lab04 -awacke1/GeographyandPopulationDensityUnitedStates -SumanthKarnati/SumanthKarnati-Image2Ingredients2 -dahaoGPT/THUDM-chatglm2-6b -rstallman/AI-Contract-Sheet -rstallman/Westminster-AI-Sheet -LightFury9/knee_osteoarthritis_classification -Ekittl01/impira-layoutlm-document-qa -IELTS8/ISF -nishantup/LLMsIntro -focusit/BhagwadGita -b-monroe/rvc-VoiceAI -Not-Grim-Refer/Reverse-Prompt-Engineering-Code -jaisidhsingh/cluster-summ -elitecode/Custom_ChatBot -yueranseo/mygpt -AnthonyTruchetPoC/persistent-docker -librarian-bots/README -kevinwang676/rvc-mlbb-v2 -anjani18/life -chasemcdo/hf_localai -Mwebrania/clasmaLAB -notable12/DermDetectAI -propilot/ai-speech-recognition -Ukrania/RVC-Models -putaalzasa/test -putaalzasa/lasttry -lopesdri/ObjectDetection -BrunoBall/Kaludi-ARTificialJourney-v1.0-768 -wilmars/cluster-app -Cropinky/esrgan -07jeancms/minima -BlackCub/ChatGPT4 -lanyingtianyan/ChatGPT2 -Jdnsn/Alexander -nr-rofiq/coba_chatbot -herosly/open-reverse-proxy -Jessi05/Gege30 -herder/DragDiffusion -fishhome/test -randstad/Workllama_Simple_Resume_Analyzer -anderbogia/dtp-asr-demo-v2 -Dhrushreddy/profile1 -btawaken/myownAi -Ripaxxs/Mom -Ripaxxs/Tommy -CAMP-ViL/Xplainer -awacke1/VoiceChatGPT-13 -okeefe4ai/donut-cord -Anitha0531/SpeechtoText -alamin655/replit-3B-inference -Djplaye/Stuff3 -Jackie2235/QueryExpansionForEtsy -tzafrir/formajourney -flemag/zeroscope -LukeMoore11/LukeMoore11-Big-Benjamin -glt3953/app-text_generation_openai -lucken/DL101 -santa1666/gradio_albert_demo -isabellaaa/heyy -Carterclear/swarm-agents -beephids/paper-llm -hamelcubsfan/AutoGPT -rstallman/chatgpt4 -Thafx/sdrv30 -Gertie01/enhanced-dalle2 -Allie7/Nose -PickleYard/stable-diffusion-webui-cpu -Yuki1111/Yuki -DHEIVER/Pedrita -Tasendodificilterumnome/Foiounao -seangsho/Boo -patimus-prime/strain_selection -allknowingroger/Image-Models-Test13 -ka1kuk/fastapi -ADOPLE/AdopleAI-ResumeAnalyzer -pragyachik/togethercomputer-RedPajama-INCITE-Chat-3B-v1 -Sachyyx/Sarah -eisenjulian/matcha_chartqa -duycse1603/math2tex -stbucht/GPT -geraskalnas/TheBloke-stable-vicuna-13B-HF -moussaalmoussa/ChatGPT4 -gauthamk/EuroSAT-ResNet34 -tovaru/vits-for-ba -UGK/UGK -JourneyDB/JourneyDB -MLVKU/Human_Object_Interaction -daedalus314/quantum-lora-quote-generation -adarsh8986/stabilityai-stable-diffusion-2-1-base -CuriousDolphin/MobileSAM 
-apexxlegends/README -spitfire4794/photo -gvozdev/subspace -hkayabilisim/clusternea -icehelmetminer/runwayml-stable-diffusion-v1-5 -nakas/MusicGenDemucs -MercurialAi/OncologyGPT_Probabilities -leuschnm/TemporalFusionTransformer -waryhx/venustor01 -Eden124/Eden124 -wjw777/ChatGPT4 -jpatech/dogcat -renumics/cifar10-outlier-low -Deva123d/AI_Image_Tools -Masa-digital-art/movie-trailer-16k -xnetba/Chat_advance -wu981526092/Stereotype_Detection -eve01version/evespace2 -foghuang/ChatGLM2-6B -STF-R/docker-test3 -MertYeter/evrimci -SwayamAK/CodeGPT -Saffy/minipets -shalinig/magorshunov-layoutlm-invoices -jianyq/ResumeBot -Chris4K/llms_compare -lusea/Voice-Cloning-for-Bilibili -lusea/rvc-Qinggan -jordonpeter01/Top-20-Diffusion-g -AnthonyErosion/HoctotAI -mrtimmydontplay/extra -Sumit7864/Image-Enhancer -mrtimmydontplay/api -shivammittal274/LLM_CA -mrtimmydontplay/120 -CazimirRoman/summarize-your-webpage-api-with-gradio -Yudha515/Rvc-Models -willhill/stable-diffusion-webui-cpu -DHEIVER/timeseries-anomaly-detection-autoencoders -wendys-llc/panoptic-segment-anything -Guilherme34/LiminalAI-cpu -visitaspro/VisitasPRO -ChevyWithAI/rvc-aicover -ivyblossom/sentiment-analysis -DHEIVER/AnimeGANv2 -hao007/Image-Caption -linyi888/FreedomIntelligence-HuatuoGPT-13b-delta -rr1/gpb -df2619/Hauser -slyjay412/stabilityai-stable-diffusion-2 -XuebaoDingZhen/YOLOv50.0.1 -lwj786/chatglm2-6b-int4 -awacke1/ChatGPT-QA-Translation-Summary-14 -evilandme/stable-diffusion-xl -ivn888/Rome-in-transit -mkmenta/try-gpt-1-and-gpt-2 -Mohamed90/Geoappfolium -allknowingroger/huggingface -BulatF/StreamlitSentiment -willdzierson/nlp_to_dates -StarbucksCN/starbucks_doc -subhajitmaji/MusicGen -PeepDaSlan9/nitrosocke-mo-di-diffusion -devduttabain/facebook-musicgen-small -way007/Salesforce-xgen-7b-8k-base -shiyi11/QQsign -richardzhangy26/yandian_flow_classification -Aityz/Aityz-3B -ting520/66 -kevinwang676/vits-fast-finetuning-pcr -alphunt/diffdock-alphunt-demo -andreasmartin/faq -arjundutta10/Arjun_AI -Astroomx/Mine -habash/WizardLM-WizardCoder-15B-V1.0 -batmac/captioner -lsy641/distinct -xosil14935/ExamCram -ShawnAI/Milvus-Embedding-Client -zhanghaohui/szu-gpt-academic -Kairi7865/Kairi2 -oliverlevn/ocean_faster_RCNN -Plutanico/PlutanicoTeste2 -randstad/Skills_Education_Gaps_Finder -MrZak/Learn-Up -randstad/ResumeSummarizer -Jason1112/ML-GUI -glt3953/app-text_image_hed -NS11890/demo-app -NeonLion92/OpenChatKit-neon -Hyperion1970/JosefJilek-loliDiffusion -ssdfsdfa/demo -TMojo/FoodVision_Mini -tangjicheng123/deepdanbooru -posit/gptneox-chat -ehristoforu/NLLB-Translator -lvwerra/python-interpreter -suyash-rastogi/dog_cat_classifier -DeeeTeeee01/SentimentAnalysis -MWSB2011/MicBot -Tatiana2u1/Tatiana -Borpos/openchat-openchat -justinstberger2dwww2/artificialguybr-freedom -DHEIVER/DICOM_to_JPG_Converter -luwujie/QQsign -zzznavarrete/minima -hostea/openbmb-cpm-bee-10b -darroncole928/hi -mikeee/WizardCoder-15B-1.0-GGML -MatrixYao/how_many_data_points_zh -edgar-treischl/IliartGPT -allknowingroger/Image-Models-Test20 -davanstrien/label-studio -godfiry/runwayml-stable-diffusion-v1-5 -brainstone/qr -Visgift/nyami -trueuserr/psmathur-orca_mini_v2_7b -Pascall/OASSapi_00 -cm-community/README -naughtondale/monochrome -Duckichan1/Jen_ -kaleidoscope-data/data-cleaning-llm -brianaaas/BeedAiTe -parsaesmaeilie/RecommenderSysteam -DHEIVER/FetalRiskPrognosticator -praveenku32k/Chatbot -YUCHUL/nlpai-lab-kullm-polyglot-5.8b-v2 -zhubao315/Salesforce-xgen-7b-8k-inst -allknowingroger/Image-Models-Test21 -allknowingroger/Image-Models-Test22 
-Golyass/Recomender-System-Hybrid-Method -Mehrdadbn/Movie-recommender-system -deepdoctection/Document-AI-GPT -AnandSoni2001/StockMarketPrediction -amirhosseinkarami/MovieRecommender -sabirsayyed/merc_or_bmw -nomic-ai/fka_awesome-chatgpt-prompts -nomic-ai/OpenAssistant_oasst1 -nomic-ai/Anthropic_hh-rlhf -nomic-ai/tatsu-lab_alpaca -everton-santos/vicuna-ggml -semomos3/Movie_Recommender -nomic-ai/databricks_databricks-dolly-15k -nomic-ai/glue -nomic-ai/stanfordnlp_SHP -nomic-ai/yahma_alpaca-cleaned -nomic-ai/wikitext -nomic-ai/GAIR_lima -nomic-ai/yizhongw_self_instruct -nomic-ai/openai_webgpt_comparisons -nomic-ai/lambdalabs_pokemon-blip-captions -nomic-ai/bigcode_ta-prompt -nomic-ai/nomic-ai_gpt4all-j-prompt-generations -th1nhng0/symato-cc-statistic -nomic-ai/nomic-ai_gpt4all_prompt_generations -nomic-ai/super_glue -nomic-ai/squad -nomic-ai/YeungNLP_firefly-train-1.1M -nomic-ai/imdb -nomic-ai/openai_summarize_from_feedback -nomic-ai/Hello-SimpleAI_HC3 -nomic-ai/dair-ai_emotion -nomic-ai/common_voice -nomic-ai/BelleGroup_train_1M_CN -nomic-ai/WizardLM_WizardLM_evol_instruct_70k -nomic-ai/Dahoas_rm-static -nomic-ai/ehartford_WizardLM_alpaca_evol_instruct_70k_unfiltered -nomic-ai/samsum -nomic-ai/teknium_GPT4-LLM-Cleaned -Ttss4422/Joeythemonster-anything-midjourney-v-4 -nomic-ai/mosaicml_dolly_hhrlhf -nomic-ai/tweet_eval -nomic-ai/BelleGroup_train_2M_CN -nomic-ai/Hello-SimpleAI_HC3-Chinese -nomic-ai/openai_humaneval -rkareem89/daggregate_space -nomic-ai/0xJustin_Dungeons-and-Diffusion -nomic-ai/amazon_reviews_multi -nomic-ai/financial_phrasebank -nomic-ai/wangrui6_Zhihu-KOL -nomic-ai/ag_news -nomic-ai/allenai_prosocial-dialog -nomic-ai/daily_dialog -nomic-ai/facebook_winoground -nomic-ai/Chinese-Vicuna_guanaco_belle_merge_v1.0 -nomic-ai/squad_v2 -nomic-ai/swype_instruct -nomic-ai/wikiann -nomic-ai/go_emotions -nomic-ai/xtreme -nomic-ai/BelleGroup_multiturn_chat_0.8M -nomic-ai/BelleGroup_train_0.5M_CN -nomic-ai/sciq -nomic-ai/derek-thomas_ScienceQA -nomic-ai/csebuetnlp_xlsum -nomic-ai/gsm8k -nomic-ai/blended_skill_talk -nomic-ai/BelleGroup_train_3.5M_CN -nomic-ai/junelee_wizard_vicuna_70k -nomic-ai/piqa -nomic-ai/BelleGroup_school_math_0.25M -nomic-ai/Helsinki-NLP_tatoeba_mt -nomic-ai/Dahoas_full-hh-rlhf -nomic-ai/kunishou_databricks-dolly-15k-ja -mmsamuel/burger_generator -nomic-ai/empathetic_dialogues -nomic-ai/EleutherAI_lambada_openai -nomic-ai/codeparrot_apps -nomic-ai/neulab_conala -nomic-ai/conll2003 -allknowingroger/AI.Dashboard.Gradio.Streamlit.HTML5 -kidcoconut/spcdkr_omdenasaudi_liverhccxai -MaverickHans/selfie -MohamadRezo/flixPicks -sub314xxl/Analog-Diffusion -sub314xxl/HairCLIP -DebasishDhal99/Youtube_Playlist -sub314xxl/DualStyleGAN -Aiusernumber5/janitorai -DHEIVER/Kidney_Image_Classifier -sub314xxl/GFPGAN -Shibe/sahil2801-replit-code-instruct-glaive -hanzaq/Doc-Bot -bigbencat/internlm-internlm-chat-7b-8k -hehysh/stable-diffusion-webui-cpu-the-best -Miyuki13242/Daily -Valerina128503/U_1 -Artples/google-flan-t5-xl -Himanshusingh/KernAI-stock-news-distilbert -teganmosi/Translator -TensoraCO/code-explainer -TensoraCO/docquery -sebby5/eeeeee -theodotus/llama-uk -danialazimi10/demo_mrs -Deon07/prompthero-openjourney -Kaustubh-kapare94/ALPD -arxify/RVC-beta-v2-0618 -jjddckcivikviv/hhh -stefo/minimal -katasou/Music-discord-bot -awacke1/WildstuffV1 -racear/drolatic -abhisheky127/Fold_TransactionClassification -anastasiablackwood/Anastasiablackwood -ShoaibMajidDar/PDF-chatbot -futuristicdude/The_First_Principle_thinker -joaquin64800/XD -allknowingroger/Image-Models-Test25 -Branon/Proxy 
-lijiacai/ai-set -Ryukijano/ML-Agents-SoccerTwos -MindWaveStudios/README -sub314xxl/openchat-openchat -sub314xxl/zeroscope -sub314xxl/zeroscope-XL -Binguii/Ballen -Vageesh1/personality_chat -edjdhug3/chat-with-pdfs -kaveh/radiology-image-retrieval -MUmairAB/Masked-Language-Model-App -MUmairAB/MaskedLM_App -kenhugs/dsed -rubensmau/Dov_Tzamir -szk1ck/similarity_by_fasttext -VGues/NOG -Dagfinn1962/CPU -xlne/whtvr -bluuuuuuuu/test02 -kukkurukeroon/kukkurukeroon2 -Ibrahemqasim/Img -iqovocn/ChuanhuChatGPT -JairoDanielMT/CCPlatanos -trhacknon/free-fast-youtube-url-video-to-text-using-whisper -PeepDaSlan9/neon-tts-plugin-coqui -yejijue/img-to-music -deaaassws/QQsign1 -flow3rdown/word_sim -Vageesh1/Falcon_7B -MUmairAB/DistilBERT-MaskedLM -vinceL/YonKomaMangaGenerator -lazyboy450/RVCv2-Genshin -Adeeb-F/AI-Genrated-Image-Detector -Kitsune9tails/Test02 -MadhurGarg/digital-chat -jordonpeter01/AWS-CHATBOOT-SUPER -jesuspj/jesuspj -sub314xxl/radames-kandinsky-2-1-img2img -juuaaa/ambatakam -jesuspj/jp -plauder/geese -standardteam/ChatGPT4 -sub314xxl/MusicGen-Continuation -Rvtcheeto/Test02 -Ash58947/Bot -simonguest/cs-tutor -Nixtla/chatgpt-forecast -TitleOS/Seahorse-350m -yunfei0710/gpt-academic -oncetalk/syzymon-long_llama_3b -YumiKujo/K -Romanian/Ok -Atharv23m/Human-Stress-Detection -Mahbodez/knee_report_checklist -triggah61/chingu-music -allknowingroger/Image-Models-Test28 -conchdork/open-reverse-proxy -Thafx/sdrv40 -jungwoo9/foodvision_mini -juuaaa/aaaa -benfield/MBZUAI-Video-ChatGPT-7B -Alfasign/Midjourney_Prompt -Alfasign/nomic-ai-gpt4all-13b-snoozy -ojackalope/Daemon -Megareyka/imageRecognition -sajithlal65/emilianJR-epiCRealism -wliu88/StructDiffusionDemo -arcosx/CHO-cytotoxicity -Singularity666/VisionGPT-Automation2 -odettecantswim/vits-models-genshin -jbilcke-hf/audioldm-text-to-audio-generation -noa101/autoevaluate-extractive-question-answering -cppowboy/viscpm-chat -osanseviero/test_chatui -osanseviero/my-own-falcon -Vipitis/shadermatch -FishyFishFrisk/Reversyyy -Tuyet3005/Sentiment_Analysis_using_BERT -michellemli/PINNACLE -Ritori/Twilight_MoNiQi -T2007/T -Yumko/Idk -Fan-611177107/bigscience-bloomz-7b1-mt -Deva123d/WaveFormBot -jungwoo9/foodvision_big -vaishanthr/Simultaneous-Segmented-Depth-Prediction -ScottRobertsXR/image-captioning-01 -videfikri/aicover -PeepDaSlan9/OpenAssistant-falcon-7b-sft-mix-2000 -Jashvinu/NousResearch-Redmond-Hermes-Coder -Nattylegit/ChatGPT-Plugins-in-Gradio -ADOPLE/ResumeAnalyzer -ADOPLE/AdopleAI-Website-DocumentQA -warrenw/simple-gpt-interface -ADOPLE/ResumeSummarizer -Insightly/CSV-Bot -viktor-kertanov/painters -raseel-zymr/dAIgramGen -kadirnar/chat -Daniil-plotnikov/Daniil-plotnikov-russian-vision-v4 -huak95/personaGPT_custom -hugggof/vampnet -Madhur-01/text-summarizer -Uday007/Oil-Price-Predictor -Uday007/Purchased -Uday007/House-Price-Predictor -srkajol/westminister-ai-sheet -srkajol/AI-Chat-PDF -srkajol/legisbot-ai -KarinaCardozo/PrevencionFraude -srkajol/avocat-ia -srkajol/Singapore-Regulation-AI-Sheet -Reyes2024/Hua00666 -jayvaghasiya/winerybarreloak -assembleteams/curious -Uday007/Diamonds-price-predictor -Uday007/Penguin-BodyMass-Predictor -Uday007/Insurance-Predictor -ShayanP/Salesforce-codegen2-3_7B -rickysk/rickysk-videomae-base-ipm_all_videos -jackcat/GradioTest001 -junkmind/Deepfake_image -balaramas/indic_s2t -kumasan681104/React_St -ankush-003/ankush-003-nosqli_identifier -allknowingroger/Image-Models-Test31 -zakiu/Personal-TTS -bryanlegrand/instant_bedtime_story -renumics/whisper-commonvoice-noise-issues -812vaishnavi/gradio-land-cover-mapping 
-Kichkinya/reverseproxynya -jonathang/YoutubeSmartSpeed -daniellefranca96/styles-scribble-demo -NeonLion92/Chat-and-Battle-with-Open-LLMs-Neon92 -XPMaster/data_automation -placeme/Wander-Plan -Noobian/SplunkGPT -ahuang11/mapnstreets -inflaton/learn-ai -Dagfinn1962/diffusers-gallery -shigel/langchain-function-calling -jpdiazpardo/jpdiazpardo-whisper-tiny-metal -yangban/catordog -SnehaTiwari/Fashion-Image-generation -ysharma/ChatinterfaceTests -limobaidandan2515/ChatGPT4 -Salama1429/speech-to-speech-translation -Oloo-1/done -achref/neuro_internal_tools -justest/mdn-chatbot -serhany/huggingchat-try -imcaoxuan/runwayml-stable-diffusion-v1-5 -kyleebrooks/VectorDatabaseCreate -vaishanthr/Image-Classifier-TensorFlow -Fawis/Awooga_xd -dcq/freegpt-webui -Sandiago21/automatic-speech-recognition-greek -TheSxrynlxX/Idk -Gregory-L/openlm-research-open_llama_3b -iruku/and -soduhh/Text2Pix -multimodalart/upload_your_model -Sandiago21/text-to-speech-greek -Sandiago21/speech-to-speech-translation-greek -brany/QR-code-AI-art-generator -dariowsz/speech-to-speech-translation -ElisR/spherical_harmonics_visualisation -jlevin/dpv-finetuned-gpt2-tiny -ilpy/global-life-expectancy -AdithyaSNair/alzheimers_prediction_using_cnn -WanderingRose/Storm -Ralmao/glass_py -dcq/nodetest -firestalker/anime-tts -Ritori/Yura_GPT -DpNaze/Dreamlikeart -jt5d/kandinsky-community-kandinsky-2-2-prior -peter2489/translator -tlqkfdksldlrpwhswogksekrhzzz/translator_interpenr -Daniil-plotnikov/Daniil-plotnikov-russian-vision-v5-beta-3 -dpe1/can_this_pokemon_evolve -nikitaPDL2023/assignment4 -avishkararjan/Movie-Recommendation-Model -TungB/mini-photoshop -removebg/removebg -OnabajoMonsurat/Brain_tumor_prediction -aqlanhadi/qr-art -KeroKiki/Rin -Vinnybustacap/Gryphe-MythoLogic-13b -badmonk/up -DonDoesStuff/sd_xl_base_0.9 -Alfasign/diffusers-gallery -Abdullahw72/bark-voice-cloning -LUOYE-123/QQsign -tnrzk13/PneumoniaDetection -hishamomran/explicit_text_classifier -beastboy/WizardLM-WizardCoder-15B-V1.0 -crystalai/EleutherAI-gpt-j-6b -Dewa/Text-Summurisation -sarahyoung/taltech -omniinferlabs/README -luisrguerra/unrealdream -hisfog/SQLdepth -Sandiago21/text-to-speech-french -iamlonely/destroylonely -openbio/calculator -THEFIG/AI-chatbot -SunshineSalem/JanitorAI -RatKing243/Test -sub314xxl/webui-cpu-extension-test -chrisvnz/IFC-Extract-Properties -PurplePanda00/plant-leaf-detection -hexdq666/OAIRP -muLoo/dis-background-removal -Benson/text-generation -tbhyourelame/kay -tttarun/ocr_voter_list -chuan-hd/law-assistant-chatbot -DracoHugging/LicensePlateRecognition -redo62/image2text-comp -abhisheky127/QuaraAI_Translator -lu2000luk/RuttoniAI -suidu/MAGAer13-mplug-owl-bloomz-7b-multilingual -MohammedAlakhras/AI_Chat -Sandiago21/speech-to-speech-translation-spanish -autumn8/selectModel -ultgamerkient/GPT4ALL -kevinwang676/FreeVC-en -PockiBoi7/PockiGEN -reddysh/pleasework -livelaughcats/m -reddysh/pls -lIlIlllllmeng/QQsign1 -arpitneema/ArpitTestBert -Lee-Shang/sahi-yolox-duplicate -IVentureISB/Gen-AI -lanhuan1111/hello_world -hanskabvw1/chat -Fouzia/Harvard-USPTO_Patentability-Score -LTputin/Janitor_AI -figsfidds/moody_nana_classifier -wrdias/SD_WEBUI -lijiacai/ai-set-demo -UholoDala/Jj_Sentiment_Analysis_App -SetoKaishi12/Test02 -Andyrasika/Andyrasika-lora_diffusion -EXPOSUREEE/Ai-Image-Enhancer -ZApkh/test -justest/vercel -HuggingAlgorithms/Object-Detection-with-YOLO -Andyrasika/xlm-roberta-base-finetuned-panx-de -Andyrasika/distilbert-base-uncased-finetuned-emotion -glt3953/app-audio_video_transcribe -TNR-5/lib111 
-azuboguko/sentence-transformers-paraphrase-multilingual-MiniLM-L12-v2 -TNR-5/chatorO -usernamelsp/QQsign -aseduto/sp500 -Kimata/multimodal_deepfake_detection -Akshay-More-007/starcoder -TNR-5/stabilityai-stable-diffusion-2-1 -SidKarthik/multi_doc_retrieval_agent -vaibhavarduino/better-autogpt -lykke-05/pleaselowrd -MrlolDev/Explore_llamav2_with_TGI -Itsjusttasiaa/Test02 -MrZak/LearnUp-4.1 -jtlowell/stable-diffusion-webui -sherjilozair/meta-llama-Llama-2-70b-chat-hf -freddyaboulton/test-discord-bot-v2 -CofAI/README -allknowingroger/Llama_v2 -bhandsab/meta-llama-Llama-2-70b-chat -bhandsab/meta-llama-Llama-2-70b-hf -gsaivinay/Llama-2-13B-GGML-UI -kevinwang676/Voice-Cloning-SadTalker -Eduardovco/Potato -veb-101/UWMGI_Medical_Image_Segmentation -DQChoi/gpt-demo -ExpertPrompters/AskIDF -Sai004/ArticlePredictor -ifire/mpt-7b-storywriter -boomsss/gamedayspx -ilmhona/chat-with-pdf -dahaoGPT/Llama2-70b-chat-demo -dahaoGPT/Llama2-70b-chatmodle-demo -Gffxs/Ey -zhaiqi/qq -tellview/suno-bark -EysCanacan/Scikit-LLM-Demo-Eys -qingjiu11/QQmm -maheshwaranumapathy/meta-llama-Llama-2-7b-hf -bitofurqan/meta-llama-Llama-2-70b-chat-hf -xuan23/test1 -drift-ai/recruiter-assistant -BadRobot147/SFQ3 -kingabzpro/ChatGPT-Gradio-Interface -zhanggrace/ImageSearch -maxjmohr/MSc_02_PDL_A4 -silencewing/server -taminactineo/taminactineo -realchenyuy/llama2-playground -renumics/navigate-data-issues -GreenCounsel/SpeechT5-sv -TNR-5/AI-WebTV -g4f/freegpt-webui -CofAI/tv -hehe520/stable-diffusion-webui-cpu -VIPLab/Caption-Anything -viniods/speech_recognition -eddydpan/clip-recycling -ljiy/GGG -TNR-5/Stable-Diffusion-Protogen-x3.4-webui -Utkarsh736/crick-pick -dongfang2021/ObjectDetection -ishan10/Science_Tutor -kevinwang676/ChatGLM2-SadTalker -UFOOO/README -AIlexDev/Einfach.Hintergrund -pikto/Elite-freegpt-webui -Sunbird/runyankole2english-stt -yardi/phrase-semantic-similarity -Old-Fat-Boy/Youtube_Thumbnail_CTR_Analyzer -LEOZHAO92/TTS -Omnibus/pdf-reader -GeekedReals/jonatasgrosman-wav2vec2-large-xlsr-53-english -AIMLApps/Botrite_wip -Tihsrah/Credit_Risk_Assessment -keivalya/alternovation -jitterz/testing -polymath707/bigscience-bloomz-7b1 -ehristoforu/Stable-Diffusion-Protogen-x3.4-webui -elvis-d/tweet-sentiment-analysis.GRADIO -cpluoiudy00001/QQsign -elvis-d/Tweet-Sentiment-Analysis-App.STREAMLIT -Keshav4/resume-data-extraction -tdnathmlenthusiast/food_classifier -ggwwu/THUDM-WebGLM -wambugu1738/meta-llama-Llama-2-13b-chat-hf -rafaelpadilla/coco_metrics -EdZ123/anime-collaborative-filtering-system -arikru/packstation-inspector -MaximilianChen/Casper -CofAI/CurrencyConverter -junjunn/rvc-models -CofAI/CalculatorUI -Stanlito/openvino_QandA -puripurikyuakyua/Gahana -Carlos056/Cara -Harshveer/Diffusion30x -MercurialAi/OncoMedleyMini -onursavas/meta-llama-2-7b-hf -mearidesu/test2 -indifendi/baby1 -saipanyam/QAGenie -sudxiaohan2/Real-CUGAN -ewg88/ai-forever-ruGPT-3.5-13B -ZenXir/FreeVC -birsardar/stable-diffusion-mat-outpainting-primer -Pauitbid/meta-llama-Llama-2-7b-hfx -gary109/hotdog-not-hotdog -elina12/asr_arabic -CofAI/LengthConverter -Stanlito/QandA-on-custom-PDF -zhiwucai/gpt2 -warrenw/simple-gpt-interface-2 -CofAI/urlcut -Pravincoder/Loan_Approval_Predictor -ccyo/chatgpt_bot -Dennis0402/QSign -pradosh/insurance_demo -lanyi2023/QQsign -Aspik101/Polish_Llama2 -raghuram13/Audiototext -awacke1/Speech2Text-FastSpeech2 -awacke1/SpeechToText-MS -awacke1/Text2Speech-0721 -TaliaKorobkin/facebook-fastspeech2-en-ljspeech -kmahtan2/facebook-fastspeech2-en-ljspeech -Tetel/secondbing -sjdata/Testinggrounds -VGG1555/VGG1 
-awacke1/FastestText2SpeechEver -AnxiousNugget/janitor -imdebamrita/Handwritten-Digit-Recognition -vanderbilt-dsi/french-revolution-letter-writing -sjdata/Streamlit_test -elumamai/AI-ChatBot -anthonymikinka/wizard -magehunter45/ApartmentInvestorBot -JUNGU/gpt4kids -jangocheng/stable-diffusion-webui-cpu_with_prompt_pub -Swaraj912/FIRS0 -wangrongsheng/CareLlama -Sadashiv/BERT-NER -henryezell/freewilly -CofAI/njpad -afffffdf/QSign -felixfrosch/deep_learning_assignment -Sandiago21/text-to-speech-german -hkqiu/AI4P -1doemePnordwo/upscale -littlegoldfish/simple_chatbot -Arnx/MusicGenXvAKN -DHEIVER/detect_anomalies -jeycov/Mama_ca -t0int/CalderaAI-30B-Lazarus -TNR-5/test_dev_s -zaursamedov1/llama2-qlora-finetunined-NER -sawi/audio -elumamai/openai-whisper-large -MARSHALLXAARONDRAKEICO/ai-forever-ruGPT-3.5-13B -enadewan/ASK_FREDDY_BY_CONTRUCTOR_LEARNING -enadewan/ASK_FREDDY_BY_CL -geraskalnas/ODISE -ashercn97/AsherTesting -AIxPha/Real-CUGAN -kernelguardian/llama2action -Bajr/softly -Nanostuffs/nano.ai -Sandiago21/automatic-speech-recognition-german -Sandiago21/automatic-speech-recognition-french -Biaolin/stabilityai-FreeWilly1-Delta-SafeTensor -jjyaoao/speech-to-speech-translation-spanish -CXD200/QSign -boddles2/pyannote-speaker-diarization-2 -sabridsn/HOCR -Neuralpls/README -liimefruit/RVCollection -nms319/README -chongjie/PoseDiffusion_MVP -CofAI/chat.v2 -Mehdihassan/stable-ts -DHEIVER/VestibulaIA -unclesamjo/GTalkGPTV01 -youplala/chartGPT -DamianMH/Mlove -BasToTheMax/tensor -Plurigrid/bidirectional -Kwabbs/SENTIMENT_APP -GageWeike/GPT4i-FreeWilly2 -PeepDaSlan9/chatbot-arena -Bobertsonthethird/Test01 -chongjie/ZoeDepth_slim -Maqueda/SG161222-Realistic_Vision_V1.4 -samcaicn/bingai -Ryandhikaw/rvc-hololive -manymoon22173/RVC_MODELS -Rezuwan/parrot_classifier -pikto/ELITE-ChatGPT-Streamlit-2 -clibrain/dataset-curation -barabum/image-duplicate-finder -ploybtt/ploybtt -TNR-5/Chatui -jimmmyjoy56723/test -g0urav-hustler/PCB-Fault-Detection -ReThGe/Linet -chongjie/co-tracker_MVP -PeepDaSlan9/stabilityai-FreeWilly2 -MUmairAB/English-to-French -Ritori/play_with_baby_llama2 -awacke1/StreamlitAIPP1 -ryoung41/AIPairProgramming1 -kmahtan2/AIPairProgramming2 -jdhuka/AIPairProgramming1 -TaliaKorobkin/AIPairProgramming1 -ryoung41/HTML5Interactivity -jdhuka/HTML5Interactivity -ElricOon/EYE2 -arseny-chebyshev/vox-diffusion -luisotorres/cats-vs-dogs -awacke1/AnimatedGifGallery -jbilcke-hf/zeroscope-server-3 -awacke1/Mp4VideoGallery -chrisclark1016/Untappd_Predictor -wilsonbritten/inference-client-test -Jafta/chatglm2-6b-4bit -earneleh/paris -Large-LLM-Proxy-CAI/GateOfProxyClaude2.0 -osbm/prostate158-monai-inference -gradio-discord-bots/gpt-35-turbo -DHEIVER/analise_imagem_mama -kalarios/proxy -azusarang/so-vits-svc-models-ba_P -ActivatedOne/JorisCos-ConvTasNet_Libri1Mix_enhsingle_16k -nikoifirewall/First_shot_gradio_covid_sentiment_analysis -tripsby/travel-genie-json-public -Priyanka-Kumavat/Regression-Model -xuqinyang/Baichuan-13B-Chat-Int8-Cpp -deepskyreal/ai-mixer-hotchpotch -s3nh/mamba-gpt-3b -xuqinyang/Baichuan-13B-Chat-Int4-Cpp -awacke1/GetAllContent -ZalacDanijel/pujaguja -FilipBak/mushrooms -freddyaboulton/llama2-70b-discord-bot -gradio-discord-bots/llama-2-13b-chat-transformers -CofAI/chat.b4 -captainChan/CaptainChan -fabiodr/whisper-jax-diarization -kitt3nsn0w/yofeli -dogincharge/Shap-ER -jessica198601/jzlqy -tikendraw/movie-recommender -naotakigawa/qatool -nsarrazin/agents-js-llama -Plurigrid/LifeSim -Abhay1210/prompt-generator_V1 -CofAI/picscore -awacke1/AzureBlobStorage -Hazem/roop 
-ChandlerGIS/shortgpt -jordonpeter01/prompt-generator-public -Multi-chan/amy_project -tharunayak14/Text-Summarization -Q-bert/EarthQuakeMap -TRaw/starchat-assist -valeryk2/task7 -devisionx/autoannotation -sharathprasaath/Gender_identification_by_eye -Semibit/tts-server -VVallabh/AI-driven-Video-Generation-Tool -stistko/CzechCapitalization -TNR-5/dalle -SocialGouv/speech-to-speech-translation-french -krishw/MovieExplorer -lucinnerieux23/kotkindjn -Paulraj916/paulraj916 -H0n3y/Honeystesting -medkins/s2w-ai-DarkBERT -PikeAndVine/resize_color -vvv214/sdxldbooth -EmpathyFirstMedia/README -JonaSosa/spam_filter -mikeee/langchain-llama2-7b-chat-uncensored-ggml -alphakavi22772023/test_00 -VVallabh/AI-Powered-Subtitle-Generator -Saurabh46/MyChatGPT-DEMO -marshallzee/itenas-computer-vision-bot -AIZero2HeroBootcamp/VideoToAnimatedGif -AIZero2HeroBootcamp/MultiPDF-QA-ChatGPT-Langchain -AIZero2HeroBootcamp/AnimatedGifGallery -AIZero2HeroBootcamp/ChatGPTandLangchain -Kiran96/Article_summarizer_with_salesforce_CtrlSum -Tanor/Serbian-WordNet-Sentiment-Visualizer -AIZero2HeroBootcamp/TranscriptAILearnerFromYoutube -coraKong/WorldSimulation -AIZero2HeroBootcamp/FastSpeech2LinerGradioApp -rbigare/stablediffusionapi-architecture-tuned-model -Ryzal/rvc-models-new -jeevavijay10/code-gen -OptorAI/site -yl12053/so-vits-4.1-Grass-Wonder -ljrmary/UT_Hackathon -theriyaz/stabilityai-stable-diffusion-xl-base-1.0 -jdhuka/SuperSimple2linerText2Speech -awacke1/SuperSimple2LinerText2Speech -ryoung41/SuperSimple2LinerText2Speech -jeremymontgomeryoptum/Text2Speech -jdhuka/StaticHTML5PlayCanvas -affine/Time_Series_Model -MihaiPopa2/ChatGPT-Prompt-Generator -justest/PaddleSpeechASR -tushar310/chatgpt_clone -NeuroSenko/audio-processing-utils -FangLee/Generate-Music-in-Time-Series -daniyal214/gradio-caption-generator-git-large -Jonathancasjar/Detect_products_and_empty_spaces_on_a_Supermarket -devthedeveloper/Bark-with-Voice-Cloning -LuxOAI/stabilityai-StableBeluga2 -vishnu23/drone_image_segmentation -mehedihassan/stabilityai-StableBeluga -mehedihassan/AI-Text-to-speech -reimari/rvc-aa99 -curseofvenus/ChatGPT4 -limingcv/AlignDet -wahyupermana10/churn_prediction -cbr/swp -AIZero2HeroBootcamp/ExperimentalChatGPTv1 -TNR-5/zeroscope -AIZero2HeroBootcamp/ClassDescriptionAndExamplesStreamlit -starnek/mix-design-concrete -projecte-aina/aguila-7b -miculpionier/Fill-Mask -kaxap/wiki-multilingual-e5-large -xinli80/gradio-image-generator -PeepDaSlan9/poisongpt -Razkaroth/incidencia-delictiva -AntX-ai/README -Tiredmaker/OKC -AntX-ai/Fintech -nguyennghia0902/SentimentAnalysis_usingBERT -haywired/medibot-llama2 -allknowingroger/Image-Models-Test52 -1368565466ki/ZSTRD -1368565466ki/Satdia -TNR-5/Music-discord-bot -TNR-5/testbot -yash-srivastava19/CodeSmith -TNR-5/files-lumbot -lavanjv/falcon-mini -Wrightjay/togethercomputer-LLaMA-2-7B-32K -Izal887/rvc-hutao -CanonOverseer/Canons-Den -lunbot/add -Superintelligence1130/text-to-video-test -rahulsccl/GenAIMyAvatar -mohamedemam/bert_sentaces_similarty -sub314xxl/SDXL-1.0 -LavanyaBurlagadda/TChatBotWithPlayHT1 -sub314xxl/MusicGen -Zeelubha/Football-Prediction -Bl1tzie/Jam -Enigma007/Normalizer-Dashboard -sub314xxl/SD-XL -Enigma007/Medika -mkManishKumar/Bank-Customer-Churn -sub314xxl/image-server-1 -sub314xxl/sdxldbooth -TechGenHub/README -Fernando22/freegpt-webui -dianman666/bingai -sub314xxl/saiga2_13b_ggml -abnerzhang/ieltsGrade -Sidaddy/Beluga2ScriptGenerator -szk1ck/docker_test -szk1ck/similarity_by_fasttext_api -in18/stable-diffusion-webui-cpu -BobbyOleti/MyGenAIChatBot 
-AgProfile/chatbotopenaihere -Kashishmahajan/gradioLangChainOpenAI -AgProfile/GradioGenOpenAi -adityakabra/Patent-AI-V1 -ririah13/Test -Uday29/MyChatBot -GowthamSiddharth/MyAssist_ChatBot -sai1108/MyChatBot -PRABHKAR/MygenChatBot -jaiteja7849/MyGenAIChatBot -Kotinagendla/MyGenAIChatBot -Vignesh2496/project -Mahesh111/MaheshgenAIchatBot -YokoH/MIS_SALCHICHAS -vyshnaviii/MyGenAIchatbot -Naveentalluri/NaveenGenAIAvatar -patilyash22/ChatBotWithOpenAIAndLangChain -surampudiAdarsh/myfirstopenAIUsinggradio -vinayarukala31/mygenAIChatbot -Vijaykumarthummapala/Mygenaichatbot -imdebamrita/whatsapp_chat_analysis -BalaBhaskarudu/mygenAIChatbot -SirishaArveti/GenerativeAIChatBot -shivaaaa/myGenAIChatBot -Manikanta-06/myaichatbox -ishanchennupati/ishanavatarchatbot -vikram767/myGenAIchaTBoat -SaiRaam/AIAvatarchatbot -kpavankumar971/MyAiAvatar2.1 -Shannu/mygenAIAvatar -vamsikolla/MygenerativeAIchatbot -Harikumar4/MyGenApp -datatab/datatab-alpaca-serbian-3b-base -Menthe17/MyGenAINani -Nagireddys/MygenAI -JairParra/Captioning_and_Stable_Diffusion_Generation -lavanyaparise/myenAIchatbot -MOULI17/CmGenAIChatbot -tharunG17/TharunChatGPT -EmoHugger/MyGenAIChatBot -Madhes/GradioLangChainBota -Aishwini/myfirstaigen -akhil5466/MyGenAIAvatarSpeech -satyainjamuri6/MygenAIAvatarSpeech -leelaaaaaavvv/pavaniMyAIchatBot -Sunilkumarkanugula/SunilChatBot -aurora10/gradiolangchainchatbot -Sadhvi/ChatBot -loknitesh/MYGENAI -lalithakash2346/CortanaAI -pallesureshnaidu/MyGenAIChatBot -finny24/FinnyAiVoice -Vivekdunuka/MyAIChat -awacke1/ChatGPT-Genius-Assistant-4Writers -awacke1/ChatGPTGeniusWriter-HTML5-Output-1 -RajuGovvala/Raju123 -Naveen618/mygenAIAvatharSpeech -Kurugodu/myGenAiText -shivaatNXTWAVE/mygenai2 -emre/emre-llama-2-13b-mini -Mbilal755/Rad_Summarizer -sukh28/toxic_gradio_app -AdvertisingAgency/README -ganesh78/MyGenAIApp -eruuin/something -zhaoyuzhaoyu/stabilityai-stable-diffusion-xl-base-1.0 -talari/MyGenAiChatBot -paschar/StoryGenerator -himanshukale/WAppTastic -motleykrug/README -pavankumark/mygenaichatbot -armansakif/BenFake -sangareddyjaswanth/mygenaispeech -Naveentalluri/NaveenGenAI -akashpadala/MyGenAIChatBot -HelloMimosa/sail-rvc-Ai_Hoshino__From_Oshi_no_Ko___RVC_v2__300_Epoch -Dileepgorantala/dileepAI -akashpadala/myGenAIAvatarSpeech -kaicheng/ChatGPT_ad -Menthe17/Nani17092005 -Dileepgorantala/dileepVoiceAI -Vageesh1/PDF_QA -kelothu/gradiolangchainbotopenai -andryMLOPS/ASTA-GPT-3.8_web_ui -GuruVineeth/GenAIGPT -Naveentalluri/NavenAIvoice -NIVASVAKA8999/myaigen -kamranahmad92/gradialanchainChatBotOpenAi -kamranahmad92/chatgbtaigradientlanchain -warakram/gradiolangchainchatbotopen.Ai -kamranahmad92/GradioLanchainChatbotAi -zhangguofen/Real-CUGAN -kamranahmad92/GRADIOLANCHAINOPENAICHATBOT -bhavanaraj/myaivoice -kamranahmad92/Gradientlanchainopenaisuperchatbot -kamranahmad92/lanchaingradientsmartaibot -Srikanthpichika/sreegenAIApp -eslavathanil/myGenAIchatbot -Krishna3/mygenAIChatBot -Nesip/meta-llama-Llama-2-70b-chat-hf -swetha311/mygenAIspeechh -CormacMc/projectsub6 -sindhoorar/brain-tumor-classifier -omkar001/gradiolangchainchatbot -surya12003/suryabot -zishverse/zishanChatAI -169153tej/My-New-Gen-Ai-Chat-Bot -Friklogff/xx-xhai -qq37017934/QSign -AkshayKollimarala/MygenAI -Shreeradha/GradioChatBotAI -muneebashraf/Visual-Sentiment-Analyzer -Abhi1262/MyGenAIChatBot -AkshayKollimarala/MYAIVOICESPEECH -cbhasker/MyGenAlChatBot -lolakshi/dhoni -bhanuprasad3245/mygenAIchatbot -NanoT/demo -isabelahrens/facebook-fastspeech2-en-ljspeech-0731 -hannahross5/facebook-fastspeech2-en-ljspeech-0731 
-udaykiran6703/UdayGenAI -yaswanthkumar/yashAIbot -janusurya/mygenchatBot -awacke1/Memory-0731 -hannahross5/Memory-0731 -awacke1/HTML5InteractivtyDemo -ehristoforu/llm-discord-bot -maha-vishnu/mahavishnu -surya12003/suryabot1 -Ravanan007/my1projectAi -cbhasker/bhasker1323genAIApp -wanxing28/QQsign -TNR-5/Testbkt -venkat8020/MyGenAiChatBot -kosurisiva/MyGenAiChatBot -KunalKharalkar/imagetostory -vinayarukala31/mygenAiAvatarspeech -patilyash22/ChatBotWithOpenAILangChainAndPlayHT -DEVINKofficial/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator -kkumarkumar/MyGenAIchatbot -ishvalin/what_is_it -nithintechie/NithinGenAIAvatar -naeemalbustami/voiserec -cbhasker/bhaskergenAIAppSpeech -CofAI/picscore1 -minoluusa/chatbot1 -TRaw/dtet -locomotive/taxonomy-ml -ehristoforu/Testbot -w601sxs/b1ade-1b -JohnCalimoso/animalbreedidentificationversion1.5 -awacke1/HTML5-Aframe-Flight-Sim-Test -Srikanthpichika/SreeGenAIChatBot -Harshitthaa/Harshitthaamyfirstai -menghanxia/ReversibleHalftoning -pedrohc/productcounter -BaddaAshok0265/AshokGenAI -DaniilMIPT/greenatomtest -Rishwanth08/Naniai -housexu123/bingo-2.0 -VickyKira/NASAGPT -allknowingroger/Image-Models-Test56 -callmerk1986/AyurGenie -834188divi/cardiffnlp-twitter-roberta-base-sentiment-latest -Subbu-2004/MyNewAiAvatar -Ahmadjaved/Genaispeech -nicolehuangyx/stabilityai-stable-diffusion-xl-base-1.0 -priyankachinni/priyagenai -DUOMO-Lab/TransGPT -kamranahmad92/GradioLanchainSuperChatbot -ehristoforu/runwayml-stable-diffusion-v1-5 -rekhab0203/mygenAIChatbot -jbilcke-hf/360-server-1 -CofAI/Kemal-Diffusion -tejatrivikram/MyGenAIAvatar -kamranahmad92/GradioLanChainSuperChatBotAi -kamranahmad92/GradioLanChainSuperAIChatbot -Prasanthi123/myaiavatarammu -kowsik/MygenAIApps -fierce74/Galaxy_classifier -Nikithaniki/NikiGenAI -Mr-Hacker/GenAiTest2 -KOTTHADAKAVYA/mygenAIchatboard -Bumpeet/faceTracking -Luckya/MyGenAi -Haswanth/haswanthpalepu -likhith263/mygenAIchatbotproject -Varun6579/Lemma._tech -CosmoAI/ChitChat -alihug/GradioLangchainBotAI -unik-style/unik-ml -mugilan0610/mugilanbotchat -Avinash-12035/MyGenAIChatBot -giridharvaruganti/facial-keypoints-detection -Swatantradev/mynewgenAI -royal-16/Mr.Royal.newgenai -SkKalit/KalitGenAiChatbot -RohanAi/low-light-enhancement -CofAI/openjourney -poojasree2003/aiproject -Jeevika/MyGenAI -MyGenAIchatBot/Puji -Sandy0077/MyGenAISpeechBot -DVLH/nlpconnect-vit-gpt2-image-captioning -ehristoforu/Teststudio -ehristoforu/Chatuitwst -shiditya2003/MyGenerativeshiditya -SanjayreddyBaddipadiga/MyfirstGenAIChatBot -gagan3012/QalamV0.2 -hechenyang/bingAI -freddyaboulton/lk99 -love3510189/NewBing1 -greyskyAI/ChatRAS -EswarBilla/EswarGenAiChatbot -mani143/ai -weiyao255/NINGAI -portal/Xenova-Semantic-Image-Search -balenireekshana/MyGenAI -andy-rui/bingAI -bigcode/in-the-commitpack -Bingyunhu/hoping -lyln/bingAI-lyln -t110-ai-admin/InspectLens -luxuedong/bing2 -raskell/livebook -nyh/newbing -Ashish17/Ashish_Open_Chat_AI_17 -Konglinu/bingai -Dave37/gradiolangchainChatBotOpenAI -Enigma007/Classifier-Fasttext -dafeidun/dft -Chakri-kollepara-5/Mygena -kainy/rvc_okiba_TTS -Chakri-kollepara-5/ai -qsh612/bingAI -BBrother/NewBingAI -qushui/bing -NiuTaipu/moe-tts-test01 -mdkaif/genAIchatbot -Ash2219/AIchatbot -mygyasir/stablediff -sitong608/bingAI -tharun49/TharunAIChatBot -Sowmyashetty/Mygenaibot -Balalaxmi/JarvisAIchatbox -likhi993/MyAIchatbox -sathwik21/MyGenAichatbot -efchbd1013/animal_classification -supercyx3/ChatSydney -Sandiago21/automatic-speech-recognition-italian -tharun49/TharunAISpeech -Deeksh/genai -ahdsoft/Persian-Topic-Modeling 
-pikto/prodia -Dineshdc/MygenAIChatbot -ahdsoft/persian-keyphrase-extraction -balamanikandan/ai_project -dishanttembhurne/myGenAiChatbot -vinaynani/genchatbott -tharun49/TharunGenAISpeech -RiyaJangir/MyAIGenTool -Manickam/MyGenerativeAIApp -Swamyajulu/MyGenAIChatBot -IntSpace/llama-2.70b -Sandiago21/speech-to-speech-translation-german-2 -Uppuluri/mychatbotai -IntSpace/README -vlikhitharaj/mygenAIchatbot -KunamVishnu/MyGenAiChatBot -sanjayvy/ChatBotAI -pirahansiah/ComputerVision -nunekeerthi1/MyGenAIChatBot -Maharaja36/myGenAIApp -rodragon737/ocr_reader_space -ehristoforu/sbinterface -CleanML/demo -Janardhan2003/MyGenAIChatBot -Yogesh19/MyajiAi -Shravani585/gradioandlangchainchatboot -G-Deepika/MygenAIAvathar -PROJECTAIGPT/AIAvatarSPEECH -kananj/Daytona-Beach-Ambassador -Arun1217/mygenaiapp -Datasculptor/MusicGen -omsree/myGenAIapp-1 -Dave37/voicebot -UjwalBingi/mynewai -Naveejnk/MyGenAIChatBot -Yogesh19/Voiceai -Maharaja36/MyVoiceAssistand -VIKASNI1/VOICEGENAI -yenumulanarendraprasad/mygenaivoicebot -anthonymikinka/gorilla-llm-gorilla-7b-hf-delta-v1 -Ajaymekala/gradiolangchainChatBotOpenAI-1 -jayanthrahul/myaiownvoice -syrilion/syrilionchat -karlkode30/scn_detecta -Violetmae14/Violet -Violetmae14/Text-to-AnimeStudioVideo -swapniel99/cifar10 -PeepDaSlan9/AutoGPT -Guilherme34/Jennifer-Llama270b-Chatbot-with-vision-v1 -sixsixsix/BingAi -liang1213877964/ai -xiaolv/claude2_xiaolv_api_updata -Viswa934746/AIBALA -rakesh99/myvoicebot -Viswa934746/Sorryda -Bala2-03-2003/MygenvioceAI -Sivanraj/MyGenAIApp -pvanand/RASA-chat-interface-streamlit -mygenaisagar/MyGenAIsagarBot -LokeshMadaka/MyAIChatBot -ehristoforu/txt2img.neu -Shubham2003/chatWithPdfs -WL007/WL001 -satish2004/myaichanti2 -CyberHarem/find_my_waifu -Chandrasekahar2k/KVCSekharGenAIBot -dengmouren/minlik-chinese-alpaca-pro-33b-merged -ManjunathNili/manjuai -khadeer/skkhadeer -omarchik/az -LP-art/Bing -xnetba/ai-stable-diffusion-Text-to-Image -pratikshapatil0220/GenarativeAIChatBot -404ERRORms/bingAI -lunarflu/LevelBot -alibidaran/Davinci_EYE -Lijiahui/bingAI -Rizon-Lin/NewBing -jeycov/IADERM-UTOPIC-PFIZER -FireFrame/werz -laoniutyyugyiib/vuvuy -Nandhusnm/testing -MarkMcCormack/Automated-Grading-Dashboard -Hanqix/oxford_pet_classify -egvpprojects/Text-2-Speech -Rajagopal/ImageBind_zeroshot_demo2 -jayanthrahul/bhavanavoice -aloatalpine/streamlit_v3 -sagelewis71/ai-lawyer -yl12053/so-vits-4.1-Kitasan-Black -bobmunzir/meta-llama-Llama-2-70b-hf -zhangyd/bingo -lucas-w/mental-health-10 -wynb1314/bingAI -EtTKSf/uu -Yunshansongbai/SVC-Nahida -spiderdio/bingbing -Sowmyashetty/MyAichatbot -ANILYADAV/mygenaichatbot -Adieudale/Adieudale -diffle/sd-1.5 -padmanabhbosamia/Cifar10_Classfication -diffle/sd-2.1 -diffle/README -dgnk007/dgnk007-crow -multiple-moon/README -ALR03/gradiolangchainChatbotOpenAI -ehristoforu/Ultrasdspace -wall-e-zz/stable-diffusion-logo-fine-tuned -diffle/oj-4 -diffle/kandinsky-2.2 -Ritori/Ritori-Yura_GPT2 -llds/shengweibing -allknowingroger/Image-Models-Test60 -ehristoforu/Hubsd -TejaSree/gradioGenAI -Ashwanthram/myGenVoiceBot -TNK21/Text_summarizer -taidi/bingai2 -DakMak/gradio-start -oliverdixon/BereaAI -Saugatkafley/Bard-cover-letter -TNK21/Question_Answering -OscarLiu/MybingGPT -filehost/txt -bhanuprakash99/MyGenAIChatBot -MyGenAiUser/MyGenAiChat -bhanuprakash99/mygenAIAvatarSpeech -jeevankumar-s/stabilityai-stable-diffusion-xl-base-1.0 -madhumahima/MyGenerativeAIproject -abhijithkota/my_gen_ai_page -Tetel/chat -diffle/webdef -lelafav502/fallpt-chat -Ritori/TTS_Yui -YanzBotz/YanzBotz-Models -Q-bert/FaceGAN 
-Meltedmindz/nerijs-pixel-art-xl -EsoCode/text-generation-webui -jialewanga/jiale -ASJMO/freegpt -ehristoforu/T3 -bnkkkkknn/bnkkkkknn -Belshia/shia -nugrahatheo/Prediction-of-Credit-Card-Default -tsxc/newbing -damian0815/Erasing-Concepts-In-Diffusion -moyeli/BingAi -ehristoforu/imggend -dpaulsoria/AnimalDetector -ehristoforu/Diffehsj -larryyin/experian-bot -ehristoforu/Hwhswj -ehristoforu/Iro -AnjaneyuluChinni/AnjiChinniGenAIAvatar -louisedrumm/TutorBot -RamziRebai/hf_sum -DonDoesStuff/orca-mini-3b-chat -sanjay6886/SANJAY -jjw0126/Multi-ORGPT -puuuw/pu -ehristoforu/Dicto -irfank/katanaml-donut-demo-3 -mrneuralnet/P-DFD -junkmind/SOTER -rywiz/suno-bark-small -spatialgeneration/musicgen-mbd -AzulaFire/SparkDebate -shutterfree/newbing -sdfhg5243/segmind-tiny-sd -01zhangclare/bingai -sh20raj/sdxl -zhiyin123/MyBingAi -zifyu/public-newbing -mygyasir/XL -ysui10086/yvshengAI -B2gan/LLM_Can_See -ldhldh/demo -sakay/bingai -iberob/nerijs-pixel-art-xl -4th3n4/TraDeX -MiSuku/Suku8008m -Uncleming/AIGPT -boze7/newbing -eghth/wdferg -knotmesh/deepset-roberta-base-squad2 -ypf99/chatgpt -WhiteKnightAI/togethercomputer-LLaMA-2-7B-32K -s3nh/s3nh-chinese-alpaca-2-7b-GGML -Asmithayellow/Asmi -taesiri/Docx2Latex-Farsi -hans829/newbing -JenitaChristopher/MY_GEN_AI -UJCONTROL/bingAI -Uncleming/AiAi -Aadarsh4all/ChatWithBear -mrneuralnet/P-PD -sagiliManoj/ManojGenAIAvatar -Slammed96/Monero-WizardLM-Uncensored-SuperCOT-StoryTelling-30bb -Kushiii112/stabilityai-stable-diffusion-xl-base-1.0 -s3nh/senh-WizardVicuna-Uncensored-3B-0719-GGML -antonelli/outsidellms -mipbkhn/BreastCancer -Hunzla/whisperaudio -LIHUI123/LIHUI123 -Ggxcc4566/stabilityai-stable-diffusion-xl-refiner-1.0 -LUCKky/QQsign -Dharshinijayakumar/Dharshujayakumaraiapp -allknowingroger/Image-Models-Test64 -ma52525/bingai -zalaingjun/QQsign -libhost/tech -womeik/binbin -Ajay-user/Optical-Character-Recognition -TNR-5/semantic-image-search.img -LH66/BingAI -TNR-5/Image-Semantic-Searchj -ahdsoft/Persian-Automatic-Speech-Recognition -Izal887/Konci887 -sanniu/newchat -libhost/img -corpvs/test -LyrithAkari/Bing -ehristoforu/Imglibtest -libhost/img.lite -dbis/AI_Doctor_Bot -SrikanthPhalgun/Cifar10_ERAV1_GradCam_Demo -Curranj/chatbot -alcanodi/stabilityai-stable-diffusion-xl-base-1.0 -recaptime-dev/README -aniketingole92/gradiolangchainChatbotopenAI -MichaelWelsch/FreeVC -diffle/sd-xl.ui -EddyCode/Portfolio -billusanda007/Shortlisted_Candidate_Email_Sender -Hypersonic0945/GenAISample -Jack1804/stabilityai-stable-diffusion-xl-refiner-1.0 -Aziizzz/ChestXrayClassification -DHEIVER/Segmento_de_Angio_Coronariana_v3 -PeepDaSlan9/De-limiter -nicolasdec/cabrachat -kamidara/lolipaoi02 -gary109/HaleyCH_Theme -sq57/newbing -calvinchaochao/text_generation -akashdhiman79830/MYGenAIVoice -Poornima-fullstack/PoorniAI -pigling/chatGpt -red1xe/codeGPT -OldP1ng/QQsign -Andyrasika/Andyrasika-avatar_diffusion -839871171w/newbingAI -seok07/Voice-Changer1 -fbeckk/cell-seg -Error114/bingAI -naotakigawa/test-qatool -raylander/Infinite_zoom_SD -MilliMalinga/moghel-bot -Aspik101/Polish-vicuna-13b-v1.5 -bhfr/bing-ai -chenyihang/newbing -Justin-Choo/Diffusion50XX -shezanbaig/myLlama2 -yuxin099/fjyuxin -konghl/gpt -yiyi12123/BingAI -awacke1/MTBenchmarkForChatGPTMetricsScoring -ArcAhmedEssam/CLIP-Interrogator-2 -Sakil/research_paper_Question_answer -OmarSamehSaid/Text-Summerization -Augustya/ai-subject-answer-generator -slogers/openai-reverse-proxy -s3nh/totally-not-an-llm-AlpacaCielo2-7b-8k-GGML -shawhin/vanilla-chatbot -matthoffner/AudioCraft_Plus 
-asdastreer/stabilityai-stablelm-base-alpha-3b-v2 -RoversX/Nous-Hermes-Llama-2-7B-GGML -gebain/easylook -lkjhn/qllsdsg -lcw777789564/panzuowenji -oriname/orimono -zcy123/newbingzcy -JPMadsen/JP_Audio -hilloworld/chatgpt -mingu600/Tristana_reroll -saikumar622/testing -34we12er/newbing -mengmeng2/bing -Izal887/rvc-ram12 -kepl/add -kepl/g -Bishan/Speech_To_Text_Hindi -difinative/AIBuddy -pompuritz/keroppurin -Nikita22121671/stabilityai-stablecode-instruct-alpha-3b -Dralkkin/Lorule-Proxy -jordonpeter01/MusicGen -aravind123456789/OPENAIAPP -Filmor/Bot -ilikezx/newbing -RameshBanala/aivoicebot -CForGETaass/vits-uma-genshin-honkai -Aravindsssss/GradiolangchainChatBoatOpenAI -Aravindsssss/gradin -Zannriell/TextChatBot -JethroNatividad/GPT4ALLdupe1523623 -chenxc/qweqwe -billusanda007/Q-Maker -lukelike1001/poison-leaf-tracker -Ripo-2007/Ripo-2007-dreambooth_alfonso -willholt/JAMA_GPT -SanthoshG143/Mychataptaibot -shashi141/MyGenAIChatBot -awacke1/CardWriterPro -nicoladisabato/chat-summarization -lizhaoyin/newbing -chompionsawelo/whisper_transcribe -Zengwengen/nb -BalaBhaskarudu/Balu -Sefray/PylenaLineDetector_ICDAR2023 -PeepDaSlan9/Gryphe-MythoMix-L2-13b -lukelike1001/PlaceAnalysis -allknowingroger/Image-Models-Test72 -Sudhir87/Intervupro.ai -thelou1s/yamnet_test -rsh123/newbing -thunder-007/weld-canvas -0x876/Yotta_Mix -Chakri1997/ChatGPT-prompt-generator -gordonchan/h2oo -wilson1/bingai -awacke1/QuoteBotForQuotesMeditation -DQChoi/image_sticker -awacke1/Quote-Bot-AutoRepeater -waheedwaqar/ToyotaChatBot -canaxx/donut-mrz -parsa-mhmdi/persian-asr -lysine/auscultate -chixiao/chixiaobing -johnhelf/codeinterpreter-api -trakss1436/DocTalker -ff4214/Newbing -cowboyonmars/nerijs-pixel-art-xl -amine1956/NumbersStation-nsql-llama-2-7B -jordonpeter01/MusicGen2 -AlexKorGKLT/webui-cpua -umn-msi/fatchecker -zombieofCrypto/image_interpreter -lullNB/lullNew -lizi136/bingal -TrungTech/finBert -Sid-manale643/medLLAMA -Gators123/fusf_pdf_2023 -ssb4567/ssbflowise -ajsda/newAI -ajsda/newbing -MarBeanInc/MarBeanInc -Konglinu/myai -hnliu/GPTagger -flatindo/scaler -AhmedMagdy7/avatar1 -alecinvan/image-captioning-tts -cowboyonmars/Linaqruf-animagine-xl -litest/newbing -stable-bias/stable-bias -liliyRehtina/Stable-Diffusion-XL-two -idodo/experiment -eugenkalosha/Semmap -wy213/yangAI -liliyRehtina/PhotoReal-V2-with-SD-Upscaler-four -Jamel887/Rv-percobaan887 -xSaXx/llama2-70b-nochat -robingupta/Salesforce-codegen25-7b-instruct -jtpotato/firetrace -Justin-Choo/Grapefruit_WEB_UI -harish03/physicsv11-litbot -Sal-ONE/AI_Code_Gen -Justin-Choo/Lemon_WEB_UI -ashuNicol/Steam-game-Recommendation-System -warmazzzzz/bing-ai -compasspathways/Sentiment3D -JoPmt/Short_Bedtime_Stories -vishnusureshperumbavoor/vspbot-falcon-langchain -Satyam-Singh/garage-bAInd-Platypus2-70B -aiswaryasankar/entelligence.ai -DataDreamweavers/LegaWeaver -NSect/Image-Models-Test62 -NSect/RealisticPhotoModels -hamza50/document-reader -itzn0tm1les/Venuschub.ai -neuraldeepnet/NeuraldeepAI -Denevan/BingAI -cleaner/bing -hardydou/t2 -wangboyi/bingAI -Gradio-Themes/gmjk_qiangshou_gradio -KEINIE/Emory_Oxford_GER_Expert -zhuj/goodwork -mygyasir/fast_diffusion -Androidonnxfork/CivitAi-to-Diffusers -venkat-natchi/yolov3_obj_detector -Shreeraj/Metal_Defects_Classification_Application -xiaolv/claude2_xiaolv_api_file_chat -brainblow/beat_remixer -limcheekin/orca_mini_v3_13B-GGML -Kakashi098/Narrative -SuYuanS/AudioCraft_Plus -xikacat/xikacatbing -OnabajoMonsurat/Medical_Diagnosis_Chatbot -qudehu123/BingAI -RobotDall/WizardLM-WizardMath-70B-V1.0 -0019c/NewBing -d5gd5d/World 
-Ekitl02/stabilityai-stable-diffusion-xl-base-1.0 -Gaofish/AI_bing -yaolaoda/nw -Deepjyoti120/AssamTrainData -kepl/gpt -insaafS/AI-Story-Gen -inkyiyo/ikun -kingtest/BingAI -abc6666/newbing_AI -nanazi/newbing_wang -en-gin-eer/StableDiffusion-BaseModel-Lora-Graph -udaykalvala1234/Uday321 -camenduru/9 -YiLin1/Once -dalitongxue/dalitongxue -Madhur-01/Question-Answering-system -bai54188/BingAI3.0 -Justin-Choo/QuickGen-Anime -Bala2-03-2003/BRAHMAMAI -jiefeng222/bingAI -sadhaw/11212 -johnsamuel/RAGTest -LiuZhiwen0706/IELTS -Yash911/DiabetesModel -GOVS/Liu_Sir -Chirayuhumar/MyGenAIChatBot -roain/bing -SokWith/nbing -mygyasir/genious_bgremover -universalml/fast_diffusion -Chilangosta/text-to-pokemon -roshnirav1891/gradio-multilingual-translator -flatindo/generate2 -nmfasano5/content_based_movie_recommendation_system -flatindo/Image-Diffusion-WebUI -MercurialAi/Embeddings_Chat -qtoino/form_matcher -Xuan2060320350/Bing-1 -101-5/Bing-New -chansung/LLaMA2-Story-Showcase -Justin-Choo/Replicant_WEB_UI -bmhk/xiaobai -Nguyens/mlops-demo -rf5860/bg3_character_generator -adityapatkar/chatcsv -devdata/kapu -q896656681/xiaoxiannv -galaxy001/biying -Chitranshu/Dashboard-Dmart -PeepDaSlan9/candle-llama2 -Yash911/t2i -Mashir0/pximg -tym2008321/FCNB -Chitranshu/Dashboard-Zomato -trttung1610/musicgen -mikeee/s3nh-garage-bAInd-Stable-Platypus2-13B-GGML -t13718236382/newGPT -wolfzer/private-proxy -huangbatian/newbing -NN-BRD/OWL-ViT -amanatid/Adi_The_ArxivGPT_with_Voice -Tihsrah/Hinglish-Text-Normalizer -OIUGLK/bingo -JMCREATE/README -camenduru/10 -camenduru/11 -flatindo/4x-denoise -npc0/BookSumBeta -Josiah-Adesola/Text-Summarizer-Bart -PeepDaSlan9/SDXL-artists-browser -NN520/AI -hiihhiii/AI_Chat_Bot -Felix123456/bingo -iamstolas/STOLAS -KPCGD/bingo -7hao/bingo -tang155/bingo -aphenx/bingo -hdhzk/bingo -wilson1/bingo -regarex/SDXL-artists-browser -hzwluoye/gpt4 -sdhsdhk/bingosjj -luzhanye/bing -gotgitgood/33.GZUZ.33 -DhilshaM/MyGenAI -gypq/gypq3 -Charliee/BingAi -Lbin123/Lbingo -cccc-c/web-ui-pub -lightli/bingo-newbing -hzy123/bingo -allknowingroger/Image-Models-Test87 -jiejiejie0420/bingo -ExpUnGeD404/Bamber -chronopt-research/ViTExCo -michael2008bj/demo1 -amber0097/amberSign -amanatid/Melissa_The_PubMedGPT_with_Voice_and_featuring_answers -awacke1/KnowledgeDistillerToolMaker -yz333/real-bing -Makiing/coolb-in-gtest -Nihanvi/Text_summarization_using_transformers -sdhsdhk/bingo111 -diffle/license -parkyzh/bingo -TotoB12/llama2-7b-chat-ggml -doevent/df -pinkq/Newbing -srajan-kiyotaka/Bears -analist/qa_table -jt5d/docker-test1 -DataScienceGuild/ARIMA_test -PSMdata/langchain-llama2-7b-chat -Atualli/mediapipe-pose-estimation -zhoujiaxin/zhoujiaxinchatgpt -DHEIVER/Segmento_de_Angio_Coronariana_v5 -k2-fsa/automatic-speech-recognition-with-whisper -2023Liu2023/bingo -xuetao/bingo3 -fffffu/bing -unidata/Chinese-Llama-2-7b -lixq/bingo61 -AhmadHakami/Alzheimer_image_classification -yangogo/bingo -cozyanduofen/bingo -awacke1/PytorchStreamlitNeuralNetUI -Sourabh2/detectron2-segmentation -sazumiviki/meow2 -allknowingroger/Image-Models-Test89 -awacke1/MixtureOfExpertsMOEAnalysisForLLMRoles -shawn810720/Taiwan-LLaMa2 -mygyasir/Image-Models-Test92 -bupenghui/123 -srikanth-nm/ai_seeker -mikkoar/marco -rushankg/discovercourses -Grazon/ChitChat -open-spaced-repetition/fsrs4anki_previewer -fgenie/scamtext_PAL_self_consistency -Jayavathsan/ChatGPT_CloneWithSummary -uSerNameDDHL/bingo -miyaaa666/bingo -masakhane/dialogue-chat -StarCore/PaddleOCR -raul-padua/Barbie-RAQA-Application-Chainlit-Demo -themanas021/fake-news-gradio 
-TH5314/newbing -MikeTrizna/bhl_flickr_search -mlgeis/ArXivRecommenderSystem -jokguo/GPT4 -Waqasjan123/CompVis-stable-diffusion-v1-4 -sharmaditya/chatapp -Redgon/bingo -praveenku32k/SimilarWordFinderApp -abouuuud/meter2poem-1 -KiranK7/chatBOt-4 -othnielnaga/stabilityai-StableBeluga-7B -ds520/bingo -allknowingroger/Image-Models-Test93 -awacke1/ChatGPTPromptRoles4CoderSTEM -hesha/text-embeddings-transformers -limcheekin/ToolBench-ToolLLaMA-2-7b-GGML -srisakthi2821/SriChatBott -reach-vb/transformers-musicgen -sgxz/bingo -wanghuoto/gogoai -MyGenAiUser/MyGenAiVoiceChatBoat -siddhartha-mahajan/Semantic-Search-Engine -osanseviero/transformers-musicgen -AI-ANK/blackmirroroffice -gvw/js-space -harpreetsahota/RAQA-Application-Chainlit-Demo -Munna0912/URL_CLASSIFIER -ieeecsuna/ieee_cs_tools -raelfromgenesis/oai-proxy -sqc1729/bingi -wy213/213a -huytx267/function_retrieval -lpinnova/whisper_model_speech_to_text2 -pycoming/bingo -hanzza/audioRecognition -Prashanth35/Chit_Chat -aaaaaabbbbbbbdddddddduuuuulllll/Arabic_poem_classifier -aaaaaabbbbbbbdddddddduuuuulllll/Ashaar -Rainy-hh/Real-ESRGAN -foduucom/pan-card-detection -GeorgeOrville/bingo -whxxiaojiang/bingai -lcf001/newbingai -t-hugging-face/Fooocus -A00001/bingothoo -feedexpdition/gardio-patient-clinical-summary -mygyasir/SargeZT-controlnet-sd-xl-1.0-depth-16bit-zoe -PeepDaSlan9/Deci-DeciCoder-1b -will1885/will -mygyasir/digiplay-DreamShaper_8 -mygyasir/digiplay-AI-infinity-V1-fp16 -mygyasir/digiplay-AbsoluteReality_v1.8.1 -mygyasir/digiplay-helloRealisticMan_v1.0beta -ljjggr/bingo -wydgg/bingo-wyd-ai -katahdin0/pet_test -afasdfas/cringe_model -KindUnes/ImageNet -saitejad/llama-2-gen-with-speech -wrs/nb -liliyRehtina/color -ridges/speech -mygyasir/EliKet-lora-trained-xl-colab -mygyasir/FFusion-FFusionXL-BASE -zhangchuntao/ttg -allknowingroger/Image-Models-Test99 -ClearLove443/Robby-chatbot -allknowingroger/Image-Models-Test100 -leilevy/bingo -tassd/bingai -pixiou/bingo -vishvara-sharda/book_recommending -tanishqvashisht/catVsDog -tanishqvashisht/emotionDetector -gradio/dpt-depth-estimation-3d-obj -gkw2004/QQsign -sukiru/rvc-Blue-archives -Ernar246/OpenAI-Reverse-Proxy -Sambhavnoobcoder/StyleForge -wwwwwwww2/bingo -awacke1/HuggingfaceEvolution -g0blas/paper_task_suggestion -abascal/chat_with_data_app -awacke1/VotingCrowdsourceEvaluationApps -jinshengNuaa/test1 -mujicloud/nodeproxy -yaosynge/bingAI -Nee001/bing0 -wuhuik/bingo -yl12053/so-vits-4.1-Matikanefukukitaru -tabeina/bingo1 -dcarpintero/nlp-summarizer-pegasus -majiaoyu/pixelparty-pixel-party-xl -dovedovepigeon/yans-hackathon-baseline-image-generation -HeyAxolotl/Bio -dovedovepigeon/yans-hackathon-baseline-image-edit -VishnuVardhanBR/chatbot -firica/assistant -MohamedAlgebali/VideoQuERI -kargaranamir/LangID-LIME -SHSH0819/event_detection_app -fuloo/newbing -heiyubili/bingo -YlcldKlns/bing -zxy666/bingo-chatai666 -defengxiang/BIngAI -Frankapp/bingai -SHSH0819/FinancialNews_Summarization_APP -GXSA/bingo -dolphinchat/README -gauss314/vllc -ehristoforu/chat-client -aielon/first-chatbot -awacke1/PytorchKerasCompareContrast -Pengyey/bingo-chuchu -ljh1212/ljhai -t13718236382/bingoGPT4 -awacke1/Llama2ProWriterDrafter -Sourabh2/English2Manipuri -awacke1/Lightweight-Text-to-Image-Generation -moonbirdbooks/take-shelf-picture -innovatorved/whisper.api -Akhil-77/Toxicity_Detector -huaiji3y/bingo-Public -ehristoforu/runwayml-stable-diffusion-v1-5k -awacke1/MultiplayerTest1 -awacke1/MultiplayerTest2 -krafiq/deep-neural-networks-for-navier-stokes-equations -laocao1798/laocaoAI -james21/SD-XL -lhnrx/bai 
-xiaoei/203 -TochProud/QQ -arch-123/bingo -luxuedong/lxd -inuterro/hwata -whgwd2023/bingo -fffiloni/bark-transformers-example -aaboutblankk/digiplay-CamelliaMix_NSFW_diffusers_v1.1 -Pranjal-y/data_scraping_analysis -zhang-wei-jian/test -zhang-wei-jian/docker -allknowingroger/Image-Models-Test103 -allknowingroger/Image-Models-Test104 -TEnngal/bingo -nigel-chen/bingc -mygyasir/digiplay-NextPhoto_v3 -hudsonhayes/Multi-Doc-Virtual-Chatbot -SWHL/RapidASRDemo -aupfe08/stt_or_tts -Qiushixz/NewBing -zhoupin30/zhoupin30 -thov/medicalSegmentation -amongey/stable-diffusion-webui-cpu_duplixx -jhwen/bingo -Groenewaldt/stabilityai-stable-diffusion-xl-refiner-1.0 -cncn102/bingo1 -icayir/flofi_mini -themanas021/Image_Caption_Generation -radames/transformers-js-svelte-example-app -jekyl/JosefJilek-loliDiffusion -Brainclub5000/wesley7137-Llama-2-13B-Nous-Hermes-vicuna-uncensored-mastermod-spych -mrm8488/llama-2-7b-chat-cpp -Xeaser/rvc-tes -fsgmas/bingo -Dilmurat/bingo -allknowingroger/Image-Models-Test106 -unday/bing -nugrahatheo/Credit_Card_Fraud_Detection -AEUPH/AethericGPT -smf2010/ysfj -TEnngal/TEnngal -whxxiaojiang/bingai1 -hudsonhayes/PerformanceSummarisation -t13718236382/web-ui -Swapnilchand/NewSpace -kaanhho/speech-to-speech-translation -atwk-llm/README -PyaeSoneK/chatchat -universal-ml/Dream-Big -Katie-portswigger/Portswigger -wardlee/bingo -batuhantosun/Guided-Backpropagation -A-Celsius/ADR_Predictor -ImagineAI-Real/idefics_playground -dongsiqie/pandora -xjsyy/bingo-gpt -daddyjin/TalkingFaceGeneration -Omnibus/idefics_playground -rayman-studio/README -PacBio/NewBing_BioTree -GurudattaBS/GenDiseasePrediction -lekkalar/chatbot-pdf-gpt4key-langchain-chroma-prompttemp-tabs-dataframe-ocrmypdf-sqlite-csv-returns-json -allknowingroger/Image-Models-Test110 -allknowingroger/Image-Models-Test111 -XiangJinYu/Chat_PDF -TushDeMort/yolo -LIUjh520/bingo -Satyam1124q/genaii -Aaron299/bingo -d3vindia/RAPODIS -threadxl/bingo -znskiss/Qwen-VL -hanskabvw1/bingo -awacke1/LawsofSuccessandPower -Together1415/bingo -wonbeom/prompter_day_demo1 -RajkNakka/speech-to-speech-translation -hunger11243/VITS-Umamusume-voice-synthesizer -howrardz/bingo -Mohitsaini/app-alzh-disease -awacke1/PDFViewerwithUpdatesWorkBench -mygyasir/masterful-gligen-1-4-inpainting-text-box1 -mygyasir/stablediffusionapi-dreamlike-photoreal1 -MarkuzML/swap_face -pg-13/gettinglost-gui-test -cyhcctc/cyhbingo -dmeck/RVC-Speakers -ymc666/Sydney6 -Brightmzb/test -VoyagerYuan/Transformer_CatVAE_and_Signal_Game -littlesujin/littlesujin -CrafterHide/Sariwon -Adithedev/Keyword-Extractor -Hfgjhh/gpt -KaygNas/cut-it -open-spaced-repetition/fsrs4anki_simulator -jgurzoni/image_background_swapper -Grassss/nb -ggffdd/DeepDanbooru_string -avilaroman/escucha -ll0z0y/bingoa -LuoYQ/bing -K00B404/langchain-llama2-7b-chat-uncensored-ggml -padmanabhbosamia/Pascal -allknowingroger/Image-Models-Test114 -dingding27/bingo -allknowingroger/Image-Models-Test115 -atharvapawar/Email-Generator-App-Langchain-LLAMA2-LLM -penut85420/OpenCC-Converter -Toaster496/HugChatWithPlugin -DYSHITELGOOGLA/app -ggffdd/White-box-Cartoonization -awacke1/UnitedStatesMapAIandNLP -yigithan4568/bingo -ivylin0805/microsoft-codereviewer -programehr/GPT4ALL -renumics/commonlit-student-summaries -jbilcke-hf/speech-recognition-server-1 -anzorq/vits-kbd-male -NEXAS/NEXAS-stable_diff_personl -johanmichel/stabilityai-stablecode-instruct-alpha-3b-2 -lo0ng/bingo -Alpaca233/ai-stable-diffusion-Text-to-Image -gigaShrimp/NousResearch-Nous-Hermes-Llama2-70b -Alpaca233/SadTalker -mrolando/text_to_sound 
-mohamedemam/QA_GeneraToR -Lerdweg/Energie-NRW -iabualhaol/ai-score-openai -shoupeng/bingo -lzglyq/bingolzglyq -caoyongfu/gpt4 -supercyx3/gpt -nugrahatheo/Customer_Churn_Prediction -toiram/goofyai-3d_render_style_xl -gourib/llama_demo -kasunx64/codellama-CodeLlama-34b-hf -mmecheri/Rakuten_Streamlit -shuaiqiyiliu/newbing -BRICS/README -nuttella/test -allknowingroger/Image-Models-Test116 -allknowingroger/Image-Models-Test117 -TouchFrosty/QSign -AntNikYab/NaturalLanguageProcessing -awacke1/YouTubeTranscript2Insights -sarthakrw/web-query -Shubhy/ReliefRouteDemo -jitubutwal1441/image-to-story -XFcontinue/bingo -LDJA/iris -miniv/bingai -trakss1436/PictoGen -Linhao416/Bing -sayurio/Dynosaur-dynosaur-llama-7b-superni -kargaranamir/selenium-screenshot-gradio -Tirendaz/Text-Classification -giseldo/story_point_estimator -teganmosi/codellama-playground -shaolin123/soulteary-Chinese-Llama-2-7b-ggml-q4 -yoru-tomosu/Translate_video -SoUmNerd/Phind-Phind-CodeLlama-34B-Python-v1 -SoUmNerd/FlowiseAI -mygyasir/invisiblecat-junior-diffusion -mygyasir/minimaxir-sdxl-wrong-lora -mygyasir/sourceoftruthdata-sot_autotrain_dreambooth_v1 -mygyasir/digiplay-Photon_v1 -unicorn345/bingo34778 -allknowingroger/Image-Models-Test119 -jiushini/bingo-jiushini -IXIAOHEII/NB -zipp1er/bingo -lkji/bingo -ADOPLE/Multi-Doc-Virtual-Chatbot -sh20raj/sdxl2.0 -Pontonkid/simple-bot -Reself/StableVideo -GilbertClaus/VideoCutter -Happys/bing -pikto/next-chat-ui -Zannriell/hakurei-waifu-diffusion -Veucci/turkish-lyric-to-genre -Veucci/lyric-to-genre -harshitv804/Tamil_Translator -marvingabler/codellama-34b-chat -datastx/EmailGenerator -xiaowunv/bingo -vishnu654/2AV -DHEIVER/Segmento_de_Angio_Coronariana_v6 -Ajitku/BTMLabs -4com/README -eruuin/question-answering -AnTo2209/3D_Zeroshot_Neural_Style_Transfer -leoken2023/bingo -Omnibus/TTS-voice-clone -mimiboy/biying -HansSongBin/Hans -dotku/fastapi-demo -a718/jjj -themanas021/AI-TEXT-DETECTION -Grade2021/bingo -Justin-Choo/AWPortrait_WEB_UI -zhengxuan-github/NEW_bing -hhhwmws/ChatHaruhi-GLMPro -moaz-t728hw/chatgpt_4 -andromeda123/captionscraft -IshA2023/Named-Entity-Recognition -datastx/ChatWithADocDocker -IshA2023/Image-Generation -AvaterClasher/Food_Classifier_Moni -Photon08/rps_computer_vison -Omnibus/Bark-simple -Spectrez/Chest-Lung-Identification -jeycov/emociones -zoe4u/newbing -Smols/GPT4 -marffff/revrvsdjijijijij -Error114/bingo -allknowingroger/Image-Models-Test124 -taurusduan/bingo -PHZane/emrwa -ATang0729/Forecast4Muses -jackrui/Diff-AMP-property-prediction-model -CCaniggia/GPT -chunnibyou/min_test_1 -mokoringo/llama-gpt-api -themanas021/AI-Generated-text-Detection -themanas021/BERT-CASED-AI-TEXT-DETECTION -opensky-org/README -htekas/jondurbin-airoboros-l2-70b-2.1 -Gabesantos1007/NewsAgora -awacke1/Eudaimonia -awacke1/Eudaimonia-HTML5-ReadAloud -onursavas/MultilingualOCR -harpreetsahota/RAQA-with-LlamaIndex-and-a-fine-tuned-GPT-35 -saad-k7/Document-Query-Search -pzc163/Personal-TTS -wangbinhu/bingo -WanZhongYun/ChatGPT-to-Iris -JayKen/YSF-External-Testing -GordenGhost/Gorden -hoppiece/yans_2023_trans4mer -Glazastik/Infinite_Vision -keimoriyama/catoon-generator -OkayuTadano/OgiriMasters -Apex-X/Tm -ClinBAY/Safeterm_Demo -NEXAS/stock -fracapuano/AISandbox -anshu-ravi/simpson-demo -Nikhil0987/hnjii -lucaspetti/chatbot-ui -themanas021/seamless_m4t -utensil/model-memory-usage -xianqi21/bingo -KeeganFdes/stack_onnx -MAEBA96/SUMMARISER96 -hardon-server/space-diffusion-img2img-1 -qpmzonxw/bing -lethalhames/Phind-Phind-CodeLlama-34B-v2 -hardon-server/space-diffusion-txt2vid-1 
-NewBing520997/bingo -Apex-X/nono -DunnBC22/Password_Strength_Classifier_with_CodeBERT -hrnph/rvc-models -ktangri/url-classifier -srijitpanja/aip -Gauri54damle/McDFries-SDXL-Dreambooth-Lora-Model -mattricesound/RemFx -taurusduan/bing -kobayashi123/bingo -cbs-tech-strategy/chat -dfhgfh/bingAI -HenryJJ/llm_template -GZZYYP/bingo -dongsiqie/Code-Interpreter -ywl2005/2005 -awacke1/PythonicCoder-CodeLlama-34B-Instruct-HF -awacke1/SelfModifyStreamlitTest -awacke1/Docker-PEFT-ParamEfficiency -xndrChris/SD-XL1.0 -codes4aryan/LLMs-QandA-AI -awacke1/AframeHTML5Demo -Arvi/feedback_generator -Michael2008S/flowise -Vladimirktan/find-my-pic-app -greatMLideas/Realstate -fanzhuyu/Code-Interpreter -yavorbel/Phind-Phind-CodeLlama-34B-v2 -harshvardhansb/ObjectDetection -pvcodes/comment_toxicity_classifier -kingabzpro/glass-classification -sixtyfold/generate_names -stvnchnsn/chat_about_my_experience -mipbkhn/PneumoniaDetectionPublic -mipbkhn/PaddyDoctorPublic -NooneImportant/tts -sshaileshk/stylechatGPT -radames/ComfyUI-data-index -htukor/NLLB-Translator -NFBN/bingo-1 -dilums/sentence-similarity -manananan/QQsign -TakaMETaka/openai-reverse-proxy -yuntian000/bingAI -touhou-ai-experimental/research-paper -mando11/README -fengjianliang/bingo -qiufenge/bingo -jengiskhann/FahsaiChatbot-03 -huangjiefree/bingo -Vladislawoo/booktoread -LISHILEI/bingo -onemriganka/hello_space -HOLYBOY/Customer_Churn_App -tube1925/bing -awacke1/HL7-Libraries-V2-V4 -takuuuuuuu/stabilityai-stable-diffusion-xl-base-1.0 -sshaileshk/feedsGPT -DylanYan/WizardLM-WizardCoder-Python-34B-V1.0 -AdithyaSNair/Dog_breed_predictor -Alexpro1213/WizardLM-WizardCoder-Python-34B-V1.0 -SurendraKumarDhaka/Drowsiness-detection-system -Shivu2210/testSum -KazeDevID/RVC-Model -wffcyrus/llama2-with-gradio-chat -liujch1998/crystal -zelros/Transparent-Insurance -liuyang3/bingo-gpt4-2 -saicmsaicm/pet-breed -willblockbrain/blockbrain1 -captain-awesome/docuverse -soggys/repozzitory -soggys/all-in -wangfuchao/bingo-wangfuchao -hheel/bingo -kevinwang676/Personal-TTS-v3 -YangHao520/testCreateFile -cllatMTK/TransformerAnalyzer -UDE-SE/ReturnTypePredictor -mishig/embeddings-similarity -Alex89912/ai-code-v1 -themanas021/VisualVoice-Caption_to_Hindi_Speech -poetrychor/Gustavosta-MagicPrompt-Stable-Diffusion -CMU-80100/80-100-Pre-Writing-Chatbot-Section-H -el-denny/minimal -grupo10/risk-of-death-in-road-incident -XiJingPong/Perisa-Bot -TFEH/Streamlit_demo -MuGeminorum/insecta -Persival123/thisisitboiiii -onursavas/Document-Layout-Analysis-via-Segmentation -zyx1995/bingo -iabualhaol/pdfchat -dxl3811051/BingAI -WHRSTUDIO/draw-ai -nugrahatheo/Vehicle-Type-Recognition -hudawang/sydney -mkbk96/mys -poetrychor/CompVis-stable-diffusion-v1-4 -Straits/SI43-photostyle1 -YangHao520/AIGCReviewer -Demosthene-OR/avr23-cds-translation -AtomdffAI/wechatgpt4atom -Bravefe/Artist_Classification -tdnathmlenthusiast/online-course-categorize-system -rahgadda/MigrationUtility -Apex-X/GODROOP -Sreezx/Sentzi -aliceoq/vozes-da-loirinha -Saralesjak123/open-reverse-proxy -SudharsanSundar/token_edit_distance -DHEIVER/endoscopy_multiClassification -themanas021/Yt-Transcript-Hindi -AvaterClasher/Food_Classifier_Refined_MONI -mzh2077/_AI_house -GTKJF/SFE -haxenbane/20230903 -geulabddn/pk -Omnibus/text-to-vid -zhiyin123/MyBingAI6 -www23/anime-remove-background -zhiyin123/MyNewBing8 -brainblow/MusiCreator -brainblow/AI-TV -h1r41/vicuna_chat -Hasani/Specific_Object_Recognition_in_the_Wild -timmy0x-eth/Testspace -Varun6579/MyGenAIChatBot -allica/bingoasf -stunner007/movie-recommender-system 
-Hasani/Binary-Video-Classification-In-The-Wild -Hasani/Binary-Image-Classification-In-The-Wild -VikasKumar01/My_AI_chatbot -MestikonAgency/README -SenthilShunmugam2003/StudentMindscape -Osmond141319/ComfyUI-XL-Vae-Public -xiaozhengchina/bingo -YUMASUKIii/Chat -Sresti/sharma -spignelon/plant_leaf_classifier -Ranvelx/Ai2 -oulin/fastai_dog_classifier -flaviooliveira/trocr-bullinger-htr -FDSRashid/Taraf_by_Year -dibend/OracleOfNewProvidence -Karthikbolla/NEP-Chatbot -tintoretor/WealthSentiment -typesdigital/codellama -iabualhaol/Imam-Muslim -PeepDaSlan9/conceptofmind-Yarn-Llama-2-7b-128k -xcoolcoinx/ehartford-Wizard-Vicuna-30B-Uncensored -bleysg/Phind-CodeLlama-34B-v2 -Omnibus/2-button-Story-Board -dawdqd/ChuanhuChatGPT -hoalarious/edenlabs.tech-TTS -theekshana/boardpac_chat_app_test -sowmika/content-generation-text -ifey/chatdemo -NCTCMumbai/NCTC -jengiskhann/FahsaiChatbot03 -sach-en/cisco_handbook -Defalt-404/Bittensor_Explore -JanhviSingh/mentalHealthChatbot -pourmand1376/whisper-large-v2 -jhparmar/Blip-image-captioning-base -Nikhil0987/omm -techasad/geame-idea-generator -LeonOY/Leon_BingAI -zihan0516/B1 -watanabe3tipapa/web-sge-agent -zhuanjiaoover/bingo -Ashrafb/translate -qingyu-h/bingo -zzzzzc/zzcbingAi -Zannriell/cloudqi-cqi_speech_recognize_pt_v0 -hocaionline/ComfyUI_Free -mrolando/classify_images -hardon-server/remove-background-on-image -hardon-server/remove-background-on-image-def -errorok/rvc-models-en-test -DHEIVER/Classificacao.de.Imagens.de.Cardiomiopatia -WKTSHNN/simplify_color_values -onursavas/ObjectTrackingWithYOLOv8 -pen-one/bingo-pen-one -Truym/rvc-pendu -Hobe/bingo -Xiaini0/bingo-112233 -Apex-X/ROOPOK -hugo-guo/bingo-hugo -mangiucugna/self-retrospective-generator -SpfIo/Whisper_TL_Streaming_API -RahulJ24/gradiolangchainchatbotAI -alwaysbetter1314/gradio-start -mjuetz/neu -nisssdwefq/Bing -Lynx1221/rvc-test1 -N093/final_tts_mix -RahulJ24/genAIvoicebot -chengggg12/bingo -nmynxy/bingo -Harsha86390/mygenaichatgpt -Admin08077/Cosmosis -ovieyra21/audio_webui -awacke1/Whisper2ChatUsingInferenceEndpoints -Edward-Ji/essentials-of-microeconomics -DHEIVER/CoronaryAngioSegment -JianYu233/bingo1 -NSect/VALL-E-X -conanwl/bingo -NSect/voice_conversion_service -nisssdwefq/huangzisen -crystals201/Mikufans -KANATA980122/bingo -Hobe/bing -cruxx/ssyoutube -foduucom/web-form-ui-field-detection -Zheng0211/mybing -Dify-AI/README -allknowingroger/Image-Models-Test125 -awacke1/WVW-WhisperVoiceWriter -dynamicstude/RHYTHMflowise -lianxin03/Z-BingAI-QY -L1Y2/bing -Abhay834/SY_Bot -transiteration/nemo_stt_kz_quartznet15x5 -Popitmania123/Open-reverse-proxy -Anandbheesetti/MNIST_digit_predictor -AK-12/llama-gradio-chat -hardon-server/basegan1 -krishnakkindia/ehartford-Wizard-Vicuna-30B-Uncensored -parvezalmuqtadir/stablediffusionapi-vector-art -semillero/IAMIND -sana123/codenamewei-speech-to-text -place4unity/persianchat -dayachoudekar8/swalearn -Nikhatu/stable-diffusion-webui-cpu-the-best -wy213/AIwy -allknowingroger/Image-Models-Test128 -Linguistz/bingo.cn -mbazaNLP/Finetuned-NLLB-TOURISM-EN-KIN -dgnk007/dgnk007-eagle -VaishakhRaveendran/Audio_2_chat -SeyedAli/Persian-Text-NER -SeyedAli/Persian-Speech-synthesis -SeyedAli/Food-Image-Classification -gptaibox/Langflow -SoUmNerd/RemoteMojo -webpodcast/discussion -PhucBui/demo -Siyamansari/liveTranslation -arslan-ahmed/talk-to-your-docs -tdeshane/artists-of-data-science-chainlit -ZeroTwo3/WavJourney -awacke1/VideoCombinerInterpolator -wrs/nbh -aichitrakaar/prompthero-openjourney -ysheng/SSN-Soft-Shadow-Network-for-Image-Composition -iamadhxxx/Analyse 
-SuperZz/StartWithAI -heshihuan/bingo -itachi1234/rishu -Drac77/hakurei-waifu-diffusion -awacke1/DromedarySpeciesFAQ -hardon-server/img2txt1 -applsisujsus/qiangbing -Arcypojeb/NeuralServer -tshome/new_ts_model -kangvcar/RealChar -klenovich/df1 -farhananis005/LawyerGPT -mylesai/mylesAI_test -Wander1ngW1nd/EdControl -wejudging/grobid -kcswag/axiong-PMC_LLaMA_13B -OttoYu/Tree-Inspection-demo -altairv/03 -nugrahatheo/Customer-Segmentation -koubi888/uptime -techguy1423/ChatABT -masonbarnes/open-llm-search -johnskyper/demo -Emmy101/Emer -Catspin/2_ai_chat -techguy1423/ABT2 -techguy1423/ChatABT0.4 -amritsolar/NEWGRADIOAI -SystemGPT/system-rule-based-chatbot -NarendraC/MyAIChatBot -AlhitawiMohammed22/HTD_HTR -passant-labs/ailogo -KashiwaByte/SparkDebate-V2.0 -oriastanjung/restGin -fracapuano/NebulOS -AlhitawiMohammed22/E2E_OCR -Dinesh1102/Text-To-Image -weibinke/vits-simple-api -HF-Demos/bingo -harisansarkhan/Predict_Car_Brand -the-neural-networker/multilingual-language-recognition -jergra43/llama2-7b-ggml-chat-app -miittnnss/UrFriendly-Chatbot -aabyzov/playground -meapbot/testing -MohammedAlakhras/Telegram_API -alecinvan/medidoctorchatbot -simonraj/ELOralCoachv2 -XODI/guess -gforguru/MarketingComapaignTool -Samarth991/LLM-Chatbot -typesdigital/YoutubeVideotoText -Varun6579/mygenAiAvatarSpeech -yohn-maistre/respiratory-diseases-classification-cnn-tf -prueba123jdjq/inswapper_128.onnx -gatilin/mmocr-webui -Sells30/stabilityai-stable-diffusion-xl-base-1.0 -gatilin/mmpose-webui -alihalabyah/falcon-180b-demo -gekkouga/open-reverse-proxy -isididiidid/ojggg128 -higantest/openai-reverse-proxy -chenxc1029/Local-Code-Interpreter -supercyx3/nova -liangxiaohua/bingo -supercyx3/magic -SIH/building-segmentation -Omnibus/Video-Diffusion-WebUI -Micklew/music-generator -allknowingroger/Image-Models-Test134 -allknowingroger/Image-Models-Test135 -Vageesh1/bio_generator -Roblox-organization1ol/README -anurag629/botaniscan -ilhamsyahids/nllb-translation -awacke1/Text2AudioStreamlitHTML5Demo -librarian-bots/SFconvertbot-PR-dashboard -nt3awnou/embed-rescue-map -656-156/Real-CUGAN -GT-RIPL/GPT-K -Sapnil/Text_Summarization -Tayaba171/CALText-TextRecognizer -cherry0021/lab-ni-doc -mega-snowman/image-to-text -gjhjh/bingo -allknowingroger/Image-Models-Test138 -Olga19821109/Google_Palm2_Chat -HiTZ/C1_sailkapen_demoa -SystemGPT/TrialSpace -alexat/TextToVoiceEn -YangHao520/Openai_GPT_Fine_tune_VisonSystem -ViktorTsoi13/ABA_Test -Sing11104/bingo-11104 -mega-snowman/combine-images -Bakar31/MLOps_Practice_Repo_1 -Shrikrishna/Stock_Market_Trend_Prediction -bi02/bingo -0xrk/gpt2 -ilmhona/api -Tonic1/falcon-180b-demo -ryanjvi/MS-Image2Video -Lagz/openai-reverse-proxy -godelbach/onlyjitz -nathanaw/cybersec-ai -Cartinoe5930/LLMAgora -MindSyncAI/brain-tumor-classification -fffiloni/gradio-bug-clear-event -ko5cles/lyric_writer -typ12323/bingo -adrianpierce/cocktails -awacke1/VideoFromImage -openMUSE/open-parti-prompts -kottu/stabble_diffusion_sketch -ejschwartz/function-method-detector -Virus561/sdf -Quantumhealth/README -seagulltyf/chatglm3-6b -Huu-Mon12/test01 -kenton-li/maia-utsw -mauriciogtec/w2vec-app -qducnguyen/chatpdf-demo -silentAw404/bot.py -Liu-LAB/GPT-academic -jackrui/diff-amp-AMP_Sequence_Detector -YaeMiko2005/Yae_Miko_voice_jp -jackrui/diff-amp-antimicrobial_peptide_generation -okriyan/README -isotope21/Musicgen -maksimluzik/ml-learning -AzinZ/vitscn -ZDarren/huanhua -Olga19821109/falcon180b -mohamedemam/Arabic-meeting-summarization -guetLzy/Real-ESRGAN-Demo -Alfasign/fdvdv -huggingface-projects/AudioLDM2-bot -seok07/1JK50 
-Mushfi/forecasting_geomagnetic_storms -huggingface-projects/codellama-bot -AnonymousSub/Ayurveda4U -Osmond141319/ComfyUI-CalicoMixv7.5-v2-Public -fredrikskatland/finn-annonser -artificialimagination/ai_detect_v0.1 -ServerX/PorcoDiaz -samathuggingface/sarguru -samathuggingface/sarguruchatbot -asigalov61/Euterpe-X -feeme666/auto_mjw -betelguesestudios/Musicc -samathuggingface/SampleAi -allknowingroger/Image-Models-Test142 -allknowingroger/Image-Models-Test143 -zshn25/DINOv2_Depth -Lajonbot/Chatbot-Share -AUBADA-ALARABI/poetry202 -AUBADA-ALARABI/poetry2023 -AUBADA-ALARABI/AraPoet -AUBADA-ALARABI/poetry1 -AUBADA-ALARABI/poetry20233 -ysharma/xtts -sahirp/cvbeardetect -SohaibAamir/AI-Innovators-Demo-Hub -golem4300/RVC-TTS -jasonreisman/primates -dibend/individual-stock-lookup -bielalpha/nerijs-pixel-art-xl -Deepaksiwania12/Face-Landmark-Detection -fjenett/ellipse-detection-aamed -dylanplummer/NextJump -Moonkiler/Nio22 -czwQAQ/extras -bielalpha/pixelparty-pixel-party-xl -jsaplication/jsphoto -assecorML/README -thekubist/Deci-DeciDiffusion-v1-0 -stevez/b_demo_hf -agonh/Speech-t5 -happiestminds/trackbot -allknowingroger/Image-Models-Test146 -vsrinivas/Image_Generation_by_SrinivasV -bobsby23/step-by-step -Vaibhav-vinci/NewSpace -Bidwill/Sanskrit-asr -NSect/multitrack-midi-music-generator -Ohio-uchil/stablediffusionapi-anything-v5 -alecinvan/flotationHealthChatbot -SynaptInk/ajibawa-2023-Uncensored-Frank-7B -alecinvan/flotationMultiModalRobot -chrisjones1234/llm-app -fermuch/harborwater-open-llama-3b-v2-wizard-evol-instuct-v2-196k -dfassaf/newbingChatAI -RO4DHOG/Ripper -JackBAI/master_wlb_index -openpecha/TTS -pinhome/property_knowledge_qa_chatbot -rahul2001/student_performance -ShubhamVermaDS/text_to_image -arkaprav0/gpt-transcript-plugin -petros/petros-bert-base-cypriot-uncased-v1 -YanzBotz/Stablediffusion-YanzBotz -salemamassi/PdfChatBot -k2-fsa/generate-subtitles-for-videos -Autodog/nova -bincooo/auto-ai -wffcyrus/MetaGPT-v1 -usecodenaija/x-spaces-web-ui -sh20raj/Test -tez321/pipeline-visualizer -droidcv/bahd -allknowingroger/Image-Models-Test149 -phiyodr/dacl-challenge -Nunchakuka/FrenchAnonymizer -Jmansoking/newbing -DanLeBossDeESGI/Musica -sh20raj/uploader -anjaria93402/free-vps-1 -fadetube/bingo -CohereForAI/pokemon-cards-explorer -AchyuthGamer/OpenGPT -plzdontcry/dakubettergpt -Bart92/RVC_HF -DuckyPolice/DeciDiffusion-v1-0 -hanan217/QQsign -joshuasundance/langchain-streamlit-demo -salemamassi/GeneralPdfChatBot -mpshemarketing/README -TimVan1/nllb-translation-demo -hunz/web2inpaint -stallbr/microsoft-BioGPT-Large-PubMedQA -kevkev05/Chat-To-Sequence -faunxs233/zidunuer-bing -CamodDew/youtubelegal -alfabill/stable-diffusion-inpainting-2 -chyh/chatbot -allknowingroger/Image-Models-Test153 -yderre-aubay/midi-player-demo -QaryR/EcoCycleAI -arnaucas/wildfire-detection -jbilcke-hf/splatter-api -Bilalst/Gradio_Youtube_Transcript_v2 -coding4vinayak/openaccess-ai-collective-jeopardy-bot -wishwork/Persian-LLM-Leaderboard -jkassemi/hf-speech-bench -gatilin/damo-yolo-webui -ChristopherMarais/Andrew_AI-BB_classification-beta -olivianuzum/EmoJeneration -CHDCruze/entertainmentbybhdcruze -CikeyQI/meme-api -arslan-ahmed/talk-to-arslan -athuljoy/whisper_model_speech_to_text2 -gatilin/damo-facedet-webui -Shankarm08/chatconversation -gforguru/EmailGenerator -lm/lychee_law -Nybb/README -digitalxingtong/Shanbao-Bert-VITS2 -allknowingroger/Image-Models-Test154 -digitalxingtong/Azusa-Bert-VITS2 -fullname77/README -yannESGI/test_fitz -jpwahle/field-time-diversity -mattiaspaul/chasingclouds -801artistry/RVC801 
-UglyLemon/LEMONTR -shauray/StarCoder -UglyLemon/Lemon_Reverse -gventur4/recipesDaCasa -thePhenom21/AdaptLLM-medicine-LLM -fastaioncampus/TrafficSigns -generativeai/test-image-similarity -gventur4/receitas_tera-final -Bradjan310/ehartford-Wizard-Vicuna-30B-Uncensored -yjmqaq/Iloveyou -jsaplication/jsphoto-api -JoYCC/ICBU-NPU-FashionGPT-70B-V1.1 -MJ/AI-ChatBot -Omnibus/summarize-long-text -alamin655/websurfx -SeyedAli/Persian-Speech-Emotion-Detection -SeyedAli/Arabic-Speech-Synthesis -SeyedAli/Persian-Text-Paraphrase -tomascufarovertic/keyword_classification -themanas021/legal_chat -allknowingroger/Image-Models-Test157 -allknowingroger/Image-Models-Test158 -Fiacre/projectmanagerideator -SeyedAli/Persian-Text-Sentiment -MindSyncAI/Plant_Classification -sravya-abburi/ResumeParserLLM -iccv23-diffusers-demo/instruct-pix2pix -iccv23-diffusers-demo/LoraTheExplorer -iccv23-diffusers-demo/T2I-Adapter-SDXL-Sketch -iccv23-diffusers-demo/stable-diffusion-image-variations -iccv23-diffusers-demo/zeroscope-v2 -iccv23-diffusers-demo/sdxl -iccv23-diffusers-demo/Shap-E -jbilcke-hf/campose-api -pharma-IA/PharmaWise_Prospecto_Megalabs_V2.10 -luisotorres/Volatility-Based-Support-and-Resistance-Levels -srini047/asapp-hackathon -Docfile/open_llm_leaderboard -francojc/transcribe -kevinwang676/VITS2-Mandarin -llm-learnings/huberman-gpt -laiguorui/bing -davidashirov/cilantro -dongsiqie/Image-to-Line-Drawings -sh20raj/python-bootcamp -drdonut1/TIGER-Lab-MAmmoTH-Coder-34B -tonne/jupyterlab -Rurrr/qr_monster -olanigan/glaiveai-glaive-coder-7b -xyyyds/som -anilkumar-kanasani/chat-with-your-pdf -JAKKIHARISH/mygenAIAvatar -Harish143/AIavatar2.0 -kudoshinichi/hf-sentiment-models -yeahpic/YeahPic -felixz/open_llm_leaderboard -SirensOfNC/sail-rvc-Sonic_SonicBoom -huazhao/QQsign -Toritto/Genshin-impact-IA-project-v1 -Asifpa6/emotion-analyzer-app -Manoj21k/Custom-QandA -angelayeu/my_hf_space -allknowingroger/Image-Models-Test162 -allknowingroger/Image-Models-Test163 -dinhhung1508/VietnamAIHub-Vietnamese_LLama2_13B_8K_SFT_General_Domain_Knowledge -EmRa228/Image-Models-Test1001 -sanjay7178/FAS-demo -Deepak7376/demo-sapce -r0seyyyd33p/sdui-custom -CuraAlizm/stabilityai-stable-diffusion-xl-base-1.0 -aai198/ComfyUI -kavit02/chatbot1 -ddosxd/sydney-inpaint -HarshWK/Basic_Models -lancewilhelm/bad-actors-annotator -Raghavan1988/falcon-lablabai-hackathon-brainstorming-buddy-for-researchers -magulux/openai-reverse-proxy-3 -Kayson/InstructDiffusion -bohmian/stock_intrinsic_value_calculator -aiswaryamlds/YoutubeQA -sahirp/planedetect -Zuleyyuyuu/Yuyu -gradio/keras-image-classifier -TusharGoel/LayoutLM-DocVQA -neridonk/facebook-nougat-base -hareshgautham/Myspace -allknowingroger/Image-Models-Test165 -DHEIVER/ThyroidTumorClassificationModel -allknowingroger/Image-Models-Test166 -SeyedAli/Persian-Image-Captioning-1 -Enterprisium/Easy_GUI -SeyedAli/Persian-Image-Captioning -GreenTeaLatte/ComfyUI-cpu -DHEIVER/ImageClassifierCataract -Semibit/gentle-audio -Monster/Llama-2-13B-chat -PaSathees/FoodVision_Mini -ForTheLoveOfML0/X-ray_Classifier -kavit02/chatbot2 -Sudhanshu976/NLP_FULL_APP -PaSathees/FoodVision_Big -kenton-li/record -vtomoasv/product-recognition -benjaminzuckermanbasisscottsdale/Chronic_Kidney_Disease_Prediction_Service -CrAvila/DigitClassifier -VishnuSaiTeja/RogerStaff -tarjomeh/disney-pixal-cartoon -SeyedAli/Musical-genres-Detection -SalahZa/Tunisian-Speech-Recognition -allknowingroger/Image-Models-Test167 -allknowingroger/Image-Models-Test169 -anilkumar-kanasani/cloths_order_bot -VishnuSaiTeja/Predictor -zzzzred/extras 
-deafheavennnn/metalproxy -binker/interpreter -priyaaa22/gen1 -SeyedAli/Persian-To-English-Translation -SeyedAli/English-To-Persian-Translation -Thanarit/GPT-Detection-Demo -Sandy0909/Finance_Sentiment -qefunaba/nicky007-stable-diffusion-logo-fine-tuned -qefunaba/iamkaikai-amazing-logos-v3 -Tatvajsh/AHS -CCOM/README -AIWaves/Debate -Jineet/Handwritten_Digit_Recognition -Omnibus/idefics_playground_mod -metricspace/juristische_Ersteinschaetzung_einer_KI -allknowingroger/Image-Models-Test170 -DORA1222/1234 -Justin-12138/FSALA -stevenxiao29/ResumeAssist -aichitrakaar/Deci-DeciDiffusion-v1-0 -rishabh2322/chatbot -sudokush/goofyai-3d_render_style_xl__generator -ramki123/testing -fersch/predictor_fraude -huang4414/saltacc-anime-ai-detect -passgenau-digital/virtual-assistant-demo-hsb -ahmedgamal777722/flowise -themanas021/legal-chat -ngoctuanai/aivestablediffusionv15 -Manvir786/nfgj -apokalis/Apokalis -prxx/Norod78-SD15-IllusionDiffusionPattern-LoRA -SeyedAli/Multilingual-Text-Similarity -OdiaGenAI/Olive_Farm -74run/Predict_Car -all-diffusions/stable-diffusion-v1-5 -SmileyTatsu/Bleh -Alex123aaa/1234 -binker/interpreter5 -allknowingroger/Image-Models-Test171 -allknowingroger/Image-Models-Test172 -enochianborg/stable-diffusion-webui-vorstcavry -jitubutwal1441/multiple-pdfs-chat -artba/SchoolStats1 -yegeta1243/Image-Models-Test130 -ak0601/news_sentiment_analysis -ltg/no-en-translation -passgenau-digital/virtual-chat-assistent-cc-energy -spritlesoftware/Spritle-Bot -zhuraavl/mistralai-Mistral-7B-v0.1 -ai-maker-space/ChatWithYourPDF -NanoT/demo2 -samibel/A-Comparative-Analysis-of-State-of-the-Art-Deep-learning-Models-for-Medical-Image-Segmentation -bincooo/m3e-large-api -Ralmao/Anemia -banana-dev/demo-mistral-7b-instruct-v0.1 -ridges/mistralai-Mistral-7B-v0.1 -shoupeng/test -zliang/ClimateChat -nyust-eb210/bge-large-zh-v1.5_gradio -FridaZuley/RVC_HFKawaii -samyak152002/Quantumn-Multiplication -Samarth991/LLAMA-QA-AudioFiles -allknowingroger/Image-Models-Test173 -allknowingroger/Image-Models-Test174 -illrapper/ill -CyberPeace-Institute/Cybersecurity-Knowledge-Graph-Extraction -KushJaggi/YOLOv8 -samyak152002/Qiskit -wonderit-safeai/tts-announcer -kavit02/cono -Tonic/indiansummer -vikdutt/vd -Elegbede/Text_to_emotion_classifier -debayan/ISM2023w -fschramm21/fraudDetector -cbensimon/stable-diffusion-xl -SeyedAli/Image-Similarity -44brabal/valentinafeve-yolos-fashionpedia -huggingdalle/dalle-mini -SeyedAli/Image-Object-Detection -iabualhaol/emot -Faridmaruf/RVCV2MODEL -AbdoulGafar/woodsound -Thafx/sdrvxl2 -Lbx091/rev -AP123/dreamgaussian -philwsophi/Testeoi -Cran-May/ygVI -PeepDaSlan9/TigerResearch-tigerbot-70b-chat -yuanh/bingon -Veer15/image-prompt-editing -allknowingroger/Image-Models-Test176 -allknowingroger/Image-Models-Test177 -allknowingroger/Image-Models-Test178 -Hua626/QQsign -byC2bot/TikTok_info -ayoubkirouane/BERT-base_NER-ar -XzJosh/Carol-Bert-VITS2 -Okkoman/PokeFace -Bready11/Onodofthenorth-SD_PixelArt_SpriteSheet_Generator -SeyedAli/Image-Segmentation -HenryCarle/your_sport_picker -TNK21/Translator_app -yuangongfdu/LTU -yuangongfdu/LTU-Compare -msobhy/langchain-chat-with-pdf -Omnibus/MusicGen -qscwdv/bing -Abhiboken12/travelling_ai -digitalxingtong/Nailv-read-Bert-Vits2 -digitalxingtong/Eileen-Bert-Vits2 -curveman2/MysteryClaude -litagin/vits-japros-webui-demo -LabAlproITS/CyberDAS-FE -allknowingroger/Image-Models-Test179 -Sagand/Sargand -yuezih/BLIP-SMILE -MultiTransformer/autogen-online -pablodawson/ldm3d-inpainting -RockInnn/snake_by_princepspolycap -wbe/balls -DollieHell/pisa 
-GabeIsHaxkee/E -javedkumail/HopeAI -digitalxingtong/Jiuxia-Bert-Vits2 -sara4dev/rag-iblog-qa -digitalxingtong/Jiaohuaji-Bert-Vits2 -digitalxingtong/Kino-Bert-VITS2 -digitalxingtong/Lixiang-Bert-Vits2 -digitalxingtong/Luzao-Bert-Vits2 -AchyuthGamer/AchyuthGamer-OpenGPT -digitalxingtong/Miiu-Bert-Vits2 -digitalxingtong/Un-Bert-Vits2 -allknowingroger/Image-Models-Test181 -arborvitae/GalaxiCode.ai -DamarJati/DamarJati-NSFW-filter-DecentScan -cmtry/nAIr -Djacon/emotion_detection -lunarflu/HuggingMod -MohamedRabie26/Soil_Shear_Strength_Prediciton -imperialwool/llama-cpp-api -webtest1s/testings -Puyush/MultiLabel-TextClassification -ALSv/Chat-with-Llama-2-70b -Gauri54damle/sdxl-lora-multi-object -murongtianfeng/gradio1 -Jingqi/ChatGPT-QA -shabnam91/Sanskrit-TTS -LZRi/LZR-Bert-VITS2 -Afrihub/README -Detomo/ai-avatar-backend -allknowingroger/Image-Models-Test182 -Jaskirat-04/Food-Personalisation -allknowingroger/Image-Models-Test185 -zhan66/vits-uma-genshin-honkai -williamberman/stable-diffusion-xl-inpainting -mgolu/EDvai_final -zhan66/vits-simple-api -aimustafa/Example -karthick965938/ChatGPT-Demo -rohitt45/Movie-Recommendation-System -tjgo/README -fmind/resume -AmirTrader/LinearRegression -dsank/PY007-TinyLlama-1.1B-Chat-v0.3 -doevent/XTTS_V1_CPU_working -OmarSRF/OOOFFF -Fedev23/Proyecto_edvai -MultiTransformer/Automated-Social-Media-Campaign -qgyd2021/chat_with_llm -rishiraj/mistral -huutinh111111/ChatGPT4 -isan2001/BertApps -XPMaster/chainladder -Abs6187/AI_Chatbot -Hackatos/Smart-Shower-ATC -ArnePan/German-LLM-leaderboard -dhanilka/illusion-image-ai -SiraH/DQA-Llama2-4bit -Pietrzak/bigscience-bloomz-7b1-mt -sub314xxl/MetaGPT -omdivyatej/general_invoice_parser -tekkonetes/rust-code-server -alsalemi/pv-segment-01 -Megatron17/RAQA_with_Langchain -benmaor/FoodVision_Big -BilalSardar/Halal_Food_Checker -Sujal7/shikshaconnect -thiago-osorio/track-search-engine -DHEIVER/classificador_de_imagem_colonoscopia -1-13-am/neural-style-transfer -ShoukanLabs/OpenNiji-Aesthetic-Dataset-Viewer -Maheshiscoding/MAHESH-AI-HELPER -HypermindLabs/Snore-Detector -Vicent3/laniakea -roggen/unity-llm-example -Vicent3/sharp-transformers-traveltaxi -Vicent3/ocr-endpoint -Vicent3/ocr-wrapper -eaglelandsonce/BabyAGI -mixcard/image-1-captioning -Anew5128/Anew51 -mixcard/text-finbert -mixcard/blip-image-captioning-large -Anew1007/extras -mixcard/ask-reader-text -yuki-816/science-communication -mixcard/text-summarization -leo-bourrel/test-streamlit -mixcard/text-summary -mixcard/text-summary-2 -mixcard/image-2-text-largecoco -mixcard/image-captioning-ru -mixcard/image-2-captionmax -mixcard/image-2-details -asgaardlab/DatasetPreviewer -Admin08077/Record -nothingsuspicious/curaude -yoon-gu/pokemon-quiz -kavit02/cono.type.xd -dlmn/SIH_S2T_multilingual_ASR -jeanbaptdzd/mistralai-Mistral-7B-v0.1 -kevinwang676/xtts -Sujal7/Shiksha-Connect -allknowingroger/Image-Models-Test189 -pharma-IA/PharmaWise_Prospecto_Generico_Acetilsalicilico_V2C_STREAM -allknowingroger/Image-Models-Test190 -allknowingroger/Image-Models-Test191 -pharma-IA/PharmaWise_Prospecto_Generico_Vortioxetina_V2C_STREAM -seanghay/KLEA -AtheneaEdu/README -pharma-IA/PharmaWise_Experto_GMP_V2C_STREAM -pharma-IA/PharmaWise_Experto_Data_Integrity_V2C_STREAM -roshithindia/image_classification -hilsq/bingotest -Waranchari/Image_Classification -ashioyajotham/falcon_7b_coder -fatimahhussain/workoutwizard -universal-ml/NLang -sherinsp/openai-reverse-proxy -ziyadsuper2017/Biochemistry3.0 -puji4ml/PubMedAbstractSkimmingTool -KoalaAI/Text-Moderation-Demo 
-winterForestStump/bank_deposit_prediction -FlipTip/ChatBot -finaspirant/SearchWithVoice -abidlabs/persistent-storage-test -IceAnimates123/README -PeepDaSlan9/ToyWorld -MJ/EEG_cls -TheKitten/Pictures -mfernezir/VanillaChatbot -Cran-May/Mistril-7b -khjs012/1412 -Mysterykey/test -awacke1/MistralGradioFast -padmanabhbosamia/Segment_Anything -AbeShinzo0708/AI_Kishida_Fumio_speaker -jbilcke-hf/hotshot-xl-api -rajan30may/Agribot -roshithindia/chatBotGPT2 -BreetheRun/stabilityai-stable-diffusion-xl-base-1.0 -huaiji3y/bingo -AkshayKumarP/AI-ChatBot -udartem/easwsnn -Arsenii2023/Demo1 -leelaaaaaavvv/VoiceCloneAi -fazzam/Grainsight2 -ivuxy/Eval -xiantian/123 -awacke1/MistralAndABardGoRoleplaying -vlsp-2023-vllm/VLLMs-Leaderboard -legend1234/b3clf_hf -wayandadang/MathLLM-MathCoder-L-7B -VatsaDev/TinyLlama -digitalxingtong/Xingtong-Read-Dongmuchang-Bert-VITS2 -digitalxingtong/Xingtong-Longread-Dongmuchang-Bert-VITS2 -rimasalshehri/NASAproject -kkumarkumar/miniprojectvoice -KushJaggi/pdfGPT -allknowingroger/Image-Models-Test194 -allknowingroger/Image-Models-Test195 -Gna1L/jonatasgrosman-wav2vec2-large-xlsr-53-english -roshithindia/imageQuestionAnswering -DonngHuang/auto-ai -nesanchezo/ChatbotNico -awacke1/Mistral_Ultimate_Chords_and_Lyrics_Writer -gauravtewari/famos-at -PiyushLavaniya/Llama2_Chatbot -pykale/README -Cran-May/SEA-orca -Alfaxad/BioGalacticModels -deepakHonakeri5/instagram -openskyml/pigeonchat-demo -akiraaaaaa/Waifu-Reina -PhilSpiel/annie -DylanWolf/h2ogpt-api -eaglelandsonce/autogenmultichat -Drac77/stabilityai-stable-diffusion-xl-base-1.0 -feiya/feiyaa -zhaoys/wfms-kuiwenc -ssthouse/runwayml-stable-diffusion-v1-5 -hsdhgds/htyjuietryt -lixiang3718/bing -trialapp/gpt_summarizer -BestteaLib/README -ClipHamper/stable-diffusion-webui -allknowingroger/Image-Models-Test196 -ravichodry/CHATGPT-LLAMA2 -Ajay07pandey/Netfilx_Movie_Recommendation_System -allknowingroger/Image-Models-Test198 -justest/ai-support -kunderabr/ResumoYouTube -wisamidris7/erp -Darwin2023/darwin -Araby/BRATArA -alexray/btc_predictor -PeepDaSlan9/bank_deposit_prediction -huang4414/anime-remove-background -TogetherAI/EinfachLlaMistral -TRaw/pro -digitalxingtong/Xingtong-All-in-One -ViktorTsoi13/GPT4 -penguin2023/vncs -immortaker/as -Omnibus/video-2-3d -Veerjyot/Digital_India -Rayzggz/illi-Bert-VITS2 -arbitrarygate/ayaka_sign -cybercorejapan/human-detection-docker -NewtonKimathi/Sepsis_Prediction_FastApi -allknowingroger/Image-Models-Test200 -XciD/te -searchfind/SG161222-Realistic_Vision_V1.4 -zongxiao/speech-to-speech -cedpsam/mistral_openorca_lamacpp -dlmn/BHASHAVANI -Nehal07/Text-Colour-Changes -titanito/stablediffusionapi-juggernaut-xl-v5 -ardances/mistralai-Mistral-7B-v0.1 -nllg/AutomaTikZ -Nehal07/text-translator-with-voice -Ashrafb/Imdf2 -KHAMMAMKURRODU/ChatbotApplication -amin2809/rvc-models -teragron/TinyStories -TheKitten/Images -Diego-0121/ImaText -mies8888/intfloat-multilingual-e5-large -kmanoj/Sentiment_Analysis -DynoKevin/img-cap-for-vision-mate -pragneshbarik/ikigai-chat -IMU20/kestrl_merchantname_nlp -ryo2/convertcsv2h5 -Mosharof/FMS -allknowingroger/Image-Models-Test201 -allknowingroger/Image-Models-Test202 -allknowingroger/Image-Models-Test203 -bhunter/jupyter-1 -LovnishVermaPRINCE/chatai -Ankush05/Newcode -ALSv/FSW -BilalSardar/Reinhard_Color_Transformation -spritlesoftware/Image-Object-Detection -BilalSardar/Remove_Text_for_Image -hf4all/chatbot-ui-bing -lingluoACE/bingbyd -aimaswx/my_streamchat -rrepiece/ostris-ikea-instructions-lora-sdxl -vasistasaimagam/FoodVision_Big -coyotte508/static-light-dark 
-Mohammed-Khalil/Chat_with_Youtube_Videos -str-platformAI/striim-gpt -BMukhtar/BookRecognitionKz -PeepDaSlan9/HuggingFaceH4-zephyr-7b-alpha -AchyuthGamer/jondurbin-airoboros-gpt-3.5-turbo-100k-7b -limcheekin/bge-small-en-v1.5 -glassofwine/glassofwine-DialoGPT-medium-johanwine -ombhojane/Fetch-Alerts -rrkd/cosmos -Fu-chiang/skintest -Exalt-company/text-to-video -awacke1/VoiceChatMistral -Owechada/roopfaceswapr -mittalneha/SD_Styles_Assignment -narutovk/VKreate -dincali/text-to-image -iloveapplesandoranges/stablediffusionapi-disney-pixal-cartoon -geetu040/video-gen -chendl/compositional_test -themanas021/falcon-legal -yooso/PixelFusion -adolfoutfpr/learn4elixir -Olivier-Truong/faster-whisper-webui-v2 -wrice/denoisers -VAGOsolutions/README -ketangandhi/demo-space -vr18/legal-rag -Tefa90/ehartford-dolphin-2.1-mistral-7b -carlostoxtli/ace -kanli/AIchatBot -Fu-chiang/Bit-50-Glaucoma -Dragneel/Recon -ardha27/rvc_TTS -PeepDaSlan9/Dup_Digital_India -mrplants/alphabot -jbilcke-hf/hotshot-xl-server-1 -ngoctuanai/chatgpt -allknowingroger/Image-Models-Test205 -allknowingroger/Image-Models-Test206 -fgpzen/remove-photo-object -hzzgenius/bing -XzJosh/Jianmo-Bert-VITS2 -XzJosh/JM-Bert-VITS2 -vagmi/isai -Jung/ep_explorer -PunGrumpy/text-generation -JoeJenkins/Norod78-SD15-IllusionDiffusionPattern-LoRA -Nymisha123/InstagramQuoteDeveloper -Guying2/guying -andy7475/english_place_name_generator -Toxfu/BIgVisionEffnetB2 -neharao/loraking -lamtung16/Llama-2-AWS -PeepDaSlan9/Llama-2-AWS -nesticot/pp_roundup -isididiidid/chatgpt-next-webiii -MA9149210776/CrucibleAI-ControlNetMediaPipeFace -metrosir/ChatGPT4 -pyimagesearch/summary-to-title -allknowingroger/Image-Models-Test207 -allknowingroger/Image-Models-Test208 -hf-audio/vocos-bark -allknowingroger/Image-Models-Test209 -KAHRAMAN42/Animal_species_detection -LEKAI007/QQ -Erala/QQsign -DEVILOVER/image_captioning -AchyuthGamer/MagicPrompt-Stable-Diffusion -Malolactica/amigosdejuegos -mayordp/DeepFakeAI -picopi/openai-reverse-proxy -Wauplin/huggingface_hub -lighdow/anime-cute-tts -fittar/ViPE -Bonp/B -ercaronte/speech-to-speech-translation -Hoolbo/bing -joaopereirajp/livvieChatBot -vih-v/Stable-Diffusion-prompt-generator -mrSoul7766/Instagram_post_caption_generator -wangzhang/ChatSDB -Cran-May/SEA-Streamlit -gelnicker/ostris-ikea-instructions-lora-sdxl -InvisableClearCoat101/mistralai-Mistral-7B-v0.1 -prthgo/PDF-Chatbot -roshithindia/song-generation -roshithindia/text_calssification_model -OrangeBusiness/OrangeBranding -Thorsten-Voice/Hessisch -SakshiRathi77/SakshiRathi77-wav2vec2_xlsr_300m -igolas0/fastai_sportsman -Acapellas/vocalinstrumentalremover -HughAA/IPQA -Orami01/Cha_with_CSV_using_Llama2 -nightelf/codesandbox -dreamdrop/bot -praveen-reddy/PDP -ennov8ion/art-models -derek-thomas/RAGDemo -rajababu15/Health_Tracker -Starkate/zo -eaglev/whales -rasmodev/sepsis_prediction -rajababu15/ht_bk -JustSkyDev/DSEG -Tanjiro2002/Government_order -AchyuthGamer/OpenGPT-Chat -JayKen/Object-detection -Potanin/12345 -ennov8ion/art-multi -botlik100/kaki -lzr090708/Real-CUGA -awacke1/HealthyBrainAging -rajababu15/ht_bk_gr -Dimalker/Faceswapper -MakiAi/SquareMotion -better-ai/lisa -pn23/HackGT2023 -sznicko/tick -aaronayitey/Streamlit-app -Mark3347/AlpinaB12 -Kurugodu/mygenaibha -XX-4419/xx-chatui -lewisliuX123/wechatllama2 -typesdigital/llm-agents-tora-70b-v1.0 -Severian/ANIMA-7B-Biomimicry-LLM -ikechan8370/meme-generator -genaibook/audio_visualizations -xiaoyeAI/clewd -nesticot/player_cards -skoneru/contextual_refinement_ende -hadasak/SciTrends -gstaff/gif-reverser 
-Yaroslav1234/PublicComment.AI -Frantz103/CaptionQuest -datboichidori/Ryzan-fantasy-diffusion-v1 -datboichidori/yehiaserag-anime-pencil-diffusion -mrciolino/InvertibleSteganography -roger33303/GenerativeAI-Chatbot.AI-Therapist -SakshiRathi77/SakshiRathi77-Wav2Vec2-hi-kagglex -exit9/neuro_evolution -nathanTQ/ChatDev -LaynzKunz/Model-RCV -sachin1729/Image_GeneratorByText_Sachin -kmrmanish/LPI_Course_Recommendation_System -sachin1729/Imgae2text_BySachin -AnishKumbhar/ChatBot -vishal0501/ICD-DEMO -VlaTal/facial_beauty_analysis -Liberian/jtr8ukj8sk -Liberian/ghfvtybrfbuyt -wolf-sigma/Starburst_Galaxy__PyStarburst_Demo -QuophyDzifa/Sepsis-prediction-App -Shreeraj/SEO_APP -luisotorres/plant-disease-detection -firdavsyorkulov/delivery_project_fastapi -MultiTransformer/vision-agent-with-llava -vih-v/x_mod -crimbo66/openai-whisper-large -dreamdrop/kandinsky-2-1 -asteph/harrywang-pokemon-lora -Vishakaraj/Point_Cloud_Segmentation-Trimble_Cloud -TechnoByte/wd-v1-4-tags -TechnoByte/soft-improved -ShrapTy/text_generation -KdaiP/yolov8-deepsort-tracking -Arulkumar03/GroundingDINO_SOTA_Zero_Shot_Model -firsk/ai_otto -darthPanda/facial_recognition -Kvikontent/QrGen -Nigomaster/Analizador_CVs -alonsosilva/NextTokenPrediction -Beasto/Photo2Monet_Cyclegan -Cippppy/RegressionVisualization -metalslimeee/zigspace -serdaryildiz/TRCaptionNet -generativeai/bestpics-ms-crop-image -generativeai/bestpics-ms-image-similarity -Anthony-Ml/covid_predictor -ShrapTy/GPT4ALL -rbanfield/libfacedetection -caixyz/ok -gstaff/system-monitor -XzJosh/Bekki-Bert-VITS2 -XzJosh/TianDou-Bert-VITS2 -iamironman4279/SadTalker -QCRI/mt-bench-ar -Priyanka-Kumavat/Anomaly-Detection-On-Sound-Data -shinexyt/StaticDemo -awacke1/VotingCrowdsourceEvaluationApps2 -PepijnvB/KappaNeuro-salomon-van-ruysdael-style -Endream/test -ERICTORRALBA/CAD -Alycer/VITS-Umamusume-voice-synthesizer -Vardaan08/TeamPredictor2 -jpwahle/field-diversity -Cran-May/BetaSEA-Streamlit -pdehaye/EleutherAI-llemma_34b -hamzaislamorg/README -deepusus/tts -acmyu/frame_interpolation_prototype -tether1/usdt -deepusus/chat -amankishore/adept-fuyu-8b -vroy02243/ML -44ov41za8i/FreeVC -moin1234/XAGPT1 -mixcard/Gustavosta-MagicPrompt-Dalle -cha0smagick/RPG_Character_generator -pharma-IA/PharmaWise_Experto_GMP_V2C_ToT -abidlabs/gradio-lite-speech -tbdaox/roopUn -temion/KoGPT_API -LaynzKunz/RCV-AI-COVER -simonraj/DesignThinkingCoach -Pranjal12345/Text_to_Speech -echons/musicgen-small -ArtyomKhyan/Detection -Isuru623/CardioScanPro -Priyanka-Kumavat/Document-Summarization -netrosec/diabetes-cox-ph-hazard -dfhhr4/QQsign -msy127/app_rag_llama2_paper -Vedarutvija/Veda_Audio_To_Text -Vedarutvija/ZebraGPT -Iseratho/frame-finder -padmanabhbosamia/Stable_Diffusion -riccorl/relik-entity-linking -XzJosh/Echo-Bert-VITS2 -XzJosh/Spade-Bert-VITS2 -aseuteurideu/audio_deepfake_detector -islammohy/Chat-with-Llama-2-7b-st-voice -fero/stable-diffusion-webui-cpu -innat/UniFormerV2 -Reza2kn/teknium-OpenHermes-2-Mistral-7B -30SecondsToMoon/30SecondsToMoon -freddyaboulton/gradio-lite-sklearn -samayg/StriimTheme -Benjov/Demo-IR -diego2554/RemBG_super -sznicko/vpsfree -Making/goofyai-Leonardo_Ai_Style_Illustration -AnishKumbhar/DogDiseasePredictor -AlekseyKorshuk/gai-project -PeepDaSlan9/Nan-Do-LeetCodeWizard_13B_v1.0 -prithush/Disaster_Tweet_Prediction -Beasto/Day_to_Night_Cyclegan -deepusus/tts-eng -Abhaykoul/Palm-2 -quanhua/KappaNeuro-movie-poster -chatpdfdemo/chatpdfdemo -chatpdfdemo/demo -idlsono/Idksono4 -awacke1/CalorieCalculatorForMorningSwimandPullUps -solara-dev/template -keyikai/bing -Sifal/En2Kab 
-ladapetrushenko/construction_prediction -Beasto/Face_To_Anime_Cyclegan -tsi-org/Faceswapper -PeepDaSlan9/stabilityai-stablecode-instruct-alpha-3b -yomo93/Tendon-search -Sonnt/Fracture_Webapp -hysts-duplicates/comparing-captioning-models -Rashid2026/Course-Recommender -Prasanna18/Nagpur-FoodGPT -LaynzKunz/REMAKE-AI-COVER -HyAgOsK/ECG_avalible -AlbertoFH98/CastenaApp -fh2412/handwritten_numbers -Abhaykoul/HelpingAI-t2 -yifangtongxing/qsign -AAYUSH27/Neuro -Jamin252/Dog_Identifier -TNK21/Story_Generator -silvanoalbuquerque/YOLO-V8_ANIMALS_CLASSIFICATION -gojiteji/mistral-7b-fast-chat-with-Japanese-MT -Theopan/VoiceFixer -Abhaykoul/Prompt_generator_for_helpingAI-tti -Prasanna18/SujokTherapy -CamCam17/Alexwww-davide-comic-book-characters -Lehele/bingai -mkoot007/Text2Story -allberto/Porn_Merge_V1.3 -hidevs-community/Youtube2Linkedin -tensor-diffusion/contribute-together-datasets -Michale1017/WS -TheKitten/Chat-with-Llama-2-70b-st-voice -geofactoryplastix/my-rvc-voicemodels -mkoot007/Conversation -Mycroft756/artificialguybr-StickersRedmond -svjack/chatglm2-6b-ggml -roughhai/myGenAIChatBot -hjianganthony/fetch_ner -sunxyz/testxy -wolfpackhnu/web_hosting -Fcjs/stablediffusionapi-lob-realvisxl-v20 -MoyerLiu/ChatGPT-Next-Web -muteekhan06/English-to-French -INDONESIA-AI/Anapnoe -dodos3/cosmos -huang4414/Real-CUGAN -huang4414/GTest -huang4414/anime-aesthetic-predict -vonewman/ner_app -bodrum/bodrumfenisleri -GIZ/vulnerability_analysis -Jose-Alonso26/API-Online -Abhaykoul/BardCookies-AI_Query -AutomationVR/ImageDemo -Abhaykoul/HelpingAI-T3 -Omnibus/game-test -fxmikau/o4gpt -svjack/stable-diffusion.cpp -leonardoboulitreau/aitmospheric -Nightwing25/AICoverGen -mfoud2023/Alhareq -masterkram/finance_news_classifier -awacke1/Top-Ten-United-States -awacke1/Map-California-AI -awacke1/California-Medical-Centers-Streamlit -awacke1/Minnesota-Medical-Centers-Streamlit -hyxue/HiFiFace-inference-demo -awacke1/Azure-Cosmos-DB -digitalxingtong/Bufeiyan-b-Bert-VITS2 -digitalxingtong/Bufeiyan-c-Bert-VITS2 -Zaixi/ICLR_FLAG -certkor/CertKOR.ai -simonraj/ELOralCoachHONGWEN -Beasto/Image_Colorizer_Pix2Pix -Fcjs/stablediffusionapi-edge-of-realism -XzJosh/maimai-Bert-VITS2 -df-h/viachat-v0.95 -JSP/test4k -mkoot007/Text2Image -garima-mahato/ShakespearesWeirdTales -hca97/Mosquito-Detection -abidlabs/structured-data-classification -peazy/Matt-or-Meth-Damon -Violetmae14/images-to-audio -abidlabs/frame-example -Happys/chatbot -JamesStratford/Identify-Pest-Predators-Demo -Fcjs/digiplay-Real3D_F16full_v3.1 -wuliya/QQsign -JunghunleePhD/catsClassification -rahul-pandey-ct/kinship-llm -teasouse/teaProxy -S1516/README -dongyaren/bhyy -XzJosh/Lumi-Bert-VITS2 -XzJosh/yoyo-Bert-VITS2 -Saketh-Reddy/testing -M-A-D/Dar-En-Translation-streamlit-Test -awacke1/mixture-of-experts-dr-llama -legacy107/flan-t5-large-ia3-cpgqa -HaohuaLv/one-shot_object_detection -schogini/toys -arsalagrey/audio-classification-vue -rng0x17/jupyterlab -Thanu83/Music -JunghunleePhD/testfordocker -sunil448832/retrieval-augment-generation -Jody36565/segmind-SSD-1B -olimpa/CVPZJACOB -LAYEK-143/TEXT-TO-IMAGE-AI -adrianpierce/recipes_app -AliUsama98/Aliusama_spellchecker -arsalagrey/speech-recognition-vue -Dragonnnext/Unicorn-proxy -Dragonnnext/Drago-Proxy -Dragonnnext/scylla-proxy -Dragonnnext/charybdis -jonybepary/teknium-CollectiveCognition-v1.1-Mistral-7B -Michale1017/Auto-keep-online -AliUsama98/Usama_TextClassifier -puqi/climsim -amin2809/rvc-models2023 -leelalife/super-fast-sdxl-stable-diffusion-xl -sakina1122/Jimmey_image_capturing -swj0419/Detect-Pretraining-Data 
-abidlabs/mteb-leaderboard -LaynzKunz/AI-Cover-Gen-Web-Ui -vpsrikanth/FaceSimilarity -bennydou/gitea -PeWeX47/GPT-2-Lyrics-Generator -Siyuan0730/clewordAutomaticGenerating -spineapple/FoodVision -qiemanqieman/Salesforce-blip-image-captioning-base -Designstanic/meta-llama-Llama-2-7b-chat-hf -NexusInstruments/DFIRFlowChain -Immaniel/mygenAIAvatarSpeech -padmanabhbosamia/Nano_GPT -KennyUTC/BotChat -thelou1s/MIT-ast-finetuned-audioset-10-10-0.4593 -Pluviophile/QQsign -svjack/stable-diffusion.search.hash -Niansuh/Image -sunilbhatia/hackathon1 -Lianguangluowuyan/QQsign -QuanLingZ/ChatReviewer -abusch419/PetBreedClassifier -SIH/tree-segmentation -Smols/Ilinalta -Smols/AWS -abhi3940/test -yeshpanovrustem/ner-kazakh -themanas021/pictionary -saurabhg2083/jobbias -Leyo/AI_Meme_Generator -Albertha/qwe123 -Promit/BrainSEG -LaynzKunz/RCVAICOVER -jokogadingan/joko-gadingan-image-description-project -xznwwh/aabb -cancanasoyak/CropBased-TissueMasking -capjamesg/fastvit -BiTransSciencia/www -Gianpaolog/newbie-elixir -AixiaGreyatt/QQsign -Blessin/impro-scene-generator -AchyuthGamer/text-to-speech-client -shimizukawa/python-no-senpai -hhemanth/first_project -kevinhug/clientX -josegabmuz/gradio-test -bhagyaK/mygenai -suvradip2000/space1 -Sanathkumar1603/hackathon -RohithMidigudla/Comment_Toxicity_Detection -SeyedAli/Butterfly-image-Generation -Blessin/one-liners -Abdo1Kamr/Text_Translation_And_Text_Formatter_For_Palestinian_Case -Banbri/zcvzcv -Zahnanni/FinnishLocalLingoLexicon -hsukqilee/NSFW-API -Siyuan0730/revise_IELTS_writting -LinJulya/PromptGenerator -langvision/codellama-34b-chat -aidinro/qqqqqqqqqqqqq -langvision/llama-2-70b-chat -langvision/README -hackertwo/GoAheadMazen -shhegart/f1-vs-gt3 -ayushnoori/program-synthesis -PaSathees/Vehicle_Tyre_Quality_Checker -ubermenchh/zephyr_chatbot -langvision/ChatWeb -MiklX/claude -langvision/ChatGPT -eye-yawn/visuAILearn -popo23/app -ethan-ai/VideoRetalking -linzjian666/vvvtss -miracle01/white-emotion-recognition -ongxuanhong/listing-content-with-ai -JosueElias/borrs -Cran-May/Shi-Ci-app -digitalxingtong/Xingtong-2dall-Bert-VITS2 -gkswk/cosmos -livekhh/formal_project -Clara998/DisneyPixarMovie -ArcanAlt/arcanDream -mdkhalid/mistralai-Mistral-7B-v0.1 -fauzanrisqullah/rmt-24-gc5 -shubhamjaiswar/RakshakReet-SpamDetection -deniskrr/clothing-type-classifier -yithong/audio2summary -Michale1017/xray -yigekeqing/QQsign -saawal/Heart_Disease_Model -sofanorai/gpt-web -trysem/image-matting-app -JDWebProgrammer/chatbot -NillJan/NelsonBot -americanboy/Prime_Numbers -uzairm/anyroad -thelou1s/ltu-2 -gracexu/llama-2-7b-chat-grace -Blessin/drama-director -rahul-pandey-ct/kinship-llm-poc -Blessin/movie-poster-generator -awacke1/Streamlit-Google-Maps-Minnesota -KAHRAMAN42/youtube_transcript -Blessin/yes-and-improv-game -prthgo/Tabular-Data-Analysis-and-Auto-ML -awacke1/Streamlit-Google-Maps-California -awacke1/Streamlit-Google-Maps-Washington -awacke1/Streamlit-Google-Maps-Massachusetts -awacke1/Streamlit-Google-Maps-Texas -tushar27/Streamlit-Magic-Sheet -ltg/chat-nort5 -nascetti-a/py2DIC -xumingliuJ/space-demo -SukhdevMiyatra/streamlit-smartphone-eda -kevin-dw/runwayml-stable-diffusion-v1-5 -omarbaba/streamlit-test -jbilcke-hf/image-caption-server -Ataturk-Chatbot/HuggingFaceChat -rupeshs/fastsdcpu -Tirendaz/Multilingual-NER -captain-awesome/pet-dog-care-bot -Pranjal2041/GEO-bench -cybergpt/ChatGPT -TRaw/jelly -kvviingu/stabilityai-stable-diffusion-xl-base-1.0 -Kiyo-umm/Linaqruf-pastel-anime-xl-lora -luisotorres/bart-text-summarization -allyssonmacedo/good-clients 
-livingbox/Image-Models-Test-31 -hayas/rinna-youri-7b-chat -dongyaren/12345 -aipicasso/playground -jessica6105/Lu-Bert-VITS2 -ViralWeb/aifi -nopassionyeah/bing -thuonghai2711/JDhfjrtjklrkhjgknhjvfgnh2 -noahzhy/KR_LPR_TF -dragao-elastico/RVC_V2 -saad-k7/Jewelli-Chatbot -freddyaboulton/gradio_foliumtest -trungtruc/segment_clothes -simonraj/ELOralCoachRiverValleyPrimarySchool -mabzak/Youtube-Comment-Sentimen-Analisis -zht1/test2 -KalbeDigitalLab/ham1000-skin-classification -simonraj/ELOralCoachCantonmentPrimary -Chomkwoy/Nilkessye -lqy09/GT -Mmmm7/M -bruno16/massa_qa -Felladrin/LaMini-Flan-T5-248M-Candle-Wasm -atimughal662/InfoFusion -wiwaaw/chatpdf -simonraj/ELOralCoachTestFeedback -meet244/Legal-Up_Lawyer_Recommendation_System -jonathanjordan21/lmd_chatbot_embedding -KennethTM/semantic_search -namdu/README -LaynzKunz/RVC-Inference-webui-grado-colab-huggingafce -Deviliaan/sd_twist -kunkun11/home -Sanchayt/VectaraBeginner -Shubhamskg/LangchainQuesAnsChatbot -NLPark/Misteln-Schariac -realgenius/NousResearch-Yarn-Mistral-7b-128k -SIGMitch/Real-Time-Chad -krunalss/firstllm -themanas021/Kosmos-2 -ryn-85/NousResearch-Yarn-Mistral-7b-128k -janeH/QQsign -Luckro3/README -bejar111/cursoia -DeliaPaladines/CursoIA -lordfoogthe2st/PDIS-nature-surfer-ai -ahuang11/tastykitchen -profayle/TerrapinTalk -Niansuh/api -Niansuh/chat -MultiAgentSystems/README -MultiAgentSystems/MapAI-ClinicsAndMedCenters -MultiAgentSystems/WhisperLlamaMultiAgentSystems -MultiAgentSystems/WhisperGPTMultiAgentSystems -rajeev12/rajeev_space -Osborn-bh/ChatGLM3-6B-Osborn -IES-Rafael-Alberti/PerfectGPT -rostislav553/PROGECT -Kaikaikai/webgl_demo -abcdef12356/slinteg -ankur2402/ISRO -sujitojha/nanoGPT -tiagopessoalim/Predicting180-DayMortalityInGeriatricOncology -AnimeStudio/anime-models -Tirendaz/NER-Demo -Abhaykoul/Merriam-webster_clone -Gosula/hand_written_digit_recognition -icashwave/rwkv-v5-1b5-cpu -Sjmin/cosmos -typesdigital/demo-app -Trangluna2002/AI_Cover_Gen -tosta86/Flowise -keanteng/job -qq2855562986/anime-remove-background -o-m-s/Med_DL -YanzBotz/stablediffusionapi-disney-pixar-cartoon -osl-ai/NousResearch-Yarn-Mistral-7b-64k -Wassim/public-custom-search -hyunda/test9week -yousuf-e/yousuf-space-1 -Abhaykoul/Wikipedia -mufssdr/jaidhus -mufssdr/kkhuy -GAURAVBRAR/AIGK -alGOriTM207/Ru_DialoModel -svjack/stable-diffusion.search.embedding -asfzf/DeepDanbooru_stringxchj -Talo88/Tumer-Detection -yyyyulia/7390_nlp_interactive_v2 -Mahit/DDoS_Attack_Classifier -sanjay11/resumescan -Rifd/Sdallmodels -pseudolab/Rice_Disease_Classifier -Fadil369/docker -JessPink/Text_rewriting-Chatbot -Abhaykoul/HelpingAI-2.0 -MultiAgentSystems/MultiSystemAgentUI -QuanLingZ/ChatResponse -xun/Qwen-Token-Calc -ioniumX/SDXL-High-quality-art -jorgeppp/LDCC-LDCC-Instruct-Llama-2-ko-13B-v1.4 -Suniilkumaar/MusicGen-updated -kodirovshchik/food_classification_api -petermutwiri/Movie_Review_Application -ztime/Yi-6B-GGUF_llama_cpp_python -kinit-tomassako/ver-spaces-demo -Hoshiyume/FixedStar-DebugChat -Aqdas/YouTube_Video_OpenAI_whisper -airely/bingai1 -themanas021/Image-alanysis -Jianfeng777/Car_Bike_Classification -yufiofficial/MusicGenQ -kevinwang676/KNN-VC -pp3232133/pp3232133-distilgpt2-wikitext2 -A-Roucher/Quotes -TandCAcceptMe/face-swap-docker -Nymbo/OpenAI_TTS_Streaming_Whisperv3 -Dodero1305/Heart-Disease-Chatbot -tsinghua-ee/SALMONN-7B-gradio -hanxuan/XQSign -luckpunk/LLMRiddles -pantherhead/pantherhead -Abhaykoul/Wizard-AI -tbvl/Fake_Face_Detection -bbz662bbz/chatgpt_cost_calc -JDWebProgrammer/space-weather -ArpitM/chat-llm-streaming 
-LuxOAI/zenFace-Recognition-SDK -Aditya757864/SentimentAnalysis -ProgramX/hi -merve/kosmos-2 -VinayHajare/MistralTalk -BimboAnon/BimboProxy -lunarflu/HF-QA-Demo-3 -Karifannaa/audio_story -aaronayitey/Covid_19-Vaccine-Sentiment_Analysis -Zitang/Self-attention-based-V1MT-motion-model -taham655/transcriptionApp -MathFabian/p2_m5_hugging -rashmi/h2oai-predict-llm -pantherhead/test -phyloforfun/GreenSight -AshutoshPattanayak/LangchainDemo -latent-consistency/Real-Time-LCM-Text-to-Image-Lora-SD1.5 -ngoctuanai/DALL-E -OpenDILabCommunity/LLMRiddlesChatGLMCN -flowers-team/SocialAISchool -nsoma/ml-break -Tobias111/uptime -librarian-bots/Model-Cards-Nomic-Atlas-Map -librarian-bots/Dataset-Cards-Nomic-Atlas-Map -dawood/gradio_videogallery -GRATITUD3/NESGPT-AutoAnnotatorv0 -AhmedMagdy7/My_paper_space -lIlIlllllmeng/zhaoyang -alvin888/GeoGenie -TiKaira-6995/NepAI -Niansuh/DALL-E -VinayHajare/Fruit-Recognition -syq163/EmotiVoice -thelou1s/sleep_data -flf/8983 -sh20raj/telebot \ No newline at end of file diff --git a/spaces/17TheWord/RealESRGAN/realesrgan/models/realesrgan_model.py b/spaces/17TheWord/RealESRGAN/realesrgan/models/realesrgan_model.py deleted file mode 100644 index c298a09c42433177f90001a0a31d029576072ccd..0000000000000000000000000000000000000000 --- a/spaces/17TheWord/RealESRGAN/realesrgan/models/realesrgan_model.py +++ /dev/null @@ -1,258 +0,0 @@ -import numpy as np -import random -import torch -from basicsr.data.degradations import random_add_gaussian_noise_pt, random_add_poisson_noise_pt -from basicsr.data.transforms import paired_random_crop -from basicsr.models.srgan_model import SRGANModel -from basicsr.utils import DiffJPEG, USMSharp -from basicsr.utils.img_process_util import filter2D -from basicsr.utils.registry import MODEL_REGISTRY -from collections import OrderedDict -from torch.nn import functional as F - - -@MODEL_REGISTRY.register() -class RealESRGANModel(SRGANModel): - """RealESRGAN Model for Real-ESRGAN: Training Real-World Blind Super-Resolution with Pure Synthetic Data. - - It mainly performs: - 1. randomly synthesize LQ images in GPU tensors - 2. optimize the networks with GAN training. - """ - - def __init__(self, opt): - super(RealESRGANModel, self).__init__(opt) - self.jpeger = DiffJPEG(differentiable=False).cuda() # simulate JPEG compression artifacts - self.usm_sharpener = USMSharp().cuda() # do usm sharpening - self.queue_size = opt.get('queue_size', 180) - - @torch.no_grad() - def _dequeue_and_enqueue(self): - """It is the training pair pool for increasing the diversity in a batch. - - Batch processing limits the diversity of synthetic degradations in a batch. For example, samples in a - batch could not have different resize scaling factors. Therefore, we employ this training pair pool - to increase the degradation diversity in a batch. 
- """ - # initialize - b, c, h, w = self.lq.size() - if not hasattr(self, 'queue_lr'): - assert self.queue_size % b == 0, f'queue size {self.queue_size} should be divisible by batch size {b}' - self.queue_lr = torch.zeros(self.queue_size, c, h, w).cuda() - _, c, h, w = self.gt.size() - self.queue_gt = torch.zeros(self.queue_size, c, h, w).cuda() - self.queue_ptr = 0 - if self.queue_ptr == self.queue_size: # the pool is full - # do dequeue and enqueue - # shuffle - idx = torch.randperm(self.queue_size) - self.queue_lr = self.queue_lr[idx] - self.queue_gt = self.queue_gt[idx] - # get first b samples - lq_dequeue = self.queue_lr[0:b, :, :, :].clone() - gt_dequeue = self.queue_gt[0:b, :, :, :].clone() - # update the queue - self.queue_lr[0:b, :, :, :] = self.lq.clone() - self.queue_gt[0:b, :, :, :] = self.gt.clone() - - self.lq = lq_dequeue - self.gt = gt_dequeue - else: - # only do enqueue - self.queue_lr[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.lq.clone() - self.queue_gt[self.queue_ptr:self.queue_ptr + b, :, :, :] = self.gt.clone() - self.queue_ptr = self.queue_ptr + b - - @torch.no_grad() - def feed_data(self, data): - """Accept data from dataloader, and then add two-order degradations to obtain LQ images. - """ - if self.is_train and self.opt.get('high_order_degradation', True): - # training data synthesis - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - self.kernel1 = data['kernel1'].to(self.device) - self.kernel2 = data['kernel2'].to(self.device) - self.sinc_kernel = data['sinc_kernel'].to(self.device) - - ori_h, ori_w = self.gt.size()[2:4] - - # ----------------------- The first degradation process ----------------------- # - # blur - out = filter2D(self.gt_usm, self.kernel1) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, scale_factor=scale, mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob'] - if np.random.uniform() < self.opt['gaussian_noise_prob']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range']) - out = torch.clamp(out, 0, 1) # clamp to [0, 1], otherwise JPEGer will result in unpleasant artifacts - out = self.jpeger(out, quality=jpeg_p) - - # ----------------------- The second degradation process ----------------------- # - # blur - if np.random.uniform() < self.opt['second_blur_prob']: - out = filter2D(out, self.kernel2) - # random resize - updown_type = random.choices(['up', 'down', 'keep'], self.opt['resize_prob2'])[0] - if updown_type == 'up': - scale = np.random.uniform(1, self.opt['resize_range2'][1]) - elif updown_type == 'down': - scale = np.random.uniform(self.opt['resize_range2'][0], 1) - else: - scale = 1 - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate( - out, size=(int(ori_h / self.opt['scale'] * scale), int(ori_w / self.opt['scale'] * scale)), mode=mode) - # add noise - gray_noise_prob = self.opt['gray_noise_prob2'] - 
if np.random.uniform() < self.opt['gaussian_noise_prob2']: - out = random_add_gaussian_noise_pt( - out, sigma_range=self.opt['noise_range2'], clip=True, rounds=False, gray_prob=gray_noise_prob) - else: - out = random_add_poisson_noise_pt( - out, - scale_range=self.opt['poisson_scale_range2'], - gray_prob=gray_noise_prob, - clip=True, - rounds=False) - - # JPEG compression + the final sinc filter - # We also need to resize images to desired sizes. We group [resize back + sinc filter] together - # as one operation. - # We consider two orders: - # 1. [resize back + sinc filter] + JPEG compression - # 2. JPEG compression + [resize back + sinc filter] - # Empirically, we find other combinations (sinc + JPEG + Resize) will introduce twisted lines. - if np.random.uniform() < 0.5: - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - else: - # JPEG compression - jpeg_p = out.new_zeros(out.size(0)).uniform_(*self.opt['jpeg_range2']) - out = torch.clamp(out, 0, 1) - out = self.jpeger(out, quality=jpeg_p) - # resize back + the final sinc filter - mode = random.choice(['area', 'bilinear', 'bicubic']) - out = F.interpolate(out, size=(ori_h // self.opt['scale'], ori_w // self.opt['scale']), mode=mode) - out = filter2D(out, self.sinc_kernel) - - # clamp and round - self.lq = torch.clamp((out * 255.0).round(), 0, 255) / 255. - - # random crop - gt_size = self.opt['gt_size'] - (self.gt, self.gt_usm), self.lq = paired_random_crop([self.gt, self.gt_usm], self.lq, gt_size, - self.opt['scale']) - - # training pair pool - self._dequeue_and_enqueue() - # sharpen self.gt again, as we have changed the self.gt with self._dequeue_and_enqueue - self.gt_usm = self.usm_sharpener(self.gt) - self.lq = self.lq.contiguous() # for the warning: grad and param do not obey the gradient layout contract - else: - # for paired training or validation - self.lq = data['lq'].to(self.device) - if 'gt' in data: - self.gt = data['gt'].to(self.device) - self.gt_usm = self.usm_sharpener(self.gt) - - def nondist_validation(self, dataloader, current_iter, tb_logger, save_img): - # do not use the synthetic process during validation - self.is_train = False - super(RealESRGANModel, self).nondist_validation(dataloader, current_iter, tb_logger, save_img) - self.is_train = True - - def optimize_parameters(self, current_iter): - # usm sharpening - l1_gt = self.gt_usm - percep_gt = self.gt_usm - gan_gt = self.gt_usm - if self.opt['l1_gt_usm'] is False: - l1_gt = self.gt - if self.opt['percep_gt_usm'] is False: - percep_gt = self.gt - if self.opt['gan_gt_usm'] is False: - gan_gt = self.gt - - # optimize net_g - for p in self.net_d.parameters(): - p.requires_grad = False - - self.optimizer_g.zero_grad() - self.output = self.net_g(self.lq) - - l_g_total = 0 - loss_dict = OrderedDict() - if (current_iter % self.net_d_iters == 0 and current_iter > self.net_d_init_iters): - # pixel loss - if self.cri_pix: - l_g_pix = self.cri_pix(self.output, l1_gt) - l_g_total += l_g_pix - loss_dict['l_g_pix'] = l_g_pix - # perceptual loss - if self.cri_perceptual: - l_g_percep, l_g_style = self.cri_perceptual(self.output, percep_gt) - if l_g_percep is not None: - l_g_total += l_g_percep - loss_dict['l_g_percep'] = 
l_g_percep - if l_g_style is not None: - l_g_total += l_g_style - loss_dict['l_g_style'] = l_g_style - # gan loss - fake_g_pred = self.net_d(self.output) - l_g_gan = self.cri_gan(fake_g_pred, True, is_disc=False) - l_g_total += l_g_gan - loss_dict['l_g_gan'] = l_g_gan - - l_g_total.backward() - self.optimizer_g.step() - - # optimize net_d - for p in self.net_d.parameters(): - p.requires_grad = True - - self.optimizer_d.zero_grad() - # real - real_d_pred = self.net_d(gan_gt) - l_d_real = self.cri_gan(real_d_pred, True, is_disc=True) - loss_dict['l_d_real'] = l_d_real - loss_dict['out_d_real'] = torch.mean(real_d_pred.detach()) - l_d_real.backward() - # fake - fake_d_pred = self.net_d(self.output.detach().clone()) # clone for pt1.9 - l_d_fake = self.cri_gan(fake_d_pred, False, is_disc=True) - loss_dict['l_d_fake'] = l_d_fake - loss_dict['out_d_fake'] = torch.mean(fake_d_pred.detach()) - l_d_fake.backward() - self.optimizer_d.step() - - if self.ema_decay > 0: - self.model_ema(decay=self.ema_decay) - - self.log_dict = self.reduce_loss_dict(loss_dict) diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download LINK Free FieldIT (CRM) Current Version.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download LINK Free FieldIT (CRM) Current Version.md deleted file mode 100644 index e0cfdd17fcb2907e360f88ea4c7b48c100992957..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Download LINK Free FieldIT (CRM) Current Version.md +++ /dev/null @@ -1,26 +0,0 @@ - -

How to Download Free FieldIT (CRM) Current Version

-

FieldIT (CRM) is a customer relationship management software that helps you manage your contacts, tasks, appointments, documents, and more. It is designed for small and medium businesses that need a simple and affordable solution to organize their data and improve their productivity.

-

If you want to download free FieldIT (CRM) current version, you can follow these steps:

-

Download free FieldIT (CRM) current version


Download Ziphttps://byltly.com/2uKz0T



-
    -
  1. Go to this link [^3^] and click on the green Download Now button.
  2. -
  3. Save the file FieldITCRM.exe to your computer and run it.
  4. -
  5. Follow the installation wizard to complete the setup.
  6. -
  7. Launch FieldIT (CRM) and enter your name and email address to register for a free license.
  8. -
  9. Enjoy using FieldIT (CRM) for your business needs.
  10. -
-

Note that the current version of FieldIT (CRM) is 3.8.20, which was released on December 28, 2012 [^3^]. It is compatible with Windows 2003, XP, Vista, 7, and 8 [^3^]. If you need more advanced features or support, you can upgrade to a paid version of FieldIT (CRM).

-

If you are looking for other CRM software options, you can also check out Microsoft Dynamics 365 [^1^] or SAP CRM [^2^], which are more comprehensive and scalable solutions for larger enterprises. They offer various modules and enhancements for different business functions and industries. However, they also require more investment and technical expertise to implement and maintain.

-

Whatever CRM software you choose, make sure it meets your business goals and requirements. A good CRM software can help you improve your customer satisfaction, loyalty, retention, and revenue.

Here are some additional tips on how to use CRM software effectively:

- -

By following these tips, you can make the most out of your CRM software and boost your business growth.

-

7b8c122e87
-
-
\ No newline at end of file diff --git a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2016 Test Questions And Answers Pdf.md b/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2016 Test Questions And Answers Pdf.md deleted file mode 100644 index c83376c1427c6c5d923b668082d07ca6cc2d101b..0000000000000000000000000000000000000000 --- a/spaces/1acneusushi/gradio-2dmoleculeeditor/data/Excel 2016 Test Questions And Answers Pdf.md +++ /dev/null @@ -1,16 +0,0 @@ - -

How to Prepare for Excel 2016 Test Questions and Answers PDF

-

If you are planning to take an Excel 2016 test, you might be looking for some resources to help you prepare. One of the best ways to study for an Excel 2016 test is to use a PDF file that contains questions and answers. A PDF file is a document that can be viewed on any device and printed easily. A PDF file that contains Excel 2016 test questions and answers can help you practice your skills, review your knowledge, and identify your strengths and weaknesses.

-

However, not all PDF files that contain Excel 2016 test questions and answers are created equal. Some PDF files may have outdated, inaccurate, or irrelevant information. Some PDF files may have poor formatting, spelling, or grammar. Some PDF files may have too few or too many questions, or questions that are too easy or too hard. Therefore, you need to be careful when choosing a PDF file that contains Excel 2016 test questions and answers.

-

excel 2016 test questions and answers pdf


Downloadhttps://byltly.com/2uKA0w



-

Here are some tips to help you find and use a good PDF file that contains Excel 2016 test questions and answers:

- -

By following these tips, you can find and use a good PDF file that contains Excel 2016 test questions and answers. A good PDF file can help you prepare for your Excel 2016 test effectively and efficiently.

ddb901b051
-
-
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/Addictive Drums Authorization Code 111 14.md b/spaces/1gistliPinn/ChatGPT4/Examples/Addictive Drums Authorization Code 111 14.md deleted file mode 100644 index 3347d0e2ce8b0bc09549b607c1be03a6f57da5af..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/Addictive Drums Authorization Code 111 14.md +++ /dev/null @@ -1,109 +0,0 @@ - -

Addictive Drums Authorization Code 111 14: How to Unlock the Full Potential of Your Drum Software

- -

If you are a music producer, composer, or drummer, you probably know how amazing Addictive Drums 2 is. This software allows you to create realistic and expressive drum tracks with ease, using a variety of kits, presets, and effects. You can use it as a standalone application or as a plug-in in your favorite music production software.

- -

But what if you want to access more features and options in Addictive Drums 2? What if you want to customize your drum sounds, mix and match different kits, and tweak every parameter to your liking? Well, you need an authorization code to unlock the full potential of your drum software.

-

Addictive Drums Authorization Code 111 14


DOWNLOADhttps://imgfil.com/2uxYJU



- -

What is Addictive Drums Authorization Code 111 14?

- -

Addictive Drums Authorization Code 111 14 is a special code that you can use to activate Addictive Drums 2 on your computer. This code is provided by XLN Audio, the company that develops and distributes Addictive Drums 2. You can get this code by purchasing Addictive Drums 2 from their official website or from an authorized dealer.

- -

Once you have this code, you can enter it in the Addictive Drums 2 activation window and enjoy all the benefits of the full version of the software. You can use any kit, preset, or effect that you want, and you can also download and install additional content from the XLN Audio website. You can also use Addictive Drums 2 on up to three computers with the same code.

- -

How to Activate Addictive Drums 2 with Authorization Code 111 14?

- -

Activating Addictive Drums 2 with Authorization Code 111 14 is very easy and straightforward. Here are the steps you need to follow:

- -
    -
  1. Download and install Addictive Drums 2 from the XLN Audio website or from the installation disc that came with your purchase.
  2. -
  3. Launch Addictive Drums 2 as a standalone application or as a plug-in in your music production software.
  4. -
  5. Click on the "Activate Product" button in the lower right corner of the Addictive Drums 2 window.
  6. -
  7. Enter your email address and password that you used to register your product on the XLN Audio website. If you don't have an account yet, you can create one for free.
  8. -
  9. Enter your Addictive Drums Authorization Code 111 14 in the field provided and click on "Activate".
  10. -
  11. Wait for the activation process to complete and restart Addictive Drums 2.
  12. -
- -

Congratulations! You have successfully activated Addictive Drums 2 with Authorization Code 111 14. You can now enjoy all the features and options that this software has to offer.

- -

Why Choose Addictive Drums Authorization Code 111 14?

- -

Addictive Drums Authorization Code 111 14 is the best way to unlock the full potential of your drum software. Here are some of the reasons why you should choose this code:

- - - -

Addictive Drums Authorization Code 111 14 is the ultimate solution for anyone who wants to create professional and realistic drum tracks with ease. Don't miss this opportunity to get this code and experience the power of Addictive Drums 2.

-

- -

Get Your Addictive Drums Authorization Code 111 14 Today!

- -

If you are ready to take your drum production to the next level, don't hesitate to get your Addictive Drums Authorization Code 111 14 today. You can get this code by visiting the XLN Audio website or by contacting an authorized dealer near you.

- -

Addictive Drums Authorization Code 111 14 is the key to unlocking the full potential of your drum software. Get it now and start creating amazing drum tracks with Addictive Drums 2!

-

What are the Benefits of Addictive Drums 2?

- -

Addictive Drums 2 is not just another drum software. It is a complete drum production solution that offers many benefits for music producers, composers, and drummers. Here are some of the benefits of Addictive Drums 2:

- - - -

Addictive Drums 2 is a powerful and flexible drum software that will help you create professional and realistic drum tracks with ease.

- -

How to Get the Best Out of Addictive Drums Authorization Code 111 14?

- -

Addictive Drums Authorization Code 111 14 is a great way to activate Addictive Drums 2 on your computer. However, there are some tips and tricks that you can use to get the best out of this code and your drum software. Here are some of them:

- - - -

Addictive Drums Authorization Code 111 14 is a valuable tool that will help you unlock the full potential of your drum software. Use it wisely and enjoy creating amazing drum tracks with Addictive Drums 2!

Where to Buy Addictive Drums Authorization Code 111 14?

If you are interested in buying Addictive Drums Authorization Code 111 14, you have two options: you can buy it directly from the XLN Audio website or from an authorized dealer near you.

Buying from the XLN Audio website is the easiest and fastest way to get your code. You can choose from different payment methods and currencies, and you get instant access to your code and your product downloads. You can also benefit from the XLN Audio loyalty program and earn discounts and rewards on your purchases.

Buying from an authorized dealer is another option worth considering. You can find a list of authorized dealers on the XLN Audio website or by contacting customer support. An authorized dealer can offer advantages such as local support, warranty service, and physical installation discs.

Whichever option you choose, make sure you buy Addictive Drums Authorization Code 111 14 from a legitimate source. Avoid unauthorized sellers and websites that offer suspiciously low prices or free codes: such codes may be fake, stolen, or expired, and they may not work or may cause problems with your software.

How to Troubleshoot Addictive Drums Authorization Code 111 14?

Addictive Drums Authorization Code 111 14 is a reliable code that should work without any issues. If you do encounter problems with the code or with software activation, contact XLN Audio customer support for help.

Addictive Drums Authorization Code 111 14 is a simple and effective way to activate Addictive Drums 2 on your computer, and you should be able to enjoy your drum software without any problems.

Conclusion

Addictive Drums 2 is a powerful and versatile drum production software that can help you create realistic and expressive drum tracks with ease. Whether you use it as a standalone application or as a plug-in in your music production software, Addictive Drums 2 gives you access to a huge library of kits, presets, and effects that cover a wide range of genres and styles.

To unlock the full potential of your drum software, you need Addictive Drums Authorization Code 111 14. This code allows you to activate Addictive Drums 2 on your computer and enjoy all the features and options that this software has to offer. You can also use the same code on up to three computers.

Addictive Drums Authorization Code 111 14 is easy to use and activate. You can get this code by buying Addictive Drums 2 from the XLN Audio website or from an authorized dealer near you. You can also register your product on the XLN Audio website to get access to additional content and updates.

If you have any problems with your code or your software activation, you can follow the tips and solutions that we have provided in this article. You can also contact XLN Audio customer support for more help and assistance.

Addictive Drums Authorization Code 111 14 is the key to unlocking the full potential of your drum software. Get it now and start creating amazing drum tracks with Addictive Drums 2!
\ No newline at end of file diff --git a/spaces/1gistliPinn/ChatGPT4/Examples/DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar.md b/spaces/1gistliPinn/ChatGPT4/Examples/DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar.md deleted file mode 100644 index 854b6e3b3416438db1b32c6b2a485a00868aff5d..0000000000000000000000000000000000000000 --- a/spaces/1gistliPinn/ChatGPT4/Examples/DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar


Download Zip --->>> https://imgfil.com/2uy1N6



- -SCPH- .... PlayStation 2 SCPH 39001.rar 8.11 Mb . SCPH 30000-50000.pdf 2.17 Mb . 31584 . ... DFX Audio Enhancer 13.008 - Repack KpoJIuK .rar · Crack For ... 4d29de3e1b

diff --git a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bullet Echo Mod Apk and Enjoy Free Shopping and Epic Battles.md b/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bullet Echo Mod Apk and Enjoy Free Shopping and Epic Battles.md deleted file mode 100644 index ec17ddbd5fc3181f975d734ba7dc860e9d8188c4..0000000000000000000000000000000000000000 --- a/spaces/1pelhydcardo/ChatGPT-prompt-generator/assets/Download Bullet Echo Mod Apk and Enjoy Free Shopping and Epic Battles.md +++ /dev/null @@ -1,94 +0,0 @@ -

Free Download Bullet Echo Mod Apk: A Stealthy Battle Royale Game


If you are looking for a new and exciting action shooter game to play on your mobile device, you might want to check out Bullet Echo. This game is a PvP tactical team shooter that pits teams of players against each other in intense, competitive matches. You can choose from dozens of different heroes with unique play styles, guns, and abilities. You can also play in various game modes, such as Team vs Team, Solo, and Battle Royale.


free download bullet echo mod apk


Download Zip > https://urlin.us/2uSTI0




But what if you want to enjoy the game without spending any money or waiting for long hours to unlock new heroes and perks? Well, you can do that by downloading the Bullet Echo Mod Apk. This is a modified version of the original game that gives you unlimited money, free shopping, unlocked heroes, and more. In this article, we will tell you everything you need to know about Bullet Echo Mod Apk, including what it is, how to download and install it, and some tips and tricks to play it.

What is Bullet Echo?

Bullet Echo is a game developed by ZeptoLab, the creators of popular games like Cut the Rope, King of Thieves, and C.A.T.S. It is a top-down PvP tactical shooter that combines stealth, teamwork, and shooting skills. Here are some of the features of the game:

A top-down PvP tactical shooter

In Bullet Echo, you play as one of the heroes in a team of up to five players. Your goal is to eliminate the enemy team or be the last team standing when the battle ends. You can use your gun, your abilities, and your flashlight to fight your way through ever-changing environments. However, your vision is limited by the beam of your flashlight, so you have to rely on sound cues to locate your enemies and allies.

A game with multiple modes and heroes

Bullet Echo offers three main game modes that you can play solo or with friends online. These are:

- Team vs Team
- Solo
- Battle Royale

Besides these modes, you can also participate in championships, missions, and events to earn valuable resources and rewards.

The game also features 21 heroes at launch and more heroes coming soon. Each hero has a unique set of abilities that can be used in combat. For example, some heroes can turn invisible, some can create electrical shields, some can heal themselves or their teammates, and some can launch rockets or grenades. You can unlock new heroes by playing online matches or by using resources that you earn or buy.


A game with stealth and sound mechanics

One of the most unique aspects of Bullet Echo is the stealth and sound mechanics. Unlike other shooter games, you cannot see the whole map or the enemies' positions. You can only see what your flashlight illuminates, which is a narrow cone of light in front of you. This means that you have to be careful about where you point your flashlight, as it can reveal your location to the enemies or blind your teammates. You also have to use sound cues to detect the enemies and allies. You can hear the footsteps, gunshots, and abilities of other players, as well as the ambient noises of the environment. You can use these sounds to locate and track your targets, or to avoid being detected by them. You can also use your abilities to create sound distractions or to silence your enemies.

These mechanics make Bullet Echo a game that requires strategy, coordination, and stealth. You have to work with your team to plan your moves, communicate your positions, and execute your attacks. You also have to adapt to the changing situations and environments, as the maps are randomly generated and have different layouts, obstacles, and items.

What is Bullet Echo Mod Apk?

Bullet Echo Mod Apk is a modified version of the original game that gives you some advantages and features that are not available in the official version. These are:

A modified version of the original game

Bullet Echo Mod Apk is not an official app from ZeptoLab. It is a third-party app that has been modified by some developers or hackers to alter some aspects of the game. This means that it is not authorized by ZeptoLab and it may not be compatible with the latest updates or features of the game. It also means that it may contain some bugs, errors, or viruses that can harm your device or compromise your security.

A version with unlimited money and free shopping

One of the main benefits of Bullet Echo Mod Apk is that it gives you unlimited money and free shopping. This means that you can buy anything you want in the game without spending any real money or waiting for long hours. You can buy new heroes, perks, skins, weapons, and more with just a few clicks. You can also upgrade your heroes and perks to the maximum level without any restrictions.

A version with unlocked heroes and perks

Another benefit of Bullet Echo Mod Apk is that it gives you unlocked heroes and perks. This means that you can access all the heroes and perks in the game without having to unlock them by playing online matches or using resources. You can choose any hero you want from the start and use their abilities in combat. You can also use any perk you want to enhance your performance and customize your play style.

How to Download and Install Bullet Echo Mod Apk?

If you want to download and install Bullet Echo Mod Apk on your device, you have to follow these steps (a small verification sketch follows the list):

The download link and steps

-
    -
  1. Go to this link: https://www.apkdone.com/bullet-echo/
  2. -
  3. Click on the green button that says "Download APK (100 MB)"
  4. -
  5. Wait for the download to finish and then open the file
  6. -
  7. Click on "Install" and allow unknown sources if prompted
  8. -
  9. Wait for the installation to finish and then open the app
  10. -
-

The requirements and precautions


The benefits and drawbacks

| Benefits | Drawbacks |
| --- | --- |
| You can enjoy unlimited money and free shopping in Bullet Echo Mod Apk | You may get banned or hacked by ZeptoLab or other players for using Bullet Echo Mod Apk |
| You can access all the heroes and perks | |

You should use Bullet Echo Mod Apk at your own risk and discretion.
  • Q: How can I update Bullet Echo Mod Apk?
  • A: Do not update Bullet Echo Mod Apk from the Google Play Store or other sources, as that may overwrite the modded features or cause errors. Only update it from the same link that you downloaded it from, or from a trusted source that provides the latest version of the mod. Back up your data before updating, as an update may delete or corrupt your existing data.
  • Q: How can I switch back to the official version of Bullet Echo?
  • A: Uninstall Bullet Echo Mod Apk from your device by going to your device settings, finding the app, and tapping "Uninstall". Delete any residual files or folders related to the mod, then download and install the official version of Bullet Echo from the Google Play Store or other sources.
  • Q: Can I play Bullet Echo Mod Apk with other players who use the official version of the game?
  • A: Yes, but you may face unfair or unbalanced matches against them, and you may be reported or banned for using the mod, since ZeptoLab and other players consider it cheating or hacking.
  • Q: Can I play Bullet Echo Mod Apk offline?
  • A: No. You need a stable internet connection to play online matches, and an internet connection to download and install the mod in the first place.

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Burger Please Mod Apk The Ultimate Fun Game with Unlimited Money.md b/spaces/1phancelerku/anime-remove-background/Burger Please Mod Apk The Ultimate Fun Game with Unlimited Money.md deleted file mode 100644 index 18a9a264040da063dc79b5c46053944809b9e876..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Burger Please Mod Apk The Ultimate Fun Game with Unlimited Money.md +++ /dev/null @@ -1,132 +0,0 @@ - -

    Burger Please Mod APK Unlimited Money: How to Download and Play

    -

    Do you love burgers? Do you want to run your own burger shop? Do you want to have unlimited money, diamonds, coins, and cash in your game? If you answered yes to any of these questions, then you might be interested in Burger Please Mod APK Unlimited Money, a modified version of the popular casual game Burger Please. In this article, we will tell you everything you need to know about this mod, including what it is, how to download and install it, and how to play it. Let's get started!

    -

    burger please mod apk unlimited money


    Download File ->>->>->> https://jinyurl.com/2uNLp7



    -

    What is Burger Please?

    -

    Burger Please is a casual game where you run your own burger shop and serve delicious burgers to your customers. You can customize your shop, upgrade your equipment, unlock new recipes, and create your own burger combinations. You can also compete with other players in the leaderboard, complete daily missions, and earn rewards.

    -

    A casual game where you run your own burger shop

    -

    The main mode of Burger Please is the Career Mode, where you start from a small burger stand and work your way up to a big burger empire. You will have to manage your time, resources, and customer satisfaction as you prepare and serve burgers according to their orders. You will also have to deal with different types of customers, such as kids, adults, celebrities, zombies, aliens, and more. Each customer has their own preferences, personality, and patience level, so you have to be careful not to make them angry or disappointed.

    -

    The features and gameplay of Burger Please

    -

    Burger Please has many features and gameplay elements that make it fun and addictive. Some of them are:

    -
      -
  • Customization: You can customize your shop with different themes, decorations, furniture, and accessories. You can also customize your character with different outfits, hairstyles, accessories, and expressions.
  • Upgrades: You can upgrade your equipment, such as your grill, fryer, toaster, and blender, to make them faster, more efficient, and more durable. You can also upgrade your ingredients, such as your meat, cheese, lettuce, and tomato, to make them tastier, fresher, and more nutritious.
  • Recipes: You can unlock new recipes as you progress in the game. You can also create your own recipes by combining different ingredients. You can save your recipes in your cookbook and use them anytime.
  • Challenges: You can challenge yourself with different modes and levels in the game. You can play the Time Attack Mode, where you have to serve as many burgers as possible in a limited time. You can play the Endless Mode, where you have to serve burgers until you run out of ingredients or customers. You can also play the Boss Mode, where you have to face a special customer who will test your skills.

If you decide to use a mod APK, there are some precautions that you should take:

  • You should always download the mod APK from a trusted and reliable source, such as a reputable website, forum, or blog.
  • You should always scan the mod APK with an antivirus or anti-malware program before installing it on your device.
  • You should always read the reviews and comments of other users who have used the mod APK before downloading and installing it.
  • You should always follow the instructions and requirements of the mod APK carefully and correctly.

    How to Download and Install Burger Please Mod APK Unlimited Money?

    -

    If you have decided to use Burger Please Mod APK Unlimited Money, you will need to download and install it on your device. The process may vary depending on the type of device you have, but here are some general steps that you can follow:

    -

    The steps to download and install Burger Please Mod APK Unlimited Money on your Android device

    -
      -
    1. Go to the link where you can download the mod APK file. You can search for it online or use the link provided by the source.
    2. Tap on the download button and wait for the file to be downloaded on your device.
    3. Once the file is downloaded, go to your device settings and enable the option to install apps from unknown sources. This will allow you to install the mod APK file.
    4. Locate the mod APK file in your device storage and tap on it to start the installation process.
    5. Follow the on-screen instructions and grant the necessary permissions to install the mod APK file.
    6. Wait for the installation to be completed and then launch the game from your app drawer or home screen.
    7. Enjoy playing Burger Please Mod APK Unlimited Money on your Android device.

    The steps to download and install Burger Please Mod APK Unlimited Money on your PC using an emulator

    -
      -
    1. Go to the link where you can download the mod APK file. You can search for it online or use the link provided by the source.
    2. Download and install an Android emulator on your PC. An emulator is software that allows you to run Android apps on your PC. Some popular emulators are BlueStacks, NoxPlayer, and LDPlayer.
    3. Launch the emulator and sign in with your Google account. This will allow you to access the Google Play Store and other Google services on your PC.
    4. Drag and drop the mod APK file into the emulator or use the built-in browser to download it from the link.
    5. Install the mod APK file using the emulator's app installer or file manager.
    6. Launch the game from the emulator's app drawer or home screen.
    7. Enjoy playing Burger Please Mod APK Unlimited Money on your PC using an emulator.
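If you prefer the command line to drag-and-drop, most Android emulators also accept APKs over ADB. The sketch below is a hedged example in Python: it assumes the `adb` tool is installed and on your PATH, and that the emulator exposes an ADB endpoint on 127.0.0.1:5555 (the port varies by emulator); neither detail comes from the article.

```python
# Minimal sketch: sideload an APK into a running Android emulator over ADB.
# Assumes `adb` is installed and the emulator listens on 127.0.0.1:5555
# (the port differs between emulators); both are assumptions, not facts
# stated in this article. The APK file name is a placeholder.
import subprocess
import sys

EMULATOR_ADDRESS = "127.0.0.1:5555"    # assumed ADB endpoint of the emulator
APK_PATH = "burger-please-mod.apk"     # placeholder file name

def run(*args: str) -> None:
    """Run an adb command and stop with a message if it fails."""
    result = subprocess.run(["adb", *args], capture_output=True, text=True)
    if result.returncode != 0:
        sys.exit(f"adb {' '.join(args)} failed: {result.stderr.strip()}")
    print(result.stdout.strip())

if __name__ == "__main__":
    run("connect", EMULATOR_ADDRESS)                         # attach to the emulator
    run("-s", EMULATOR_ADDRESS, "install", "-r", APK_PATH)   # -r replaces an existing install
```

This does the same thing as steps 4 and 5 above, just without touching the emulator's own file manager.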

    The steps to download and install Burger Please Mod APK Unlimited Money on your iOS device using a third-party app store

    -
      -
    1. Go to the link where you can download the mod IPA file. You can search for it online or use the link provided by the source. The mod IPA file is a modified version of the original game that works on iOS devices. It is not an official version of the game and it is not supported or endorsed by the original developers.
    2. Download and install a third-party app store on your iOS device. A third-party app store is an alternative to the Apple App Store that allows you to download and install apps that are not available or approved by Apple. Some popular third-party app stores are TutuApp, Panda Helper, and AppValley.
    3. Launch the third-party app store and search for Burger Please Mod IPA Unlimited Money or use the link provided by the source.
    4. Tap on the download button and wait for the file to be downloaded on your device.
    5. Once the file is downloaded, go to your device settings and trust the developer profile of the third-party app store. This will allow you to install the mod IPA file.
    6. Locate the mod IPA file in your device storage and tap on it to start the installation process.
    7. Follow the on-screen instructions and grant the necessary permissions to install the mod IPA file.
    8. Wait for the installation to be completed and then launch the game from your app drawer or home screen.
    9. Enjoy playing Burger Please Mod IPA Unlimited Money on your iOS device using a third-party app store.

    How to Play Burger Please Mod APK Unlimited Money?

    -

    Now that you have downloaded and installed Burger Please Mod APK Unlimited Money, you might be wondering how to play it. Well, the gameplay is pretty much the same as the original game, except that you have unlimited resources and access to everything. However, if you want some tips and tricks to play it effectively and enjoyably, here are some suggestions:

    -

    The tips and tricks to play Burger Please Mod APK Unlimited Money effectively and enjoyably

    -
      -
    • Experiment with different recipes: Since you have unlimited ingredients, you can try out different combinations and create your own recipes. You can also use the cookbook to save your recipes and use them anytime. You might discover some delicious and unique burgers that will impress your customers and yourself.
    • -
    • Upgrade your equipment and ingredients: Since you have unlimited money, diamonds, coins, and cash, you can upgrade your equipment and ingredients to the max level. This will make your burgers faster, better, and more profitable. You can also buy new equipment and ingredients that will enhance your gameplay and variety.
    • -
    • Customize your shop and character: Since you have unlimited money, diamonds, coins, and cash, you can customize your shop and character with any theme, decoration, furniture, accessory, outfit, hairstyle, etc. that you want. You can also change them anytime according to your mood or preference. You can make your shop and character look unique and attractive.
    • -
    • Compete with other players: Since you have unlimited money, diamonds, coins, and cash, you can compete with other players in the leaderboard without any fear or pressure. You can also challenge yourself with different modes and levels in the game. You can show off your skills and achievements to other players and prove that you are the best burger master.
    • -
    • Have fun: The most important tip is to have fun while playing Burger Please Mod APK Unlimited Money. Don't take it too seriously or get bored by having everything easy. Enjoy the game as a casual and relaxing activity that will make you happy and hungry.
    • -
    -

    The best strategies and techniques to play Burger Please Mod APK Unlimited Money successfully and competitively

    -
      -
    • Plan ahead: Even though you have unlimited resources, you still need to plan ahead when preparing and serving burgers. You need to pay attention to the customer's order, the ingredient's availability, the equipment's condition, etc. You need to avoid wasting time or making mistakes that will affect your performance or customer satisfaction.
    • -
    • Prioritize your customers: Even though you have unlimited resources, you still need to prioritize your customers when serving burgers. You need to consider their preferences, personality, patience level, etc. You need to serve them quickly, accurately, politely, etc. You need to avoid making them angry or disappointed that will affect your reputation or income.
    • -
    • Balance your resources: Even though you have unlimited resources, you still need to balance your resources when buying and upgrading items. You need to consider the cost, benefit, quality, and quantity of each item. You need to avoid overspending or underutilizing your resources in ways that will affect your gameplay or variety.
    • Use your creativity: Even though you have unlimited resources, you still need to use your creativity when creating and customizing burgers. You need to experiment with different ingredients, recipes, combinations, etc. You need to make your burgers look appealing, delicious, and unique. You need to impress your customers and yourself with your creativity.
    • -
    • Have fun: The most important strategy is to have fun while playing Burger Please Mod APK Unlimited Money. Don't take it too seriously or get stressed by the competition or the challenge. Enjoy the game as a casual and relaxing activity that will make you happy and hungry.
    • -
    -

    Conclusion

    -

    In conclusion, Burger Please Mod APK Unlimited Money is a modified version of the original game that gives you unlimited money, diamonds, coins, and cash in your game. It allows you to enjoy the game without any limitations or restrictions. However, it also has some benefits and drawbacks, risks and precautions, tips and tricks, and strategies and techniques that you should know before downloading and installing it. We hope that this article has helped you learn more about this mod and how to download and play it. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and happy burger making!

    -


    -

    FAQs

    -

    Here are some frequently asked questions about Burger Please Mod APK Unlimited Money:

    -
      -
    1. Is Burger Please Mod APK Unlimited Money safe to use?
    2. -

      It depends on the source and the file that you download. Some mod APK files may be safe to use, while others may be fake or malicious. You should always download a mod APK file from a trusted and reliable source, such as a reputable website, forum, or blog. You should also scan the mod APK file with an antivirus or anti-malware program before installing it on your device.

      -
    3. Is Burger Please Mod APK Unlimited Money legal to use?
    4. -

      No, it is not legal to use Burger Please Mod APK Unlimited Money. It is a modified version of the original game that violates the terms and conditions of the game and the intellectual property rights of the original developers. Using a mod APK file may result in legal actions or penalties from the original developers or the authorities.

      -
    5. Can I play Burger Please Mod APK Unlimited Money online?
    6. -

      Yes, you can play Burger Please Mod APK Unlimited Money online with other players. However, you may face some issues or problems when playing online, such as lagging, crashing, banning, etc. You may also encounter some players who are using the same mod or other mods that may give them an unfair advantage or disadvantage.

      -
    7. Can I play Burger Please Mod APK Unlimited Money offline?
    8. -

      Yes, you can play Burger Please Mod APK Unlimited Money offline without an internet connection. However, you may not be able to access some features or functions of the game that require an internet connection, such as the leaderboard, the daily missions, the ads, etc.

      -
    9. Can I update Burger Please Mod APK Unlimited Money?
    10. -

      No, you cannot update Burger Please Mod APK Unlimited Money. Updating a mod APK file may cause some errors or issues in the game or remove the modifications. You may also lose your progress or account if you update a mod APK file. If you want to update the game, you will have to uninstall the mod APK file and install the original game from the Google Play Store or the Apple App Store.

      -

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Download Ragnarok X APK - The 3D MMORPG Mobile Game that Brings Back the Classic Masterpiece.md b/spaces/1phancelerku/anime-remove-background/Download Ragnarok X APK - The 3D MMORPG Mobile Game that Brings Back the Classic Masterpiece.md deleted file mode 100644 index bd62db689e558ca89a031341134478834c22569e..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Download Ragnarok X APK - The 3D MMORPG Mobile Game that Brings Back the Classic Masterpiece.md +++ /dev/null @@ -1,146 +0,0 @@ -
    -

    Download Ragnarok X APK: How to Play the Next Generation of the Classic MMORPG on Your Android Device

    -

    Introduction

    -

    If you are a fan of the classic Ragnarok Online, you might be interested in trying out its latest mobile version, Ragnarok X: Next Generation. This game is an official remake of the original masterpiece, authorized by Gravity from South Korea. It features retro style, new classes, cross-server PvP, guild wars, pet taming, marriage system, and more. In this article, we will show you how to download Ragnarok X APK and play it on your Android device. We will also share some features, tips, and tricks for playing this game.

    -

    What is Ragnarok X: Next Generation?

    -

    Ragnarok X: Next Generation is a 3D MMORPG mobile game that is based on the classic Ragnarok Online. It is developed by Nuverse and published by Gravity in various regions. The game aims to recreate the original love and nostalgia of the classic game, while adding new elements and improvements. The game has been launched in Southeast Asia, Taiwan, Hong Kong, Macau, Japan, and Korea. It has received positive reviews from players and critics alike.

    -

    download ragnarok x apk


    Download File ✑ ✑ ✑ https://jinyurl.com/2uNKI1



    -

    Why should you download Ragnarok X APK?

    -

    If you want to play Ragnarok X: Next Generation on your Android device, you might need to download its APK file. APK stands for Android Package Kit, which is a file format that contains all the necessary components for installing an app on an Android device. There are several reasons why you might want to download Ragnarok X APK:

    -
      -
    • The game is not available in your region or country.
    • The game is not compatible with your device or operating system.
    • You want to access the latest version or features of the game before they are officially released.
    • You want to avoid any potential errors or bugs that might occur during the installation process from the Google Play Store.
    -

    How to download Ragnarok X APK safely and easily?

    -

    Downloading Ragnarok X APK is not difficult, but you need to be careful about the source and the file. There are many websites that offer APK files for various apps and games, but not all of them are trustworthy or reliable. Some of them might contain malware, viruses, or other harmful content that could damage your device or compromise your privacy. Therefore, you need to follow these steps to download Ragnarok X APK safely and easily:

    -
      -
    1. Find a reputable website that offers Ragnarok X APK. You can use a search engine or a review site to find one.
    2. Check the details and information about the APK file. Make sure it matches the version, size, developer, and description of the game. You can also read the comments and ratings from other users to see if they have any issues or complaints.
    3. Download the APK file to your device. You might need to enable the option to install apps from unknown sources in your device settings. This will allow you to install apps that are not from the Google Play Store.
    4. Install the APK file by tapping on it and following the instructions on the screen. You might need to grant some permissions to the app to run properly.
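The step above about checking the APK's details can be partly automated. An APK is a ZIP archive that must contain AndroidManifest.xml and at least one classes.dex, so a quick structural check can catch a broken or mislabeled download. The sketch below is only a sanity check, not a malware scan, and the file name is a placeholder.

```python
# Minimal sketch: confirm a downloaded file is structurally an APK (a ZIP archive
# containing AndroidManifest.xml and at least one .dex file). This is only a
# sanity check, not a security scan. The file name is a placeholder.
import zipfile

APK_PATH = "ragnarok-x.apk"  # placeholder file name

def looks_like_apk(path: str) -> bool:
    if not zipfile.is_zipfile(path):
        return False
    with zipfile.ZipFile(path) as archive:
        names = set(archive.namelist())
        has_manifest = "AndroidManifest.xml" in names
        has_dex = any(name.endswith(".dex") for name in names)
        return has_manifest and has_dex

if __name__ == "__main__":
    if looks_like_apk(APK_PATH):
        print("File has the basic structure of an APK.")
    else:
        print("File does not look like a valid APK -- do not install it.")
```

If the check fails, re-download the file or pick a different source rather than forcing the installation.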
    -

    Congratulations, you have successfully downloaded and installed Ragnarok X APK on your Android device. You can now launch the game and enjoy its features.

    -

    Features of Ragnarok X: Next Generation

    -

    Ragnarok X: Next Generation is not just a simple remake of the classic Ragnarok Online. It also offers many new and exciting features that will enhance your gaming experience. Here are some of the features that you can expect from this game:

    -

    Super Novice: The ultimate all-rounder class

    -

    One of the most unique features of Ragnarok X: Next Generation is the Super Novice class. This is a special class that can learn skills from all other classes, making it the most versatile and flexible class in the game. You can customize your Super Novice according to your preferences and play style. You can also equip any weapon or armor that you want, as long as you meet the requirements. However, the Super Novice also has some drawbacks, such as low HP and SP, and limited skill slots. Therefore, you need to be careful and strategic when playing as a Super Novice.

    -

    World of Champions: The cross-server PvP competition

    -

    If you are looking for some thrilling and competitive action, you should try out the World of Champions mode. This is a cross-server PvP mode that pits players from different servers against each other in a 5v5 battle. You can join this mode by registering in the World of Champions NPC in Prontera. You will be matched with other players based on your rank and level. The winning team will receive rewards such as honor points, zeny, and rare items. You can also use your honor points to exchange for exclusive costumes and accessories.

    -

    Guild vs Guild: The ultimate test of teamwork and strategy

    -

    Another feature that will test your skills and teamwork is the Guild vs Guild mode. This is a large-scale war mode that involves up to 50 guilds fighting for the control of castles. You can join this mode by being a member of a guild that has registered for the war. You will need to cooperate with your guildmates to attack or defend the castles, using various strategies and tactics. The guilds that successfully occupy the castles will receive rewards such as guild funds, zeny, and rare items. They will also have access to exclusive dungeons and quests.

    -

    -Ragnarok x next generation 2nd anniversary celebration event(^2^)

    -

    Pet Taming: The adorable companions for your adventure

    -

    If you are looking for some cute and loyal companions for your adventure, you should try out the pet taming feature. This feature allows you to capture and tame various monsters in the game, such as Poring, Lunatic, Yoyo, Baphomet Jr., and more. You will need to use specific items to lure and tame them, such as apples, bananas, honey, etc. Once you have tamed them, they will follow you around and assist you in combat. They will also have their own skills and attributes that you can upgrade by feeding them and giving them affection.

    -

    Marriage System: The romantic journey with your loved one

    -

    If you are looking for some romance in your life, you should try out the marriage system feature. This feature allows you to propose to another player that you have a good relationship with, and get married in a beautiful ceremony. You will need to buy a ring, a wedding dress or suit, and a wedding invitation card to prepare for the wedding. You will also need to invite your friends and guildmates to witness your special day. Once you are married, you will receive benefits such as bonus stats, exclusive skills, and special quests.

    -

    Tips and Tricks for Playing Ragnarok X: Next Generation

    -

    Ragnarok X: Next Generation is a fun and immersive game that will keep you entertained for hours. However, it can also be challenging and complex at times, especially for beginners. Therefore, we have prepared some tips and tricks for playing this game that will help you improve your performance and enjoyment.

    -

    How to level up fast and efficiently?

    -

    One of the most important aspects of playing Ragnarok X: Next Generation is leveling up your character. Leveling up will increase your stats, unlock new skills, and allow you to access more content in the game. Here are some ways to level up fast and efficiently:

    -
      -
    • Complete quests: Quests are one of the main sources of experience points in the game. You can find quests from NPCs in various towns and maps. Quests will also reward you with zeny, items, and other benefits.
    • -
    • Join parties: Parties are groups of players that cooperate with each other in combat. Joining parties will allow you to share experience points and loot with other players. You can also benefit from their skills and buffs. You can join parties by using the party finder feature or by inviting other players manually.
    • Use items: Items are consumables that can boost your experience points gain and other aspects of your character. You can use items such as EXP potions, field manuals, battle manuals, etc. to increase your experience points gain. You can also use items such as food, scrolls, cards, etc. to enhance your stats and skills.
    • -
    • Explore maps: Maps are the areas where you can find monsters, NPCs, quests, and other features in the game. Exploring maps will allow you to discover new places, encounter new monsters, and complete new quests. You can also gain experience points by killing monsters and collecting items.
    • -
    -

    How to earn zeny and upgrade your equipment?

    -

    Zeny is the main currency in Ragnarok X: Next Generation. You will need zeny to buy items, upgrade equipment, enhance skills, and perform other actions in the game. Equipment is the gear that you can equip on your character to improve your stats and abilities. Upgrading equipment will increase its quality and effectiveness. Here are some ways to earn zeny and upgrade your equipment:

    -
      -
    • Sell items: Items are the things that you can collect, craft, or buy in the game. You can sell items that you don't need or want to other players or NPCs for zeny. You can use the auction house feature or the personal shop feature to sell items to other players. You can also use the NPC shops or the vending machine feature to sell items to NPCs.
    • -
    • Craft items: Crafting is the process of creating new items from raw materials or existing items. You can craft items such as weapons, armor, accessories, potions, etc. by using the crafting feature or the blacksmith feature. You can use the crafted items for yourself or sell them for zeny.
    • -
    • Upgrade items: Upgrading is the process of improving the quality and level of your equipment. You can upgrade your equipment by using the upgrade feature or the refine feature. You will need materials such as ores, crystals, eluniums, etc. to upgrade your equipment. Upgrading your equipment will increase its stats and effects.
    • -
    • Enhance items: Enhancing is the process of adding extra effects or attributes to your equipment. You can enhance your equipment by using the enhance feature or the enchant feature. You will need materials such as cards, runes, gems, etc. to enhance your equipment. Enhancing your equipment will add special bonuses and abilities to it.
    • -
    -

    How to join a guild and participate in guild activities?

    -

    A guild is a group of players that share a common goal and interest in the game. Joining a guild will allow you to interact with other players, cooperate with them in combat, and enjoy various benefits and features in the game. Guild activities are events or modes that are exclusive for guild members. Participating in guild activities will allow you to earn rewards, improve your reputation, and have fun with your guildmates. Here are some ways to join a guild and participate in guild activities:

    -
      -
    • Find a guild: Finding a guild is the first step to joining a guild. You can find a guild by using the guild finder feature or by browsing the guild list feature. You can also find a guild by asking other players or by checking online forums or communities.
    • -
    • Apply for a guild: Applying for a guild is the second step to joining a guild. You can apply for a guild by sending a request to the guild leader or by accepting an invitation from a guild member. You will need to wait for the approval of the guild leader or the guild officer before you can join the guild.
    • -
    • Contribute to a guild: Contributing to a guild is the third step to joining a guild. You can contribute to a guild by donating zeny, materials, or items to the guild fund or by completing guild quests or missions. Contributing to a guild will increase your contribution points and your reputation within the guild.
    • -
    • Participate in guild activities: Participating in guild activities is the fourth step to joining a guild. You can participate in guild activities by joining the guild war, the guild dungeon, the guild raid, or the guild party. Participating in guild activities will earn you rewards such as zeny, items, honor points, or rare items. You will also have fun and bond with your guildmates.
    • -
    -

    How to customize your character and skills?

    -

    Customizing your character and skills is one of the most enjoyable aspects of playing Ragnarok X: Next Generation. Customizing your character and skills will allow you to express your personality, style, and preferences in the game. You can also optimize your performance and effectiveness in combat by choosing the best combination of skills and equipment for your character. Here are some ways to customize your character and skills:

    -
      -
    • Choose a class: Choosing a class is the first step to customizing your character and skills. You can choose from six classes in the game: Swordsman, Thief, Archer, Mage, Acolyte, and Merchant. Each class has its own strengths, weaknesses, and roles in the game. You can also change your class later in the game by using the job change feature.
    • -
    • Choose a hairstyle: Choosing a hairstyle is the second step to customizing your character and skills. You can choose from various hairstyles in the game, ranging from cute to cool to elegant. You can also change your hairstyle later in the game by using the barber shop feature or by buying hair coupons.
    • -
    • Choose a costume: Choosing a costume is the third step to customizing your character and skills. You can choose from various costumes in the game, such as uniforms, suits, dresses, casual wear, etc. You can also change your costume later in the game by using the wardrobe feature or by buying costume coupons.
    • -
    • Choose a skill build: Choosing a skill build is the fourth step to customizing your character and skills. You can choose from various skills in the game, depending on your class and level. You can also change your skill build later in the game by using the skill reset feature or by buying skill reset coupons.
    • -
    -

    How to enjoy the social aspects of the game?

    -

    Ragnarok X: Next Generation is not only a game, but also a social platform. You can interact with other players, make friends, chat, trade, and have fun with them in the game. You can also join various events and activities that are designed to enhance your social experience in the game. Here are some ways to enjoy the social aspects of the game:

    -
      -
    • Use chat: Chat is one of the main ways to communicate with other players in the game. You can use chat to send messages, emojis, stickers, or voice messages to other players. You can also use chat to join different channels, such as world chat, guild chat, party chat, etc.
    • -
    • Use friend: Friend is one of the main ways to connect with other players in the game. You can use friend to add other players as your friends, send them gifts, invite them to parties or guilds, or view their profiles.
    • -
    • Use emoticon: Emoticon is one of the main ways to express yourself in the game. You can use emoticon to perform various actions or gestures with your character, such as waving, laughing, crying, dancing, etc. You can also use emoticon to interact with other players or NPCs.
    • -
    • Use event: Event is one of the main ways to participate in various activities in the game. You can use event to join different events that are held regularly or occasionally in the game, such as festivals, concerts, quizzes, etc. You can also use event to earn rewards such as zeny, items, costumes, etc.
    • -
    -

    Conclusion

    -

    Ragnarok X: Next Generation is a great game for fans of Ragnarok Online and MMORPGs in general. It offers a nostalgic and immersive experience that will keep you hooked for hours. It also offers many new and exciting features that will enhance your gaming experience. If you want to play this game on your Android device, you should download Ragnarok X APK from a reputable website and install it on your device. You should also follow our tips and tricks for playing this game that will help you improve your performance and enjoyment.

    -

    FAQs

    -

    Here are some frequently asked questions about Ragnarok X: Next Generation:

    -
      -
    1. Is Ragnarok X: Next Generation free to play?
      -Yes, Ragnarok X: Next Generation is free to play. However, it also has some optional in-app purchases that can enhance your gameplay or appearance. You can buy items such as zeny, diamonds, costumes, etc. with real money. However, these purchases are not necessary to enjoy the game.
    2. -
    3. Is Ragnarok X: Next Generation compatible with my device?
      -Ragnarok X: Next Generation is compatible with most Android devices that have at least 2 GB of RAM and Android 5.0 or higher. However, some devices might have issues with the game due to various factors such as hardware, software, or network. If you encounter any problems with the game, you can contact the customer service or check the official website for solutions.
    4. -
    5. Is Ragnarok X: Next Generation safe to download and play?
      -Yes, Ragnarok X: Next Generation is safe to download and play. The game is authorized by Gravity from South Korea and developed by Nuverse, a reputable game company. The game also has various security measures and policies to protect your privacy and data. However, you should be careful about the source and the file of the APK that you download, as some websites might offer fake or harmful APK files. You should also avoid using any third-party tools or hacks that might compromise your account or device.
    6. -
    7. How can I contact the customer service or the community of Ragnarok X: Next Generation?
      -You can contact the customer service or the community of Ragnarok X: Next Generation by using the following methods:

      -
        -
      • Customer service: You can use the customer service feature in the game to submit a ticket or chat with an agent. You can also email them at rxng@gravity.co.kr.
      • -
      • Community: You can use the community feature in the game to join various groups or forums. You can also follow their official social media accounts such as Facebook, Instagram, Twitter, YouTube, etc.
      • -
    8. -
    9. How can I support Ragnarok X: Next Generation?
      -You can support Ragnarok X: Next Generation by doing the following things:

      -
        -
      • Play the game regularly and invite your friends to join you.
      • -
      • Rate and review the game on the Google Play Store or other platforms.
      • -
      • Share your feedback and suggestions with the developers and the customer service.
      • -
      • Purchase items or services in the game to support its development and maintenance.
      • -
    10. -

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/FM WhatsApp APK Download for Android - Latest Version 2023 with New Features.md b/spaces/1phancelerku/anime-remove-background/FM WhatsApp APK Download for Android - Latest Version 2023 with New Features.md deleted file mode 100644 index 96979469f832fdd4563f07316df9ab0d9cd84e39..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/FM WhatsApp APK Download for Android - Latest Version 2023 with New Features.md +++ /dev/null @@ -1,95 +0,0 @@ - -

    FM WhatsApp 2023 APK Download: Everything You Need to Know

    -

    Are you looking for a way to enhance your WhatsApp experience with more features and customization options? If yes, then you might want to try FM WhatsApp, one of the most popular and advanced WhatsApp mods available. In this article, we will tell you everything you need to know about FM WhatsApp 2023 APK download, including what it is, what it offers, how to download and install it, how to update it, and some frequently asked questions. Let's get started!

    -

    What is FM WhatsApp?

    -

    FM WhatsApp is a modified version of the official WhatsApp app that adds more functionality and personalization to the original app. It is developed by Fouad Mokdad, a well-known modder who also created other popular WhatsApp mods like Fouad WhatsApp and YoWhatsApp. FM WhatsApp allows you to enjoy features that are not available in the official app, such as themes, fonts, emojis, privacy settings, anti-delete messages, status downloader, and much more.

    -

    fm whatsapp 2023 apk download


    Download Zip ––– https://jinyurl.com/2uNKsH



    -

    Features of FM WhatsApp

    -

    Here are some of the main features that you can get with FM WhatsApp:

    -
      -
    • Anti-ban: You can use FM WhatsApp without worrying about getting banned by the official app.
    • Customization: You can change the look and feel of your WhatsApp app with hundreds of themes, fonts, and emojis.
    • Privacy: You can hide your online status, last seen, blue ticks, typing status, and more.
    • Anti-delete messages: You can view messages and status updates that have been deleted by the sender.
    • Media sharing: You can send up to 90 images at once and video files up to 700 MB.
    • Image quality: You can increase the quality of images that you send or receive.
    • And many more.
    -

    Benefits of FM WhatsApp

    -

    Here are some of the benefits that you can enjoy with FM WhatsApp:

    -
      -
    • You can have more control over your WhatsApp app and customize it according to your preferences.
    • -
    • You can access features that are not available in the official app and enhance your user experience.
    • -
    • You can protect your privacy and security with more options and settings.
    • -
    • You can communicate with your contacts more easily and conveniently with more media sharing options.
    • -
    -

    How to Download and Install FM WhatsApp 2023 APK on Android?

    -

    If you want to download and install FM WhatsApp 2023 APK on your Android device, you need to follow these steps:

    -

    Download FM WhatsApp APK File

    -

    First, you need to download the latest version of FM WhatsApp APK file from a reliable source. You can use this link to download the file. The file name is FMWA9.25_By_FouadMODS.apk, the file size is 50.2 MB, and the latest update is March 2022. Make sure that your device has enough storage space and a stable internet connection before downloading the file.
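Because the paragraph above gives both the expected file name and size (about 50.2 MB), a quick size check is possible before you open the file. The Python sketch below flags a download whose size is far from the published figure; the 2 MB tolerance is an arbitrary assumption, not a value from the article.

```python
# Minimal sketch: compare the downloaded file's size with the size stated in the
# article (50.2 MB for FMWA9.25_By_FouadMODS.apk). The 2 MB tolerance is an
# arbitrary assumption, not a published value.
import os

APK_PATH = "FMWA9.25_By_FouadMODS.apk"
EXPECTED_MB = 50.2
TOLERANCE_MB = 2.0  # assumed margin for rounding and packaging differences

if __name__ == "__main__":
    size_mb = os.path.getsize(APK_PATH) / (1024 * 1024)
    print(f"Downloaded file is {size_mb:.1f} MB (expected about {EXPECTED_MB} MB).")
    if abs(size_mb - EXPECTED_MB) > TOLERANCE_MB:
        print("Size differs noticeably from the published figure -- re-download or verify the source.")
    else:
        print("Size is in the expected range.")
```

A size check cannot prove a file is genuine, but a large mismatch is a cheap early warning that you grabbed the wrong file or an incomplete download.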

    -

    Enable Unknown Sources

    -

    Next, you need to enable unknown sources on your device to allow the installation of apps from sources other than the Google Play Store. To do this, go to Settings > Security > Unknown Sources and toggle it on. You may see a warning message, but you can ignore it and proceed.

    -

    Install FM WhatsApp APK File

    -

    Then, you need to locate the downloaded APK file on your device and tap on it to start the installation process. You may see a pop-up asking for permissions, but you can grant them and follow the instructions on the screen. The installation may take a few minutes, so please be patient.

    -

    Verify Your Phone Number

    -

    Finally, you need to verify your phone number to activate FM WhatsApp on your device. To do this, open the app and enter your phone number. You will receive a verification code via SMS or a phone call. Enter the code and confirm your account. You can also restore your chat backup from the official app if you have one. That's it! You have successfully installed FM WhatsApp 2023 APK on your Android device.

    -

    How to Update FM WhatsApp to the Latest Version?

    -

    If you want to update FM WhatsApp to the latest version, you need to follow these steps:

    -

    -fm whatsapp 2023 apk download lock app and chats with password or fingerprint

    -

    Check for Updates


    First, you need to check if there is a new version of FM WhatsApp available. To do this, open the app and go to Menu > Fouad Mods > Updates. You will see a message telling you if there is an update or not. If there is an update, you can tap on Download to get the latest APK file.


    Download and Install the Latest Version


    Next, you need to download and install the latest version of FM WhatsApp APK file on your device. To do this, follow the same steps as above for downloading and installing FM WhatsApp 2023 APK. You don't need to uninstall the previous version or enable unknown sources again. Just overwrite the existing app with the new one and verify your phone number again. You have successfully updated FM WhatsApp to the latest version.


    FAQs about FM WhatsApp


    Here are some of the frequently asked questions about FM WhatsApp:

    Question: Is FM WhatsApp safe to use?
    Answer: FM WhatsApp is safe to use as long as you download it from a trusted source and scan it for viruses before installing it. However, it is not an official app and it may violate some of the terms and conditions of WhatsApp. Therefore, use it at your own risk and discretion.

    Question: Can I use FM WhatsApp with the official app?
    Answer: Yes, you can use FM WhatsApp alongside the official app if you want to have two WhatsApp accounts on the same device. However, you need to use a different phone number for each account and install the apps in separate folders.

    Question: How can I back up my chats on FM WhatsApp?
    Answer: You can back up your chats on FM WhatsApp by going to Menu > Settings > Chats > Chat Backup. You can choose to back up your chats locally or on Google Drive. You can also restore your chats from the backup when you reinstall or update FM WhatsApp.

    Question: How can I change themes on FM WhatsApp?
    Answer: You can change themes on FM WhatsApp by going to Menu > Fouad Mods > Universal > Themes. You can choose from hundreds of available themes or download more from the internet. You can also create your own theme by customizing various elements of the app.

    Question: How can I contact the developer of FM WhatsApp?
    Answer: You can contact the developer of FM WhatsApp by going to Menu > Fouad Mods > About > Contact Me. You can send him an email or follow him on social media platforms like Twitter, Instagram, and Telegram.

    Conclusion


    In conclusion, FM WhatsApp is a great alternative to the official WhatsApp app that offers more features and customization options. You can download and install FM WhatsApp 2023 APK on your Android device by following the steps mentioned in this article. You can also update it to the latest version whenever there is one available. However, you should be aware of the risks involved in using a modded app and use it responsibly. We hope that this article has helped you learn more about FM WhatsApp 2023 APK download and answered some of your questions.

    \ No newline at end of file diff --git a/spaces/1phancelerku/anime-remove-background/Free 3D Models of Orange Trees - Easy to Customize and Render.md b/spaces/1phancelerku/anime-remove-background/Free 3D Models of Orange Trees - Easy to Customize and Render.md deleted file mode 100644 index b2e2137a64ada459f710d9e11194fa47046c293e..0000000000000000000000000000000000000000 --- a/spaces/1phancelerku/anime-remove-background/Free 3D Models of Orange Trees - Easy to Customize and Render.md +++ /dev/null @@ -1,136 +0,0 @@ - -

    How to Find and Download Free Orange Tree 3D Models


    If you are looking for realistic and high-quality orange tree 3D models for your project, you might be wondering where to find them online. Whether you need them for animation, rendering, game development, or any other purpose, you can save time and money by downloading free 3D models from various websites. In this article, we will show you how to find and download free orange tree 3D models from some of the most popular sources on the web.


    Introduction


    What are orange tree 3D models and why are they useful?


    An orange tree 3D model is a digital representation of an orange tree that can be used in various applications that require 3D graphics. A 3D model consists of vertices, edges, faces, and textures that define the shape, color, and appearance of the object. A 3D model can also have animations, rigging, lighting, and other features that make it more realistic and interactive.
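
    To make the vertex/face idea concrete, here is a minimal Python sketch, assuming the numpy and trimesh packages are installed (they are illustrative choices, not something the model sites require). It builds a tiny closed mesh from explicit vertex and face arrays -- the same kind of data a downloaded orange tree model contains, just at a much larger scale.

        import numpy as np
        import trimesh

        # Four corner points (x, y, z) ...
        vertices = np.array([
            [0.0, 0.0, 0.0],
            [1.0, 0.0, 0.0],
            [0.0, 1.0, 0.0],
            [0.0, 0.0, 1.0],
        ])
        # ... and four triangular faces referencing those points by index.
        faces = np.array([
            [0, 1, 2],
            [0, 1, 3],
            [0, 2, 3],
            [1, 2, 3],
        ])

        mesh = trimesh.Trimesh(vertices=vertices, faces=faces)
        print(mesh.vertices.shape, mesh.faces.shape)  # (4, 3) (4, 3)
        print(mesh.is_watertight)                     # True: every edge is shared by two faces
        mesh.export("tetrahedron.obj")                # OBJ is one of the formats free models ship in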


    Orange tree 3D models are useful for many reasons. For example, they can help you create stunning scenes and environments for your animations or games. They can also help you visualize and design your own garden or landscape. They can even be used for educational purposes, such as teaching students about botany or ecology.


    Where can you find free orange tree 3D models online?


    There are many websites that offer free 3D models of various objects, including orange trees. However, not all of them are reliable or easy to use. Some of them may have low-quality models, limited formats, or unclear licenses. Therefore, you need to be careful and selective when choosing a website to download free 3D models from.


    In this article, we will focus on three websites that are well-known and trusted by many 3D artists and enthusiasts. They are TurboSquid, Sketchfab, and CGTrader. These websites have a large collection of free orange tree 3D models that you can browse, download, and use in your projects. We will explain how to use each website and what to look for when downloading a model.


    TurboSquid: A Popular Source of Free 3D Models


    What is TurboSquid and how does it work?


    TurboSquid is one of the largest and oldest online marketplaces for 3D models. It was founded in 2000 and has over one million models in its catalog. TurboSquid allows anyone to buy or sell 3D models for various purposes. It also has a section dedicated to free 3D models that anyone can download and use without paying anything.


    TurboSquid works by connecting buyers and sellers of 3D models. Buyers can search for the models they need by using keywords, filters, categories, or collections. They can also preview the models in different views, check the details and ratings, and download them in various formats. Sellers can upload their models to TurboSquid and set their own prices or offer them for free. TurboSquid also has a quality assurance program called CheckMate that certifies the models that meet certain standards of quality and compatibility.


    How to search for free orange tree 3D models on TurboSquid?


    Searching for free orange tree 3D models on TurboSquid is easy and fast. Here are the steps you need to follow:


    Filter by free, orange, and tree keywords


    The first step is to go to the free 3D models section of TurboSquid. You will see a search bar where you can enter the keywords that describe the model you are looking for. In this case, you can type "free orange tree" and hit enter. You will see a list of results that match your query.


    Sort by best match, quality, or poly count


    The next step is to sort the results by the criteria that matter to you. You can use the drop-down menu on the top right corner of the page to choose how to sort the results. You can sort them by best match, quality, or poly count. Best match will show you the models that are most relevant to your query. Quality will show you the models that have the highest ratings or CheckMate certification. Poly count will show you the models that have the lowest or highest number of polygons.


    Check the license, format, and details of each model


    The final step is to check the license, format, and details of each model before downloading it. You can click on the thumbnail of each model to see more information about it. You will see a page that shows you the preview images, description, specifications, reviews, and related models of the model. You will also see a section that shows you the license, format, and download options of the model.


    The license tells you how you can use the model in your project. Some models are royalty-free, which means you can use them for any purpose without paying anything. Some models are editorial-only, which means you can only use them for non-commercial purposes such as news or education. Some models have custom licenses, which means you have to read and follow the terms and conditions of the seller.


    The format tells you what file types are available for the model. Some models have multiple formats, such as OBJ, FBX, 3DS, or STL. Some models have only one format, such as MAX or BLEND. You should choose the format that is compatible with your software or application.


    The download options tell you how you can get the model on your device. Some models have direct download links, which means you can download them instantly by clicking on them. Some models have email delivery links, which means you have to enter your email address and wait for the link to be sent to you.


    Download the model and use it in your project


    Once you have checked everything and found the model that suits your needs, you can download it and use it in your project. You should always respect the license and credit the seller if required. You should also check the quality and compatibility of the model before using it in your project. You may need to adjust some settings or parameters to make it look better or fit better in your scene.
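
    As a quick way to check quality and compatibility, the hedged Python sketch below loads a downloaded file with trimesh, reports its polygon count and dimensions, and converts it to another format. The filename is a placeholder, and the step that flattens a multi-part Scene may differ slightly between trimesh versions.

        import trimesh

        loaded = trimesh.load("orange_tree.obj")  # placeholder filename for the downloaded model

        # Some downloads load as a Scene (several parts plus materials); flatten it for inspection.
        mesh = loaded.dump(concatenate=True) if isinstance(loaded, trimesh.Scene) else loaded

        print("faces:", len(mesh.faces))               # rough poly count
        print("vertices:", len(mesh.vertices))
        print("bounding box extents:", mesh.extents)   # helps you check units and scale against your scene
        print("watertight:", mesh.is_watertight)

        # Convert to another format if your software prefers it.
        mesh.export("orange_tree.glb")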


    Other Websites to Download Free Orange Tree 3D Models


    Sketchfab: A Platform for 3D and VR Content


    How to find and download free orange tree 3D models on Sketchfab?


    Sketchfab is another popular platform for 3D and VR content. It was founded in 2012 and has over four million models in its library. Sketchfab allows anyone to upload, view, share, and download 3D models for various purposes. It also has a section dedicated to free 3D models that anyone can download and use without paying anything.


    Finding and downloading free orange tree 3D models on Sketchfab is similar to TurboSquid. Here are the steps you need to follow:

    - Go to the free 3D models section of Sketchfab.
    - Type "orange tree" in the search bar and hit enter.
    - Use the filters on the left side of the page to narrow down your results by category, license, format, poly count, or tags.
    - Click on the thumbnail of each model to see more information about it.
    - Check the license, format, details, and preview of each model before downloading it.
    - Click on the download button on the bottom right corner of each model page.
    - Choose the format that is compatible with your software or application.
    - Download the model and use it in your project.

    CGTrader: A Marketplace for 3D Assets


    How to find and download free orange tree 3D models on CGTrader?


    CGTrader is another marketplace for 3D assets. It was founded in 2011 and has over one million models in its catalog. CGTrader allows anyone to buy or sell 3D models for various purposes. It also has a section dedicated to free 3D models that anyone can download and use without paying anything.


    Finding and downloading free orange tree 3D models on CGTrader is similar to TurboSquid and Sketchfab. Here are the steps you need to follow:

    - Go to the free 3D models section of CGTrader.
    - Type "orange tree" in the search bar and hit enter.
    - Use the filters on the left side of the page to narrow down your results by category, license, format, poly count, or tags.
    - Click on the thumbnail of each model to see more information about it.
    - Check the license, format, details, and preview of each model before downloading it.
    - Click on the download button on the bottom right corner of each model page.
    - Choose the format that is compatible with your software or application.
    - Download the model and use it in your project.

    Conclusion


    Summary of the main points


    In this article, we have shown you how to find and download free orange tree 3D models from some of the most popular websites on the web. We have explained what orange tree 3D models are and why they are useful. We have also given you a step-by-step guide on how to use TurboSquid, Sketchfab, and CGTrader to search for, filter, check, and download free orange tree 3D models for your project.


    Call to action and final remarks


    We hope you have found this article helpful and informative. If you are looking for realistic and high-quality orange tree 3D models for your project, you can save time and money by downloading them for free from these websites. You can also explore other types of 3D models that are available for free or for a reasonable price.


    If you have any questions or feedback, please feel free to leave a comment below. We would love to hear from you and help you with your 3D modeling needs. Thank you for reading and happy downloading!


    FAQs


    What are the benefits of using free orange tree 3D models?


    Some of the benefits of using free orange tree 3D models are:

    • You can save time and money by not having to create or buy your own models.
    • You can enhance the realism and quality of your project by using models that are made by professional 3D artists.
    • You can learn from the models by studying their structure, texture, lighting, and animation.
    • You can support the 3D community by giving credit and feedback to the creators of the models.

    What are the drawbacks of using free orange tree 3D models?


    Some of the drawbacks of using free orange tree 3D models are:

    • You may not find the exact model that matches your vision or requirements.
    • You may have to deal with compatibility issues or errors when importing or exporting the models.
    • You may have to follow certain restrictions or limitations when using the models in your project.
    • You may have to compete with other users who are using the same models in their projects.

    How can I improve the quality and performance of free orange tree 3D models?


    Some of the ways you can improve the quality and performance of free orange tree 3D models are:

    • You can optimize the poly count, texture size, and level of detail of the models to reduce the load on your system (see the sketch after this list).
    • You can adjust the lighting, shading, and rendering settings of your software or application to enhance the appearance of the models.
    • You can modify or customize the models to suit your needs or preferences.
    • You can combine or blend different models to create unique and diverse variations.
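
    As a rough illustration of the poly-count point above, here is a Python sketch using trimesh's quadric decimation. Treat it as an assumption rather than a guaranteed recipe: the decimation call needs an optional backend (fast_simplification or open3d, depending on the trimesh release), its exact name and signature have changed between versions, and the filename is again a placeholder.

        import trimesh

        # force="mesh" merges multi-part files into a single mesh for simplicity.
        mesh = trimesh.load("orange_tree.obj", force="mesh")
        print("before:", len(mesh.faces), "faces")

        # Target a face budget that suits your scene or game engine.
        simplified = mesh.simplify_quadric_decimation(face_count=20_000)

        print("after:", len(simplified.faces), "faces")
        simplified.export("orange_tree_lowpoly.glb")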

    How can I avoid plagiarism or infringement when using free orange tree 3D models?


    Some of the ways you can avoid plagiarism or infringement when using free orange tree 3D models are:

    • You can always check the license and terms of use of each model before downloading and using it in your project.
    • You can always give proper credit and attribution to the original creator or source of the model.
    • You can always use the model for the intended purpose and not for any illegal or unethical activities.
    • You can always respect the rights and reputation of the creator and other users of the model.

    What are some tips and tricks for finding and downloading free orange tree 3D models?


    Some of the tips and tricks for finding and downloading free orange tree 3D models are:

    • You can use specific keywords, phrases, or tags to narrow down your search results.
    • You can use advanced filters, such as category, license, format, poly count, or tags, to refine your search results.
    • You can use collections, favorites, or bookmarks to save and organize the models that you like or want to use later.
    • You can use ratings, reviews, or comments to evaluate the quality and popularity of the models.
    • You can use previews, screenshots, or videos to see how the models look and behave in different situations.

    \ No newline at end of file diff --git a/spaces/1toTree/lora_test/ppdiffusers/pipelines/paint_by_example/pipeline_paint_by_example.py b/spaces/1toTree/lora_test/ppdiffusers/pipelines/paint_by_example/pipeline_paint_by_example.py deleted file mode 100644 index 390011bd816ce8616090968b43d07299cf939505..0000000000000000000000000000000000000000 --- a/spaces/1toTree/lora_test/ppdiffusers/pipelines/paint_by_example/pipeline_paint_by_example.py +++ /dev/null @@ -1,536 +0,0 @@ -# Copyright 2022 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import inspect -from typing import Callable, List, Optional, Union - -import numpy as np -import paddle -import PIL - -from paddlenlp.transformers import CLIPFeatureExtractor - -from ...models import AutoencoderKL, UNet2DConditionModel -from ...pipeline_utils import DiffusionPipeline -from ...schedulers import DDIMScheduler, LMSDiscreteScheduler, PNDMScheduler -from ...utils import logging -from ..stable_diffusion import StableDiffusionPipelineOutput -from ..stable_diffusion.safety_checker import StableDiffusionSafetyChecker -from .image_encoder import PaintByExampleImageEncoder - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -def prepare_mask_and_masked_image(image, mask): - """ - Prepares a pair (image, mask) to be consumed by the Paint by Example pipeline. This means that those inputs will be - converted to ``paddle.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the - ``image`` and ``1`` for the ``mask``. - - The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be - binarized (``mask > 0.5``) and cast to ``torch.float32`` too. - - Args: - image (Union[np.array, PIL.Image, paddle.Tensor]): The image to inpaint. - It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width`` - ``paddle.Tensor`` or a ``batch x channels x height x width`` ``paddle.Tensor``. - mask (_type_): The mask to apply to the image, i.e. regions to inpaint. - It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width`` - ``paddle.Tensor`` or a ``batch x 1 x height x width`` ``paddle.Tensor``. - - - Raises: - ValueError: ``paddle.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``paddle.Tensor`` mask - should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions. - TypeError: ``mask`` is a ``paddle.Tensor`` but ``image`` is not - (ot the other way around). - - Returns: - tuple[paddle.Tensor]: The pair (mask, masked_image) as ``paddle.Tensor`` with 4 - dimensions: ``batch x channels x height x width``. 
- """ - if isinstance(image, paddle.Tensor): - if not isinstance(mask, paddle.Tensor): - raise TypeError(f"`image` is a paddle.Tensor but `mask` (type: {type(mask)} is not") - - # Batch single image - if image.ndim == 3: - assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)" - image = image.unsqueeze(0) - - # Batch and add channel dim for single mask - if mask.ndim == 2: - mask = mask.unsqueeze(0).unsqueeze(0) - - # Batch single mask or add channel dim - if mask.ndim == 3: - # Batched mask - if mask.shape[0] == image.shape[0]: - mask = mask.unsqueeze(1) - else: - mask = mask.unsqueeze(0) - - assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions" - assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions" - assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size" - assert mask.shape[1] == 1, "Mask image must have a single channel" - - # Check image is in [-1, 1] - if image.min() < -1 or image.max() > 1: - raise ValueError("Image should be in [-1, 1] range") - - # Check mask is in [0, 1] - if mask.min() < 0 or mask.max() > 1: - raise ValueError("Mask should be in [0, 1] range") - - # paint-by-example inverses the mask - mask = 1 - mask - - # Binarize mask - mask[mask < 0.5] = 0 - mask[mask >= 0.5] = 1 - - # Image as float32 - image = image.cast(paddle.float32) - elif isinstance(mask, paddle.Tensor): - raise TypeError(f"`mask` is a paddle.Tensor but `image` (type: {type(image)} is not") - else: - if isinstance(image, PIL.Image.Image): - image = [image] - - image = np.concatenate([np.array(i.convert("RGB"))[None, :] for i in image], axis=0) - image = image.transpose(0, 3, 1, 2) - image = paddle.to_tensor(image).cast(paddle.float32) / 127.5 - 1.0 - - # preprocess mask - if isinstance(mask, PIL.Image.Image): - mask = [mask] - - mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0) - mask = mask.astype(np.float32) / 255.0 - - # paint-by-example inverses the mask - mask = 1 - mask - - mask[mask < 0.5] = 0 - mask[mask >= 0.5] = 1 - mask = paddle.to_tensor(mask) - - masked_image = image * mask - - return mask, masked_image - - -class PaintByExamplePipeline(DiffusionPipeline): - r""" - Pipeline for text-guided image inpainting using Stable Diffusion. *This is an experimental feature*. - - This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods the - library implements for all the pipelines (such as downloading or saving, running on a particular device, etc.) - - Args: - vae ([`AutoencoderKL`]): - Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations. - text_encoder ([`CLIPTextModel`]): - Frozen text-encoder. Stable Diffusion uses the text portion of - [CLIP](https://huggingface.co/docs/transformers/model_doc/clip#transformers.CLIPTextModel), specifically - the [clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14) variant. - tokenizer (`CLIPTokenizer`): - Tokenizer of class - [CLIPTokenizer](https://huggingface.co/docs/transformers/v4.21.0/en/model_doc/clip#transformers.CLIPTokenizer). - unet ([`UNet2DConditionModel`]): Conditional U-Net architecture to denoise the encoded image latents. - scheduler ([`SchedulerMixin`]): - A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of - [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`]. 
- safety_checker ([`StableDiffusionSafetyChecker`]): - Classification module that estimates whether generated images could be considered offensive or harmful. - Please, refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for details. - feature_extractor ([`CLIPFeatureExtractor`]): - Model that extracts features from generated images to be used as inputs for the `safety_checker`. - """ - _optional_components = ["safety_checker"] - - def __init__( - self, - vae: AutoencoderKL, - image_encoder: PaintByExampleImageEncoder, - unet: UNet2DConditionModel, - scheduler: Union[DDIMScheduler, PNDMScheduler, LMSDiscreteScheduler], - safety_checker: StableDiffusionSafetyChecker, - feature_extractor: CLIPFeatureExtractor, - requires_safety_checker: bool = False, - ): - super().__init__() - - self.register_modules( - vae=vae, - image_encoder=image_encoder, - unet=unet, - scheduler=scheduler, - safety_checker=safety_checker, - feature_extractor=feature_extractor, - ) - self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) - self.register_to_config(requires_safety_checker=requires_safety_checker) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker - def run_safety_checker(self, image, dtype): - if self.safety_checker is not None: - safety_checker_input = self.feature_extractor(self.numpy_to_pil(image), return_tensors="pd") - image, has_nsfw_concept = self.safety_checker( - images=image, clip_input=safety_checker_input.pixel_values.cast(dtype) - ) - else: - has_nsfw_concept = None - return image, has_nsfw_concept - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs - def prepare_extra_step_kwargs(self, generator, eta): - # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature - # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers. 
- # eta corresponds to η in DDIM paper: https://arxiv.org/abs/2010.02502 - # and should be between [0, 1] - - accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys()) - extra_step_kwargs = {} - if accepts_eta: - extra_step_kwargs["eta"] = eta - - # check if the scheduler accepts generator - accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys()) - if accepts_generator: - extra_step_kwargs["generator"] = generator - return extra_step_kwargs - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.decode_latents - def decode_latents(self, latents): - latents = 1 / 0.18215 * latents - image = self.vae.decode(latents).sample - image = (image / 2 + 0.5).clip(0, 1) - # we always cast to float32 as this does not cause significant overhead and is compatible with bfloa16 - image = image.transpose([0, 2, 3, 1]).cast("float32").numpy() - return image - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_image_variation.StableDiffusionImageVariationPipeline.check_inputs - def check_inputs(self, image, height, width, callback_steps): - if ( - not isinstance(image, paddle.Tensor) - and not isinstance(image, PIL.Image.Image) - and not isinstance(image, list) - ): - raise ValueError( - "`image` has to be of type `paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]` but is" - f" {type(image)}" - ) - - if height % 8 != 0 or width % 8 != 0: - raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.") - - if (callback_steps is None) or ( - callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0) - ): - raise ValueError( - f"`callback_steps` has to be a positive integer but is {callback_steps} of type" - f" {type(callback_steps)}." - ) - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_latents - def prepare_latents(self, batch_size, num_channels_latents, height, width, dtype, generator, latents=None): - shape = [batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor] - if isinstance(generator, list) and len(generator) != batch_size: - raise ValueError( - f"You have passed a list of generators of length {len(generator)}, but requested an effective batch" - f" size of {batch_size}. Make sure the batch size matches the length of the generators." 
- ) - - if latents is None: - if isinstance(generator, list): - shape = [ - 1, - ] + shape[1:] - latents = [paddle.randn(shape, generator=generator[i], dtype=dtype) for i in range(batch_size)] - latents = paddle.concat(latents, axis=0) - else: - latents = paddle.randn(shape, generator=generator, dtype=dtype) - else: - if latents.shape != shape: - raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {shape}") - - # scale the initial noise by the standard deviation required by the scheduler - latents = latents * self.scheduler.init_noise_sigma - return latents - - # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.StableDiffusionInpaintPipeline.prepare_mask_latents - def prepare_mask_latents( - self, mask, masked_image, batch_size, height, width, dtype, generator, do_classifier_free_guidance - ): - # resize the mask to latents shape as we concatenate the mask to the latents - # we do that before converting to dtype to avoid breaking in case we're using cpu_offload - # and half precision - mask = paddle.nn.functional.interpolate( - mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor) - ) - mask = mask.cast(dtype) - - masked_image = masked_image.cast(dtype) - - # encode the mask image into latents space so we can concatenate it to the latents - if isinstance(generator, list): - masked_image_latents = [ - self.vae.encode(masked_image[i : i + 1]).latent_dist.sample(generator=generator[i]) - for i in range(batch_size) - ] - masked_image_latents = paddle.concat(masked_image_latents, axis=0) - else: - masked_image_latents = self.vae.encode(masked_image).latent_dist.sample(generator=generator) - masked_image_latents = 0.18215 * masked_image_latents - - # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method - if mask.shape[0] < batch_size: - if not batch_size % mask.shape[0] == 0: - raise ValueError( - "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to" - f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number" - " of masks that you pass is divisible by the total requested batch size." - ) - mask = mask.tile([batch_size // mask.shape[0], 1, 1, 1]) - if masked_image_latents.shape[0] < batch_size: - if not batch_size % masked_image_latents.shape[0] == 0: - raise ValueError( - "The passed images and the required batch size don't match. Images are supposed to be duplicated" - f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed." - " Make sure the number of images that you pass is divisible by the total requested batch size." 
- ) - masked_image_latents = masked_image_latents.tile([batch_size // masked_image_latents.shape[0], 1, 1, 1]) - - mask = paddle.concat([mask] * 2) if do_classifier_free_guidance else mask - masked_image_latents = ( - paddle.concat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents - ) - - # aligning device to prevent device errors when concating it with the latent model input - masked_image_latents = masked_image_latents.cast(dtype) - return mask, masked_image_latents - - def _encode_image(self, image, num_images_per_prompt, do_classifier_free_guidance): - # dtype = self.image_encoder.dtype - - if not isinstance(image, paddle.Tensor): - image = self.feature_extractor(images=image, return_tensors="pd").pixel_values - - # image = image.cast(dtype) - image_embeddings = self.image_encoder(image) - - # duplicate image embeddings for each generation per prompt, using mps friendly method - bs_embed, seq_len, _ = image_embeddings.shape - image_embeddings = image_embeddings.tile([1, num_images_per_prompt, 1]) - image_embeddings = image_embeddings.reshape([bs_embed * num_images_per_prompt, seq_len, -1]) - - if do_classifier_free_guidance: - uncond_embeddings = self.image_encoder.uncond_vector - uncond_embeddings = uncond_embeddings.tile([1, image_embeddings.shape[0], 1]) - uncond_embeddings = uncond_embeddings.reshape([bs_embed * num_images_per_prompt, 1, -1]) - - # For classifier free guidance, we need to do two forward passes. - # Here we concatenate the unconditional and text embeddings into a single batch - # to avoid doing two forward passes - image_embeddings = paddle.concat([uncond_embeddings, image_embeddings]) - - return image_embeddings - - @paddle.no_grad() - def __call__( - self, - example_image: Union[paddle.Tensor, PIL.Image.Image], - image: Union[paddle.Tensor, PIL.Image.Image], - mask_image: Union[paddle.Tensor, PIL.Image.Image], - height: Optional[int] = None, - width: Optional[int] = None, - num_inference_steps: int = 50, - guidance_scale: float = 5.0, - num_images_per_prompt: Optional[int] = 1, - eta: float = 0.0, - generator: Optional[Union[paddle.Generator, List[paddle.Generator]]] = None, - latents: Optional[paddle.Tensor] = None, - output_type: Optional[str] = "pil", - return_dict: bool = True, - callback: Optional[Callable[[int, int, paddle.Tensor], None]] = None, - callback_steps: Optional[int] = 1, - ): - r""" - Function invoked when calling the pipeline for generation. - - Args: - example_image (`paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): - The exemplar image to guide the image generation. - image (`paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): - `Image`, or tensor representing an image batch which will be inpainted, *i.e.* parts of the image will - be masked out with `mask_image` and repainted according to `prompt`. - mask_image (`paddle.Tensor` or `PIL.Image.Image` or `List[PIL.Image.Image]`): - `Image`, or tensor representing an image batch, to mask `image`. White pixels in the mask will be - repainted, while black pixels will be preserved. If `mask_image` is a PIL image, it will be converted - to a single channel (luminance) before use. If it's a tensor, it should contain one color channel (L) - instead of 3, so the expected shape would be `(B, H, W, 1)`. - height (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The height in pixels of the generated image. 
- width (`int`, *optional*, defaults to self.unet.config.sample_size * self.vae_scale_factor): - The width in pixels of the generated image. - num_inference_steps (`int`, *optional*, defaults to 50): - The number of denoising steps. More denoising steps usually lead to a higher quality image at the - expense of slower inference. - guidance_scale (`float`, *optional*, defaults to 7.5): - Guidance scale as defined in [Classifier-Free Diffusion Guidance](https://arxiv.org/abs/2207.12598). - `guidance_scale` is defined as `w` of equation 2. of [Imagen - Paper](https://arxiv.org/pdf/2205.11487.pdf). Guidance scale is enabled by setting `guidance_scale > - 1`. Higher guidance scale encourages to generate images that are closely linked to the text `prompt`, - usually at the expense of lower image quality. - num_images_per_prompt (`int`, *optional*, defaults to 1): - The number of images to generate per prompt. - eta (`float`, *optional*, defaults to 0.0): - Corresponds to parameter eta (η) in the DDIM paper: https://arxiv.org/abs/2010.02502. Only applies to - [`schedulers.DDIMScheduler`], will be ignored for others. - generator (`torch.Generator`, *optional*): - One or a list of [torch generator(s)](https://pytorch.org/docs/stable/generated/torch.Generator.html) - to make generation deterministic. - latents (`paddle.Tensor`, *optional*): - Pre-generated noisy latents, sampled from a Gaussian distribution, to be used as inputs for image - generation. Can be used to tweak the same generation with different prompts. If not provided, a latents - tensor will ge generated by sampling using the supplied random `generator`. - output_type (`str`, *optional*, defaults to `"pil"`): - The output format of the generate image. Choose between - [PIL](https://pillow.readthedocs.io/en/stable/): `PIL.Image.Image` or `np.array`. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a - plain tuple. - callback (`Callable`, *optional*): - A function that will be called every `callback_steps` steps during inference. The function will be - called with the following arguments: `callback(step: int, timestep: int, latents: paddle.Tensor)`. - callback_steps (`int`, *optional*, defaults to 1): - The frequency at which the `callback` function will be called. If not specified, the callback will be - called at every step. - - Returns: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`: - [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] if `return_dict` is True, otherwise a `tuple. - When returning a tuple, the first element is a list with the generated images, and the second element is a - list of `bool`s denoting whether the corresponding generated image likely represents "not-safe-for-work" - (nsfw) content, according to the `safety_checker`. - """ - # 1. Define call parameters - if isinstance(image, PIL.Image.Image): - batch_size = 1 - elif isinstance(image, list): - batch_size = len(image) - else: - batch_size = image.shape[0] - # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2) - # of the Imagen paper: https://arxiv.org/pdf/2205.11487.pdf . `guidance_scale = 1` - # corresponds to doing no classifier free guidance. - do_classifier_free_guidance = guidance_scale > 1.0 - - # 2. Preprocess mask and image - mask, masked_image = prepare_mask_and_masked_image(image, mask_image) - height, width = masked_image.shape[-2:] - - # 3. 
Check inputs - self.check_inputs(example_image, height, width, callback_steps) - - # 4. Encode input image - image_embeddings = self._encode_image(example_image, num_images_per_prompt, do_classifier_free_guidance) - - # 5. set timesteps - self.scheduler.set_timesteps(num_inference_steps) - timesteps = self.scheduler.timesteps - - # 6. Prepare latent variables - num_channels_latents = self.vae.config.latent_channels - latents = self.prepare_latents( - batch_size * num_images_per_prompt, - num_channels_latents, - height, - width, - image_embeddings.dtype, - generator, - latents, - ) - - # 7. Prepare mask latent variables - mask, masked_image_latents = self.prepare_mask_latents( - mask, - masked_image, - batch_size * num_images_per_prompt, - height, - width, - image_embeddings.dtype, - generator, - do_classifier_free_guidance, - ) - - # 8. Check that sizes of mask, masked image and latents match - num_channels_mask = mask.shape[1] - num_channels_masked_image = masked_image_latents.shape[1] - if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels: - raise ValueError( - f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects" - f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +" - f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}" - f" = {num_channels_latents+num_channels_masked_image+num_channels_mask}. Please verify the config of" - " `pipeline.unet` or your `mask_image` or `image` input." - ) - - # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline - extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta) - - # 10. Denoising loop - num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order - with self.progress_bar(total=num_inference_steps) as progress_bar: - for i, t in enumerate(timesteps): - # expand the latents if we are doing classifier free guidance - latent_model_input = paddle.concat([latents] * 2) if do_classifier_free_guidance else latents - - # concat latents, mask, masked_image_latents in the channel dimension - latent_model_input = self.scheduler.scale_model_input(latent_model_input, t) - latent_model_input = paddle.concat([latent_model_input, masked_image_latents, mask], axis=1) - - # predict the noise residual - noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=image_embeddings).sample - - # perform guidance - if do_classifier_free_guidance: - noise_pred_uncond, noise_pred_text = noise_pred.chunk(2) - noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond) - - # compute the previous noisy sample x_t -> x_t-1 - latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample - - # call the callback, if provided - if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0): - progress_bar.update() - if callback is not None and i % callback_steps == 0: - callback(i, t, latents) - - # 11. Post-processing - image = self.decode_latents(latents) - - # 12. Run safety checker - image, has_nsfw_concept = self.run_safety_checker(image, image_embeddings.dtype) - - # 13. 
Convert to PIL - if output_type == "pil": - image = self.numpy_to_pil(image) - - if not return_dict: - return (image, has_nsfw_concept) - - return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept) diff --git a/spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/README.md b/spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/README.md deleted file mode 100644 index 6bfbe69cdfb38a4c77d7b13ec8e2d8229a0bdbac..0000000000000000000000000000000000000000 --- a/spaces/AI-Dashboards/CP.Matplotlib.NetworkX.Streamlit.PyVis.Graphviz/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 🕸️📈Graph NLP Matplotlib NetworkX Streamlit PyViz Graphviz🩺 -emoji: 📉🕸️📈 -colorFrom: pink -colorTo: blue -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/AI-Hobbyist/Hoyo-RVC/docs/training_tips_ko.md b/spaces/AI-Hobbyist/Hoyo-RVC/docs/training_tips_ko.md deleted file mode 100644 index 8b3b6245862aef69480f57263d268c94d5e843ca..0000000000000000000000000000000000000000 --- a/spaces/AI-Hobbyist/Hoyo-RVC/docs/training_tips_ko.md +++ /dev/null @@ -1,53 +0,0 @@ -RVC 훈련에 대한 설명과 팁들 -====================================== -본 팁에서는 어떻게 데이터 훈련이 이루어지고 있는지 설명합니다. - -# 훈련의 흐름 -GUI의 훈련 탭의 단계를 따라 설명합니다. - -## step1 -실험 이름을 지정합니다. 또한, 모델이 피치(소리의 높낮이)를 고려해야 하는지 여부를 여기에서 설정할 수도 있습니다.. -각 실험을 위한 데이터는 `/logs/experiment name/`에 배치됩니다.. - -## step2a -음성 파일을 불러오고 전처리합니다. - -### 음성 파일 불러오기 -음성 파일이 있는 폴더를 지정하면 해당 폴더에 있는 음성 파일이 자동으로 가져와집니다. -예를 들어 `C:Users\hoge\voices`를 지정하면 `C:Users\hoge\voices\voice.mp3`가 읽히지만 `C:Users\hoge\voices\dir\voice.mp3`는 읽히지 않습니다. - -음성 로드에는 내부적으로 ffmpeg를 이용하고 있으므로, ffmpeg로 대응하고 있는 확장자라면 자동적으로 읽힙니다. -ffmpeg에서 int16으로 변환한 후 float32로 변환하고 -1과 1 사이에 정규화됩니다. - -### 잡음 제거 -음성 파일에 대해 scipy의 filtfilt를 이용하여 잡음을 처리합니다. - -### 음성 분할 -입력한 음성 파일은 먼저 일정 기간(max_sil_kept=5초?)보다 길게 무음이 지속되는 부분을 감지하여 음성을 분할합니다.무음으로 음성을 분할한 후에는 0.3초의 overlap을 포함하여 4초마다 음성을 분할합니다.4초 이내에 구분된 음성은 음량의 정규화를 실시한 후 wav 파일을 `/logs/실험명/0_gt_wavs`로, 거기에서 16k의 샘플링 레이트로 변환해 `/logs/실험명/1_16k_wavs`에 wav 파일로 저장합니다. - -## step2b -### 피치 추출 -wav 파일에서 피치(소리의 높낮이) 정보를 추출합니다. parselmouth나 pyworld에 내장되어 있는 메서드으로 피치 정보(=f0)를 추출해, `/logs/실험명/2a_f0`에 저장합니다. 그 후 피치 정보를 로그로 변환하여 1~255 정수로 변환하고 `/logs/실험명/2b-f0nsf`에 저장합니다. - -### feature_print 추출 -HuBERT를 이용하여 wav 파일을 미리 embedding으로 변환합니다. `/logs/실험명/1_16k_wavs`에 저장한 wav 파일을 읽고 HuBERT에서 wav 파일을 256차원 feature들로 변환한 후 npy 형식으로 `/logs/실험명/3_feature256`에 저장합니다. - -## step3 -모델의 훈련을 진행합니다. - -### 초보자용 용어 해설 -심층학습(딥러닝)에서는 데이터셋을 분할하여 조금씩 학습을 진행합니다.한 번의 모델 업데이트(step) 단계 당 batch_size개의 데이터를 탐색하여 예측과 오차를 수정합니다. 데이터셋 전부에 대해 이 작업을 한 번 수행하는 이를 하나의 epoch라고 계산합니다. - -따라서 학습 시간은 단계당 학습 시간 x (데이터셋 내 데이터의 수 / batch size) x epoch 수가 소요됩니다. 일반적으로 batch size가 클수록 학습이 안정적이게 됩니다. (step당 학습 시간 ÷ batch size)는 작아지지만 GPU 메모리를 더 많이 사용합니다. GPU RAM은 nvidia-smi 명령어를 통해 확인할 수 있습니다. 실행 환경에 따라 배치 크기를 최대한 늘리면 짧은 시간 내에 학습이 가능합니다. - -### 사전 학습된 모델 지정 -RVC는 적은 데이터셋으로도 훈련이 가능하도록 사전 훈련된 가중치에서 모델 훈련을 시작합니다. 기본적으로 `rvc-location/pretrained/f0G40k.pth` 및 `rvc-location/pretrained/f0D40k.pth`를 불러옵니다. 학습을 할 시에, 모델 파라미터는 각 save_every_epoch별로 `logs/experiment name/G_{}.pth` 와 `logs/experiment name/D_{}.pth`로 저장이 되는데, 이 경로를 지정함으로써 학습을 재개하거나, 다른 실험에서 학습한 모델의 가중치에서 학습을 시작할 수 있습니다. - -### index의 학습 -RVC에서는 학습시에 사용된 HuBERT의 feature값을 저장하고, 추론 시에는 학습 시 사용한 feature값과 유사한 feature 값을 탐색해 추론을 진행합니다. 이 탐색을 고속으로 수행하기 위해 사전에 index을 학습하게 됩니다. 
-Index 학습에는 근사 근접 탐색법 라이브러리인 Faiss를 사용하게 됩니다. `/logs/실험명/3_feature256`의 feature값을 불러와, 이를 모두 결합시킨 feature값을 `/logs/실험명/total_fea.npy`로서 저장, 그것을 사용해 학습한 index를`/logs/실험명/add_XXX.index`로 저장합니다. - -### 버튼 설명 -- モデルのトレーニング (모델 학습): step2b까지 실행한 후, 이 버튼을 눌러 모델을 학습합니다. -- 特徴インデックスのトレーニング (특징 지수 훈련): 모델의 훈련 후, index를 학습합니다. -- ワンクリックトレーニング (원클릭 트레이닝): step2b까지의 모델 훈련, feature index 훈련을 일괄로 실시합니다. \ No newline at end of file diff --git a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/zero_shot.py b/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/zero_shot.py deleted file mode 100644 index 28b8fccc1af17fc69002857a7f529ac041c374f2..0000000000000000000000000000000000000000 --- a/spaces/AIFILMS/audioldm-text-to-audio-generation/audioldm/clap/training/zero_shot.py +++ /dev/null @@ -1,95 +0,0 @@ -# NOTE: This script is currently not supported for CLAP. -import logging -from contextlib import suppress - -import torch -import torch.nn.functional as F -from tqdm import tqdm - -from open_clip import tokenize -from .imagenet_zeroshot_data import imagenet_classnames, openai_imagenet_template - - -def zero_shot_classifier(model, classnames, templates, args): - with torch.no_grad(): - zeroshot_weights = [] - for classname in tqdm(classnames): - texts = [template(classname) for template in templates] # format with class - texts = tokenize(texts).to(args.device) # tokenize - if args.distributed and not args.horovod: - class_embeddings = model.module.encode_text(texts) - else: - class_embeddings = model.encode_text(texts) - class_embedding = F.normalize(class_embeddings, dim=-1).mean(dim=0) - class_embedding /= class_embedding.norm() - zeroshot_weights.append(class_embedding) - zeroshot_weights = torch.stack(zeroshot_weights, dim=1).to(args.device) - return zeroshot_weights - - -def accuracy(output, target, topk=(1,)): - pred = output.topk(max(topk), 1, True, True)[1].t() - correct = pred.eq(target.view(1, -1).expand_as(pred)) - return [ - float(correct[:k].reshape(-1).float().sum(0, keepdim=True).cpu().numpy()) - for k in topk - ] - - -def run(model, classifier, dataloader, args): - autocast = torch.cuda.amp.autocast if args.precision == "amp" else suppress - with torch.no_grad(): - top1, top5, n = 0.0, 0.0, 0.0 - for images, target in tqdm(dataloader, unit_scale=args.batch_size): - images = images.to(args.device) - target = target.to(args.device) - - with autocast(): - # predict - if args.distributed and not args.horovod: - image_features = model.module.encode_image(images) - else: - image_features = model.encode_image(images) - image_features = F.normalize(image_features, dim=-1) - logits = 100.0 * image_features @ classifier - - # measure accuracy - acc1, acc5 = accuracy(logits, target, topk=(1, 5)) - top1 += acc1 - top5 += acc5 - n += images.size(0) - - top1 = top1 / n - top5 = top5 / n - return top1, top5 - - -def zero_shot_eval(model, data, epoch, args): - if "imagenet-val" not in data and "imagenet-v2" not in data: - return {} - if args.zeroshot_frequency == 0: - return {} - if (epoch % args.zeroshot_frequency) != 0 and epoch != args.epochs: - return {} - - logging.info("Starting zero-shot imagenet.") - - logging.info("Building zero-shot classifier") - classifier = zero_shot_classifier( - model, imagenet_classnames, openai_imagenet_template, args - ) - - logging.info("Using classifier") - results = {} - if "imagenet-val" in data: - top1, top5 = run(model, classifier, data["imagenet-val"].dataloader, args) - results["imagenet-zeroshot-val-top1"] = top1 - 
results["imagenet-zeroshot-val-top5"] = top5 - if "imagenet-v2" in data: - top1, top5 = run(model, classifier, data["imagenet-v2"].dataloader, args) - results["imagenetv2-zeroshot-val-top1"] = top1 - results["imagenetv2-zeroshot-val-top5"] = top5 - - logging.info("Finished zero-shot imagenet.") - - return results diff --git a/spaces/AIWaves/Software_Company/README.md b/spaces/AIWaves/Software_Company/README.md deleted file mode 100644 index 895eba92684caa347ff6327e1d52f5a2db6d0b67..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/Software_Company/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Software Company -emoji: 🐨 -colorFrom: green -colorTo: indigo -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AIWaves/Software_Company/src/agents/Component/ToolComponent.py b/spaces/AIWaves/Software_Company/src/agents/Component/ToolComponent.py deleted file mode 100644 index 95da2abdb7e8b7b5283763587f23ecc29e8ec35f..0000000000000000000000000000000000000000 --- a/spaces/AIWaves/Software_Company/src/agents/Component/ToolComponent.py +++ /dev/null @@ -1,887 +0,0 @@ -from abc import abstractmethod -import uuid -from text2vec import semantic_search -from utils import ( - get_relevant_history, - load_knowledge_base_qa, - load_knowledge_base_UnstructuredFile, - get_embedding, - extract, -) -import json -from typing import Dict, List -import os -from googleapiclient.discovery import build -import requests -from selenium import webdriver -from selenium.webdriver.common.by import By -from selenium.webdriver.support.ui import WebDriverWait -from selenium.webdriver.support import expected_conditions as EC -from bs4 import BeautifulSoup -import base64 -import re -from datetime import datetime, timedelta -from typing import Tuple, List, Any, Dict -from email.mime.text import MIMEText -from email.mime.multipart import MIMEMultipart -from google.auth.transport.requests import Request -from google.oauth2.credentials import Credentials -from google_auth_oauthlib.flow import InstalledAppFlow -from googleapiclient.discovery import build -from googleapiclient.errors import HttpError -from tqdm import tqdm - -class ToolComponent: - def __init__(self): - pass - - @abstractmethod - def func(self): - pass - -class KnowledgeBaseComponent(ToolComponent): - """ - Inject knowledge base - top_k : Top_k with the highest matching degree - type : "QA" or others - knowledge_base(json_path) : knowledge_base_path - """ - def __init__(self, top_k, type, knowledge_base): - super().__init__() - self.top_k = top_k - self.type = type - self.knowledge_base = knowledge_base - - if self.type == "QA": - ( - self.kb_embeddings, - self.kb_questions, - self.kb_answers, - self.kb_chunks, - ) = load_knowledge_base_qa(self.knowledge_base) - else: - self.kb_embeddings, self.kb_chunks = load_knowledge_base_UnstructuredFile( - self.knowledge_base - ) - - def func(self, agent): - query = ( - agent.long_term_memory[-1]["content"] - if len(agent.long_term_memory) > 0 - else "" - ) - knowledge = "" - query = extract(query, "query") - query_embedding = get_embedding(query) - hits = semantic_search(query_embedding, self.kb_embeddings, top_k=50) - hits = hits[0] - temp = [] - if self.type == "QA": - for hit in hits: - matching_idx = hit["corpus_id"] - if self.kb_chunks[matching_idx] in temp: - pass - else: - knowledge = ( - knowledge - + 
f"question:{self.kb_questions[matching_idx]},answer:{self.kb_answers[matching_idx]}\n\n" - ) - temp.append(self.kb_answers[matching_idx]) - if len(temp) == 1: - break - print(hits[0]["score"]) - score = hits[0]["score"] - if score < 0.5: - return {"prompt": "No matching knowledge base"} - else: - return {"prompt": "The relevant content is: " + knowledge + "\n"} - else: - for hit in hits: - matching_idx = hit["corpus_id"] - if self.kb_chunks[matching_idx] in temp: - pass - else: - knowledge = knowledge + f"{self.kb_answers[matching_idx]}\n\n" - temp.append(self.kb_answers[matching_idx]) - if len(temp) == self.top_k: - break - print(hits[0]["score"]) - score = hits[0]["score"] - if score < 0.5: - return {"prompt": "No matching knowledge base"} - else: - print(knowledge) - return {"prompt": "The relevant content is: " + knowledge + "\n"} - - -class StaticComponent(ToolComponent): - "Return static response" - def __init__(self, output): - super().__init__() - self.output = output - - def func(self, agent): - outputdict = {"response": self.output} - return outputdict - - -class ExtractComponent(ToolComponent): - """ - Extract keywords based on the current scene and store them in the environment - extract_words(list) : Keywords to be extracted - system_prompt & last_prompt : Prompt to extract keywords - """ - def __init__( - self, - extract_words, - system_prompt, - last_prompt=None, - ): - super().__init__() - self.extract_words = extract_words - self.system_prompt = system_prompt - self.default_prompt = ( - "Please strictly adhere to the following format for outputting:\n" - ) - for extract_word in extract_words: - self.default_prompt += ( - f"<{extract_word}> the content you need to extract " - ) - self.last_prompt = last_prompt if last_prompt else self.default_prompt - - def func(self, agent): - response = agent.LLM.get_response( - agent.long_term_memory, - self.system_prompt, - self.last_prompt, - stream=False, - ) - for extract_word in self.extract_words: - key = extract(response, extract_word) - key = key if key else response - agent.environment.shared_memory[extract_word] = key - - return {} - - -"""Search sources: chatgpt/search engines/specific search sources/can even be multimodal (if it comes to clothing)""" - - -class WebSearchComponent(ToolComponent): - """search engines""" - - __ENGINE_NAME__: List = ["google", "bing"] - - def __init__(self, engine_name: str, api: Dict): - """ - :param engine_name: The name of the search engine used - :param api: Pass in a dictionary, such as {"bing":"key1", "google":"key2", ...}, of course each value can also be a list, or more complicated - """ - super(WebSearchComponent, self).__init__() - """Determine whether the key and engine_name of the api are legal""" - - assert engine_name in WebSearchComponent.__ENGINE_NAME__ - for api_name in api: - assert api_name in WebSearchComponent.__ENGINE_NAME__ - - self.api = api - self.engine_name = engine_name - - self.search: Dict = {"bing": self._bing_search, "google": self._google_search} - - def _bing_search(self, query: str, **kwargs): - """Initialize search hyperparameters""" - subscription_key = self.api["bing"] - search_url = "https://api.bing.microsoft.com/v7.0/search" - headers = {"Ocp-Apim-Subscription-Key": subscription_key} - params = { - "q": query, - "textDecorations": True, - "textFormat": "HTML", - "count": 10, - } - """start searching""" - response = requests.get(search_url, headers=headers, params=params) - response.raise_for_status() - results = response.json()["webPages"]["value"] - 
"""execute""" - metadata_results = [] - for result in results: - metadata_result = { - "snippet": result["snippet"], - "title": result["name"], - "link": result["url"], - } - metadata_results.append(metadata_result) - return {"meta data": metadata_results} - - def _google_search(self, query: str, **kwargs): - """Initialize search hyperparameters""" - api_key = self.api[self.engine_name]["api_key"] - cse_id = self.api[self.engine_name]["cse_id"] - service = build("customsearch", "v1", developerKey=api_key) - """start searching""" - results = ( - service.cse().list(q=query, cx=cse_id, num=10, **kwargs).execute()["items"] - ) - """execute""" - metadata_results = [] - for result in results: - metadata_result = { - "snippet": result["snippet"], - "title": result["title"], - "link": result["link"], - } - metadata_results.append(metadata_result) - return {"meta data": metadata_results} - - def func(self, agent, **kwargs) -> Dict: - query = ( - agent.long_term_memory[-1]["content"] - if len(agent.long_term_memory) > 0 - else " " - ) - response = agent.LLM.get_response( - None, - system_prompt=f"Please analyze the provided conversation and identify keywords that can be used for a search engine query. Format the output as extracted keywords:\nConversation:\n{query}", - stream=False, - ) - response = extract(response, "keywords") - query = response if response else query - - search_results = self.search[self.engine_name](query=query, **kwargs) - information = "" - for i in search_results["meta data"][:5]: - information += i["snippet"] - return { - "prompt": "You can refer to the following information to reply:\n" - + information - } - - def convert_search_engine_to(self, engine_name): - assert engine_name in WebSearchComponent.__ENGINE_NAME__ - self.engine_name = engine_name - - -class WebCrawlComponent(ToolComponent): - """Open a single web page for crawling""" - - def __init__(self): - super(WebCrawlComponent, self).__init__() - - def func(self, agent_dict) -> Dict: - url = agent_dict["url"] - print(f"crawling {url} ......") - content = "" - """Crawling content from url may need to be carried out according to different websites, such as wiki, baidu, zhihu, etc.""" - driver = webdriver.Chrome() - try: - """open url""" - driver.get(url) - - """wait 20 second""" - wait = WebDriverWait(driver, 20) - wait.until(EC.presence_of_element_located((By.TAG_NAME, "body"))) - - """crawl code""" - page_source = driver.page_source - - """parse""" - soup = BeautifulSoup(page_source, "html.parser") - - """concatenate""" - for paragraph in soup.find_all("p"): - content = f"{content}\n{paragraph.get_text()}" - except Exception as e: - print("Error:", e) - finally: - """quit""" - driver.quit() - return {"content": content.strip()} - - -class MailComponent(ToolComponent): - __VALID_ACTION__ = ["read", "send"] - - def __init__( - self, cfg_file: str, default_action: str = "read", name: str = "e-mail" - ): - """'../config/google_mail.json'""" - super(MailComponent, self).__init__(name) - self.name = name - assert ( - default_action.lower() in self.__VALID_ACTION__ - ), f"Action `{default_action}` is not allowed! 
The valid action is in `{self.__VALID_ACTION__}`" - self.action = default_action.lower() - self.credential = self._login(cfg_file) - - def _login(self, cfg_file: str): - SCOPES = [ - "https://www.googleapis.com/auth/gmail.readonly", - "https://www.googleapis.com/auth/gmail.send", - ] - creds = None - if os.path.exists("token.json"): - print("Login Successfully!") - creds = Credentials.from_authorized_user_file("token.json", SCOPES) - if not creds or not creds.valid: - print("Please authorize in an open browser.") - if creds and creds.expired and creds.refresh_token: - creds.refresh(Request()) - else: - flow = InstalledAppFlow.from_client_secrets_file(cfg_file, SCOPES) - creds = flow.run_local_server(port=0) - # Save the credentials for the next run - with open("token.json", "w") as token: - token.write(creds.to_json()) - return creds - - def _read(self, mail_dict: dict): - credential = self.credential - state = mail_dict["state"] if "state" in mail_dict else None - time_between = ( - mail_dict["time_between"] if "time_between" in mail_dict else None - ) - sender_mail = mail_dict["sender_mail"] if "sender_mail" in mail_dict else None - only_both = mail_dict["only_both"] if "only_both" in mail_dict else False - order_by_time = ( - mail_dict["order_by_time"] if "order_by_time" in mail_dict else "descend" - ) - include_word = ( - mail_dict["include_word"] if "include_word" in mail_dict else None - ) - exclude_word = ( - mail_dict["exclude_word"] if "exclude_word" in mail_dict else None - ) - MAX_SEARCH_CNT = ( - mail_dict["MAX_SEARCH_CNT"] if "MAX_SEARCH_CNT" in mail_dict else 50 - ) - number = mail_dict["number"] if "number" in mail_dict else 10 - if state is None: - state = "all" - if time_between is not None: - assert isinstance(time_between, tuple) - assert len(time_between) == 2 - assert state in ["all", "unread", "read", "sent"] - if only_both: - assert sender_mail is not None - if sender_mail is not None: - assert isinstance(sender_mail, str) - assert credential - assert order_by_time in ["descend", "ascend"] - - def generate_query(): - query = "" - if state in ["unread", "read"]: - query = f"is:{state}" - if state in ["sent"]: - query = f"in:{state}" - if only_both: - query = f"{query} from:{sender_mail} OR to:{sender_mail}" - if sender_mail is not None and not only_both: - query = f"{query} from:({sender_mail})" - if include_word is not None: - query = f"{query} {include_word}" - if exclude_word is not None: - query = f"{query} -{exclude_word}" - if time_between is not None: - TIME_FORMAT = "%Y/%m/%d" - t1, t2 = time_between - if t1 == "now": - t1 = datetime.now().strftime(TIME_FORMAT) - if t2 == "now": - t2 = datetime.now().strftime(TIME_FORMAT) - if isinstance(t1, str) and isinstance(t2, str): - t1 = datetime.strptime(t1, TIME_FORMAT) - t2 = datetime.strptime(t2, TIME_FORMAT) - elif isinstance(t1, str) and isinstance(t2, int): - t1 = datetime.strptime(t1, TIME_FORMAT) - t2 = t1 + timedelta(days=t2) - elif isinstance(t1, int) and isinstance(t2, str): - t2 = datetime.strptime(t2, TIME_FORMAT) - t1 = t2 + timedelta(days=t1) - else: - assert False, "invalid time" - if t1 > t2: - t1, t2 = t2, t1 - query = f"{query} after:{t1.strftime(TIME_FORMAT)} before:{t2.strftime(TIME_FORMAT)}" - return query.strip() - - def sort_by_time(data: List[Dict]): - if order_by_time == "descend": - reverse = True - else: - reverse = False - sorted_data = sorted( - data, - key=lambda x: datetime.strptime(x["time"], "%Y-%m-%d %H:%M:%S"), - reverse=reverse, - ) - return sorted_data - - try: - service = 
build("gmail", "v1", credentials=credential) - results = ( - service.users() - .messages() - .list(userId="me", labelIds=["INBOX"], q=generate_query()) - .execute() - ) - - messages = results.get("messages", []) - email_data = list() - - if not messages: - print("No eligible emails.") - return None - else: - pbar = tqdm(total=min(MAX_SEARCH_CNT, len(messages))) - for cnt, message in enumerate(messages): - pbar.update(1) - if cnt >= MAX_SEARCH_CNT: - break - msg = ( - service.users() - .messages() - .get( - userId="me", - id=message["id"], - format="full", - metadataHeaders=None, - ) - .execute() - ) - - subject = "" - for header in msg["payload"]["headers"]: - if header["name"] == "Subject": - subject = header["value"] - break - - sender = "" - for header in msg["payload"]["headers"]: - if header["name"] == "From": - sender = re.findall( - r"\b[\w\.-]+@[\w\.-]+\.\w+\b", header["value"] - )[0] - break - body = "" - if "parts" in msg["payload"]: - for part in msg["payload"]["parts"]: - if part["mimeType"] == "text/plain": - data = part["body"]["data"] - body = base64.urlsafe_b64decode(data).decode("utf-8") - break - - email_info = { - "sender": sender, - "time": datetime.fromtimestamp( - int(msg["internalDate"]) / 1000 - ).strftime("%Y-%m-%d %H:%M:%S"), - "subject": subject, - "body": body, - } - email_data.append(email_info) - pbar.close() - email_data = sort_by_time(email_data)[0:number] - return {"results": email_data} - except Exception as e: - print(e) - return None - - def _send(self, mail_dict: dict): - recipient_mail = mail_dict["recipient_mail"] - subject = mail_dict["subject"] - body = mail_dict["body"] - credential = self.credential - service = build("gmail", "v1", credentials=credential) - - message = MIMEMultipart() - message["to"] = recipient_mail - message["subject"] = subject - - message.attach(MIMEText(body, "plain")) - - raw_message = base64.urlsafe_b64encode(message.as_bytes()).decode("utf-8") - try: - message = ( - service.users() - .messages() - .send(userId="me", body={"raw": raw_message}) - .execute() - ) - return {"state": True} - except HttpError as error: - print(error) - return {"state": False} - - def func(self, mail_dict: dict): - if "action" in mail_dict: - assert mail_dict["action"].lower() in self.__VALID_ACTION__ - self.action = mail_dict["action"] - functions = {"read": self._read, "send": self._send} - return functions[self.action](mail_dict) - - def convert_action_to(self, action_name: str): - assert ( - action_name.lower() in self.__VALID_ACTION__ - ), f"Action `{action_name}` is not allowed! 
The valid action is in `{self.__VALID_ACTION__}`" - self.action = action_name.lower() - - -class WeatherComponet(ToolComponent): - def __init__(self, api_key, name="weather", TIME_FORMAT="%Y-%m-%d"): - super(WeatherComponet, self).__init__(name) - self.name = name - self.TIME_FORMAT = TIME_FORMAT - self.api_key = api_key - - def _parse(self, data): - dict_data: dict = {} - for item in data["data"]: - date = item["datetime"] - dict_data[date] = {} - if "weather" in item: - dict_data[date]["description"] = item["weather"]["description"] - mapping = { - "temp": "temperature", - "max_temp": "max_temperature", - "min_temp": "min_temperature", - "precip": "accumulated_precipitation", - } - for key in ["temp", "max_temp", "min_temp", "precip"]: - if key in item: - dict_data[date][mapping[key]] = item[key] - return dict_data - - def _query(self, city_name, country_code, start_date, end_date): - """https://www.weatherbit.io/api/historical-weather-daily""" - # print(datetime.strftime(start_date, self.TIME_FORMAT), datetime.strftime(datetime.now(), self.TIME_FORMAT), end_date, datetime.strftime(datetime.now()+timedelta(days=1), self.TIME_FORMAT)) - if start_date == datetime.strftime( - datetime.now(), self.TIME_FORMAT - ) and end_date == datetime.strftime( - datetime.now() + timedelta(days=1), self.TIME_FORMAT - ): - """today""" - url = f"https://api.weatherbit.io/v2.0/current?city={city_name}&country={country_code}&key={self.api_key}" - else: - url = f"https://api.weatherbit.io/v2.0/history/daily?&city={city_name}&country={country_code}&start_date={start_date}&end_date={end_date}&key={self.api_key}" - response = requests.get(url) - data = response.json() - return self._parse(data) - - def func(self, weather_dict: Dict) -> Dict: - TIME_FORMAT = self.TIME_FORMAT - # Beijing, Shanghai - city_name = weather_dict["city_name"] - # CN, US - country_code = weather_dict["country_code"] - # 2020-02-02 - start_date = datetime.strftime( - datetime.strptime(weather_dict["start_date"], self.TIME_FORMAT), - self.TIME_FORMAT, - ) - end_date = weather_dict["end_date"] if "end_date" in weather_dict else None - if end_date is None: - end_date = datetime.strftime( - datetime.strptime(start_date, TIME_FORMAT) + timedelta(days=-1), - TIME_FORMAT, - ) - else: - end_date = datetime.strftime( - datetime.strptime(weather_dict["end_date"], self.TIME_FORMAT), - self.TIME_FORMAT, - ) - if datetime.strptime(start_date, TIME_FORMAT) > datetime.strptime( - end_date, TIME_FORMAT - ): - start_date, end_date = end_date, start_date - assert start_date != end_date - return self._query(city_name, country_code, start_date, end_date) - - -class TranslateComponent(ToolComponent): - __SUPPORT_LANGUAGE__ = [ - "af", - "am", - "ar", - "as", - "az", - "ba", - "bg", - "bn", - "bo", - "bs", - "ca", - "cs", - "cy", - "da", - "de", - "dsb", - "dv", - "el", - "en", - "es", - "et", - "eu", - "fa", - "fi", - "fil", - "fj", - "fo", - "fr", - "fr-CA", - "ga", - "gl", - "gom", - "gu", - "ha", - "he", - "hi", - "hr", - "hsb", - "ht", - "hu", - "hy", - "id", - "ig", - "ikt", - "is", - "it", - "iu", - "iu-Latn", - "ja", - "ka", - "kk", - "km", - "kmr", - "kn", - "ko", - "ku", - "ky", - "ln", - "lo", - "lt", - "lug", - "lv", - "lzh", - "mai", - "mg", - "mi", - "mk", - "ml", - "mn-Cyrl", - "mn-Mong", - "mr", - "ms", - "mt", - "mww", - "my", - "nb", - "ne", - "nl", - "nso", - "nya", - "or", - "otq", - "pa", - "pl", - "prs", - "ps", - "pt", - "pt-PT", - "ro", - "ru", - "run", - "rw", - "sd", - "si", - "sk", - "sl", - "sm", - "sn", - "so", - "sq", - "sr-Cyrl", 
- "sr-Latn", - "st", - "sv", - "sw", - "ta", - "te", - "th", - "ti", - "tk", - "tlh-Latn", - "tlh-Piqd", - "tn", - "to", - "tr", - "tt", - "ty", - "ug", - "uk", - "ur", - "uz", - "vi", - "xh", - "yo", - "yua", - "yue", - "zh-Hans", - "zh-Hant", - "zu", - ] - - def __init__( - self, api_key, location, default_target_language="zh-cn", name="translate" - ): - super(TranslateComponent, self).__init__(name) - self.name = name - self.api_key = api_key - self.location = location - self.default_target_language = default_target_language - - def func(self, translate_dict: Dict) -> Dict: - content = translate_dict["content"] - target_language = self.default_target_language - if "target_language" in translate_dict: - target_language = translate_dict["target_language"] - assert ( - target_language in self.__SUPPORT_LANGUAGE__ - ), f"language `{target_language}` is not supported." - - endpoint = "https://api.cognitive.microsofttranslator.com" - - path = "/translate" - constructed_url = endpoint + path - - params = {"api-version": "3.0", "to": target_language} - - headers = { - "Ocp-Apim-Subscription-Key": self.api_key, - "Ocp-Apim-Subscription-Region": self.location, - "Content-type": "application/json", - "X-ClientTraceId": str(uuid.uuid4()), - } - - body = [{"text": content}] - - request = requests.post( - constructed_url, params=params, headers=headers, json=body - ) - response = request.json() - response = json.dumps( - response, - sort_keys=True, - ensure_ascii=False, - indent=4, - separators=(",", ": "), - ) - response = eval(response) - return {"result": response[0]["translations"][0]["text"]} - - -class APIComponent(ToolComponent): - def __init__(self): - super(APIComponent, self).__init__() - - def func(self, agent) -> Dict: - pass - - -class FunctionComponent(ToolComponent): - def __init__( - self, - functions, - function_call="auto", - response_type="response", - your_function=None, - ): - super().__init__() - self.functions = functions - self.function_call = function_call - self.parameters = {} - self.available_functions = {} - self.response_type = response_type - if your_function: - function_name = your_function["name"] - function_content = your_function["content"] - exec(function_content) - self.available_functions[function_name] = eval(function_name) - - for function in self.functions: - self.parameters[function["name"]] = list( - function["parameters"]["properties"].keys() - ) - self.available_functions[function["name"]] = eval(function["name"]) - - def func(self, agent): - messages = agent.long_term_memory - outputdict = {} - query = agent.long_term_memory[-1].content if len(agent.long_term_memory) > 0 else " " - relevant_history = get_relevant_history( - query, - agent.long_term_memory[:-1], - agent.chat_embeddings[:-1], - ) - response = agent.LLM.get_response( - messages, - None, - functions=self.functions, - stream=False, - function_call=self.function_call, - relevant_history=relevant_history, - ) - response_message = response - if response_message.get("function_call"): - function_name = response_message["function_call"]["name"] - fuction_to_call = self.available_functions[function_name] - function_args = json.loads(response_message["function_call"]["arguments"]) - input_args = {} - for args_name in self.parameters[function_name]: - input_args[args_name] = function_args.get(args_name) - function_response = fuction_to_call(**input_args) - if self.response_type == "response": - outputdict["response"] = function_response - elif self.response_type == "prompt": - outputdict["prompt"] = 
function_response - - return outputdict - - -class CodeComponent(ToolComponent): - def __init__(self, file_name, keyword) -> None: - super().__init__() - self.file_name = file_name - self.keyword = keyword - self.system_prompt = ( - "you need to extract the modified code as completely as possible." - ) - self.last_prompt = ( - f"Please strictly adhere to the following format for outputting: \n" - ) - self.last_prompt += ( - f"<{self.keyword}> the content you need to extract " - ) - - def func(self, agent): - response = agent.LLM.get_response( - agent.long_term_memory, - self.system_prompt, - self.last_prompt, - stream=False, - ) - code = extract(response, self.keyword) - code = code if code else response - os.makedirs("output_code", exist_ok=True) - file_name = "output_code/" + self.file_name - codes = code.split("\n") - if codes[0] == "```python": - codes.remove(codes[0]) - if codes[-1] == "```": - codes.remove(codes[-1]) - code = "\n".join(codes) - with open(file_name, "w", encoding="utf-8") as f: - f.write(code) - return {} diff --git a/spaces/Abeer123/Pokemon_Digimon/README.md b/spaces/Abeer123/Pokemon_Digimon/README.md deleted file mode 100644 index 657e9498e239089de3dd3585bbb6dbaeaf05c16d..0000000000000000000000000000000000000000 --- a/spaces/Abeer123/Pokemon_Digimon/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Pokemon Digimon -emoji: 💻 -colorFrom: gray -colorTo: yellow -sdk: gradio -sdk_version: 3.11.0 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Equing.py b/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Equing.py deleted file mode 100644 index 261c53c01219d4c4a1f80d08cf1df33ccb3e0813..0000000000000000000000000000000000000000 --- a/spaces/AchyuthGamer/OpenGPT/g4f/Provider/Equing.py +++ /dev/null @@ -1,81 +0,0 @@ -from __future__ import annotations - -import json -from abc import ABC, abstractmethod - -import requests - -from ..typing import Any, CreateResult -from .base_provider import BaseProvider - - -class Equing(BaseProvider): - url: str = 'https://next.eqing.tech/' - working = False - supports_stream = True - supports_gpt_35_turbo = True - supports_gpt_4 = False - - @staticmethod - @abstractmethod - def create_completion( - model: str, - messages: list[dict[str, str]], - stream: bool, **kwargs: Any) -> CreateResult: - - headers = { - 'authority' : 'next.eqing.tech', - 'accept' : 'text/event-stream', - 'accept-language' : 'en,fr-FR;q=0.9,fr;q=0.8,es-ES;q=0.7,es;q=0.6,en-US;q=0.5,am;q=0.4,de;q=0.3', - 'cache-control' : 'no-cache', - 'content-type' : 'application/json', - 'origin' : 'https://next.eqing.tech', - 'plugins' : '0', - 'pragma' : 'no-cache', - 'referer' : 'https://next.eqing.tech/', - 'sec-ch-ua' : '"Not/A)Brand";v="99", "Google Chrome";v="115", "Chromium";v="115"', - 'sec-ch-ua-mobile' : '?0', - 'sec-ch-ua-platform': '"macOS"', - 'sec-fetch-dest' : 'empty', - 'sec-fetch-mode' : 'cors', - 'sec-fetch-site' : 'same-origin', - 'user-agent' : 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_7) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/115.0.0.0 Safari/537.36', - 'usesearch' : 'false', - 'x-requested-with' : 'XMLHttpRequest' - } - - json_data = { - 'messages' : messages, - 'stream' : stream, - 'model' : model, - 'temperature' : kwargs.get('temperature', 0.5), - 'presence_penalty' : kwargs.get('presence_penalty', 0), - 'frequency_penalty' : kwargs.get('frequency_penalty', 0), - 'top_p' : kwargs.get('top_p', 
1), - } - - response = requests.post('https://next.eqing.tech/api/openai/v1/chat/completions', - headers=headers, json=json_data, stream=stream) - - if not stream: - yield response.json()["choices"][0]["message"]["content"] - return - - for line in response.iter_content(chunk_size=1024): - if line: - if b'content' in line: - line_json = json.loads(line.decode('utf-8').split('data: ')[1]) - token = line_json['choices'][0]['delta'].get('content') - if token: - yield token - - @classmethod - @property - def params(cls): - params = [ - ("model", "str"), - ("messages", "list[dict[str, str]]"), - ("stream", "bool"), - ] - param = ", ".join([": ".join(p) for p in params]) - return f"g4f.provider.{cls.__name__} supports: ({param})" \ No newline at end of file diff --git a/spaces/AgentVerse/agentVerse/ui/.github/CODE_OF_CONDUCT.md b/spaces/AgentVerse/agentVerse/ui/.github/CODE_OF_CONDUCT.md deleted file mode 100644 index 8fafdb3094f7728a087e6e7bfa9a7677467875d2..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/.github/CODE_OF_CONDUCT.md +++ /dev/null @@ -1,84 +0,0 @@ -# Code of Conduct - -## 1. Purpose - -A primary goal of Phaser is to be inclusive to the largest number of contributors, with the most varied and diverse backgrounds possible. As such, we are committed to providing a friendly, safe and welcoming environment for all, regardless of gender, sexual orientation, ability, ethnicity, socioeconomic status, and religion (or lack thereof). - -This code of conduct outlines our expectations for all those who participate in our community, as well as the consequences for unacceptable behavior. - -We invite all those who participate in Phaser to help us create safe and positive experiences for everyone. - -## 2. Open Source Citizenship - -A supplemental goal of this Code of Conduct is to increase open source citizenship by encouraging participants to recognize and strengthen the relationships between our actions and their effects on our community. - -Communities mirror the societies in which they exist and positive action is essential to counteract the many forms of inequality and abuses of power that exist in society. - -If you see someone who is making an extra effort to ensure our community is welcoming, friendly, and encourages all participants to contribute to the fullest extent, we want to know. - -## 3. Expected Behavior - -The following behaviors are expected and requested of all community members: - -* Participate in an authentic and active way. In doing so, you contribute to the health and longevity of this community. -* Exercise consideration and respect in your speech and actions. -* Attempt collaboration before conflict. -* Refrain from demeaning, discriminatory, or harassing behavior and speech. -* Be mindful of your surroundings and of your fellow participants. Alert community leaders if you notice a dangerous situation, someone in distress, or violations of this Code of Conduct, even if they seem inconsequential. -* Remember that community event venues may be shared with members of the public; please be respectful to all patrons of these locations. - -## 4. Unacceptable Behavior - -The following behaviors are considered harassment and are unacceptable within our community: - -* Violence, threats of violence or violent language directed against another person. -* Sexist, racist, homophobic, transphobic, ableist or otherwise discriminatory jokes and language. -* Posting or displaying sexually explicit or violent material. 
-* Posting or threatening to post other people’s personally identifying information ("doxing"). -* Personal insults, particularly those related to gender, sexual orientation, race, religion, or disability. -* Inappropriate photography or recording. -* Inappropriate physical contact. You should have someone’s consent before touching them. -* Unwelcome sexual attention. This includes, sexualized comments or jokes; inappropriate touching, groping, and unwelcomed sexual advances. -* Deliberate intimidation, stalking or following (online or in person). -* Advocating for, or encouraging, any of the above behavior. -* Sustained disruption of community events, including talks and presentations. - -## 5. Consequences of Unacceptable Behavior - -Unacceptable behavior from any community member, including sponsors and those with decision-making authority, will not be tolerated. - -Anyone asked to stop unacceptable behavior is expected to comply immediately. - -If a community member engages in unacceptable behavior, the community organizers may take any action they deem appropriate, up to and including a temporary ban or permanent expulsion from the community without warning (and without refund in the case of a paid event). - -## 6. Reporting Guidelines - -If you are subject to or witness unacceptable behavior, or have any other concerns, please notify a community organizer as soon as possible. support@phaser.io. - - - -Additionally, community organizers are available to help community members engage with local law enforcement or to otherwise help those experiencing unacceptable behavior feel safe. In the context of in-person events, organizers will also provide escorts as desired by the person experiencing distress. - -## 7. Addressing Grievances - -If you feel you have been falsely or unfairly accused of violating this Code of Conduct, you should notify Photon Storm Ltd with a concise description of your grievance. Your grievance will be handled in accordance with our existing governing policies. - - - -## 8. Scope - -We expect all community participants (contributors, paid or otherwise; sponsors; and other guests) to abide by this Code of Conduct in all community venues–online and in-person–as well as in all one-on-one communications pertaining to community business. - -This code of conduct and its related procedures also applies to unacceptable behavior occurring outside the scope of community activities when such behavior has the potential to adversely affect the safety and well-being of community members. - -## 9. Contact info - -support@phaser.io - -## 10. License and attribution - -This Code of Conduct is distributed under a [Creative Commons Attribution-ShareAlike license](http://creativecommons.org/licenses/by-sa/3.0/). - -Portions of text derived from the [Django Code of Conduct](https://www.djangoproject.com/conduct/) and the [Geek Feminism Anti-Harassment Policy](http://geekfeminism.wikia.com/wiki/Conference_anti-harassment/Policy). 
- -Retrieved on November 22, 2016 from [http://citizencodeofconduct.org/](http://citizencodeofconduct.org/) diff --git a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner-components.d.ts b/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner-components.d.ts deleted file mode 100644 index 3ca2d056b688d512a66eb74af75ec20e637f8752..0000000000000000000000000000000000000000 --- a/spaces/AgentVerse/agentVerse/ui/src/phaser3-rex-plugins/templates/spinner/spinner-components.d.ts +++ /dev/null @@ -1,39 +0,0 @@ -import Audio from './audio/Audio'; -import Ball from './ball/Ball'; -import Bars from './bars/Bars'; -import Box from './box/Box'; -import Clock from './clock/Clock'; -import Cube from './cube/Cube'; -import Custom from './custom/Custom'; -import Dots from './dots/Dots'; -import Facebook from './facebook/Facebook'; -import Grid from './grid/Grid'; -import Los from './los/Los'; -import Orbit from './orbit/Orbit'; -import Oval from './oval/Oval'; -import Pie from './pie/Pie'; -import Puff from './puff/Puff'; -import Radio from './radio/Radio'; -import Rings from './rings/Rings'; -import Spinner from './spinner/Spinner'; - -export { - Audio, - Ball, - Bars, - Box, - Clock, - Cube, - Custom, - Dots, - Facebook, - Grid, - Los, - Orbit, - Oval, - Pie, - Puff, - Radio, - Rings, - Spinner -} \ No newline at end of file diff --git a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_lms_discrete.py b/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_lms_discrete.py deleted file mode 100644 index d58d4ce45bd17645b86905c1ae36ce937015fc29..0000000000000000000000000000000000000000 --- a/spaces/Androidonnxfork/CivitAi-to-Diffusers/diffusers/src/diffusers/schedulers/scheduling_lms_discrete.py +++ /dev/null @@ -1,413 +0,0 @@ -# Copyright 2023 Katherine Crowson and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -import math -import warnings -from dataclasses import dataclass -from typing import List, Optional, Tuple, Union - -import numpy as np -import torch -from scipy import integrate - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput -from .scheduling_utils import KarrasDiffusionSchedulers, SchedulerMixin - - -@dataclass -# Copied from diffusers.schedulers.scheduling_ddpm.DDPMSchedulerOutput with DDPM->LMSDiscrete -class LMSDiscreteSchedulerOutput(BaseOutput): - """ - Output class for the scheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - The predicted denoised sample (x_{0}) based on the model output from the current timestep. 
- `pred_original_sample` can be used to preview progress or for guidance. - """ - - prev_sample: torch.FloatTensor - pred_original_sample: Optional[torch.FloatTensor] = None - - -# Copied from diffusers.schedulers.scheduling_ddpm.betas_for_alpha_bar -def betas_for_alpha_bar( - num_diffusion_timesteps, - max_beta=0.999, - alpha_transform_type="cosine", -): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. - alpha_transform_type (`str`, *optional*, default to `cosine`): the type of noise schedule for alpha_bar. - Choose from `cosine` or `exp` - - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - if alpha_transform_type == "cosine": - - def alpha_bar_fn(t): - return math.cos((t + 0.008) / 1.008 * math.pi / 2) ** 2 - - elif alpha_transform_type == "exp": - - def alpha_bar_fn(t): - return math.exp(t * -12.0) - - else: - raise ValueError(f"Unsupported alpha_tranform_type: {alpha_transform_type}") - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar_fn(t2) / alpha_bar_fn(t1), max_beta)) - return torch.tensor(betas, dtype=torch.float32) - - -class LMSDiscreteScheduler(SchedulerMixin, ConfigMixin): - """ - Linear Multistep Scheduler for discrete beta schedules. Based on the original k-diffusion implementation by - Katherine Crowson: - https://github.com/crowsonkb/k-diffusion/blob/481677d114f6ea445aa009cf5bd7a9cdee909e47/k_diffusion/sampling.py#L181 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear` or `scaled_linear`. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - use_karras_sigmas (`bool`, *optional*, defaults to `False`): - This parameter controls whether to use Karras sigmas (Karras et al. (2022) scheme) for step sizes in the - noise schedule during the sampling process. If True, the sigmas will be determined according to a sequence - of noise levels {σi} as defined in Equation (5) of the paper https://arxiv.org/pdf/2206.00364.pdf. 
- prediction_type (`str`, default `epsilon`, optional): - prediction type of the scheduler function, one of `epsilon` (predicting the noise of the diffusion - process), `sample` (directly predicting the noisy sample`) or `v_prediction` (see section 2.4 - https://imagen.research.google/video/paper.pdf) - timestep_spacing (`str`, default `"linspace"`): - The way the timesteps should be scaled. Refer to Table 2. of [Common Diffusion Noise Schedules and Sample - Steps are Flawed](https://arxiv.org/abs/2305.08891) for more information. - steps_offset (`int`, default `0`): - an offset added to the inference steps. You can use a combination of `offset=1` and - `set_alpha_to_one=False`, to make the last step use step 0 for the previous alpha product, as done in - stable diffusion. - """ - - _compatibles = [e.name for e in KarrasDiffusionSchedulers] - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - trained_betas: Optional[Union[np.ndarray, List[float]]] = None, - use_karras_sigmas: Optional[bool] = False, - prediction_type: str = "epsilon", - timestep_spacing: str = "linspace", - steps_offset: int = 0, - ): - if trained_betas is not None: - self.betas = torch.tensor(trained_betas, dtype=torch.float32) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. - self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - sigmas = np.concatenate([sigmas[::-1], [0.0]]).astype(np.float32) - self.sigmas = torch.from_numpy(sigmas) - - # setable values - self.num_inference_steps = None - self.use_karras_sigmas = use_karras_sigmas - self.set_timesteps(num_train_timesteps, None) - self.derivatives = [] - self.is_scale_input_called = False - - @property - def init_noise_sigma(self): - # standard deviation of the initial noise distribution - if self.config.timestep_spacing in ["linspace", "trailing"]: - return self.sigmas.max() - - return (self.sigmas.max() ** 2 + 1) ** 0.5 - - def scale_model_input( - self, sample: torch.FloatTensor, timestep: Union[float, torch.FloatTensor] - ) -> torch.FloatTensor: - """ - Scales the denoising model input by `(sigma**2 + 1) ** 0.5` to match the K-LMS algorithm. - - Args: - sample (`torch.FloatTensor`): input sample - timestep (`float` or `torch.FloatTensor`): the current timestep in the diffusion chain - - Returns: - `torch.FloatTensor`: scaled input sample - """ - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - step_index = (self.timesteps == timestep).nonzero().item() - sigma = self.sigmas[step_index] - sample = sample / ((sigma**2 + 1) ** 0.5) - self.is_scale_input_called = True - return sample - - def get_lms_coefficient(self, order, t, current_order): - """ - Compute a linear multistep coefficient. 
- - Args: - order (TODO): - t (TODO): - current_order (TODO): - """ - - def lms_derivative(tau): - prod = 1.0 - for k in range(order): - if current_order == k: - continue - prod *= (tau - self.sigmas[t - k]) / (self.sigmas[t - current_order] - self.sigmas[t - k]) - return prod - - integrated_coeff = integrate.quad(lms_derivative, self.sigmas[t], self.sigmas[t + 1], epsrel=1e-4)[0] - - return integrated_coeff - - def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): - """ - Sets the timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. - device (`str` or `torch.device`, optional): - the device to which the timesteps should be moved to. If `None`, the timesteps are not moved. - """ - self.num_inference_steps = num_inference_steps - - # "linspace", "leading", "trailing" corresponds to annotation of Table 2. of https://arxiv.org/abs/2305.08891 - if self.config.timestep_spacing == "linspace": - timesteps = np.linspace(0, self.config.num_train_timesteps - 1, num_inference_steps, dtype=float)[ - ::-1 - ].copy() - elif self.config.timestep_spacing == "leading": - step_ratio = self.config.num_train_timesteps // self.num_inference_steps - # creates integer timesteps by multiplying by ratio - # casting to int to avoid issues when num_inference_step is power of 3 - timesteps = (np.arange(0, num_inference_steps) * step_ratio).round()[::-1].copy().astype(float) - timesteps += self.config.steps_offset - elif self.config.timestep_spacing == "trailing": - step_ratio = self.config.num_train_timesteps / self.num_inference_steps - # creates integer timesteps by multiplying by ratio - # casting to int to avoid issues when num_inference_step is power of 3 - timesteps = (np.arange(self.config.num_train_timesteps, 0, -step_ratio)).round().copy().astype(float) - timesteps -= 1 - else: - raise ValueError( - f"{self.config.timestep_spacing} is not supported. Please make sure to choose one of 'linspace', 'leading' or 'trailing'." 
- ) - - sigmas = np.array(((1 - self.alphas_cumprod) / self.alphas_cumprod) ** 0.5) - log_sigmas = np.log(sigmas) - sigmas = np.interp(timesteps, np.arange(0, len(sigmas)), sigmas) - - if self.use_karras_sigmas: - sigmas = self._convert_to_karras(in_sigmas=sigmas) - timesteps = np.array([self._sigma_to_t(sigma, log_sigmas) for sigma in sigmas]) - - sigmas = np.concatenate([sigmas, [0.0]]).astype(np.float32) - - self.sigmas = torch.from_numpy(sigmas).to(device=device) - if str(device).startswith("mps"): - # mps does not support float64 - self.timesteps = torch.from_numpy(timesteps).to(device, dtype=torch.float32) - else: - self.timesteps = torch.from_numpy(timesteps).to(device=device) - - self.derivatives = [] - - # copied from diffusers.schedulers.scheduling_euler_discrete._sigma_to_t - def _sigma_to_t(self, sigma, log_sigmas): - # get log sigma - log_sigma = np.log(sigma) - - # get distribution - dists = log_sigma - log_sigmas[:, np.newaxis] - - # get sigmas range - low_idx = np.cumsum((dists >= 0), axis=0).argmax(axis=0).clip(max=log_sigmas.shape[0] - 2) - high_idx = low_idx + 1 - - low = log_sigmas[low_idx] - high = log_sigmas[high_idx] - - # interpolate sigmas - w = (low - log_sigma) / (low - high) - w = np.clip(w, 0, 1) - - # transform interpolation to time range - t = (1 - w) * low_idx + w * high_idx - t = t.reshape(sigma.shape) - return t - - # copied from diffusers.schedulers.scheduling_euler_discrete._convert_to_karras - def _convert_to_karras(self, in_sigmas: torch.FloatTensor) -> torch.FloatTensor: - """Constructs the noise schedule of Karras et al. (2022).""" - - sigma_min: float = in_sigmas[-1].item() - sigma_max: float = in_sigmas[0].item() - - rho = 7.0 # 7.0 is the value used in the paper - ramp = np.linspace(0, 1, self.num_inference_steps) - min_inv_rho = sigma_min ** (1 / rho) - max_inv_rho = sigma_max ** (1 / rho) - sigmas = (max_inv_rho + ramp * (min_inv_rho - max_inv_rho)) ** rho - return sigmas - - def step( - self, - model_output: torch.FloatTensor, - timestep: Union[float, torch.FloatTensor], - sample: torch.FloatTensor, - order: int = 4, - return_dict: bool = True, - ) -> Union[LMSDiscreteSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - timestep (`float`): current timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - order: coefficient for multi-step inference. - return_dict (`bool`): option for returning tuple rather than LMSDiscreteSchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.LMSDiscreteSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. - When returning a tuple, the first element is the sample tensor. - - """ - if not self.is_scale_input_called: - warnings.warn( - "The `scale_model_input` function should be called before `step` to ensure correct denoising. " - "See `StableDiffusionPipeline` for a usage example." - ) - - if isinstance(timestep, torch.Tensor): - timestep = timestep.to(self.timesteps.device) - step_index = (self.timesteps == timestep).nonzero().item() - sigma = self.sigmas[step_index] - - # 1. 
compute predicted original sample (x_0) from sigma-scaled predicted noise - if self.config.prediction_type == "epsilon": - pred_original_sample = sample - sigma * model_output - elif self.config.prediction_type == "v_prediction": - # * c_out + input * c_skip - pred_original_sample = model_output * (-sigma / (sigma**2 + 1) ** 0.5) + (sample / (sigma**2 + 1)) - elif self.config.prediction_type == "sample": - pred_original_sample = model_output - else: - raise ValueError( - f"prediction_type given as {self.config.prediction_type} must be one of `epsilon`, or `v_prediction`" - ) - - # 2. Convert to an ODE derivative - derivative = (sample - pred_original_sample) / sigma - self.derivatives.append(derivative) - if len(self.derivatives) > order: - self.derivatives.pop(0) - - # 3. Compute linear multistep coefficients - order = min(step_index + 1, order) - lms_coeffs = [self.get_lms_coefficient(order, step_index, curr_order) for curr_order in range(order)] - - # 4. Compute previous sample based on the derivatives path - prev_sample = sample + sum( - coeff * derivative for coeff, derivative in zip(lms_coeffs, reversed(self.derivatives)) - ) - - if not return_dict: - return (prev_sample,) - - return LMSDiscreteSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample) - - # Copied from diffusers.schedulers.scheduling_euler_discrete.EulerDiscreteScheduler.add_noise - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.FloatTensor, - ) -> torch.FloatTensor: - # Make sure sigmas and timesteps have the same device and dtype as original_samples - sigmas = self.sigmas.to(device=original_samples.device, dtype=original_samples.dtype) - if original_samples.device.type == "mps" and torch.is_floating_point(timesteps): - # mps does not support float64 - schedule_timesteps = self.timesteps.to(original_samples.device, dtype=torch.float32) - timesteps = timesteps.to(original_samples.device, dtype=torch.float32) - else: - schedule_timesteps = self.timesteps.to(original_samples.device) - timesteps = timesteps.to(original_samples.device) - - step_indices = [(schedule_timesteps == t).nonzero().item() for t in timesteps] - - sigma = sigmas[step_indices].flatten() - while len(sigma.shape) < len(original_samples.shape): - sigma = sigma.unsqueeze(-1) - - noisy_samples = original_samples + noise * sigma - return noisy_samples - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py b/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py deleted file mode 100644 index e34f3432e581ff506c9d2951c98b5aad7b1be6a5..0000000000000000000000000000000000000000 --- a/spaces/Andy1621/uniformer_image_segmentation/configs/ocrnet/ocrnet_r101-d8_512x1024_40k_b8_cityscapes.py +++ /dev/null @@ -1,5 +0,0 @@ -_base_ = [ - '../_base_/models/ocrnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] -model = dict(pretrained='open-mmlab://resnet101_v1c', backbone=dict(depth=101)) diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/base.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/base.py deleted file mode 100644 index 75ce2dc9057a20a957abe2fbd4ef094dc4196684..0000000000000000000000000000000000000000 --- 
a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_internal/distributions/base.py +++ /dev/null @@ -1,39 +0,0 @@ -import abc - -from pip._internal.index.package_finder import PackageFinder -from pip._internal.metadata.base import BaseDistribution -from pip._internal.req import InstallRequirement - - -class AbstractDistribution(metaclass=abc.ABCMeta): - """A base class for handling installable artifacts. - - The requirements for anything installable are as follows: - - - we must be able to determine the requirement name - (or we can't correctly handle the non-upgrade case). - - - for packages with setup requirements, we must also be able - to determine their requirements without installing additional - packages (for the same reason as run-time dependencies) - - - we must be able to create a Distribution object exposing the - above metadata. - """ - - def __init__(self, req: InstallRequirement) -> None: - super().__init__() - self.req = req - - @abc.abstractmethod - def get_metadata_distribution(self) -> BaseDistribution: - raise NotImplementedError() - - @abc.abstractmethod - def prepare_distribution_metadata( - self, - finder: PackageFinder, - build_isolation: bool, - check_build_deps: bool, - ) -> None: - raise NotImplementedError() diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/requirements.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/requirements.py deleted file mode 100644 index 1eab7dd66d9bfdefea1a0e159303f1c09fa16d67..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/packaging/requirements.py +++ /dev/null @@ -1,146 +0,0 @@ -# This file is dual licensed under the terms of the Apache License, Version -# 2.0, and the BSD License. See the LICENSE file in the root of this repository -# for complete details. - -import re -import string -import urllib.parse -from typing import List, Optional as TOptional, Set - -from pip._vendor.pyparsing import ( # noqa - Combine, - Literal as L, - Optional, - ParseException, - Regex, - Word, - ZeroOrMore, - originalTextFor, - stringEnd, - stringStart, -) - -from .markers import MARKER_EXPR, Marker -from .specifiers import LegacySpecifier, Specifier, SpecifierSet - - -class InvalidRequirement(ValueError): - """ - An invalid requirement was found, users should refer to PEP 508. 
- """ - - -ALPHANUM = Word(string.ascii_letters + string.digits) - -LBRACKET = L("[").suppress() -RBRACKET = L("]").suppress() -LPAREN = L("(").suppress() -RPAREN = L(")").suppress() -COMMA = L(",").suppress() -SEMICOLON = L(";").suppress() -AT = L("@").suppress() - -PUNCTUATION = Word("-_.") -IDENTIFIER_END = ALPHANUM | (ZeroOrMore(PUNCTUATION) + ALPHANUM) -IDENTIFIER = Combine(ALPHANUM + ZeroOrMore(IDENTIFIER_END)) - -NAME = IDENTIFIER("name") -EXTRA = IDENTIFIER - -URI = Regex(r"[^ ]+")("url") -URL = AT + URI - -EXTRAS_LIST = EXTRA + ZeroOrMore(COMMA + EXTRA) -EXTRAS = (LBRACKET + Optional(EXTRAS_LIST) + RBRACKET)("extras") - -VERSION_PEP440 = Regex(Specifier._regex_str, re.VERBOSE | re.IGNORECASE) -VERSION_LEGACY = Regex(LegacySpecifier._regex_str, re.VERBOSE | re.IGNORECASE) - -VERSION_ONE = VERSION_PEP440 ^ VERSION_LEGACY -VERSION_MANY = Combine( - VERSION_ONE + ZeroOrMore(COMMA + VERSION_ONE), joinString=",", adjacent=False -)("_raw_spec") -_VERSION_SPEC = Optional((LPAREN + VERSION_MANY + RPAREN) | VERSION_MANY) -_VERSION_SPEC.setParseAction(lambda s, l, t: t._raw_spec or "") - -VERSION_SPEC = originalTextFor(_VERSION_SPEC)("specifier") -VERSION_SPEC.setParseAction(lambda s, l, t: t[1]) - -MARKER_EXPR = originalTextFor(MARKER_EXPR())("marker") -MARKER_EXPR.setParseAction( - lambda s, l, t: Marker(s[t._original_start : t._original_end]) -) -MARKER_SEPARATOR = SEMICOLON -MARKER = MARKER_SEPARATOR + MARKER_EXPR - -VERSION_AND_MARKER = VERSION_SPEC + Optional(MARKER) -URL_AND_MARKER = URL + Optional(MARKER) - -NAMED_REQUIREMENT = NAME + Optional(EXTRAS) + (URL_AND_MARKER | VERSION_AND_MARKER) - -REQUIREMENT = stringStart + NAMED_REQUIREMENT + stringEnd -# pyparsing isn't thread safe during initialization, so we do it eagerly, see -# issue #104 -REQUIREMENT.parseString("x[]") - - -class Requirement: - """Parse a requirement. - - Parse a given requirement string into its parts, such as name, specifier, - URL, and extras. Raises InvalidRequirement on a badly-formed requirement - string. - """ - - # TODO: Can we test whether something is contained within a requirement? - # If so how do we do that? Do we need to test against the _name_ of - # the thing as well as the version? What about the markers? - # TODO: Can we normalize the name and extra name? 
- - def __init__(self, requirement_string: str) -> None: - try: - req = REQUIREMENT.parseString(requirement_string) - except ParseException as e: - raise InvalidRequirement( - f'Parse error at "{ requirement_string[e.loc : e.loc + 8]!r}": {e.msg}' - ) - - self.name: str = req.name - if req.url: - parsed_url = urllib.parse.urlparse(req.url) - if parsed_url.scheme == "file": - if urllib.parse.urlunparse(parsed_url) != req.url: - raise InvalidRequirement("Invalid URL given") - elif not (parsed_url.scheme and parsed_url.netloc) or ( - not parsed_url.scheme and not parsed_url.netloc - ): - raise InvalidRequirement(f"Invalid URL: {req.url}") - self.url: TOptional[str] = req.url - else: - self.url = None - self.extras: Set[str] = set(req.extras.asList() if req.extras else []) - self.specifier: SpecifierSet = SpecifierSet(req.specifier) - self.marker: TOptional[Marker] = req.marker if req.marker else None - - def __str__(self) -> str: - parts: List[str] = [self.name] - - if self.extras: - formatted_extras = ",".join(sorted(self.extras)) - parts.append(f"[{formatted_extras}]") - - if self.specifier: - parts.append(str(self.specifier)) - - if self.url: - parts.append(f"@ {self.url}") - if self.marker: - parts.append(" ") - - if self.marker: - parts.append(f"; {self.marker}") - - return "".join(parts) - - def __repr__(self) -> str: - return f"" diff --git a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_timer.py b/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_timer.py deleted file mode 100644 index a2ca6be03c43054caaa3660998273ebf704345dd..0000000000000000000000000000000000000000 --- a/spaces/Ataturk-Chatbot/HuggingFaceChat/venv/lib/python3.11/site-packages/pip/_vendor/rich/_timer.py +++ /dev/null @@ -1,19 +0,0 @@ -""" -Timer context manager, only used in debug. - -""" - -from time import time - -import contextlib -from typing import Generator - - -@contextlib.contextmanager -def timer(subject: str = "time") -> Generator[None, None, None]: - """print the elapsed time. 
(only used in debugging)""" - start = time() - yield - elapsed = time() - start - elapsed_ms = elapsed * 1000 - print(f"{subject} elapsed {elapsed_ms:.1f}ms") diff --git a/spaces/AtomdffAI/wechatgpt4atom/bot/bot.py b/spaces/AtomdffAI/wechatgpt4atom/bot/bot.py deleted file mode 100644 index 850ba3b1e4e31d8a7b079c9827fbd15bec32e9f3..0000000000000000000000000000000000000000 --- a/spaces/AtomdffAI/wechatgpt4atom/bot/bot.py +++ /dev/null @@ -1,13 +0,0 @@ -""" -Auto-replay chat robot abstract class -""" - - -class Bot(object): - def reply(self, query, context=None): - """ - bot auto-reply content - :param req: received message - :return: reply content - """ - raise NotImplementedError diff --git a/spaces/Bart92/RVC_HF/i18n/scan_i18n.py b/spaces/Bart92/RVC_HF/i18n/scan_i18n.py deleted file mode 100644 index f3e52cf4f9f06d78877d77d2353f666aa759e36f..0000000000000000000000000000000000000000 --- a/spaces/Bart92/RVC_HF/i18n/scan_i18n.py +++ /dev/null @@ -1,75 +0,0 @@ -import ast -import glob -import json -from collections import OrderedDict - - -def extract_i18n_strings(node): - i18n_strings = [] - - if ( - isinstance(node, ast.Call) - and isinstance(node.func, ast.Name) - and node.func.id == "i18n" - ): - for arg in node.args: - if isinstance(arg, ast.Str): - i18n_strings.append(arg.s) - - for child_node in ast.iter_child_nodes(node): - i18n_strings.extend(extract_i18n_strings(child_node)) - - return i18n_strings - - -# scan the directory for all .py files (recursively) -# for each file, parse the code into an AST -# for each AST, extract the i18n strings - -strings = [] -for filename in glob.iglob("**/*.py", recursive=True): - with open(filename, "r") as f: - code = f.read() - if "I18nAuto" in code: - tree = ast.parse(code) - i18n_strings = extract_i18n_strings(tree) - print(filename, len(i18n_strings)) - strings.extend(i18n_strings) -code_keys = set(strings) -""" -n_i18n.py -gui_v1.py 26 -app.py 16 -infer-web.py 147 -scan_i18n.py 0 -i18n.py 0 -lib/train/process_ckpt.py 1 -""" -print() -print("Total unique:", len(code_keys)) - - -standard_file = "i18n/locale/zh_CN.json" -with open(standard_file, "r", encoding="utf-8") as f: - standard_data = json.load(f, object_pairs_hook=OrderedDict) -standard_keys = set(standard_data.keys()) - -# Define the standard file name -unused_keys = standard_keys - code_keys -print("Unused keys:", len(unused_keys)) -for unused_key in unused_keys: - print("\t", unused_key) - -missing_keys = code_keys - standard_keys -print("Missing keys:", len(missing_keys)) -for missing_key in missing_keys: - print("\t", missing_key) - -code_keys_dict = OrderedDict() -for s in strings: - code_keys_dict[s] = s - -# write back -with open(standard_file, "w", encoding="utf-8") as f: - json.dump(code_keys_dict, f, ensure_ascii=False, indent=4, sort_keys=True) - f.write("\n") diff --git a/spaces/Benson/text-generation/Examples/Descargar Amp Letras De Fuera De Mi Vientre Por Prospa Ochimana.md b/spaces/Benson/text-generation/Examples/Descargar Amp Letras De Fuera De Mi Vientre Por Prospa Ochimana.md deleted file mode 100644 index fc07f9c46a43e4b164eb59f36b4ea7c16feb1351..0000000000000000000000000000000000000000 --- a/spaces/Benson/text-generation/Examples/Descargar Amp Letras De Fuera De Mi Vientre Por Prospa Ochimana.md +++ /dev/null @@ -1,56 +0,0 @@ - -

    Descargar & Letras de Fuera de mi vientre por Prospa Ochimana

    -

    ¿Estás buscando una poderosa e inspiradora canción gospel que despierte tu espíritu y te llene de alegría? Si es así, entonces deberías escuchar Out of My Belly de Prospa Ochimana. Esta canción es una obra maestra que bendecirá tu vida y te acercará a Dios.

    -

    En este artículo, te diremos todo lo que necesitas saber sobre esta canción, incluyendo cómo descargarla, cuáles son las letras, y cuál es el significado detrás de ellas. ¡Vamos a empezar!

    -

    descargar amp; letras de fuera de mi vientre por prospa ochimana


    Download File ->>> https://bltlly.com/2v6MLe



    -

    ¿Qué está fuera de mi vientre acerca de?

    -

    Fuera de mi vientre es una canción que expresa el deseo de liberar el río de agua viva que fluye desde dentro de cada creyente. La canción está basada en las palabras de Jesús en Juan 7:38, donde dijo, "El que cree en Mí, como la Escritura ha dicho, de su corazón fluirán ríos de agua viva."

    -

    La canción declara que cada vez que este río fluye, la vida se libera. Cada cosa muerta vuelve a la vida a medida que hacen contacto con este río. Es un río que da vida que sana, entrega, restaura y transforma. La canción también invita a todos los que tienen sed a venir a Jesús y beber de este río.

    Who is Prospa Ochimana?

    Prospa Ochimana is a Nigerian gospel singer and songwriter known as a worshipper. He is also the CEO of Tornveil Music International, a gospel record label in Nigeria that he opened in January 2020.

    Prospa Ochimana comes from the Ankpa tribe of Kogi State, Nigeria, but currently lives in Abuja. He was born on 6 November and graduated from Nasarawa State University, Keffi, with a degree in Linguistics.

    Why is Out of My Belly popular?

    Out of My Belly is one of Prospa Ochimana's most popular songs. It was released in November 2020 and has since gained millions of views and downloads online. It has also been performed live at various events and concerts by Prospa Ochimana and other gospel singers.

    The song is popular because it resonates with many people who are hungry for more of God and His presence. It inspires listeners to tap into their potential and purpose as vessels of God's glory, and it pairs a catchy melody with a powerful message that uplifts and encourages.

    Where can you download Out of My Belly?

    If you want to download Out of My Belly by Prospa Ochimana, you have several options to choose from. You can download it from his official website, or from platforms such as YouTube, Spotify, Apple Music, Amazon Music, and more.

    What are the benefits of downloading Out of My Belly?

    Downloading Out of My Belly by Prospa Ochimana has many benefits for you as a listener. Some of them are:

    • You can listen to the song offline, anytime and anywhere you want.
    • You can enjoy the high-quality audio and video of the song.
    • You can share the song with your friends and family via social media or other means.
    • You can support the artist and his ministry by buying his music.
    • You can experience the power and presence of God as you listen to the song.

    What are the lyrics of Out of My Belly?

    The lyrics of Out of My Belly by Prospa Ochimana are as follows:

    - - -

    What is the meaning of the lyrics?

    The meaning of the lyrics of Out of My Belly by Prospa Ochimana is that every believer has a source of life and power within them, which is the Holy Spirit. The Holy Spirit is the river that flows from within us and gives us everything we need. He is the one who heals, delivers, restores, and transforms us. He is also the one who enables us to be a blessing to others by releasing His life through us.

    The song also reminds us that we need to come to Jesus and drink from Him if we thirst for more of Him. He is the fountain of living water that satisfies our deepest longings, and He invites us to believe in Him and receive His promise of rivers of living water flowing from our hearts.

    How can you sing along with Out of My Belly?

    If you want to sing along with Out of My Belly by Prospa Ochimana, you can follow these steps:

    1. Download the song from whichever platform you prefer.
    2. Listen to the song and learn the melody and the lyrics.
    3. Find a karaoke or instrumental version of the song online, or create your own using an app or software.
    4. Practice singing along with the karaoke or instrumental version until you have mastered it.
    5. Sing along with the original song and enjoy!

    Conclusion

    In conclusion, Out of My Belly by Prospa Ochimana is a wonderful gospel song that will inspire you to release the river of living water that flows from within you. The song is also a testimony of how God can use anyone who is willing to be His vessel. It is available for download on various platforms and has striking lyrics that carry a powerful message. We hope you enjoyed this article and learned something new. If you did, please share it with your friends and family, and don't forget to download and sing along with Out of My Belly by Prospa Ochimana!

    Frequently Asked Questions

    Q: When was Out of My Belly released?

    A: Out of My Belly was released on 27 November 2020.

    Q: Who produced Out of My Belly?

    A: Out of My Belly was produced by Sunny Pee.

    Q: How many views does Out of My Belly have on YouTube?

    A: As of 20 June 2023, Out of My Belly has more than 1.1 million views on YouTube. The official video, uploaded by Prospa Ochimana on 27 November 2020, shows him singing the song with a live band and a choir in a studio setting, with subtitles for the lyrics. There are also other versions of the song on YouTube, such as a lyric video and a live performance, if you want to see different ways of presenting it.

    \ No newline at end of file diff --git a/spaces/BigChungux/Pet_Survey2/app.py b/spaces/BigChungux/Pet_Survey2/app.py deleted file mode 100644 index b8e324b9c29780cc194b84219d4782bd519931d7..0000000000000000000000000000000000000000 --- a/spaces/BigChungux/Pet_Survey2/app.py +++ /dev/null @@ -1,172 +0,0 @@ -### ----------------------------- ### -### libraries ### -### ----------------------------- ### - -import gradio as gr -import pandas as pd -import numpy as np -from sklearn.model_selection import train_test_split -from sklearn.linear_model import LogisticRegression -from sklearn import metrics - - -### ------------------------------ ### -### data transformation ### -### ------------------------------ ### - -# load dataset -uncleaned_data = pd.read_csv('data.csv') - -# remove timestamp from dataset (always first column) -uncleaned_data = uncleaned_data.iloc[: , 1:] -data = pd.DataFrame() - -# keep track of which columns are categorical and what -# those columns' value mappings are -# structure: {colname1: {...}, colname2: {...} } -cat_value_dicts = {} -final_colname = uncleaned_data.columns[len(uncleaned_data.columns) - 1] - -# for each column... -for (colname, colval) in uncleaned_data.iteritems(): - - # check if col is already a number; if so, add col directly - # to new dataframe and skip to next column - if isinstance(colval.values[0], (np.integer, float)): - data[colname] = uncleaned_data[colname].copy() - continue - - # structure: {0: "lilac", 1: "blue", ...} - new_dict = {} - val = 0 # first index per column - transformed_col_vals = [] # new numeric datapoints - - # if not, for each item in that column... - for (row, item) in enumerate(colval.values): - - # if item is not in this col's dict... - if item not in new_dict: - new_dict[item] = val - val += 1 - - # then add numerical value to transformed dataframe - transformed_col_vals.append(new_dict[item]) - - # reverse dictionary only for final col (0, 1) => (vals) - if colname == final_colname: - new_dict = {value : key for (key, value) in new_dict.items()} - - cat_value_dicts[colname] = new_dict - data[colname] = transformed_col_vals - - -### -------------------------------- ### -### model training ### -### -------------------------------- ### - -# select features and predicton; automatically selects last column as prediction -cols = len(data.columns) -num_features = cols - 1 -x = data.iloc[: , :num_features] -y = data.iloc[: , num_features:] - -# split data into training and testing sets -x_train, x_test, y_train, y_test = train_test_split(x, y, test_size=0.25) - -# instantiate the model (using default parameters) -model = LogisticRegression() -model.fit(x_train, y_train.values.ravel()) -y_pred = model.predict(x_test) - - -### -------------------------------- ### -### article generation ### -### -------------------------------- ### -# borrow file reading function from reader.py - -def get_feat(): - feats = [abs(x) for x in model.coef_[0]] - max_val = max(feats) - idx = feats.index(max_val) - return data.columns[idx] - -acc = str(round(metrics.accuracy_score(y_test, y_pred) * 100, 1)) + "%" -most_imp_feat = get_feat() -# info = get_article(acc, most_imp_feat) - - - -### ------------------------------- ### -### interface creation ### -### ------------------------------- ### - - -# predictor for generic number of features -def general_predictor(*args): - features = [] - - # transform categorical input - for colname, arg in zip(data.columns, args): - if (colname in cat_value_dicts): - features.append(cat_value_dicts[colname][arg]) 
- else: - features.append(arg) - - # predict single datapoint - new_input = [features] - result = model.predict(new_input) - return cat_value_dicts[final_colname][result[0]] - -# add data labels to replace those lost via star-args - - -block = gr.Blocks() - -with open('info.md') as f: - with block: - gr.Markdown(f.readline()) - gr.Markdown('Take the quiz to get a personalized recommendation using AI.') - - with gr.Row(): - with gr.Box(): - inputls = [] - for colname in data.columns: - # skip last column - if colname == final_colname: - continue - - # access categories dict if data is categorical - # otherwise, just use a number input - if colname in cat_value_dicts: - radio_options = list(cat_value_dicts[colname].keys()) - inputls.append(gr.inputs.Dropdown(choices=radio_options, type="value", label=colname)) - else: - # add numerical input - inputls.append(gr.inputs.Number(label=colname)) - gr.Markdown("
    ") - - submit = gr.Button("Click to see your personalized result!", variant="primary") - gr.Markdown("
    ") - output = gr.Textbox(label="Your recommendation:", placeholder="your recommendation will appear here") - - submit.click(fn=general_predictor, inputs=inputls, outputs=output) - gr.Markdown("
    ") - - with gr.Row(): - with gr.Box(): - gr.Markdown(f"

    Accuracy:

    {acc}") - with gr.Box(): - gr.Markdown(f"

    Most important feature:

    {most_imp_feat}") - - gr.Markdown("
    ") - - with gr.Box(): - gr.Markdown('''⭐ Note that model accuracy is based on the uploaded data.csv and reflects how well the AI model can give correct recommendations for that dataset. Model accuracy and most important feature can be helpful for understanding how the model works, but should not be considered absolute facts about the real world.''') - - with gr.Box(): - with open('info.md') as f: - f.readline() - gr.Markdown(f.read()) - -# show the interface -block.launch() \ No newline at end of file diff --git a/spaces/Billyosoro/ESRGAN/realesrgan/utils.py b/spaces/Billyosoro/ESRGAN/realesrgan/utils.py deleted file mode 100644 index 10e7c23d04f777c250160e74470fdfacb16eab88..0000000000000000000000000000000000000000 --- a/spaces/Billyosoro/ESRGAN/realesrgan/utils.py +++ /dev/null @@ -1,280 +0,0 @@ -import cv2 -import math -import numpy as np -import os -import queue -import threading -import torch -from basicsr.utils.download_util import load_file_from_url -from torch.nn import functional as F - -ROOT_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) - - -class RealESRGANer(): - """A helper class for upsampling images with RealESRGAN. - - Args: - scale (int): Upsampling scale factor used in the networks. It is usually 2 or 4. - model_path (str): The path to the pretrained model. It can be urls (will first download it automatically). - model (nn.Module): The defined network. Default: None. - tile (int): As too large images result in the out of GPU memory issue, so this tile option will first crop - input images into tiles, and then process each of them. Finally, they will be merged into one image. - 0 denotes for do not use tile. Default: 0. - tile_pad (int): The pad size for each tile, to remove border artifacts. Default: 10. - pre_pad (int): Pad the input images to avoid border artifacts. Default: 10. - half (float): Whether to use half precision during inference. Default: False. 
- """ - - def __init__(self, scale, model_path, model=None, tile=0, tile_pad=10, pre_pad=10, half=False): - self.scale = scale - self.tile_size = tile - self.tile_pad = tile_pad - self.pre_pad = pre_pad - self.mod_scale = None - self.half = half - - # initialize model - self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') - # if the model_path starts with https, it will first download models to the folder: realesrgan/weights - if model_path.startswith('https://'): - model_path = load_file_from_url( - url=model_path, model_dir=os.path.join(ROOT_DIR, 'realesrgan/weights'), progress=True, file_name=None) - loadnet = torch.load(model_path, map_location=torch.device('cpu')) - # prefer to use params_ema - if 'params_ema' in loadnet: - keyname = 'params_ema' - else: - keyname = 'params' - model.load_state_dict(loadnet[keyname], strict=True) - model.eval() - self.model = model.to(self.device) - if self.half: - self.model = self.model.half() - - def pre_process(self, img): - """Pre-process, such as pre-pad and mod pad, so that the images can be divisible - """ - img = torch.from_numpy(np.transpose(img, (2, 0, 1))).float() - self.img = img.unsqueeze(0).to(self.device) - if self.half: - self.img = self.img.half() - - # pre_pad - if self.pre_pad != 0: - self.img = F.pad(self.img, (0, self.pre_pad, 0, self.pre_pad), 'reflect') - # mod pad for divisible borders - if self.scale == 2: - self.mod_scale = 2 - elif self.scale == 1: - self.mod_scale = 4 - if self.mod_scale is not None: - self.mod_pad_h, self.mod_pad_w = 0, 0 - _, _, h, w = self.img.size() - if (h % self.mod_scale != 0): - self.mod_pad_h = (self.mod_scale - h % self.mod_scale) - if (w % self.mod_scale != 0): - self.mod_pad_w = (self.mod_scale - w % self.mod_scale) - self.img = F.pad(self.img, (0, self.mod_pad_w, 0, self.mod_pad_h), 'reflect') - - def process(self): - # model inference - self.output = self.model(self.img) - - def tile_process(self): - """It will first crop input images to tiles, and then process each tile. - Finally, all the processed tiles are merged into one images. 
- - Modified from: https://github.com/ata4/esrgan-launcher - """ - batch, channel, height, width = self.img.shape - output_height = height * self.scale - output_width = width * self.scale - output_shape = (batch, channel, output_height, output_width) - - # start with black image - self.output = self.img.new_zeros(output_shape) - tiles_x = math.ceil(width / self.tile_size) - tiles_y = math.ceil(height / self.tile_size) - - # loop over all tiles - for y in range(tiles_y): - for x in range(tiles_x): - # extract tile from input image - ofs_x = x * self.tile_size - ofs_y = y * self.tile_size - # input tile area on total image - input_start_x = ofs_x - input_end_x = min(ofs_x + self.tile_size, width) - input_start_y = ofs_y - input_end_y = min(ofs_y + self.tile_size, height) - - # input tile area on total image with padding - input_start_x_pad = max(input_start_x - self.tile_pad, 0) - input_end_x_pad = min(input_end_x + self.tile_pad, width) - input_start_y_pad = max(input_start_y - self.tile_pad, 0) - input_end_y_pad = min(input_end_y + self.tile_pad, height) - - # input tile dimensions - input_tile_width = input_end_x - input_start_x - input_tile_height = input_end_y - input_start_y - tile_idx = y * tiles_x + x + 1 - input_tile = self.img[:, :, input_start_y_pad:input_end_y_pad, input_start_x_pad:input_end_x_pad] - - # upscale tile - try: - with torch.no_grad(): - output_tile = self.model(input_tile) - except RuntimeError as error: - print('Error', error) - print(f'\tTile {tile_idx}/{tiles_x * tiles_y}') - - # output tile area on total image - output_start_x = input_start_x * self.scale - output_end_x = input_end_x * self.scale - output_start_y = input_start_y * self.scale - output_end_y = input_end_y * self.scale - - # output tile area without padding - output_start_x_tile = (input_start_x - input_start_x_pad) * self.scale - output_end_x_tile = output_start_x_tile + input_tile_width * self.scale - output_start_y_tile = (input_start_y - input_start_y_pad) * self.scale - output_end_y_tile = output_start_y_tile + input_tile_height * self.scale - - # put tile into output image - self.output[:, :, output_start_y:output_end_y, - output_start_x:output_end_x] = output_tile[:, :, output_start_y_tile:output_end_y_tile, - output_start_x_tile:output_end_x_tile] - - def post_process(self): - # remove extra pad - if self.mod_scale is not None: - _, _, h, w = self.output.size() - self.output = self.output[:, :, 0:h - self.mod_pad_h * self.scale, 0:w - self.mod_pad_w * self.scale] - # remove prepad - if self.pre_pad != 0: - _, _, h, w = self.output.size() - self.output = self.output[:, :, 0:h - self.pre_pad * self.scale, 0:w - self.pre_pad * self.scale] - return self.output - - @torch.no_grad() - def enhance(self, img, outscale=None, alpha_upsampler='realesrgan'): - h_input, w_input = img.shape[0:2] - # img: numpy - img = img.astype(np.float32) - if np.max(img) > 256: # 16-bit image - max_range = 65535 - print('\tInput is a 16-bit image') - else: - max_range = 255 - img = img / max_range - if len(img.shape) == 2: # gray image - img_mode = 'L' - img = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB) - elif img.shape[2] == 4: # RGBA image with alpha channel - img_mode = 'RGBA' - alpha = img[:, :, 3] - img = img[:, :, 0:3] - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - if alpha_upsampler == 'realesrgan': - alpha = cv2.cvtColor(alpha, cv2.COLOR_GRAY2RGB) - else: - img_mode = 'RGB' - img = cv2.cvtColor(img, cv2.COLOR_BGR2RGB) - - # ------------------- process image (without the alpha channel) ------------------- # - 
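- # The RGB channels pass through three stages: pre_process() pads the input
- # (pre_pad plus mod-scale padding) so its size is divisible by the required modulus,
- # tile_process() or process() runs the network depending on whether tiling is enabled,
- # and post_process() crops the padding back off the upscaled result.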
self.pre_process(img) - if self.tile_size > 0: - self.tile_process() - else: - self.process() - output_img = self.post_process() - output_img = output_img.data.squeeze().float().cpu().clamp_(0, 1).numpy() - output_img = np.transpose(output_img[[2, 1, 0], :, :], (1, 2, 0)) - if img_mode == 'L': - output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2GRAY) - - # ------------------- process the alpha channel if necessary ------------------- # - if img_mode == 'RGBA': - if alpha_upsampler == 'realesrgan': - self.pre_process(alpha) - if self.tile_size > 0: - self.tile_process() - else: - self.process() - output_alpha = self.post_process() - output_alpha = output_alpha.data.squeeze().float().cpu().clamp_(0, 1).numpy() - output_alpha = np.transpose(output_alpha[[2, 1, 0], :, :], (1, 2, 0)) - output_alpha = cv2.cvtColor(output_alpha, cv2.COLOR_BGR2GRAY) - else: # use the cv2 resize for alpha channel - h, w = alpha.shape[0:2] - output_alpha = cv2.resize(alpha, (w * self.scale, h * self.scale), interpolation=cv2.INTER_LINEAR) - - # merge the alpha channel - output_img = cv2.cvtColor(output_img, cv2.COLOR_BGR2BGRA) - output_img[:, :, 3] = output_alpha - - # ------------------------------ return ------------------------------ # - if max_range == 65535: # 16-bit image - output = (output_img * 65535.0).round().astype(np.uint16) - else: - output = (output_img * 255.0).round().astype(np.uint8) - - if outscale is not None and outscale != float(self.scale): - output = cv2.resize( - output, ( - int(w_input * outscale), - int(h_input * outscale), - ), interpolation=cv2.INTER_LANCZOS4) - - return output, img_mode - - -class PrefetchReader(threading.Thread): - """Prefetch images. - - Args: - img_list (list[str]): A image list of image paths to be read. - num_prefetch_queue (int): Number of prefetch queue. 
- """ - - def __init__(self, img_list, num_prefetch_queue): - super().__init__() - self.que = queue.Queue(num_prefetch_queue) - self.img_list = img_list - - def run(self): - for img_path in self.img_list: - img = cv2.imread(img_path, cv2.IMREAD_UNCHANGED) - self.que.put(img) - - self.que.put(None) - - def __next__(self): - next_item = self.que.get() - if next_item is None: - raise StopIteration - return next_item - - def __iter__(self): - return self - - -class IOConsumer(threading.Thread): - - def __init__(self, opt, que, qid): - super().__init__() - self._queue = que - self.qid = qid - self.opt = opt - - def run(self): - while True: - msg = self._queue.get() - if isinstance(msg, str) and msg == 'quit': - break - - output = msg['output'] - save_path = msg['save_path'] - cv2.imwrite(save_path, output) - print(f'IO worker {self.qid} is done.') diff --git a/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene_model-checkpoint.py b/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene_model-checkpoint.py deleted file mode 100644 index 1bf3d80ea531ff02b3229b862b7a4cd0aec8ec58..0000000000000000000000000000000000000000 --- a/spaces/CVPR/MonoScene/monoscene/.ipynb_checkpoints/monoscene_model-checkpoint.py +++ /dev/null @@ -1,22 +0,0 @@ -from transformers import PreTrainedModel -from .config import MonoSceneConfig -from monoscene.monoscene import MonoScene - - - -class MonoSceneModel(PreTrainedModel): - config_class = ResnetConfig - - def __init__(self, config): - super().__init__(config) - self.model = MonoScene( - dataset=config.dataset, - n_classes=config.n_classes, - feature=config.feature, - project_scale=config.project_scale, - full_scene_size=config.full_scene_size - ) - - - def forward(self, tensor): - return self.model.forward(tensor) \ No newline at end of file diff --git a/spaces/Chris4K/llms_compare/app.py b/spaces/Chris4K/llms_compare/app.py deleted file mode 100644 index 471a2e861bf3890f361c1d62edacc84f4a2914ba..0000000000000000000000000000000000000000 --- a/spaces/Chris4K/llms_compare/app.py +++ /dev/null @@ -1,274 +0,0 @@ -import os, requests -import gradio as gr -HF_READ_API_KEY = os.environ["HF_READ_API_KEY"] - -### This code loads the models and undertakes inference locally ### - -# from transformers import GPTNeoForCausalLM, GPT2Tokenizer -# from transformers import AutoTokenizer, AutoModelForSeq2SeqLM, AutoModelForCausalLM -# model = GPTNeoForCausalLM.from_pretrained("EleutherAI/gpt-neo-2.7B") -# tokenizer = GPT2Tokenizer.from_pretrained("EleutherAI/gpt-neo-2.7B") -# tokenizer = T5Tokenizer.from_pretrained("google/flan-t5-small") -# model = T5ForConditionalGeneration.from_pretrained("google/flan-t5-small") - -model_list = ['google/flan-t5-small', 'google/flan-t5-base', 'google/flan-t5-large', 'google/flan-t5-xl', 'google/flan-t5-xxl', - 'gpt2-medium', 'gpt2-large', 'gpt2-xl', - 'EleutherAI/gpt-neo-1.3B', 'EleutherAI/gpt-neo-2.7B', 'EleutherAI/gpt-neo-6b', 'EleutherAI/gpt-neox-20b', - 'bigscience/bloom-1b7', 'bigscience/bloom-3b', 'bigscience/bloom-7b1' - ] - -def load_model(model_name): - if model_name == 'EleutherAI/gpt-neo-2.7B' or model_name == 'gpt2-medium' or model_name == 'gpt2-large': - model = AutoModelForCausalLM.from_pretrained(model_name) - else: - model = AutoModelForSeq2SeqLM.from_pretrained(model_name) - tokenizer = AutoTokenizer.from_pretrained(model_name) - tokenizer.pad_token = tokenizer.eos_token - # tokenizer.padding_side = "left" - return model, tokenizer - -def maybe_is_truncated(s): - punct = [".", "!", "?", '"'] - if s[-1] in punct: - 
return False - return True - -def load_and_generate(model_name, prompt): - model, tokenizer = load_model(model_name) - - temperature=0.25 - tokens = tokenizer(prompt, return_tensors="pt") - max_length = len(tokens.input_ids[0])+5 - input_ids = tokens.input_ids - attention_mask = tokens.attention_mask - # see huggingface.co/docs/transformers/main_classes/text_generation - gen_tokens = model.generate( - input_ids=input_ids, - attention_mask=attention_mask, - pad_token_id=tokenizer.eos_token_id, - do_sample=True, - temperature=temperature, - # max_length=max_length, - max_new_tokens=max_length, - # use_cache=False, - # penalty_alpha=0.1, - # top_k=100, - # early_stopping=False - ) - gen_text = tokenizer.batch_decode(gen_tokens)[0] - - max_times = 20 - while maybe_is_truncated(gen_text) and max_times > 0: - tokens = tokenizer(gen_text, return_tensors="pt") - max_length = len(tokens.input_ids[0])+5 - input_ids = tokens.input_ids - attention_mask = tokens.attention_mask - - gen_tokens = model.generate( - input_ids=input_ids, - attention_mask=attention_mask, - pad_token_id=tokenizer.eos_token_id, - do_sample=True, - temperature=temperature, - max_length=max_length, - # max_new_tokens=100, - # use_cache=True, - # penalty_alpha=0.1, - # top_k=100, - # early_stopping=False - ) - - gen_text = tokenizer.batch_decode(gen_tokens)[0] - - max_times -= 1 - - return gen_text.replace("", "").replace("", "") - -### This code for the inference api ### - -def generate_from_api(query, model_name, temperature, max_tokens): - headers = {f"Authorization": f"Bearer {HF_READ_API_KEY}", - "wait_for_model": "true", - "temperature": str(temperature), - "max_tokens": str(max_tokens), - "max_time": str(120)} - - model_api_url = f"https://api-inference.huggingface.co/models/{model_name}" - - payload = {"inputs": query} - response = requests.post(model_api_url, headers=headers, json=payload) - while response.status_code != 200: - response = requests.post(model_api_url, headers=headers, json=payload) - return response.json()[0]['generated_text'] - -def generate_from_api_check(query, model_name, temperature, max_tokens): - headers = {f"Authorization": f"Bearer {HF_READ_API_KEY}", - "wait_for_model": "true", - "temperature": str(temperature), - "max_tokens": str(max_tokens), - "max_time": str(120)} - - model_api_url = f"https://api-inference.huggingface.co/models/{model_name}" - - payload = {"inputs": query} - response = requests.post(model_api_url, headers=headers, json=payload) - while response.status_code != 200: - response = requests.post(model_api_url, headers=headers, json=payload) - - max_times = 20 - gen_text = response.json()[0]['generated_text'] - while maybe_is_truncated(gen_text) and max_times > 0: - headers = {f"Authorization": f"Bearer {HF_READ_API_KEY}", - "wait_for_model": "true", - "temperature": str(temperature), - "max_tokens": str(max_tokens + len(gen_text)), - "max_time": str(120)} - payload = {"inputs": query + ' ' + gen_text} - response = requests.post(model_api_url, headers=headers, json=payload) - while response.status_code != 200: - response = requests.post(model_api_url, headers=headers, json=payload) - gen_text = response.json()[0]['generated_text'] - max_times -= 1 - - return gen_text - - -with gr.Blocks(css='style.css') as demo: - gr.HTML(""" -
    -

    - Different Strokes (Prompts) for Different Folks (LLMs) -

    -
    -

    - After reading Prompt Engineering Guide, which is a good guide when starting to learn about prompts for large language models (LLMs), specifically OpenAI's LLMs, I was interested in seeing the results with for other LLMs. Hence, did up a simple demonstration of different prompts for different popular LLMs of different sizes. The prompt examples are taken from the Prompt Engineering Guide, and the LLMs that you can select below are all available on Hugging Face. If you are interested in comparing them with the prompts from OpenAI's model, you can refer to the writeup in the Prompt Engineering Guide itself. -

    -
    -
    - Note: Larger models will take a while, especially on the first run. -
    -
    - """) - - with gr.Column(elem_id="col-container"): - with gr.Row(variant="compact"): - - model_name = gr.Dropdown( - model_list, - label="Select model", - value=model_list[0], - ).style( - container=False, - ) - - temperature = gr.Slider( - 0.1, 100.0, value=1.0, label="Temperature", - ).style( - container=False, - ) - - max_tokens = gr.Slider( - 10, 2250, step=1, value=100, label="Max. tokens (in output)", - ).style( - container=False, - ) - - check_truncated = gr.Checkbox( - label="Check for truncated output", - value=False, - ).style( - container=False, - ) - - with gr.Row(variant="compact"): - prompt = gr.Textbox( - label="Enter your prompt", - show_label=False, - # max_lines=2, - placeholder="Select your prompt from the examples below", - ).style( - container=False, - ) - process = gr.Button("Generate").style(full_width=False) - - with gr.Row(): - output=gr.Textbox( - label="LLM output", - show_label=True) - - gr.HTML(""" -
    -

    - Prompt examples. Select the prompt you would like to test, and it will appear (properly formatted) in the input box above. -

    -
    - """) - with gr.Tab("Introduction"): - example_set_1 = gr.Examples(label = 'Simple Prompt vs. Instruct then Prompt.', - examples=["The sky is ", "Complete the following sentence: The sky is ",], - inputs=[prompt]) - example_set_2 = gr.Examples(label = 'Few Shot Prompt.', - examples=["This is awesome! // Positive\nThis is bad! // Negative\nWow that movie was rad! // Positive\nWhat a horrible show! //",], - inputs=[prompt]) - example_set_3 = gr.Examples(label = 'Explicitly Specify the Instruction', - examples=["### Instruction ###\nTranslate the text below to Spanish:\nText: 'hello!'",], - inputs=[prompt]) - example_set_4 = gr.Examples(label = 'Be Very Specific', - examples=["Extract the name of places in the following text.\nDesired format:\nPlace: \nInput: 'Although these developments are encouraging to researchers, much is still a mystery. “We often have a black box between the brain and the effect we see in the periphery,” says Henrique Veiga-Fernandes, a neuroimmunologist at the Champalimaud Centre for the Unknown in Lisbon. “If we want to use it in the therapeutic context, we actually need to understand the mechanism.'",], - inputs=[prompt]) - example_set_5 = gr.Examples(label = 'Precision', - examples=["Explain the concept of deep learning. Keep the explanation short, only a few sentences, and don't be too descriptive.", "Use 2-3 sentences to explain the concept of deep learning to a high school student."], - inputs=[prompt]) - example_set_6 = gr.Examples(label = 'Focus on What LLM Should Do', - examples=["The following is an agent that recommends movies to a customer. The agent is responsible to recommend a movie from the top global trending movies. It should refrain from asking users for their preferences and avoid asking for personal information. If the agent doesn't have a movie to recommend, it should respond 'Sorry, couldn't find a movie to recommend today.'.\nCustomer: Please recommend a movie based on my interests.\nAgent:"], - inputs=[prompt]) - - with gr.Tab("Basic Tasks"): - example_set_7 = gr.Examples(label = 'Explain vs. Summarize', - examples=["Explain antibiotics.\nA:", "Antibiotics are a type of medication used to treat bacterial infections. They work by either killing the bacteria or preventing them from reproducing, allowing the body’s immune system to fight off the infection. Antibiotics are usually taken orally in the form of pills, capsules, or liquid solutions, or sometimes administered intravenously. They are not effective against viral infections, and using them inappropriately can lead to antibiotic resistance.\nExplain the above in one sentence:",], - inputs=[prompt]) - example_set_8 = gr.Examples(label = 'Information Extraction', - examples=["Author-contribution statements and acknowledgements in research papers should state clearly and specifically whether, and to what extent, the authors used AI technologies such as ChatGPT in the preparation of their manuscript and analysis. They should also indicate which LLMs were used. This will alert editors and reviewers to scrutinize manuscripts more carefully for potential biases, inaccuracies and improper source crediting. Likewise, scientific journals should be transparent about their use of LLMs, for example when selecting submitted manuscripts.\nMention the large language model based product mentioned in the paragraph above:",], - inputs=[prompt]) - example_set_9 = gr.Examples(label = 'Question and Answer', - examples=["Answer the question based on the context below. Keep the answer short and concise. 
Respond 'Unsure about answer' if not sure about the answer.\nContext: Teplizumab traces its roots to a New Jersey drug company called Ortho Pharmaceutical. There, scientists generated an early version of the antibody, dubbed OKT3. Originally sourced from mice, the molecule was able to bind to the surface of T cells and limit their cell-killing potential. In 1986, it was approved to help prevent organ rejection after kidney transplants, making it the first therapeutic antibody allowed for human use.\nQuestion: What was OKT3 originally sourced from?\nAnswer:",], - inputs=[prompt]) - example_set_10 = gr.Examples(label = 'Text Classification', - examples=["Classify the text into neutral, negative or positive.\nText: I think the food was okay.\nSentiment:","Classify the text into neutral, negative or positive.\nText: I think the vacation is okay.\nSentiment: neutral\nText: I think the food was okay.\nSentiment:"], - inputs=[prompt]) - example_set_11 = gr.Examples(label = 'Conversation', - examples=["The following is a conversation with an AI research assistant. The assistant tone is technical and scientific.\nHuman: Hello, who are you?\nAI: Greeting! I am an AI research assistant. How can I help you today?\nHuman: Can you tell me about the creation of blackholes?\nAI:", "The following is a conversation with an AI research assistant. The assistant answers should be easy to understand even by primary school students.\nHuman: Hello, who are you?\nAI: Greeting! I am an AI research assistant. How can I help you today?\nHuman: Can you tell me about the creation of black holes?\nAI: "], - inputs=[prompt]) - example_set_12 = gr.Examples(label = 'Reasoning', - examples=["The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA: ", "The odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nSolve by breaking the problem into steps. First, identify the odd numbers, add them, and indicate whether the result is odd or even."], - inputs=[prompt]) - - - with gr.Tab("Interesting Techniques"): - example_set_13 = gr.Examples(label = 'Zero Shot, i.e., no examples at all', - examples=["Classify the text into neutral, negative or positive.\nText: I think the vacation is okay.\nSentiment:",], - inputs=[prompt]) - example_set_14 = gr.Examples(label = 'Few Shot, i.e., only a few examples', - examples=["The odd numbers in this group add up to an even number: 4, 8, 9, 15, 12, 2, 1.\nA: The answer is False.\n\nThe odd numbers in this group add up to an even number: 17, 10, 19, 4, 8, 12, 24.\nA: The answer is True.\n\nThe odd numbers in this group add up to an even number: 16, 11, 14, 4, 8, 13, 24.\nA: The answer is True.\n\nThe odd numbers in this group add up to an even number: 17, 9, 10, 12, 13, 4, 2.\nA: The answer is False.\n\nThe odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA: ",], - inputs=[prompt]) - example_set_15 = gr.Examples(label = 'Chain of Thought, i.e., go through a series of rational steps', - examples=["The odd numbers in this group add up to an even number: 4, 8, 9, 15, 12, 2, 1.\nA: Adding all the odd numbers (9, 15, 1) gives 25. The answer is False.\n\nThe odd numbers in this group add up to an even number: 15, 32, 5, 13, 82, 7, 1.\nA:",], - inputs=[prompt]) - example_set_16 = gr.Examples(label = 'Zero Shot Chain of Thought, i.e., think step by step, but no examples provided', - examples=["I went to the market and bought 10 apples. I gave 2 apples to the neighbor and 2 to the repairman. 
I then went and bought 5 more apples and ate 1. How many apples did I remain with?\nLet's think step by step.",], - inputs=[prompt]) - example_set_17 = gr.Examples(label = 'Self Consistency, i.e., give examples to encourage the model to be consistent', - examples=["Q: There are 15 trees in the grove. Grove workers will plant trees in the grove today. After they are done,there will be 21 trees. How many trees did the grove workers plant today?\nA: We start with 15 trees. Later we have 21 trees. The difference must be the number of trees they planted.\nSo, they must have planted 21 - 15 = 6 trees. The answer is 6.\n\nQ: If there are 3 cars in the parking lot and 2 more cars arrive, how many cars are in the parking lot?\nA: There are 3 cars in the parking lot already. 2 more arrive. Now there are 3 + 2 = 5 cars. The answer is 5.\n\nQ: Olivia has $23. She bought five bagels for $3 each. How much money does she have left?\nA: She bought 5 bagels for $3 each. This means she spent 5\n\nQ: When I was 6 my sister was half my age. Now I’m 70 how old is my sister?\nA:",], - inputs=[prompt]) - example_set_18 = gr.Examples(label = 'Generating Knowledge, i.e., use examples to generate knowledge', - examples=["Input: Greece is larger than mexico.\nKnowledge: Greece is approximately 131,957 sq km, while Mexico is approximately 1,964,375 sq km, making Mexico 1,389% larger than Greece.\n\nInput: Glasses always fog up.\nKnowledge: Condensation occurs on eyeglass lenses when water vapor from your sweat, breath, and ambient humidity lands on a cold surface, cools, and then changes into tiny drops of liquid, forming a film that you see as fog. Your lenses will be relatively cool compared to your breath, especially when the outside air is cold.\n\nInput: A fish is capable of thinking.\nKnowledge: Fish are more intelligent than they appear. In many areas, such as memory, their cognitive powers match or exceed those of ’higher’ vertebrates including non-human primates. Fish’s long-term memories help them keep track of complex social relationships.\n\nInput: A common effect of smoking lots of cigarettes in one’s lifetime is a higher than normal chance of getting lung cancer.\nKnowledge: Those who consistently averaged less than one cigarette per day over their lifetime had nine times the risk of dying from lung cancer than never smokers. 
Among people who smoked between one and 10 cigarettes per day, the risk of dying from lung cancer was nearly 12 times higher than that of never smokers.\n\nInput: Part of golf is trying to get a higher point total than others.\nKnowledge:",], - inputs=[prompt]) - - # process.click(load_and_generate, inputs=[model_name, prompt], outputs=[output]) - if check_truncated: - process.click(generate_from_api_check, inputs=[prompt, model_name, temperature, max_tokens], outputs=[output]) - else: - process.click(generate_from_api, inputs=[prompt, model_name, temperature, max_tokens], outputs=[output]) - -# demo.launch(server_port=8080) -demo.launch() \ No newline at end of file diff --git a/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/static/andrew_alpha.js b/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/static/andrew_alpha.js deleted file mode 100644 index 43026ce736798b33ef272a5159d33024e7fb20e7..0000000000000000000000000000000000000000 --- a/spaces/ChristopherMarais/Andrew_AI-BB_classification-beta/mysite/andrew_alpha/static/andrew_alpha.js +++ /dev/null @@ -1,208 +0,0 @@ -// Get token from cookie -function getCookie(name) { - let cookieValue = null; - if (document.cookie && document.cookie !== '') { - const cookies = document.cookie.split(';'); - for (let i = 0; i < cookies.length; i++) { - const cookie = cookies[i].trim(); - // Does this cookie string begin with the name we want? - if (cookie.substring(0, name.length + 1) === (name + '=')) { - cookieValue = decodeURIComponent(cookie.substring(name.length + 1)); - break; - } - } - } - return cookieValue; -} - -// Get the video element -const video = document.getElementById("videoElement"); -const captureButton = document.getElementById("captureButton"); -const uploadButton = document.getElementById("uploadButton"); -const capturedFrame = document.getElementById("capturedFrame"); -const webcamFeed = document.getElementById("webcamFeed"); -const processedFrame = document.getElementById("processedFrame"); -// Get CSRF token from cookie -const csrftoken = getCookie('csrftoken'); -// Get reference to form -const form = document.getElementById('myForm'); - -// Check if the browser supports getUserMedia -if (navigator.mediaDevices.getUserMedia) { - // Request access to the webcam - navigator.mediaDevices - .getUserMedia({ video: true }) - .then(function (stream) { - // Set the video source to the stream from the webcam - video.srcObject = stream; - }) - .catch(function (error) { - console.error("Error accessing the webcam:", error); - const message = document.createElement("p"); - webcamFeed.innerHTML = "No webcam detected."; - document.body.appendChild(message); - }); -} else { - console.error("getUserMedia is not supported by this browser"); -} - - -// Variable to store latest captured frame URL -let latestFrameURL; - -// Add click handler to capture button -captureButton.addEventListener("click", function() { - - // Remove previously displayed captured frame (if any) - while (capturedFrame.firstChild) { - capturedFrame.firstChild.remove(); - } - - // Clear processed image display - while (processedFrame.firstChild) { - processedFrame.firstChild.remove(); - } - - // Create canvas element - const canvas = document.createElement("canvas"); - const context = canvas.getContext("2d"); - - // Set canvas dimensions to match video - canvas.width = video.videoWidth; - canvas.height = video.videoHeight; - - // Draw current video frame to canvas - context.drawImage(video, 0, 0, canvas.width, 
canvas.height); - - // Convert canvas to data URL - const dataURL = canvas.toDataURL("image/png"); - - // Save data URL to reuse when appending to form - latestFrameURL = dataURL; - - // Create img element for captured frame - const capturedImage = document.createElement("img"); - capturedImage.src = latestFrameURL; - - // Append to captured frame div - capturedFrame.appendChild(capturedImage); - if (canvas) { - - // Convert canvas to blob - canvas.toBlob(function(blob) { - - // Create file from blob - const file = new File([blob], 'capturedImage.jpg', {type: 'image/jpeg'}) - - // Create FormData - const formData = new FormData(); - - // Append file - formData.append('image', file); - - // Headers with token - const headers = new Headers(); - headers.append('X-CSRFToken', csrftoken); - - // Send FormData - fetch('/process_uploaded_image/', { - method: 'POST', - headers: headers, - body: formData - }) - .then(response => response.blob()) - .then(blob => { - - // Create image from blob - const img = document.createElement('img'); - img.src = URL.createObjectURL(blob); - - // Replace original image with processed one - while (capturedFrame.firstChild) { - capturedFrame.firstChild.remove(); - } - document.getElementById('capturedFrame').appendChild(img); - - // Display processed image - // Append to DOM - // document.getElementById('processedFrame').appendChild(img); - - }) - .catch(error => { - console.error('Error processing image'); - }); - - }, 'image/jpeg'); - - } else { - console.error("Canvas not found"); - } - -}); - -// Add event listener to upload button -uploadButton.addEventListener("click", function () { - const fileInput = document.createElement("input"); - fileInput.type = "file"; - - fileInput.addEventListener("change", function () { - const fileReader = new FileReader(); - - fileReader.addEventListener("load", function () { - const uploadedImageURL = fileReader.result; - - // Remove previously displayed captured frame (if any) - while (capturedFrame.firstChild) { - capturedFrame.firstChild.remove(); - } - // Clear processed image display - while (processedFrame.firstChild) { - processedFrame.firstChild.remove(); - } - - // Create an image element for displaying uploaded image - const uploadedImage = document.createElement("img"); - uploadedImage.src = uploadedImageURL; - const imageFile = fileInput.files[0]; - let formData = new FormData(); - formData.append('image', imageFile); - - fetch('/process_uploaded_image/', { - method: 'POST', - body: formData - }) - .then(response => response.blob()) - .then(blob => { - - // Create image from blob - const img = document.createElement('img'); - img.src = URL.createObjectURL(blob); - - // Replace original image with processed one - while (capturedFrame.firstChild) { - capturedFrame.firstChild.remove(); - } - document.getElementById('capturedFrame').appendChild(img); - - // Display processed image - // Append to DOM - // document.getElementById('processedFrame').appendChild(img); - - }) - .catch(error => { - console.error('Error processing image'); - }); - - - // Append uploaded image to captured frame div - capturedFrame.appendChild(uploadedImage); - - }); - - if (fileInput.files.length > 0) { - fileReader.readAsDataURL(fileInput.files[0]); - } - }); - - fileInput.click(); -}); \ No newline at end of file diff --git a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/config/system/help_system.js b/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/config/system/help_system.js deleted file mode 100644 index 
b927bf3fcdb819caac33ef290d774e68a51b250b..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/Yunzai/Yunzai/plugins/ws-plugin/config/system/help_system.js +++ /dev/null @@ -1,84 +0,0 @@ -export const helpCfg = { - "themeSet": false, - "title": "ws帮助", - "subTitle": "Yunzai-Bot & ws-plugin", - "colWidth": 265, - "theme": "all", - "themeExclude": [ - "default" - ], - "colCount": 3, - "bgBlur": true -} -export const helpList = [ - { - "group": "连接管理", - "list": [ - { - "icon": 80, - "title": "#ws添加连接", - "desc": "添加一个新的连接" - }, - { - "icon": 63, - "title": "#ws删除连接", - "desc": "删除一个已有的连接 " - }, - { - "icon": 66, - "title": "#ws关闭连接", - "desc": "不会删除已有连接,同时不进行连接" - }, - { - "icon": 65, - "title": "#ws打开连接", - "desc": "打开已关闭的连接" - }, - { - "icon": 79, - "title": "#ws查看连接", - "desc": "查看已有的所有连接名字和状态" - }, - { - "icon": 64, - "title": "#ws重新连接", - "desc": "断开连接并重新连接" - } - ] - }, - { - "group": "其他设置", - "list": [ - { - "icon": 81, - "title": "#ws(增加/删除)(禁用/启用)群123456", - "desc": "精确处理黑名单白名单,不带群号为当前群" - }, - { - "icon": 84, - "title": "#ws(禁用/启用)群123456", - "desc": "模糊匹配,比如禁用群则优先看白名单,如果有就删除,否则添加到黑名单" - }, - { - "icon": 85, - "title": "#ws查看(禁用/启用)群", - "desc": "查看当前(禁用/启用)的群聊列表" - }, - ] - }, - { - "group": "其他说明", - "list": [ - { - "icon": 71, - "title": "#ws连接说明", - "desc": "查看添加连接时的说明" - }, - { - "icon": 94, - "title": "#ws设置", - "desc": "插件设置" - } - ] - } -] \ No newline at end of file diff --git a/spaces/CikeyQI/meme-api/meme_generator/memes/dont_touch/__init__.py b/spaces/CikeyQI/meme-api/meme_generator/memes/dont_touch/__init__.py deleted file mode 100644 index 22743662ed38de955166deb43852194e470e78e6..0000000000000000000000000000000000000000 --- a/spaces/CikeyQI/meme-api/meme_generator/memes/dont_touch/__init__.py +++ /dev/null @@ -1,57 +0,0 @@ -import random -from pathlib import Path -from typing import List, Tuple - -from PIL.Image import Image as IMG -from PIL.Image import Palette -from pil_utils import BuildImage - -from meme_generator import add_meme -from meme_generator.utils import make_jpg_or_gif - -img_dir = Path(__file__).parent / "images" - - -def get_dominant_colors(img: IMG) -> List[Tuple[int, int, int]]: - img = img.convert("P", palette=Palette.ADAPTIVE, colors=20) - palette = img.getpalette() - assert palette - color_indexs = sorted(img.getcolors(), reverse=True) - colors = [tuple(palette[i * 3 : i * 3 + 3]) for _, i in color_indexs] - colors = list( - filter(lambda c: c[0] * 0.299 + c[1] * 0.578 + c[2] * 0.114 < 200, colors) - ) - return colors - - -def dont_touch(images: List[BuildImage], texts, args): - frame = BuildImage.open(img_dir / "0.png") - mask = BuildImage.open(img_dir / "mask.png").convert("L") - - def paste_random_blocks(img: BuildImage, colors: List[Tuple[int, int, int]]): - x1, y1, x2, y2 = 200, 300, 400, 650 - block_locs = [] - for _ in range(150): - x = random.randint(x1, x2) - y = random.randint(y1, y2) - if mask.image.getpixel((x, y)) == 0: - continue - if any(abs(x - x_) < 13 and abs(y - y_) < 13 for x_, y_ in block_locs): - continue - block_locs.append((x, y)) - color = random.choice(colors) - block = BuildImage.new("RGBA", (10, 10), color) - block = block.rotate(45, expand=True) - img.paste(block, (x, y), alpha=True) - - def make(img: BuildImage) -> BuildImage: - img_frame = frame.copy() - colors = get_dominant_colors(img.image) - paste_random_blocks(img_frame, colors) - img = img.convert("RGBA").resize((250, 250), keep_ratio=True, inside=True) - return img_frame.paste(img, (25, 460), alpha=True) - - return make_jpg_or_gif(images[0], make) - 
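- # add_meme() below registers this meme with the meme_generator package: it accepts
- # exactly one input image (min_images=1, max_images=1) and is triggered by the
- # keyword "别碰" ("don't touch").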
- -add_meme("dont_touch", dont_touch, min_images=1, max_images=1, keywords=["别碰"]) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/vegalite/v5/theme.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/vegalite/v5/theme.py deleted file mode 100644 index b536a1ddebe6c311672e6ce2757853ecffa6fb1e..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/altair/vegalite/v5/theme.py +++ /dev/null @@ -1,59 +0,0 @@ -"""Tools for enabling and registering chart themes""" - -from ...utils.theme import ThemeRegistry - -VEGA_THEMES = [ - "ggplot2", - "quartz", - "vox", - "fivethirtyeight", - "dark", - "latimes", - "urbaninstitute", - "excel", - "googlecharts", - "powerbi", -] - - -class VegaTheme: - """Implementation of a builtin vega theme.""" - - def __init__(self, theme): - self.theme = theme - - def __call__(self): - return { - "usermeta": {"embedOptions": {"theme": self.theme}}, - "config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}, - } - - def __repr__(self): - return "VegaTheme({!r})".format(self.theme) - - -# The entry point group that can be used by other packages to declare other -# renderers that will be auto-detected. Explicit registration is also -# allowed by the PluginRegistery API. -ENTRY_POINT_GROUP = "altair.vegalite.v5.theme" # type: str -themes = ThemeRegistry(entry_point_group=ENTRY_POINT_GROUP) - -themes.register( - "default", - lambda: {"config": {"view": {"continuousWidth": 300, "continuousHeight": 300}}}, -) -themes.register( - "opaque", - lambda: { - "config": { - "background": "white", - "view": {"continuousWidth": 300, "continuousHeight": 300}, - } - }, -) -themes.register("none", lambda: {}) - -for theme in VEGA_THEMES: - themes.register(theme, VegaTheme(theme)) - -themes.enable("default") diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/shapes.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/shapes.py deleted file mode 100644 index 3f22e6c6a3e4d24636e710f1920ebf04a822b159..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/svgLib/path/shapes.py +++ /dev/null @@ -1,183 +0,0 @@ -import re - - -def _prefer_non_zero(*args): - for arg in args: - if arg != 0: - return arg - return 0.0 - - -def _ntos(n): - # %f likes to add unnecessary 0's, %g isn't consistent about # decimals - return ("%.3f" % n).rstrip("0").rstrip(".") - - -def _strip_xml_ns(tag): - # ElementTree API doesn't provide a way to ignore XML namespaces in tags - # so we here strip them ourselves: cf. https://bugs.python.org/issue18304 - return tag.split("}", 1)[1] if "}" in tag else tag - - -def _transform(raw_value): - # TODO assumes a 'matrix' transform. - # No other transform functions are supported at the moment. - # https://developer.mozilla.org/en-US/docs/Web/SVG/Attribute/transform - # start simple: if you aren't exactly matrix(...) 
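- # For reference, an SVG matrix(a, b, c, d, e, f) transform maps (x, y) to
- # (a*x + c*y + e, b*x + d*y + f); _transform() only parses the six numbers and
- # leaves applying them to the caller.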
then no love - match = re.match(r"matrix\((.*)\)", raw_value) - if not match: - raise NotImplementedError - matrix = tuple(float(p) for p in re.split(r"\s+|,", match.group(1))) - if len(matrix) != 6: - raise ValueError("wrong # of terms in %s" % raw_value) - return matrix - - -class PathBuilder(object): - def __init__(self): - self.paths = [] - self.transforms = [] - - def _start_path(self, initial_path=""): - self.paths.append(initial_path) - self.transforms.append(None) - - def _end_path(self): - self._add("z") - - def _add(self, path_snippet): - path = self.paths[-1] - if path: - path += " " + path_snippet - else: - path = path_snippet - self.paths[-1] = path - - def _move(self, c, x, y): - self._add("%s%s,%s" % (c, _ntos(x), _ntos(y))) - - def M(self, x, y): - self._move("M", x, y) - - def m(self, x, y): - self._move("m", x, y) - - def _arc(self, c, rx, ry, x, y, large_arc): - self._add( - "%s%s,%s 0 %d 1 %s,%s" - % (c, _ntos(rx), _ntos(ry), large_arc, _ntos(x), _ntos(y)) - ) - - def A(self, rx, ry, x, y, large_arc=0): - self._arc("A", rx, ry, x, y, large_arc) - - def a(self, rx, ry, x, y, large_arc=0): - self._arc("a", rx, ry, x, y, large_arc) - - def _vhline(self, c, x): - self._add("%s%s" % (c, _ntos(x))) - - def H(self, x): - self._vhline("H", x) - - def h(self, x): - self._vhline("h", x) - - def V(self, y): - self._vhline("V", y) - - def v(self, y): - self._vhline("v", y) - - def _line(self, c, x, y): - self._add("%s%s,%s" % (c, _ntos(x), _ntos(y))) - - def L(self, x, y): - self._line("L", x, y) - - def l(self, x, y): - self._line("l", x, y) - - def _parse_line(self, line): - x1 = float(line.attrib.get("x1", 0)) - y1 = float(line.attrib.get("y1", 0)) - x2 = float(line.attrib.get("x2", 0)) - y2 = float(line.attrib.get("y2", 0)) - - self._start_path() - self.M(x1, y1) - self.L(x2, y2) - - def _parse_rect(self, rect): - x = float(rect.attrib.get("x", 0)) - y = float(rect.attrib.get("y", 0)) - w = float(rect.attrib.get("width")) - h = float(rect.attrib.get("height")) - rx = float(rect.attrib.get("rx", 0)) - ry = float(rect.attrib.get("ry", 0)) - - rx = _prefer_non_zero(rx, ry) - ry = _prefer_non_zero(ry, rx) - # TODO there are more rules for adjusting rx, ry - - self._start_path() - self.M(x + rx, y) - self.H(x + w - rx) - if rx > 0: - self.A(rx, ry, x + w, y + ry) - self.V(y + h - ry) - if rx > 0: - self.A(rx, ry, x + w - rx, y + h) - self.H(x + rx) - if rx > 0: - self.A(rx, ry, x, y + h - ry) - self.V(y + ry) - if rx > 0: - self.A(rx, ry, x + rx, y) - self._end_path() - - def _parse_path(self, path): - if "d" in path.attrib: - self._start_path(initial_path=path.attrib["d"]) - - def _parse_polygon(self, poly): - if "points" in poly.attrib: - self._start_path("M" + poly.attrib["points"]) - self._end_path() - - def _parse_polyline(self, poly): - if "points" in poly.attrib: - self._start_path("M" + poly.attrib["points"]) - - def _parse_circle(self, circle): - cx = float(circle.attrib.get("cx", 0)) - cy = float(circle.attrib.get("cy", 0)) - r = float(circle.attrib.get("r")) - - # arc doesn't seem to like being a complete shape, draw two halves - self._start_path() - self.M(cx - r, cy) - self.A(r, r, cx + r, cy, large_arc=1) - self.A(r, r, cx - r, cy, large_arc=1) - - def _parse_ellipse(self, ellipse): - cx = float(ellipse.attrib.get("cx", 0)) - cy = float(ellipse.attrib.get("cy", 0)) - rx = float(ellipse.attrib.get("rx")) - ry = float(ellipse.attrib.get("ry")) - - # arc doesn't seem to like being a complete shape, draw two halves - self._start_path() - self.M(cx - rx, cy) - self.A(rx, 
ry, cx + rx, cy, large_arc=1) - self.A(rx, ry, cx - rx, cy, large_arc=1) - - def add_path_from_element(self, el): - tag = _strip_xml_ns(el.tag) - parse_fn = getattr(self, "_parse_%s" % tag.lower(), None) - if not callable(parse_fn): - return False - parse_fn(el) - if "transform" in el.attrib: - self.transforms[-1] = _transform(el.attrib["transform"]) - return True diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/__main__.py b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/__main__.py deleted file mode 100644 index 56fab06e0fe6ac22fce428209c373ecb82d8472a..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/fontTools/varLib/__main__.py +++ /dev/null @@ -1,6 +0,0 @@ -import sys -from fontTools.varLib import main - - -if __name__ == "__main__": - sys.exit(main()) diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-6f7117a6.js b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-6f7117a6.js deleted file mode 100644 index 51a0000de63bfbdcfdcb54b7fb0a145f9757be2e..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-6f7117a6.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as G,e as I,s as J,G as Z,N as O,K as F,p as U,M as z,n as q,A as j,V as be,B as K,P as D,O as S,U as $,Q as ge,R as x,k as p,m as L,o as v,u as Q,v as d,y as V,z as h,x as y,F as C,a7 as de,h as he,j as we,t as ke,a9 as Ae,ab as pe,ac as ve,ad as ye,ak as k,at as Fe,au as Be,E as ze,ae as Ue,q as je,r as Ee}from"./index-3370be2a.js";import{B as Ne}from"./Button-89624748.js";import{B as ae}from"./BlockLabel-56db415e.js";import{E as Oe}from"./Empty-585389a4.js";import{F as W}from"./File-ae385ffc.js";import{U as Se}from"./Upload-f29b2460.js";import{M as Te}from"./ModifyUpload-d8fc50ab.js";import{n as ee,b as Me}from"./ModifyUpload.svelte_svelte_type_style_lang-d2acacf0.js";import{U as Pe}from"./UploadText-28892309.js";import"./Blocks-f0129fcd.js";import"./IconButton-abe5ede9.js";const Ce=t=>{let e=["B","KB","MB","GB","PB"],n=0;for(;t>1024;)t/=1024,n++;let l=e[n];return t.toFixed(1)+" "+l},le=t=>{var e;return e=t.orig_name||t.name,e.length>30?`${e.substr(0,30)}...`:e},te=t=>{var e=0;if(Array.isArray(t))for(var n of t)n.size!==void 0&&(e+=n.size);else e=t.size||0;return Ce(e)};function ne(t,e,n){const l=t.slice();return l[4]=e[n],l[6]=n,l}function Re(t){let e;return{c(){e=D("Uploading...")},m(n,l){U(n,e,l)},p:q,d(n){n&&j(e)}}}function qe(t){let e,n,l,s;return{c(){e=O("a"),n=D("Download"),F(e,"href",l=t[4].data),F(e,"target","_blank"),F(e,"download",s=window.__is_colab__?null:t[4].orig_name||t[4].name),F(e,"class","svelte-xrr240")},m(a,i){U(a,e,i),z(e,n)},p(a,i){i&1&&l!==(l=a[4].data)&&F(e,"href",l),i&1&&s!==(s=window.__is_colab__?null:a[4].orig_name||a[4].name)&&F(e,"download",s)},d(a){a&&j(e)}}}function se(t){let e,n,l=le(t[4])+"",s,a,i,f=te(t[4])+"",r,g,o,m,_,b;function B(c,E){return c[4].data?qe:Re}let w=B(t),A=w(t);function T(){return t[3](t[4],t[6])}return{c(){e=O("tr"),n=O("td"),s=D(l),a=S(),i=O("td"),r=D(f),g=S(),o=O("td"),A.c(),m=S(),F(n,"class","svelte-xrr240"),F(i,"class","svelte-xrr240"),F(o,"class","download svelte-xrr240"),F(e,"class","file 
svelte-xrr240"),$(e,"selectable",t[1])},m(c,E){U(c,e,E),z(e,n),z(n,s),z(e,a),z(e,i),z(i,r),z(e,g),z(e,o),A.m(o,null),z(e,m),_||(b=ge(e,"click",T),_=!0)},p(c,E){t=c,E&1&&l!==(l=le(t[4])+"")&&x(s,l),E&1&&f!==(f=te(t[4])+"")&&x(r,f),w===(w=B(t))&&A?A.p(t,E):(A.d(1),A=w(t),A&&(A.c(),A.m(o,null))),E&2&&$(e,"selectable",t[1])},d(c){c&&j(e),A.d(),_=!1,b()}}}function De(t){let e,n,l,s=Z(Array.isArray(t[0])?t[0]:[t[0]]),a=[];for(let i=0;il("select",{value:f.orig_name||f.name,index:r});return t.$$set=f=>{"value"in f&&n(0,s=f.value),"selectable"in f&&n(1,a=f.selectable)},[s,a,l,i]}class ie extends G{constructor(e){super(),I(this,e,Ge,De,J,{value:0,selectable:1})}}function Ie(t){let e,n;return e=new Oe({props:{unpadded_box:!0,size:"large",$$slots:{default:[Ke]},$$scope:{ctx:t}}}),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,s){const a={};s&32&&(a.$$scope={dirty:s,ctx:l}),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function Je(t){let e,n;return e=new ie({props:{selectable:t[3],value:t[0]}}),e.$on("select",t[4]),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,s){const a={};s&8&&(a.selectable=l[3]),s&1&&(a.value=l[0]),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function Ke(t){let e,n;return e=new W({}),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function Le(t){let e,n,l,s,a,i;e=new ae({props:{show_label:t[2],float:t[0]===null,Icon:W,label:t[1]||"File"}});const f=[Je,Ie],r=[];function g(o,m){return o[0]?0:1}return l=g(t),s=r[l]=f[l](t),{c(){p(e.$$.fragment),n=S(),s.c(),a=L()},m(o,m){v(e,o,m),U(o,n,m),r[l].m(o,m),U(o,a,m),i=!0},p(o,[m]){const _={};m&4&&(_.show_label=o[2]),m&1&&(_.float=o[0]===null),m&2&&(_.label=o[1]||"File"),e.$set(_);let b=l;l=g(o),l===b?r[l].p(o,m):(Q(),d(r[b],1,1,()=>{r[b]=null}),V(),s=r[l],s?s.p(o,m):(s=r[l]=f[l](o),s.c()),h(s,1),s.m(a.parentNode,a))},i(o){i||(h(e.$$.fragment,o),h(s),i=!0)},o(o){d(e.$$.fragment,o),d(s),i=!1},d(o){o&&(j(n),j(a)),y(e,o),r[l].d(o)}}}function Qe(t,e,n){let{value:l=null}=e,{label:s}=e,{show_label:a=!0}=e,{selectable:i=!1}=e;function f(r){C.call(this,t,r)}return t.$$set=r=>{"value"in r&&n(0,l=r.value),"label"in r&&n(1,s=r.label),"show_label"in r&&n(2,a=r.show_label),"selectable"in r&&n(3,i=r.selectable)},[l,s,a,i,f]}class Ve extends G{constructor(e){super(),I(this,e,Qe,Le,J,{value:0,label:1,show_label:2,selectable:3})}}function We(t){let e,n,l;function s(i){t[12](i)}let a={filetype:t[6],parse_to_data_url:!1,file_count:t[3],$$slots:{default:[Xe]},$$scope:{ctx:t}};return t[5]!==void 0&&(a.dragging=t[5]),e=new Se({props:a}),he.push(()=>we(e,"dragging",s)),e.$on("load",t[7]),{c(){p(e.$$.fragment)},m(i,f){v(e,i,f),l=!0},p(i,f){const r={};f&64&&(r.filetype=i[6]),f&8&&(r.file_count=i[3]),f&8192&&(r.$$scope={dirty:f,ctx:i}),!n&&f&32&&(n=!0,r.dragging=i[5],ke(()=>n=!1)),e.$set(r)},i(i){l||(h(e.$$.fragment,i),l=!0)},o(i){d(e.$$.fragment,i),l=!1},d(i){y(e,i)}}}function He(t){let e,n,l,s;return e=new Te({props:{absolute:!0}}),e.$on("clear",t[8]),l=new ie({props:{selectable:t[4],value:t[0]}}),l.$on("select",t[11]),{c(){p(e.$$.fragment),n=S(),p(l.$$.fragment)},m(a,i){v(e,a,i),U(a,n,i),v(l,a,i),s=!0},p(a,i){const f={};i&16&&(f.selectable=a[4]),i&1&&(f.value=a[0]),l.$set(f)},i(a){s||(h(e.$$.fragment,a),h(l.$$.fragment,a),s=!0)},o(a){d(e.$$.fragment,a),d(l.$$.fragment,a),s=!1},d(a){a&&j(n),y(e,a),y(l,a)}}}function Xe(t){let e;const 
n=t[10].default,l=Ae(n,t,t[13],null);return{c(){l&&l.c()},m(s,a){l&&l.m(s,a),e=!0},p(s,a){l&&l.p&&(!e||a&8192)&&pe(l,n,s,s[13],e?ye(n,s[13],a,null):ve(s[13]),null)},i(s){e||(h(l,s),e=!0)},o(s){d(l,s),e=!1},d(s){l&&l.d(s)}}}function Ye(t){let e,n,l,s,a,i;e=new ae({props:{show_label:t[2],Icon:W,float:t[0]===null,label:t[1]||"File"}});const f=[He,We],r=[];function g(o,m){return o[0]?0:1}return l=g(t),s=r[l]=f[l](t),{c(){p(e.$$.fragment),n=S(),s.c(),a=L()},m(o,m){v(e,o,m),U(o,n,m),r[l].m(o,m),U(o,a,m),i=!0},p(o,[m]){const _={};m&4&&(_.show_label=o[2]),m&1&&(_.float=o[0]===null),m&2&&(_.label=o[1]||"File"),e.$set(_);let b=l;l=g(o),l===b?r[l].p(o,m):(Q(),d(r[b],1,1,()=>{r[b]=null}),V(),s=r[l],s?s.p(o,m):(s=r[l]=f[l](o),s.c()),h(s,1),s.m(a.parentNode,a))},i(o){i||(h(e.$$.fragment,o),h(s),i=!0)},o(o){d(e.$$.fragment,o),d(s),i=!1},d(o){o&&(j(n),j(a)),y(e,o),r[l].d(o)}}}function Ze(t,e,n){let{$$slots:l={},$$scope:s}=e,{value:a}=e,{label:i}=e,{show_label:f=!0}=e,{file_count:r="single"}=e,{file_types:g=null}=e,{selectable:o=!1}=e;async function m({detail:c}){n(0,a=c),await de(),b("change",a),b("upload",c)}function _({detail:c}){n(0,a=null),b("change",a),b("clear")}const b=K();let B;g==null?B=null:(g=g.map(c=>c.startsWith(".")?c:c+"/*"),B=g.join(", "));let w=!1;function A(c){C.call(this,t,c)}function T(c){w=c,n(5,w)}return t.$$set=c=>{"value"in c&&n(0,a=c.value),"label"in c&&n(1,i=c.label),"show_label"in c&&n(2,f=c.show_label),"file_count"in c&&n(3,r=c.file_count),"file_types"in c&&n(9,g=c.file_types),"selectable"in c&&n(4,o=c.selectable),"$$scope"in c&&n(13,s=c.$$scope)},t.$$.update=()=>{t.$$.dirty&32&&b("drag",w)},[a,i,f,r,o,w,B,m,_,g,l,A,T,s]}class $e extends G{constructor(e){super(),I(this,e,Ze,Ye,J,{value:0,label:1,show_label:2,file_count:3,file_types:9,selectable:4})}}function xe(t){let e,n;return e=new Ve({props:{selectable:t[9],value:t[14],label:t[5],show_label:t[6]}}),e.$on("select",t[24]),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,s){const a={};s&512&&(a.selectable=l[9]),s&16384&&(a.value=l[14]),s&32&&(a.label=l[5]),s&64&&(a.show_label=l[6]),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function el(t){let e,n;return e=new $e({props:{label:t[5],show_label:t[6],value:t[14],file_count:t[7],file_types:t[8],selectable:t[9],$$slots:{default:[ll]},$$scope:{ctx:t}}}),e.$on("change",t[20]),e.$on("drag",t[21]),e.$on("clear",t[22]),e.$on("select",t[23]),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,s){const a={};s&32&&(a.label=l[5]),s&64&&(a.show_label=l[6]),s&16384&&(a.value=l[14]),s&128&&(a.file_count=l[7]),s&256&&(a.file_types=l[8]),s&512&&(a.selectable=l[9]),s&134217728&&(a.$$scope={dirty:s,ctx:l}),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function ll(t){let e,n;return e=new Pe({props:{type:"file"}}),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p:q,i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function tl(t){let e,n,l,s,a,i;const f=[t[10],{status:t[16]?"generating":t[10]?.status||"complete"}];let r={};for(let _=0;_{o[w]=null}),V(),s=o[l],s?s.p(_,b):(s=o[l]=g[l](_),s.c()),h(s,1),s.m(a.parentNode,a))},i(_){i||(h(e.$$.fragment,_),h(s),i=!0)},o(_){d(e.$$.fragment,_),d(s),i=!1},d(_){_&&(j(n),j(a)),y(e,_),o[l].d(_)}}}function nl(t){let e,n;return e=new 
Ne({props:{visible:t[3],variant:t[4]==="dynamic"&&t[0]===null?"dashed":"solid",border_mode:t[15]?"focus":"base",padding:!1,elem_id:t[1],elem_classes:t[2],container:t[11],scale:t[12],min_width:t[13],$$slots:{default:[tl]},$$scope:{ctx:t}}}),{c(){p(e.$$.fragment)},m(l,s){v(e,l,s),n=!0},p(l,[s]){const a={};s&8&&(a.visible=l[3]),s&17&&(a.variant=l[4]==="dynamic"&&l[0]===null?"dashed":"solid"),s&32768&&(a.border_mode=l[15]?"focus":"base"),s&2&&(a.elem_id=l[1]),s&4&&(a.elem_classes=l[2]),s&2048&&(a.container=l[11]),s&4096&&(a.scale=l[12]),s&8192&&(a.min_width=l[13]),s&134334449&&(a.$$scope={dirty:s,ctx:l}),e.$set(a)},i(l){n||(h(e.$$.fragment,l),n=!0)},o(l){d(e.$$.fragment,l),n=!1},d(l){y(e,l)}}}function sl(t,e,n){let l,{elem_id:s=""}=e,{elem_classes:a=[]}=e,{visible:i=!0}=e,{value:f}=e,r,{mode:g}=e,{root:o}=e,{label:m}=e,{show_label:_}=e,{file_count:b}=e,{file_types:B=["file"]}=e,{root_url:w}=e,{selectable:A=!1}=e,{loading_status:T}=e,{container:c=!0}=e,{scale:E=null}=e,{min_width:H=void 0}=e;const re=Fe("upload_files")??Be;let X=!1,M=!1;const R=K(),oe=({detail:u})=>n(0,f=u),fe=({detail:u})=>n(15,X=u);function ue(u){C.call(this,t,u)}function _e(u){C.call(this,t,u)}function ce(u){C.call(this,t,u)}return t.$$set=u=>{"elem_id"in u&&n(1,s=u.elem_id),"elem_classes"in u&&n(2,a=u.elem_classes),"visible"in u&&n(3,i=u.visible),"value"in u&&n(0,f=u.value),"mode"in u&&n(4,g=u.mode),"root"in u&&n(17,o=u.root),"label"in u&&n(5,m=u.label),"show_label"in u&&n(6,_=u.show_label),"file_count"in u&&n(7,b=u.file_count),"file_types"in u&&n(8,B=u.file_types),"root_url"in u&&n(18,w=u.root_url),"selectable"in u&&n(9,A=u.selectable),"loading_status"in u&&n(10,T=u.loading_status),"container"in u&&n(11,c=u.container),"scale"in u&&n(12,E=u.scale),"min_width"in u&&n(13,H=u.min_width)},t.$$.update=()=>{if(t.$$.dirty&393217&&n(14,l=ee(f,o,w)),t.$$.dirty&933905&&JSON.stringify(l)!==JSON.stringify(r)){if(n(19,r=l),l===null)R("change"),n(16,M=!1);else if(!(Array.isArray(l)?l:[l]).every(u=>u.blob))n(16,M=!1),R("change");else if(g==="dynamic"){let u=(Array.isArray(l)?l:[l]).map(P=>P.blob),me=l;n(16,M=!0),re(o,u).then(P=>{me===l&&(n(16,M=!1),P.error?(Array.isArray(l)?l:[l]).forEach(async(N,Y)=>{N.data=await Me(N.blob),N.blob=void 0}):((Array.isArray(l)?l:[l]).forEach((N,Y)=>{P.files&&(N.orig_name=N.name,N.name=P.files[Y],N.is_file=!0,N.blob=void 0)}),n(19,r=n(14,l=ee(f,o,w)))),R("change"),R("upload"))})}}},[f,s,a,i,g,m,_,b,B,A,T,c,E,H,l,X,M,o,w,r,oe,fe,ue,_e,ce]}class al extends G{constructor(e){super(),I(this,e,sl,nl,J,{elem_id:1,elem_classes:2,visible:3,value:0,mode:4,root:17,label:5,show_label:6,file_count:7,file_types:8,root_url:18,selectable:9,loading_status:10,container:11,scale:12,min_width:13})}get elem_id(){return this.$$.ctx[1]}set elem_id(e){this.$$set({elem_id:e}),k()}get elem_classes(){return this.$$.ctx[2]}set elem_classes(e){this.$$set({elem_classes:e}),k()}get visible(){return this.$$.ctx[3]}set visible(e){this.$$set({visible:e}),k()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),k()}get mode(){return this.$$.ctx[4]}set mode(e){this.$$set({mode:e}),k()}get root(){return this.$$.ctx[17]}set root(e){this.$$set({root:e}),k()}get label(){return this.$$.ctx[5]}set label(e){this.$$set({label:e}),k()}get show_label(){return this.$$.ctx[6]}set show_label(e){this.$$set({show_label:e}),k()}get file_count(){return this.$$.ctx[7]}set file_count(e){this.$$set({file_count:e}),k()}get file_types(){return this.$$.ctx[8]}set file_types(e){this.$$set({file_types:e}),k()}get root_url(){return this.$$.ctx[18]}set 
root_url(e){this.$$set({root_url:e}),k()}get selectable(){return this.$$.ctx[9]}set selectable(e){this.$$set({selectable:e}),k()}get loading_status(){return this.$$.ctx[10]}set loading_status(e){this.$$set({loading_status:e}),k()}get container(){return this.$$.ctx[11]}set container(e){this.$$set({container:e}),k()}get scale(){return this.$$.ctx[12]}set scale(e){this.$$set({scale:e}),k()}get min_width(){return this.$$.ctx[13]}set min_width(e){this.$$set({min_width:e}),k()}}const hl=al,wl=["static","dynamic"],kl=t=>({type:{input_payload:"{ name: string; data: string }",response_object:"{ orig_name: string; name: string, size: number, data: string, is_file: boolean}"},description:{input_payload:"object with file name and base64 data",response_object:"object that includes path to file. The URL: {ROOT}file={name} contains the data"},example_data:{name:"zip.zip",data:"data:@file/octet-stream;base64,UEsFBgAAAAAAAAAAAAAAAAAAAAAAAA=="}});export{hl as Component,kl as document,wl as modes}; -//# sourceMappingURL=index-6f7117a6.js.map diff --git a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-be790e2e.css b/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-be790e2e.css deleted file mode 100644 index 2038190972931fd925656a6bd9ebd7e57f0b1d0a..0000000000000000000000000000000000000000 --- a/spaces/DQChoi/gpt-demo/venv/lib/python3.11/site-packages/gradio/templates/frontend/assets/index-be790e2e.css +++ /dev/null @@ -1 +0,0 @@ -.rangeSlider{--pip:var(--range-pip, lightslategray);--pip-text:var(--range-pip-text, var(--pip));--pip-active:var(--range-pip-active, darkslategrey);--pip-active-text:var(--range-pip-active-text, var(--pip-active));--pip-hover:var(--range-pip-hover, darkslategrey);--pip-hover-text:var(--range-pip-hover-text, var(--pip-hover));--pip-in-range:var(--range-pip-in-range, var(--pip-active));--pip-in-range-text:var(--range-pip-in-range-text, var(--pip-active-text))}.rangePips{position:absolute;height:1em;left:0;right:0;bottom:-1em}.rangePips.vertical{height:auto;width:1em;inset:0 auto 0 100%}.rangePips .pip{height:.4em;position:absolute;top:.25em;width:1px;white-space:nowrap}.rangePips.vertical .pip{height:1px;width:.4em;left:.25em;top:auto;bottom:auto}.rangePips .pipVal{position:absolute;top:.4em;transform:translate(-50%,25%)}.rangePips.vertical .pipVal{position:absolute;top:0;left:.4em;transform:translate(25%,-50%)}.rangePips .pip{transition:all .15s ease}.rangePips .pipVal{transition:all .15s ease,font-weight 0s linear}.rangePips .pip{color:#789;color:var(--pip-text);background-color:#789;background-color:var(--pip)}.rangePips .pip.selected{color:#2f4f4f;color:var(--pip-active-text);background-color:#2f4f4f;background-color:var(--pip-active)}.rangePips.hoverable:not(.disabled) .pip:hover{color:#2f4f4f;color:var(--pip-hover-text);background-color:#2f4f4f;background-color:var(--pip-hover)}.rangePips .pip.in-range{color:#2f4f4f;color:var(--pip-in-range-text);background-color:#2f4f4f;background-color:var(--pip-in-range)}.rangePips .pip.selected{height:.75em}.rangePips.vertical .pip.selected{height:1px;width:.75em}.rangePips .pip.selected .pipVal{font-weight:700;top:.75em}.rangePips.vertical .pip.selected .pipVal{top:0;left:.75em}.rangePips.hoverable:not(.disabled) .pip:not(.selected):hover{transition:none}.rangePips.hoverable:not(.disabled) .pip:not(.selected):hover .pipVal{transition:none;font-weight:700}.rangeSlider{--slider:var(--range-slider, 
#d7dada);--handle-inactive:var(--range-handle-inactive, #99a2a2);--handle:var(--range-handle, #838de7);--handle-focus:var(--range-handle-focus, #4a40d4);--handle-border:var(--range-handle-border, var(--handle));--range-inactive:var(--range-range-inactive, var(--handle-inactive));--range:var(--range-range, var(--handle-focus));--float-inactive:var(--range-float-inactive, var(--handle-inactive));--float:var(--range-float, var(--handle-focus));--float-text:var(--range-float-text, white)}.rangeSlider{position:relative;border-radius:100px;height:.5em;margin:1em;transition:opacity .2s ease;user-select:none}.rangeSlider *{user-select:none}.rangeSlider.pips{margin-bottom:1.8em}.rangeSlider.pip-labels{margin-bottom:2.8em}.rangeSlider.vertical{display:inline-block;border-radius:100px;width:.5em;min-height:200px}.rangeSlider.vertical.pips{margin-right:1.8em;margin-bottom:1em}.rangeSlider.vertical.pip-labels{margin-right:2.8em;margin-bottom:1em}.rangeSlider .rangeHandle{position:absolute;display:block;height:1.4em;width:1.4em;top:.25em;bottom:auto;transform:translateY(-50%) translate(-50%);z-index:2}.rangeSlider.reversed .rangeHandle{transform:translateY(-50%) translate(50%)}.rangeSlider.vertical .rangeHandle{left:.25em;top:auto;transform:translateY(50%) translate(-50%)}.rangeSlider.vertical.reversed .rangeHandle{transform:translateY(-50%) translate(-50%)}.rangeSlider .rangeNub,.rangeSlider .rangeHandle:before{position:absolute;left:0;top:0;display:block;border-radius:10em;height:100%;width:100%;transition:box-shadow .2s ease}.rangeSlider .rangeHandle:before{content:"";inset:1px;height:auto;width:auto;box-shadow:0 0 0 0 var(--handle-border);opacity:0}.rangeSlider.hoverable:not(.disabled) .rangeHandle:hover:before{box-shadow:0 0 0 8px var(--handle-border);opacity:.2}.rangeSlider.hoverable:not(.disabled) .rangeHandle.press:before,.rangeSlider.hoverable:not(.disabled) .rangeHandle.press:hover:before{box-shadow:0 0 0 12px var(--handle-border);opacity:.4}.rangeSlider.range:not(.min):not(.max) .rangeNub{border-radius:10em 10em 10em 1.6em}.rangeSlider.range .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(-135deg)}.rangeSlider.range .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(45deg)}.rangeSlider.range.reversed .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(45deg)}.rangeSlider.range.reversed .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(-135deg)}.rangeSlider.range.vertical .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(135deg)}.rangeSlider.range.vertical .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(-45deg)}.rangeSlider.range.vertical.reversed .rangeHandle:nth-of-type(1) .rangeNub{transform:rotate(-45deg)}.rangeSlider.range.vertical.reversed .rangeHandle:nth-of-type(2) .rangeNub{transform:rotate(135deg)}.rangeSlider .rangeFloat{display:block;position:absolute;left:50%;top:-.5em;transform:translate(-50%,-100%);font-size:1em;text-align:center;opacity:0;pointer-events:none;white-space:nowrap;transition:all .2s ease;font-size:.9em;padding:.2em .4em;border-radius:.2em}.rangeSlider .rangeHandle.active .rangeFloat,.rangeSlider.hoverable .rangeHandle:hover .rangeFloat{opacity:1;top:-.2em;transform:translate(-50%,-100%)}.rangeSlider .rangeBar{position:absolute;display:block;transition:background .2s ease;border-radius:1em;height:.5em;top:0;user-select:none;z-index:1}.rangeSlider.vertical .rangeBar{width:.5em;height:auto}.rangeSlider{background-color:#d7dada;background-color:var(--slider)}.rangeSlider 
.rangeBar{background-color:#99a2a2;background-color:var(--range-inactive)}.rangeSlider.focus .rangeBar{background-color:#838de7;background-color:var(--range)}.rangeSlider .rangeNub{background-color:#99a2a2;background-color:var(--handle-inactive)}.rangeSlider.focus .rangeNub{background-color:#838de7;background-color:var(--handle)}.rangeSlider .rangeHandle.active .rangeNub{background-color:#4a40d4;background-color:var(--handle-focus)}.rangeSlider .rangeFloat{color:#fff;color:var(--float-text);background-color:#99a2a2;background-color:var(--float-inactive)}.rangeSlider.focus .rangeFloat{background-color:#4a40d4;background-color:var(--float)}.rangeSlider.disabled{opacity:.5}.rangeSlider.disabled .rangeNub{background-color:#d7dada;background-color:var(--slider)}.mic-wrap.svelte-1thnwz{padding:var(--size-2)}.record-icon.svelte-1thnwz{display:flex;position:relative;margin-right:var(--size-2);width:6px;height:6px}.dot.svelte-1thnwz{display:inline-flex;position:relative;border-radius:var(--radius-full);background:var(--color-red-500);width:6px;height:6px}.pinger.svelte-1thnwz{display:inline-flex;position:absolute;opacity:.9;animation:svelte-1thnwz-ping 1s cubic-bezier(0,0,.2,1) infinite;border-radius:var(--radius-full);background:var(--color-red-500);width:var(--size-full);height:var(--size-full)}@keyframes svelte-1thnwz-ping{75%,to{transform:scale(2);opacity:0}}audio.svelte-1thnwz{padding:var(--size-2);width:var(--size-full);height:var(--size-14)}audio.svelte-1yfus5a{padding:var(--size-2);width:var(--size-full);height:var(--size-14)}.icon-button.svelte-1yfus5a{position:absolute;top:6px;right:6px} diff --git a/spaces/DaleChen/AutoGPT/autogpt/processing/html.py b/spaces/DaleChen/AutoGPT/autogpt/processing/html.py deleted file mode 100644 index 81387b12adab5023150c55f2075ddd40b554f386..0000000000000000000000000000000000000000 --- a/spaces/DaleChen/AutoGPT/autogpt/processing/html.py +++ /dev/null @@ -1,33 +0,0 @@ -"""HTML processing functions""" -from __future__ import annotations - -from bs4 import BeautifulSoup -from requests.compat import urljoin - - -def extract_hyperlinks(soup: BeautifulSoup, base_url: str) -> list[tuple[str, str]]: - """Extract hyperlinks from a BeautifulSoup object - - Args: - soup (BeautifulSoup): The BeautifulSoup object - base_url (str): The base URL - - Returns: - List[Tuple[str, str]]: The extracted hyperlinks - """ - return [ - (link.text, urljoin(base_url, link["href"])) - for link in soup.find_all("a", href=True) - ] - - -def format_hyperlinks(hyperlinks: list[tuple[str, str]]) -> list[str]: - """Format hyperlinks to be displayed to the user - - Args: - hyperlinks (List[Tuple[str, str]]): The hyperlinks to format - - Returns: - List[str]: The formatted hyperlinks - """ - return [f"{link_text} ({link_url})" for link_text, link_url in hyperlinks] diff --git a/spaces/Dantra1/CeliaSensei/models.py b/spaces/Dantra1/CeliaSensei/models.py deleted file mode 100644 index 8353b867f441de7e4d05aef980e672899c3a8889..0000000000000000000000000000000000000000 --- a/spaces/Dantra1/CeliaSensei/models.py +++ /dev/null @@ -1,533 +0,0 @@ -import math -import torch -from torch import nn -from torch.nn import functional as F - -import commons -import modules -import attentions -import monotonic_align - -from torch.nn import Conv1d, ConvTranspose1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from commons import init_weights, get_padding - - -class StochasticDurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, 
p_dropout, n_flows=4, gin_channels=0): - super().__init__() - filter_channels = in_channels # it needs to be removed from future version. - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.log_flow = modules.Log() - self.flows = nn.ModuleList() - self.flows.append(modules.ElementwiseAffine(2)) - for i in range(n_flows): - self.flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.flows.append(modules.Flip()) - - self.post_pre = nn.Conv1d(1, filter_channels, 1) - self.post_proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.post_convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - self.post_flows = nn.ModuleList() - self.post_flows.append(modules.ElementwiseAffine(2)) - for i in range(4): - self.post_flows.append(modules.ConvFlow(2, filter_channels, kernel_size, n_layers=3)) - self.post_flows.append(modules.Flip()) - - self.pre = nn.Conv1d(in_channels, filter_channels, 1) - self.proj = nn.Conv1d(filter_channels, filter_channels, 1) - self.convs = modules.DDSConv(filter_channels, kernel_size, n_layers=3, p_dropout=p_dropout) - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, filter_channels, 1) - - def forward(self, x, x_mask, w=None, g=None, reverse=False, noise_scale=1.0): - x = torch.detach(x) - x = self.pre(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.convs(x, x_mask) - x = self.proj(x) * x_mask - - if not reverse: - flows = self.flows - assert w is not None - - logdet_tot_q = 0 - h_w = self.post_pre(w) - h_w = self.post_convs(h_w, x_mask) - h_w = self.post_proj(h_w) * x_mask - e_q = torch.randn(w.size(0), 2, w.size(2)).to(device=x.device, dtype=x.dtype) * x_mask - z_q = e_q - for flow in self.post_flows: - z_q, logdet_q = flow(z_q, x_mask, g=(x + h_w)) - logdet_tot_q += logdet_q - z_u, z1 = torch.split(z_q, [1, 1], 1) - u = torch.sigmoid(z_u) * x_mask - z0 = (w - u) * x_mask - logdet_tot_q += torch.sum((F.logsigmoid(z_u) + F.logsigmoid(-z_u)) * x_mask, [1,2]) - logq = torch.sum(-0.5 * (math.log(2*math.pi) + (e_q**2)) * x_mask, [1,2]) - logdet_tot_q - - logdet_tot = 0 - z0, logdet = self.log_flow(z0, x_mask) - logdet_tot += logdet - z = torch.cat([z0, z1], 1) - for flow in flows: - z, logdet = flow(z, x_mask, g=x, reverse=reverse) - logdet_tot = logdet_tot + logdet - nll = torch.sum(0.5 * (math.log(2*math.pi) + (z**2)) * x_mask, [1,2]) - logdet_tot - return nll + logq # [b] - else: - flows = list(reversed(self.flows)) - flows = flows[:-2] + [flows[-1]] # remove a useless vflow - z = torch.randn(x.size(0), 2, x.size(2)).to(device=x.device, dtype=x.dtype) * noise_scale - for flow in flows: - z = flow(z, x_mask, g=x, reverse=reverse) - z0, z1 = torch.split(z, [1, 1], 1) - logw = z0 - return logw - - -class DurationPredictor(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, p_dropout, gin_channels=0): - super().__init__() - - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.gin_channels = gin_channels - - self.drop = nn.Dropout(p_dropout) - self.conv_1 = nn.Conv1d(in_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_1 = modules.LayerNorm(filter_channels) - self.conv_2 = nn.Conv1d(filter_channels, filter_channels, kernel_size, padding=kernel_size//2) - self.norm_2 = 
modules.LayerNorm(filter_channels) - self.proj = nn.Conv1d(filter_channels, 1, 1) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, in_channels, 1) - - def forward(self, x, x_mask, g=None): - x = torch.detach(x) - if g is not None: - g = torch.detach(g) - x = x + self.cond(g) - x = self.conv_1(x * x_mask) - x = torch.relu(x) - x = self.norm_1(x) - x = self.drop(x) - x = self.conv_2(x * x_mask) - x = torch.relu(x) - x = self.norm_2(x) - x = self.drop(x) - x = self.proj(x * x_mask) - return x * x_mask - - -class TextEncoder(nn.Module): - def __init__(self, - n_vocab, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout): - super().__init__() - self.n_vocab = n_vocab - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - - self.emb = nn.Embedding(n_vocab, hidden_channels) - nn.init.normal_(self.emb.weight, 0.0, hidden_channels**-0.5) - - self.encoder = attentions.Encoder( - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.proj= nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths): - x = self.emb(x) * math.sqrt(self.hidden_channels) # [b, t, h] - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return x, m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append(modules.ResidualCouplingLayer(channels, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels, mean_only=True)) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - -class PosteriorEncoder(nn.Module): - def __init__(self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN(hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=gin_channels) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to(x.dtype) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + 
torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - -class Generator(torch.nn.Module): - def __init__(self, initial_channel, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=0): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d(initial_channel, upsample_initial_channel, 7, 1, padding=3) - resblock = modules.ResBlock1 if resblock == '1' else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append(weight_norm( - ConvTranspose1d(upsample_initial_channel//(2**i), upsample_initial_channel//(2**(i+1)), - k, u, padding=(k-u)//2))) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel//(2**(i+1)) - for j, (k, d) in enumerate(zip(resblock_kernel_sizes, resblock_dilation_sizes)): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i*self.num_kernels+j](x) - else: - xs += self.resblocks[i*self.num_kernels+j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - print('Removing weight norm...') - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv2d(1, 32, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(32, 128, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(128, 512, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(512, 1024, (kernel_size, 1), (stride, 1), padding=(get_padding(kernel_size, 1), 0))), - norm_f(Conv2d(1024, 1024, (kernel_size, 1), 1, padding=(get_padding(kernel_size, 1), 0))), - ]) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList([ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 
64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ]) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2,3,5,7,11] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - - -class SynthesizerTrn(nn.Module): - """ - Synthesizer for Training - """ - - def __init__(self, - n_vocab, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - n_speakers=0, - gin_channels=0, - use_sdp=True, - **kwargs): - - super().__init__() - self.n_vocab = n_vocab - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.n_speakers = n_speakers - self.gin_channels = gin_channels - - self.use_sdp = use_sdp - - self.enc_p = TextEncoder(n_vocab, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout) - self.dec = Generator(inter_channels, resblock, resblock_kernel_sizes, resblock_dilation_sizes, upsample_rates, upsample_initial_channel, upsample_kernel_sizes, gin_channels=gin_channels) - self.enc_q = PosteriorEncoder(spec_channels, inter_channels, hidden_channels, 5, 1, 16, gin_channels=gin_channels) - self.flow = ResidualCouplingBlock(inter_channels, hidden_channels, 5, 1, 4, gin_channels=gin_channels) - - if use_sdp: - self.dp = StochasticDurationPredictor(hidden_channels, 192, 3, 0.5, 4, gin_channels=gin_channels) - else: - self.dp = DurationPredictor(hidden_channels, 256, 3, 0.5, gin_channels=gin_channels) - - if n_speakers > 1: - self.emb_g = nn.Embedding(n_speakers, gin_channels) - - def forward(self, x, x_lengths, y, y_lengths, sid=None): - - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - 
z_p = self.flow(z, y_mask, g=g) - - with torch.no_grad(): - # negative cross-entropy - s_p_sq_r = torch.exp(-2 * logs_p) # [b, d, t] - neg_cent1 = torch.sum(-0.5 * math.log(2 * math.pi) - logs_p, [1], keepdim=True) # [b, 1, t_s] - neg_cent2 = torch.matmul(-0.5 * (z_p ** 2).transpose(1, 2), s_p_sq_r) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent3 = torch.matmul(z_p.transpose(1, 2), (m_p * s_p_sq_r)) # [b, t_t, d] x [b, d, t_s] = [b, t_t, t_s] - neg_cent4 = torch.sum(-0.5 * (m_p ** 2) * s_p_sq_r, [1], keepdim=True) # [b, 1, t_s] - neg_cent = neg_cent1 + neg_cent2 + neg_cent3 + neg_cent4 - - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = monotonic_align.maximum_path(neg_cent, attn_mask.squeeze(1)).unsqueeze(1).detach() - - w = attn.sum(2) - if self.use_sdp: - l_length = self.dp(x, x_mask, w, g=g) - l_length = l_length / torch.sum(x_mask) - else: - logw_ = torch.log(w + 1e-6) * x_mask - logw = self.dp(x, x_mask, g=g) - l_length = torch.sum((logw - logw_)**2, [1,2]) / torch.sum(x_mask) # for averaging - - # expand prior - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) - - z_slice, ids_slice = commons.rand_slice_segments(z, y_lengths, self.segment_size) - o = self.dec(z_slice, g=g) - return o, l_length, attn, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, x, x_lengths, sid=None, noise_scale=1, length_scale=1, noise_scale_w=1., max_len=None): - x, m_p, logs_p, x_mask = self.enc_p(x, x_lengths) - if self.n_speakers > 0: - g = self.emb_g(sid).unsqueeze(-1) # [b, h, 1] - else: - g = None - - if self.use_sdp: - logw = self.dp(x, x_mask, g=g, reverse=True, noise_scale=noise_scale_w) - else: - logw = self.dp(x, x_mask, g=g) - w = torch.exp(logw) * x_mask * length_scale - w_ceil = torch.ceil(w) - y_lengths = torch.clamp_min(torch.sum(w_ceil, [1, 2]), 1).long() - y_mask = torch.unsqueeze(commons.sequence_mask(y_lengths, None), 1).to(x_mask.dtype) - attn_mask = torch.unsqueeze(x_mask, 2) * torch.unsqueeze(y_mask, -1) - attn = commons.generate_path(w_ceil, attn_mask) - - m_p = torch.matmul(attn.squeeze(1), m_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - logs_p = torch.matmul(attn.squeeze(1), logs_p.transpose(1, 2)).transpose(1, 2) # [b, t', t], [b, t, d] -> [b, d, t'] - - z_p = m_p + torch.randn_like(m_p) * torch.exp(logs_p) * noise_scale - z = self.flow(z_p, y_mask, g=g, reverse=True) - o = self.dec((z * y_mask)[:,:,:max_len], g=g) - return o, attn, y_mask, (z, z_p, m_p, logs_p) - - def voice_conversion(self, y, y_lengths, sid_src, sid_tgt): - assert self.n_speakers > 0, "n_speakers have to be larger than 0." - g_src = self.emb_g(sid_src).unsqueeze(-1) - g_tgt = self.emb_g(sid_tgt).unsqueeze(-1) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g_src) - z_p = self.flow(z, y_mask, g=g_src) - z_hat = self.flow(z_p, y_mask, g=g_tgt, reverse=True) - o_hat = self.dec(z_hat * y_mask, g=g_tgt) - return o_hat, y_mask, (z, z_p, z_hat) - diff --git a/spaces/Datasculptor/DescriptionGPT/tools/download_cc.py b/spaces/Datasculptor/DescriptionGPT/tools/download_cc.py deleted file mode 100644 index 3c43690a3ca407c3553686d9eb51db9c1834f156..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/DescriptionGPT/tools/download_cc.py +++ /dev/null @@ -1,47 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. 
-import os -import json -import argparse -from PIL import Image -import numpy as np - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument('--ann', default='datasets/cc3m/Train_GCC-training.tsv') - parser.add_argument('--save_image_path', default='datasets/cc3m/training/') - parser.add_argument('--cat_info', default='datasets/lvis/lvis_v1_val.json') - parser.add_argument('--out_path', default='datasets/cc3m/train_image_info.json') - parser.add_argument('--not_download_image', action='store_true') - args = parser.parse_args() - categories = json.load(open(args.cat_info, 'r'))['categories'] - images = [] - if not os.path.exists(args.save_image_path): - os.makedirs(args.save_image_path) - f = open(args.ann) - for i, line in enumerate(f): - cap, path = line[:-1].split('\t') - print(i, cap, path) - if not args.not_download_image: - os.system( - 'wget {} -O {}/{}.jpg'.format( - path, args.save_image_path, i + 1)) - try: - img = Image.open( - open('{}/{}.jpg'.format(args.save_image_path, i + 1), "rb")) - img = np.asarray(img.convert("RGB")) - h, w = img.shape[:2] - except: - continue - image_info = { - 'id': i + 1, - 'file_name': '{}.jpg'.format(i + 1), - 'height': h, - 'width': w, - 'captions': [cap], - } - images.append(image_info) - data = {'categories': categories, 'images': images, 'annotations': []} - for k, v in data.items(): - print(k, len(v)) - print('Saving to', args.out_path) - json.dump(data, open(args.out_path, 'w')) diff --git a/spaces/Datasculptor/MusicGen/tests/utils/__init__.py b/spaces/Datasculptor/MusicGen/tests/utils/__init__.py deleted file mode 100644 index 0952fcc3f57e34b3747962e9ebd6fc57aeea63fa..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/MusicGen/tests/utils/__init__.py +++ /dev/null @@ -1,5 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
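For reference, the deleted `tools/download_cc.py` above writes a COCO-style image-info JSON: the LVIS category list, one `images` record per successfully downloaded Conceptual Captions image (with its caption), and an empty `annotations` list. A minimal sketch of inspecting that output, assuming the script has already been run with its argparse defaults (the path below is taken from its `--out_path` default and is an assumption about the local layout):

```python
import json

# Path taken from download_cc.py's --out_path default; adjust if the
# script was run with a different output location.
out_path = "datasets/cc3m/train_image_info.json"

with open(out_path) as f:
    data = json.load(f)

# The script stores LVIS categories, one record per downloaded image,
# and an empty annotation list.
print("categories:", len(data["categories"]))
print("images:", len(data["images"]))
print("annotations:", len(data["annotations"]))

# Each image record carries id, file_name, height, width, and captions.
first = data["images"][0]
print(first["file_name"], first["height"], first["width"], first["captions"][0])
```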
diff --git a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/psp.py b/spaces/Datasculptor/StyleGAN-NADA/e4e/models/psp.py deleted file mode 100644 index 36c0b2b7b3fdd28bc32272d0d8fcff24e4848355..0000000000000000000000000000000000000000 --- a/spaces/Datasculptor/StyleGAN-NADA/e4e/models/psp.py +++ /dev/null @@ -1,99 +0,0 @@ -import matplotlib - -matplotlib.use('Agg') -import torch -from torch import nn -from e4e.models.encoders import psp_encoders -from e4e.models.stylegan2.model import Generator -from e4e.configs.paths_config import model_paths - - -def get_keys(d, name): - if 'state_dict' in d: - d = d['state_dict'] - d_filt = {k[len(name) + 1:]: v for k, v in d.items() if k[:len(name)] == name} - return d_filt - - -class pSp(nn.Module): - - def __init__(self, opts, device): - super(pSp, self).__init__() - self.opts = opts - self.device = device - # Define architecture - self.encoder = self.set_encoder() - self.decoder = Generator(opts.stylegan_size, 512, 8, channel_multiplier=2) - self.face_pool = torch.nn.AdaptiveAvgPool2d((256, 256)) - # Load weights if needed - self.load_weights() - - def set_encoder(self): - if self.opts.encoder_type == 'GradualStyleEncoder': - encoder = psp_encoders.GradualStyleEncoder(50, 'ir_se', self.opts) - elif self.opts.encoder_type == 'Encoder4Editing': - encoder = psp_encoders.Encoder4Editing(50, 'ir_se', self.opts) - else: - raise Exception('{} is not a valid encoders'.format(self.opts.encoder_type)) - return encoder - - def load_weights(self): - if self.opts.checkpoint_path is not None: - print('Loading e4e over the pSp framework from checkpoint: {}'.format(self.opts.checkpoint_path)) - ckpt = torch.load(self.opts.checkpoint_path, map_location='cpu') - self.encoder.load_state_dict(get_keys(ckpt, 'encoder'), strict=True) - self.decoder.load_state_dict(get_keys(ckpt, 'decoder'), strict=True) - self.__load_latent_avg(ckpt) - else: - print('Loading encoders weights from irse50!') - encoder_ckpt = torch.load(model_paths['ir_se50']) - self.encoder.load_state_dict(encoder_ckpt, strict=False) - print('Loading decoder weights from pretrained!') - ckpt = torch.load(self.opts.stylegan_weights) - self.decoder.load_state_dict(ckpt['g_ema'], strict=False) - self.__load_latent_avg(ckpt, repeat=self.encoder.style_count) - - def forward(self, x, resize=True, latent_mask=None, input_code=False, randomize_noise=True, - inject_latent=None, return_latents=False, alpha=None): - if input_code: - codes = x - else: - codes = self.encoder(x) - # normalize with respect to the center of an average face - if self.opts.start_from_latent_avg: - if codes.ndim == 2: - codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1)[:, 0, :] - else: - codes = codes + self.latent_avg.repeat(codes.shape[0], 1, 1) - - if latent_mask is not None: - for i in latent_mask: - if inject_latent is not None: - if alpha is not None: - codes[:, i] = alpha * inject_latent[:, i] + (1 - alpha) * codes[:, i] - else: - codes[:, i] = inject_latent[:, i] - else: - codes[:, i] = 0 - - input_is_latent = not input_code - images, result_latent = self.decoder([codes], - input_is_latent=input_is_latent, - randomize_noise=randomize_noise, - return_latents=return_latents) - - if resize: - images = self.face_pool(images) - - if return_latents: - return images, result_latent - else: - return images - - def __load_latent_avg(self, ckpt, repeat=None): - if 'latent_avg' in ckpt: - self.latent_avg = ckpt['latent_avg'].to(self.device) - if repeat is not None: - self.latent_avg = self.latent_avg.repeat(repeat, 1) - else: - 
self.latent_avg = None diff --git a/spaces/DeepDrivePL/PaddleSeg-Matting/matting/model/hrnet.py b/spaces/DeepDrivePL/PaddleSeg-Matting/matting/model/hrnet.py deleted file mode 100644 index 96e23a77e656142a97c573feb501f983aecebbef..0000000000000000000000000000000000000000 --- a/spaces/DeepDrivePL/PaddleSeg-Matting/matting/model/hrnet.py +++ /dev/null @@ -1,835 +0,0 @@ -# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F - -from paddleseg.cvlibs import manager, param_init -from paddleseg.models import layers -from paddleseg.utils import utils - -__all__ = [ - "HRNet_W18_Small_V1", "HRNet_W18_Small_V2", "HRNet_W18", "HRNet_W30", - "HRNet_W32", "HRNet_W40", "HRNet_W44", "HRNet_W48", "HRNet_W60", "HRNet_W64" -] - - -class HRNet(nn.Layer): - """ - The HRNet implementation based on PaddlePaddle. - - The original article refers to - Jingdong Wang, et, al. "HRNet:Deep High-Resolution Representation Learning for Visual Recognition" - (https://arxiv.org/pdf/1908.07919.pdf). - - Args: - pretrained (str, optional): The path of pretrained model. - stage1_num_modules (int, optional): Number of modules for stage1. Default 1. - stage1_num_blocks (list, optional): Number of blocks per module for stage1. Default (4). - stage1_num_channels (list, optional): Number of channels per branch for stage1. Default (64). - stage2_num_modules (int, optional): Number of modules for stage2. Default 1. - stage2_num_blocks (list, optional): Number of blocks per module for stage2. Default (4, 4). - stage2_num_channels (list, optional): Number of channels per branch for stage2. Default (18, 36). - stage3_num_modules (int, optional): Number of modules for stage3. Default 4. - stage3_num_blocks (list, optional): Number of blocks per module for stage3. Default (4, 4, 4). - stage3_num_channels (list, optional): Number of channels per branch for stage3. Default [18, 36, 72). - stage4_num_modules (int, optional): Number of modules for stage4. Default 3. - stage4_num_blocks (list, optional): Number of blocks per module for stage4. Default (4, 4, 4, 4). - stage4_num_channels (list, optional): Number of channels per branch for stage4. Default (18, 36, 72. 144). - has_se (bool, optional): Whether to use Squeeze-and-Excitation module. Default False. - align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, - e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. 
- """ - - def __init__(self, - input_channels=3, - pretrained=None, - stage1_num_modules=1, - stage1_num_blocks=(4, ), - stage1_num_channels=(64, ), - stage2_num_modules=1, - stage2_num_blocks=(4, 4), - stage2_num_channels=(18, 36), - stage3_num_modules=4, - stage3_num_blocks=(4, 4, 4), - stage3_num_channels=(18, 36, 72), - stage4_num_modules=3, - stage4_num_blocks=(4, 4, 4, 4), - stage4_num_channels=(18, 36, 72, 144), - has_se=False, - align_corners=False, - padding_same=True): - super(HRNet, self).__init__() - self.pretrained = pretrained - self.stage1_num_modules = stage1_num_modules - self.stage1_num_blocks = stage1_num_blocks - self.stage1_num_channels = stage1_num_channels - self.stage2_num_modules = stage2_num_modules - self.stage2_num_blocks = stage2_num_blocks - self.stage2_num_channels = stage2_num_channels - self.stage3_num_modules = stage3_num_modules - self.stage3_num_blocks = stage3_num_blocks - self.stage3_num_channels = stage3_num_channels - self.stage4_num_modules = stage4_num_modules - self.stage4_num_blocks = stage4_num_blocks - self.stage4_num_channels = stage4_num_channels - self.has_se = has_se - self.align_corners = align_corners - - self.feat_channels = [i for i in stage4_num_channels] - self.feat_channels = [64] + self.feat_channels - - self.conv_layer1_1 = layers.ConvBNReLU( - in_channels=input_channels, - out_channels=64, - kernel_size=3, - stride=2, - padding=1 if not padding_same else 'same', - bias_attr=False) - - self.conv_layer1_2 = layers.ConvBNReLU( - in_channels=64, - out_channels=64, - kernel_size=3, - stride=2, - padding=1 if not padding_same else 'same', - bias_attr=False) - - self.la1 = Layer1( - num_channels=64, - num_blocks=self.stage1_num_blocks[0], - num_filters=self.stage1_num_channels[0], - has_se=has_se, - name="layer2", - padding_same=padding_same) - - self.tr1 = TransitionLayer( - in_channels=[self.stage1_num_channels[0] * 4], - out_channels=self.stage2_num_channels, - name="tr1", - padding_same=padding_same) - - self.st2 = Stage( - num_channels=self.stage2_num_channels, - num_modules=self.stage2_num_modules, - num_blocks=self.stage2_num_blocks, - num_filters=self.stage2_num_channels, - has_se=self.has_se, - name="st2", - align_corners=align_corners, - padding_same=padding_same) - - self.tr2 = TransitionLayer( - in_channels=self.stage2_num_channels, - out_channels=self.stage3_num_channels, - name="tr2", - padding_same=padding_same) - self.st3 = Stage( - num_channels=self.stage3_num_channels, - num_modules=self.stage3_num_modules, - num_blocks=self.stage3_num_blocks, - num_filters=self.stage3_num_channels, - has_se=self.has_se, - name="st3", - align_corners=align_corners, - padding_same=padding_same) - - self.tr3 = TransitionLayer( - in_channels=self.stage3_num_channels, - out_channels=self.stage4_num_channels, - name="tr3", - padding_same=padding_same) - self.st4 = Stage( - num_channels=self.stage4_num_channels, - num_modules=self.stage4_num_modules, - num_blocks=self.stage4_num_blocks, - num_filters=self.stage4_num_channels, - has_se=self.has_se, - name="st4", - align_corners=align_corners, - padding_same=padding_same) - - self.init_weight() - - def forward(self, x): - feat_list = [] - conv1 = self.conv_layer1_1(x) - feat_list.append(conv1) - conv2 = self.conv_layer1_2(conv1) - - la1 = self.la1(conv2) - - tr1 = self.tr1([la1]) - st2 = self.st2(tr1) - - tr2 = self.tr2(st2) - st3 = self.st3(tr2) - - tr3 = self.tr3(st3) - st4 = self.st4(tr3) - - feat_list = feat_list + st4 - - return feat_list - - def init_weight(self): - for layer in 
self.sublayers(): - if isinstance(layer, nn.Conv2D): - param_init.normal_init(layer.weight, std=0.001) - elif isinstance(layer, (nn.BatchNorm, nn.SyncBatchNorm)): - param_init.constant_init(layer.weight, value=1.0) - param_init.constant_init(layer.bias, value=0.0) - if self.pretrained is not None: - utils.load_pretrained_model(self, self.pretrained) - - -class Layer1(nn.Layer): - def __init__(self, - num_channels, - num_filters, - num_blocks, - has_se=False, - name=None, - padding_same=True): - super(Layer1, self).__init__() - - self.bottleneck_block_list = [] - - for i in range(num_blocks): - bottleneck_block = self.add_sublayer( - "bb_{}_{}".format(name, i + 1), - BottleneckBlock( - num_channels=num_channels if i == 0 else num_filters * 4, - num_filters=num_filters, - has_se=has_se, - stride=1, - downsample=True if i == 0 else False, - name=name + '_' + str(i + 1), - padding_same=padding_same)) - self.bottleneck_block_list.append(bottleneck_block) - - def forward(self, x): - conv = x - for block_func in self.bottleneck_block_list: - conv = block_func(conv) - return conv - - -class TransitionLayer(nn.Layer): - def __init__(self, in_channels, out_channels, name=None, padding_same=True): - super(TransitionLayer, self).__init__() - - num_in = len(in_channels) - num_out = len(out_channels) - self.conv_bn_func_list = [] - for i in range(num_out): - residual = None - if i < num_in: - if in_channels[i] != out_channels[i]: - residual = self.add_sublayer( - "transition_{}_layer_{}".format(name, i + 1), - layers.ConvBNReLU( - in_channels=in_channels[i], - out_channels=out_channels[i], - kernel_size=3, - padding=1 if not padding_same else 'same', - bias_attr=False)) - else: - residual = self.add_sublayer( - "transition_{}_layer_{}".format(name, i + 1), - layers.ConvBNReLU( - in_channels=in_channels[-1], - out_channels=out_channels[i], - kernel_size=3, - stride=2, - padding=1 if not padding_same else 'same', - bias_attr=False)) - self.conv_bn_func_list.append(residual) - - def forward(self, x): - outs = [] - for idx, conv_bn_func in enumerate(self.conv_bn_func_list): - if conv_bn_func is None: - outs.append(x[idx]) - else: - if idx < len(x): - outs.append(conv_bn_func(x[idx])) - else: - outs.append(conv_bn_func(x[-1])) - return outs - - -class Branches(nn.Layer): - def __init__(self, - num_blocks, - in_channels, - out_channels, - has_se=False, - name=None, - padding_same=True): - super(Branches, self).__init__() - - self.basic_block_list = [] - - for i in range(len(out_channels)): - self.basic_block_list.append([]) - for j in range(num_blocks[i]): - in_ch = in_channels[i] if j == 0 else out_channels[i] - basic_block_func = self.add_sublayer( - "bb_{}_branch_layer_{}_{}".format(name, i + 1, j + 1), - BasicBlock( - num_channels=in_ch, - num_filters=out_channels[i], - has_se=has_se, - name=name + '_branch_layer_' + str(i + 1) + '_' + - str(j + 1), - padding_same=padding_same)) - self.basic_block_list[i].append(basic_block_func) - - def forward(self, x): - outs = [] - for idx, input in enumerate(x): - conv = input - for basic_block_func in self.basic_block_list[idx]: - conv = basic_block_func(conv) - outs.append(conv) - return outs - - -class BottleneckBlock(nn.Layer): - def __init__(self, - num_channels, - num_filters, - has_se, - stride=1, - downsample=False, - name=None, - padding_same=True): - super(BottleneckBlock, self).__init__() - - self.has_se = has_se - self.downsample = downsample - - self.conv1 = layers.ConvBNReLU( - in_channels=num_channels, - out_channels=num_filters, - kernel_size=1, - 
bias_attr=False) - - self.conv2 = layers.ConvBNReLU( - in_channels=num_filters, - out_channels=num_filters, - kernel_size=3, - stride=stride, - padding=1 if not padding_same else 'same', - bias_attr=False) - - self.conv3 = layers.ConvBN( - in_channels=num_filters, - out_channels=num_filters * 4, - kernel_size=1, - bias_attr=False) - - if self.downsample: - self.conv_down = layers.ConvBN( - in_channels=num_channels, - out_channels=num_filters * 4, - kernel_size=1, - bias_attr=False) - - if self.has_se: - self.se = SELayer( - num_channels=num_filters * 4, - num_filters=num_filters * 4, - reduction_ratio=16, - name=name + '_fc') - - self.add = layers.Add() - self.relu = layers.Activation("relu") - - def forward(self, x): - residual = x - conv1 = self.conv1(x) - conv2 = self.conv2(conv1) - conv3 = self.conv3(conv2) - - if self.downsample: - residual = self.conv_down(x) - - if self.has_se: - conv3 = self.se(conv3) - - y = self.add(conv3, residual) - y = self.relu(y) - return y - - -class BasicBlock(nn.Layer): - def __init__(self, - num_channels, - num_filters, - stride=1, - has_se=False, - downsample=False, - name=None, - padding_same=True): - super(BasicBlock, self).__init__() - - self.has_se = has_se - self.downsample = downsample - - self.conv1 = layers.ConvBNReLU( - in_channels=num_channels, - out_channels=num_filters, - kernel_size=3, - stride=stride, - padding=1 if not padding_same else 'same', - bias_attr=False) - self.conv2 = layers.ConvBN( - in_channels=num_filters, - out_channels=num_filters, - kernel_size=3, - padding=1 if not padding_same else 'same', - bias_attr=False) - - if self.downsample: - self.conv_down = layers.ConvBNReLU( - in_channels=num_channels, - out_channels=num_filters, - kernel_size=1, - bias_attr=False) - - if self.has_se: - self.se = SELayer( - num_channels=num_filters, - num_filters=num_filters, - reduction_ratio=16, - name=name + '_fc') - - self.add = layers.Add() - self.relu = layers.Activation("relu") - - def forward(self, x): - residual = x - conv1 = self.conv1(x) - conv2 = self.conv2(conv1) - - if self.downsample: - residual = self.conv_down(x) - - if self.has_se: - conv2 = self.se(conv2) - - y = self.add(conv2, residual) - y = self.relu(y) - return y - - -class SELayer(nn.Layer): - def __init__(self, num_channels, num_filters, reduction_ratio, name=None): - super(SELayer, self).__init__() - - self.pool2d_gap = nn.AdaptiveAvgPool2D(1) - - self._num_channels = num_channels - - med_ch = int(num_channels / reduction_ratio) - stdv = 1.0 / math.sqrt(num_channels * 1.0) - self.squeeze = nn.Linear( - num_channels, - med_ch, - weight_attr=paddle.ParamAttr( - initializer=nn.initializer.Uniform(-stdv, stdv))) - - stdv = 1.0 / math.sqrt(med_ch * 1.0) - self.excitation = nn.Linear( - med_ch, - num_filters, - weight_attr=paddle.ParamAttr( - initializer=nn.initializer.Uniform(-stdv, stdv))) - - def forward(self, x): - pool = self.pool2d_gap(x) - pool = paddle.reshape(pool, shape=[-1, self._num_channels]) - squeeze = self.squeeze(pool) - squeeze = F.relu(squeeze) - excitation = self.excitation(squeeze) - excitation = F.sigmoid(excitation) - excitation = paddle.reshape( - excitation, shape=[-1, self._num_channels, 1, 1]) - out = x * excitation - return out - - -class Stage(nn.Layer): - def __init__(self, - num_channels, - num_modules, - num_blocks, - num_filters, - has_se=False, - multi_scale_output=True, - name=None, - align_corners=False, - padding_same=True): - super(Stage, self).__init__() - - self._num_modules = num_modules - - self.stage_func_list = [] - for i in 
range(num_modules): - if i == num_modules - 1 and not multi_scale_output: - stage_func = self.add_sublayer( - "stage_{}_{}".format(name, i + 1), - HighResolutionModule( - num_channels=num_channels, - num_blocks=num_blocks, - num_filters=num_filters, - has_se=has_se, - multi_scale_output=False, - name=name + '_' + str(i + 1), - align_corners=align_corners, - padding_same=padding_same)) - else: - stage_func = self.add_sublayer( - "stage_{}_{}".format(name, i + 1), - HighResolutionModule( - num_channels=num_channels, - num_blocks=num_blocks, - num_filters=num_filters, - has_se=has_se, - name=name + '_' + str(i + 1), - align_corners=align_corners, - padding_same=padding_same)) - - self.stage_func_list.append(stage_func) - - def forward(self, x): - out = x - for idx in range(self._num_modules): - out = self.stage_func_list[idx](out) - return out - - -class HighResolutionModule(nn.Layer): - def __init__(self, - num_channels, - num_blocks, - num_filters, - has_se=False, - multi_scale_output=True, - name=None, - align_corners=False, - padding_same=True): - super(HighResolutionModule, self).__init__() - - self.branches_func = Branches( - num_blocks=num_blocks, - in_channels=num_channels, - out_channels=num_filters, - has_se=has_se, - name=name, - padding_same=padding_same) - - self.fuse_func = FuseLayers( - in_channels=num_filters, - out_channels=num_filters, - multi_scale_output=multi_scale_output, - name=name, - align_corners=align_corners, - padding_same=padding_same) - - def forward(self, x): - out = self.branches_func(x) - out = self.fuse_func(out) - return out - - -class FuseLayers(nn.Layer): - def __init__(self, - in_channels, - out_channels, - multi_scale_output=True, - name=None, - align_corners=False, - padding_same=True): - super(FuseLayers, self).__init__() - - self._actual_ch = len(in_channels) if multi_scale_output else 1 - self._in_channels = in_channels - self.align_corners = align_corners - - self.residual_func_list = [] - for i in range(self._actual_ch): - for j in range(len(in_channels)): - if j > i: - residual_func = self.add_sublayer( - "residual_{}_layer_{}_{}".format(name, i + 1, j + 1), - layers.ConvBN( - in_channels=in_channels[j], - out_channels=out_channels[i], - kernel_size=1, - bias_attr=False)) - self.residual_func_list.append(residual_func) - elif j < i: - pre_num_filters = in_channels[j] - for k in range(i - j): - if k == i - j - 1: - residual_func = self.add_sublayer( - "residual_{}_layer_{}_{}_{}".format( - name, i + 1, j + 1, k + 1), - layers.ConvBN( - in_channels=pre_num_filters, - out_channels=out_channels[i], - kernel_size=3, - stride=2, - padding=1 if not padding_same else 'same', - bias_attr=False)) - pre_num_filters = out_channels[i] - else: - residual_func = self.add_sublayer( - "residual_{}_layer_{}_{}_{}".format( - name, i + 1, j + 1, k + 1), - layers.ConvBNReLU( - in_channels=pre_num_filters, - out_channels=out_channels[j], - kernel_size=3, - stride=2, - padding=1 if not padding_same else 'same', - bias_attr=False)) - pre_num_filters = out_channels[j] - self.residual_func_list.append(residual_func) - - def forward(self, x): - outs = [] - residual_func_idx = 0 - for i in range(self._actual_ch): - residual = x[i] - residual_shape = paddle.shape(residual)[-2:] - for j in range(len(self._in_channels)): - if j > i: - y = self.residual_func_list[residual_func_idx](x[j]) - residual_func_idx += 1 - - y = F.interpolate( - y, - residual_shape, - mode='bilinear', - align_corners=self.align_corners) - residual = residual + y - elif j < i: - y = x[j] - for k in 
range(i - j): - y = self.residual_func_list[residual_func_idx](y) - residual_func_idx += 1 - - residual = residual + y - - residual = F.relu(residual) - outs.append(residual) - - return outs - - -@manager.BACKBONES.add_component -def HRNet_W18_Small_V1(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[1], - stage1_num_channels=[32], - stage2_num_modules=1, - stage2_num_blocks=[2, 2], - stage2_num_channels=[16, 32], - stage3_num_modules=1, - stage3_num_blocks=[2, 2, 2], - stage3_num_channels=[16, 32, 64], - stage4_num_modules=1, - stage4_num_blocks=[2, 2, 2, 2], - stage4_num_channels=[16, 32, 64, 128], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W18_Small_V2(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[2], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[2, 2], - stage2_num_channels=[18, 36], - stage3_num_modules=3, - stage3_num_blocks=[2, 2, 2], - stage3_num_channels=[18, 36, 72], - stage4_num_modules=2, - stage4_num_blocks=[2, 2, 2, 2], - stage4_num_channels=[18, 36, 72, 144], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W18(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[18, 36], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[18, 36, 72], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[18, 36, 72, 144], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W30(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[30, 60], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[30, 60, 120], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[30, 60, 120, 240], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W32(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[32, 64], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[32, 64, 128], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[32, 64, 128, 256], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W40(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[40, 80], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[40, 80, 160], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[40, 80, 160, 320], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W44(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[44, 88], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[44, 88, 176], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[44, 88, 176, 352], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W48(**kwargs): - model = HRNet( - stage1_num_modules=1, - 
stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[48, 96], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[48, 96, 192], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[48, 96, 192, 384], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W60(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[60, 120], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[60, 120, 240], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[60, 120, 240, 480], - **kwargs) - return model - - -@manager.BACKBONES.add_component -def HRNet_W64(**kwargs): - model = HRNet( - stage1_num_modules=1, - stage1_num_blocks=[4], - stage1_num_channels=[64], - stage2_num_modules=1, - stage2_num_blocks=[4, 4], - stage2_num_channels=[64, 128], - stage3_num_modules=4, - stage3_num_blocks=[4, 4, 4], - stage3_num_channels=[64, 128, 256], - stage4_num_modules=3, - stage4_num_blocks=[4, 4, 4, 4], - stage4_num_channels=[64, 128, 256, 512], - **kwargs) - return model diff --git a/spaces/Detomo/ai-comic-generation/src/lib/useImageDimension.ts b/spaces/Detomo/ai-comic-generation/src/lib/useImageDimension.ts deleted file mode 100644 index 9cfd06e473929b1046a5dd9caa9d577ebaf09b7a..0000000000000000000000000000000000000000 --- a/spaces/Detomo/ai-comic-generation/src/lib/useImageDimension.ts +++ /dev/null @@ -1,20 +0,0 @@ -import { useEffect, useState } from "react" - -import { ImageDimension, getImageDimension } from "./getImageDimension" - -export function useImageDimension(src: string) { - const [dimension, setDimension] = useState({ - width: 0, - height: 0, - }) - - useEffect(() => { - const compute = async () => { - const newDimension = await getImageDimension(src) - setDimension(newDimension) - } - compute() - }, [src]) - - return dimension -} \ No newline at end of file diff --git a/spaces/Dineshdc/MygenAIChatbot/README.md b/spaces/Dineshdc/MygenAIChatbot/README.md deleted file mode 100644 index 97a20b162c02cd8821ebca56464925260d98e112..0000000000000000000000000000000000000000 --- a/spaces/Dineshdc/MygenAIChatbot/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: MygenAIChatbot -emoji: 📊 -colorFrom: red -colorTo: yellow -sdk: gradio -sdk_version: 3.39.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/stylegan2/model.py b/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/stylegan2/model.py deleted file mode 100644 index d8c8f16d6baf48d95082abba9aa72d30f2bc4377..0000000000000000000000000000000000000000 --- a/spaces/DragGan/DragGan/stylegan_human/pti/pti_models/e4e/stylegan2/model.py +++ /dev/null @@ -1,680 +0,0 @@ -import math -import random -import torch -from torch import nn -from torch.nn import functional as F - -from .op.fused_act import FusedLeakyReLU, fused_leaky_relu -from .op.upfirdn2d import upfirdn2d - - -class PixelNorm(nn.Module): - def __init__(self): - super().__init__() - - def forward(self, input): - return input * torch.rsqrt(torch.mean(input ** 2, dim=1, keepdim=True) + 1e-8) - - -def make_kernel(k): - k = torch.tensor(k, dtype=torch.float32) - - if k.ndim == 1: - k = k[None, :] * k[:, None] - - k /= k.sum() - - 
return k - - -class Upsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) * (factor ** 2) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=self.factor, down=1, pad=self.pad) - - return out - - -class Downsample(nn.Module): - def __init__(self, kernel, factor=2): - super().__init__() - - self.factor = factor - kernel = make_kernel(kernel) - self.register_buffer('kernel', kernel) - - p = kernel.shape[0] - factor - - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.pad = (pad0, pad1) - - def forward(self, input): - out = upfirdn2d(input, self.kernel, up=1, down=self.factor, pad=self.pad) - - return out - - -class Blur(nn.Module): - def __init__(self, kernel, pad, upsample_factor=1): - super().__init__() - - kernel = make_kernel(kernel) - - if upsample_factor > 1: - kernel = kernel * (upsample_factor ** 2) - - self.register_buffer('kernel', kernel) - - self.pad = pad - - def forward(self, input): - out = upfirdn2d(input, self.kernel, pad=self.pad) - - return out - - -class EqualConv2d(nn.Module): - def __init__( - self, in_channel, out_channel, kernel_size, stride=1, padding=0, bias=True - ): - super().__init__() - - self.weight = nn.Parameter( - torch.randn(out_channel, in_channel, kernel_size, kernel_size) - ) - self.scale = 1 / math.sqrt(in_channel * kernel_size ** 2) - - self.stride = stride - self.padding = padding - - if bias: - self.bias = nn.Parameter(torch.zeros(out_channel)) - - else: - self.bias = None - - def forward(self, input): - out = F.conv2d( - input, - self.weight * self.scale, - bias=self.bias, - stride=self.stride, - padding=self.padding, - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]},' - f' {self.weight.shape[2]}, stride={self.stride}, padding={self.padding})' - ) - - -class EqualLinear(nn.Module): - def __init__( - self, in_dim, out_dim, bias=True, bias_init=0, lr_mul=1, activation=None - ): - super().__init__() - - self.weight = nn.Parameter(torch.randn(out_dim, in_dim).div_(lr_mul)) - - if bias: - self.bias = nn.Parameter(torch.zeros(out_dim).fill_(bias_init)) - - else: - self.bias = None - - self.activation = activation - - self.scale = (1 / math.sqrt(in_dim)) * lr_mul - self.lr_mul = lr_mul - - def forward(self, input): - if self.activation: - out = F.linear(input, self.weight * self.scale) - out = fused_leaky_relu(out, self.bias * self.lr_mul) - - else: - out = F.linear( - input, self.weight * self.scale, bias=self.bias * self.lr_mul - ) - - return out - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.weight.shape[1]}, {self.weight.shape[0]})' - ) - - -class ScaledLeakyReLU(nn.Module): - def __init__(self, negative_slope=0.2): - super().__init__() - - self.negative_slope = negative_slope - - def forward(self, input): - out = F.leaky_relu(input, negative_slope=self.negative_slope) - - return out * math.sqrt(2) - - -class ModulatedConv2d(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - demodulate=True, - upsample=False, - downsample=False, - blur_kernel=[1, 3, 3, 1], - ): - super().__init__() - - self.eps = 1e-8 - self.kernel_size = kernel_size - self.in_channel = in_channel - self.out_channel = out_channel - self.upsample = upsample - self.downsample = downsample - - 
if upsample: - factor = 2 - p = (len(blur_kernel) - factor) - (kernel_size - 1) - pad0 = (p + 1) // 2 + factor - 1 - pad1 = p // 2 + 1 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1), upsample_factor=factor) - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - self.blur = Blur(blur_kernel, pad=(pad0, pad1)) - - fan_in = in_channel * kernel_size ** 2 - self.scale = 1 / math.sqrt(fan_in) - self.padding = kernel_size // 2 - - self.weight = nn.Parameter( - torch.randn(1, out_channel, in_channel, kernel_size, kernel_size) - ) - - self.modulation = EqualLinear(style_dim, in_channel, bias_init=1) - - self.demodulate = demodulate - - def __repr__(self): - return ( - f'{self.__class__.__name__}({self.in_channel}, {self.out_channel}, {self.kernel_size}, ' - f'upsample={self.upsample}, downsample={self.downsample})' - ) - - def forward(self, input, style): - batch, in_channel, height, width = input.shape - - style = self.modulation(style).view(batch, 1, in_channel, 1, 1) - weight = self.scale * self.weight * style - - if self.demodulate: - demod = torch.rsqrt(weight.pow(2).sum([2, 3, 4]) + 1e-8) - weight = weight * demod.view(batch, self.out_channel, 1, 1, 1) - - weight = weight.view( - batch * self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - - if self.upsample: - input = input.view(1, batch * in_channel, height, width) - weight = weight.view( - batch, self.out_channel, in_channel, self.kernel_size, self.kernel_size - ) - weight = weight.transpose(1, 2).reshape( - batch * in_channel, self.out_channel, self.kernel_size, self.kernel_size - ) - out = F.conv_transpose2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - out = self.blur(out) - - elif self.downsample: - input = self.blur(input) - _, _, height, width = input.shape - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=0, stride=2, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - else: - input = input.view(1, batch * in_channel, height, width) - out = F.conv2d(input, weight, padding=self.padding, groups=batch) - _, _, height, width = out.shape - out = out.view(batch, self.out_channel, height, width) - - return out - - -class NoiseInjection(nn.Module): - def __init__(self): - super().__init__() - - self.weight = nn.Parameter(torch.zeros(1)) - - def forward(self, image, noise=None): - if noise is None: - batch, _, height, width = image.shape - noise = image.new_empty(batch, 1, height, width).normal_() - - return image + self.weight * noise - - -class ConstantInput(nn.Module): - def __init__(self, channel, size=4): - super().__init__() - - self.input = nn.Parameter(torch.randn(1, channel, size, size // 2)) - - def forward(self, input): - batch = input.shape[0] - out = self.input.repeat(batch, 1, 1, 1) - - return out - - -class StyledConv(nn.Module): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=False, - blur_kernel=[1, 3, 3, 1], - demodulate=True, - ): - super().__init__() - - self.conv = ModulatedConv2d( - in_channel, - out_channel, - kernel_size, - style_dim, - upsample=upsample, - blur_kernel=blur_kernel, - demodulate=demodulate, - ) - - self.noise = NoiseInjection() - # self.bias = nn.Parameter(torch.zeros(1, out_channel, 1, 1)) - # self.activate = ScaledLeakyReLU(0.2) - self.activate = 
FusedLeakyReLU(out_channel) - - def forward(self, input, style, noise=None): - out = self.conv(input, style) - out = self.noise(out, noise=noise) - # out = out + self.bias - out = self.activate(out) - - return out - - -class ToRGB(nn.Module): - def __init__(self, in_channel, style_dim, upsample=True, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - if upsample: - self.upsample = Upsample(blur_kernel) - - self.conv = ModulatedConv2d(in_channel, 3, 1, style_dim, demodulate=False) - self.bias = nn.Parameter(torch.zeros(1, 3, 1, 1)) - - def forward(self, input, style, skip=None): - out = self.conv(input, style) - out = out + self.bias - - if skip is not None: - skip = self.upsample(skip) - - out = out + skip - - return out - - -class Generator(nn.Module): - def __init__( - self, - size, - style_dim, - n_mlp, - channel_multiplier=2, - blur_kernel=[1, 3, 3, 1], - lr_mlp=0.01, - ): - super().__init__() - - self.size = size - - self.style_dim = style_dim - - layers = [PixelNorm()] - - for i in range(n_mlp): - layers.append( - EqualLinear( - style_dim, style_dim, lr_mul=lr_mlp, activation='fused_lrelu' - ) - ) - - self.style = nn.Sequential(*layers) - - self.channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - self.input = ConstantInput(self.channels[4]) - self.conv1 = StyledConv( - self.channels[4], self.channels[4], 3, style_dim, blur_kernel=blur_kernel - ) - self.to_rgb1 = ToRGB(self.channels[4], style_dim, upsample=False) - - self.log_size = int(math.log(size, 2)) - self.num_layers = (self.log_size - 2) * 2 + 1 - - self.convs = nn.ModuleList() - self.upsamples = nn.ModuleList() - self.to_rgbs = nn.ModuleList() - self.noises = nn.Module() - - in_channel = self.channels[4] - - for layer_idx in range(self.num_layers): - res = (layer_idx + 5) // 2 - shape = [1, 1, 2 ** res, 2 ** res // 2] - self.noises.register_buffer( - "noise_{}".format(layer_idx), torch.randn(*shape) - ) - - for i in range(3, self.log_size + 1): - out_channel = self.channels[2 ** i] - - self.convs.append( - StyledConv( - in_channel, - out_channel, - 3, - style_dim, - upsample=True, - blur_kernel=blur_kernel, - ) - ) - - self.convs.append( - StyledConv( - out_channel, out_channel, 3, style_dim, blur_kernel=blur_kernel - ) - ) - - self.to_rgbs.append(ToRGB(out_channel, style_dim)) - - in_channel = out_channel - - self.n_latent = self.log_size * 2 - 2 - - def make_noise(self): - device = self.input.input.device - - noises = [torch.randn(1, 1, 2 ** 2, 2 ** 2 // 2, device=device)] - - for i in range(3, self.log_size + 1): - for _ in range(2): - noises.append(torch.randn(1, 1, 2 ** i, 2 ** i // 2, device=device)) - - return noises - - def mean_latent(self, n_latent): - latent_in = torch.randn( - n_latent, self.style_dim, device=self.input.input.device - ) - latent = self.style(latent_in).mean(0, keepdim=True) - - return latent - - def get_latent(self, input): - return self.style(input) - - def forward( - self, - styles, - return_latents=False, - return_features=False, - inject_index=None, - truncation=1, - truncation_latent=None, - input_is_latent=False, - noise=None, - randomize_noise=True, - ): - if not input_is_latent: - styles = [self.style(s) for s in styles] - - if noise is None: - if randomize_noise: - noise = [None] * self.num_layers - else: - noise = [ - getattr(self.noises, f'noise_{i}') for i in range(self.num_layers) - ] - - if truncation < 1: - 
style_t = [] - - for style in styles: - style_t.append( - truncation_latent + truncation * (style - truncation_latent) - ) - - styles = style_t - - if len(styles) < 2: - inject_index = self.n_latent - if styles[0].ndim < 3: - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - else: - latent = styles[0] - - else: - if inject_index is None: - inject_index = random.randint(1, self.n_latent - 1) - - # latent = styles[0].unsqueeze(0) - # if latent.shape[1] == 1: - # latent = latent.repeat(1, inject_index, 1) - # else: - # latent = latent[:, :inject_index, :] - latent = styles[0].unsqueeze(1).repeat(1, inject_index, 1) - latent2 = styles[1].unsqueeze(1).repeat(1, self.n_latent - inject_index, 1) - # latent = styles[0][:, :inject_index, :] - # latent2 = styles[1][:, inject_index:, :] - latent = torch.cat([latent, latent2], 1) - out = self.input(latent) - out = self.conv1(out, latent[:, 0], noise=noise[0]) - - skip = self.to_rgb1(out, latent[:, 1]) - - i = 1 - for conv1, conv2, noise1, noise2, to_rgb in zip( - self.convs[::2], self.convs[1::2], noise[1::2], noise[2::2], self.to_rgbs - ): - out = conv1(out, latent[:, i], noise=noise1) - out = conv2(out, latent[:, i + 1], noise=noise2) - skip = to_rgb(out, latent[:, i + 2], skip) - - i += 2 - - image = skip - - if return_latents: - return image, latent - elif return_features: - return image, out - else: - return image, None - - -class ConvLayer(nn.Sequential): - def __init__( - self, - in_channel, - out_channel, - kernel_size, - downsample=False, - blur_kernel=[1, 3, 3, 1], - bias=True, - activate=True, - ): - layers = [] - - if downsample: - factor = 2 - p = (len(blur_kernel) - factor) + (kernel_size - 1) - pad0 = (p + 1) // 2 - pad1 = p // 2 - - layers.append(Blur(blur_kernel, pad=(pad0, pad1))) - - stride = 2 - self.padding = 0 - - else: - stride = 1 - self.padding = kernel_size // 2 - - layers.append( - EqualConv2d( - in_channel, - out_channel, - kernel_size, - padding=self.padding, - stride=stride, - bias=bias and not activate, - ) - ) - - if activate: - if bias: - layers.append(FusedLeakyReLU(out_channel)) - - else: - layers.append(ScaledLeakyReLU(0.2)) - - super().__init__(*layers) - - -class ResBlock(nn.Module): - def __init__(self, in_channel, out_channel, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - self.conv1 = ConvLayer(in_channel, in_channel, 3) - self.conv2 = ConvLayer(in_channel, out_channel, 3, downsample=True) - - self.skip = ConvLayer( - in_channel, out_channel, 1, downsample=True, activate=False, bias=False - ) - - def forward(self, input): - out = self.conv1(input) - out = self.conv2(out) - - skip = self.skip(input) - out = (out + skip) / math.sqrt(2) - - return out - - -class Discriminator(nn.Module): - def __init__(self, size, channel_multiplier=2, blur_kernel=[1, 3, 3, 1]): - super().__init__() - - channels = { - 4: 512, - 8: 512, - 16: 512, - 32: 512, - 64: 256 * channel_multiplier, - 128: 128 * channel_multiplier, - 256: 64 * channel_multiplier, - 512: 32 * channel_multiplier, - 1024: 16 * channel_multiplier, - } - - convs = [ConvLayer(3, channels[size], 1)] - - log_size = int(math.log(size, 2)) - - in_channel = channels[size] - - for i in range(log_size, 2, -1): - out_channel = channels[2 ** (i - 1)] - - convs.append(ResBlock(in_channel, out_channel, blur_kernel)) - - in_channel = out_channel - - self.convs = nn.Sequential(*convs) - - self.stddev_group = 4 - self.stddev_feat = 1 - - self.final_conv = ConvLayer(in_channel + 1, channels[4], 3) - self.final_linear = nn.Sequential( - EqualLinear(channels[4] * 4 
* 4 // 2, channels[4], activation='fused_lrelu'), - EqualLinear(channels[4], 1), - ) - - def forward(self, input): - out = self.convs(input) - - batch, channel, height, width = out.shape - group = min(batch, self.stddev_group) - stddev = out.view( - group, -1, self.stddev_feat, channel // self.stddev_feat, height, width - ) - stddev = torch.sqrt(stddev.var(0, unbiased=False) + 1e-8) - stddev = stddev.mean([2, 3, 4], keepdims=True).squeeze(2) - stddev = stddev.repeat(group, 1, height, width) - out = torch.cat([out, stddev], 1) - - out = self.final_conv(out) - - out = out.view(batch, -1) - out = self.final_linear(out) - - return out diff --git a/spaces/Dusan/clickbaitonator/fudge/eval_formality_metrics.py b/spaces/Dusan/clickbaitonator/fudge/eval_formality_metrics.py deleted file mode 100644 index 972a5f46f6207f1aa1e0a8833452738c68d404ad..0000000000000000000000000000000000000000 --- a/spaces/Dusan/clickbaitonator/fudge/eval_formality_metrics.py +++ /dev/null @@ -1,73 +0,0 @@ -from argparse import ArgumentParser -import pickle -import os -import math - -import sacrebleu -import numpy as np -import torch -from transformers import AutoTokenizer, AutoModelWithLMHead, pipeline, set_seed, GPT2Tokenizer, GPT2Model, MarianTokenizer, MarianMTModel - -from constants import * -from model import Model -from util import save_checkpoint, ProgressMeter, AverageMeter, num_params - -def avg_formality(preds, model, tokenizer, device='cuda'): - probs = [] - for sent in preds: - encoded_input = tokenizer.encode(sent, return_tensors='pt').to(device) - lengths = torch.LongTensor([encoded_input.shape[1]]).to(device) - scores = model(encoded_input, lengths=lengths) # batch x seq - score = scores.flatten()[-1].item() - probs.append(math.exp(score) / (1 + math.exp(score))) # sigmoided score = prob - return np.mean(probs) - -if __name__=='__main__': - parser = ArgumentParser() - parser.add_argument('--pred', type=str) - parser.add_argument('--ref', type=str, nargs='*', help='bleu refs') - parser.add_argument('--ckpt', type=str, help='formality classifier') - parser.add_argument('--dataset_info', type=str) - parser.add_argument('--device', type=str, default='cuda', choices=['cpu', 'cuda']) - parser.add_argument('--model_string', type=str, default='Helsinki-NLP/opus-mt-es-en') - - args = parser.parse_args() - - # refs = [['The dog bit the man.', 'It was not unexpected.', 'The man bit him first.'], - # ['The dog had bit the man.', 'No one was surprised.', 'The man had bitten the dog.']] - # sys = ['The dog bit the man.', "It wasn't surprising.", 'The man had just bitten him.'] - print('num ref files', len(args.ref)) - pred = [] - with open(args.pred, 'r') as rf: - for line in rf: - pred.append(line.strip()) - refs = [] - for ref_file in args.ref: - ref = [] - with open(ref_file, 'r') as rf: - for line in rf: - ref.append(line.strip()) - assert len(ref) == len(pred) - refs.append(ref) - bleu = sacrebleu.corpus_bleu(pred, refs) - print('BLEU score:', bleu.score) - - with open(args.dataset_info, 'rb') as rf: - dataset_info = pickle.load(rf) - - tokenizer = MarianTokenizer.from_pretrained(args.model_string) - tokenizer.add_special_tokens({'pad_token': PAD_TOKEN}) - pad_id = tokenizer.encode(PAD_TOKEN)[0] - - checkpoint = torch.load(args.ckpt, map_location=args.device) - model_args = checkpoint['args'] - conditioning_model = Model(model_args, pad_id, len(dataset_info.index2word)) # no need to get the glove embeddings when reloading since they're saved in model ckpt anyway - 
conditioning_model.load_state_dict(checkpoint['state_dict']) - conditioning_model = conditioning_model.to(args.device) - conditioning_model.eval() - print("=> loaded checkpoint '{}' (epoch {})" - .format(args.ckpt, checkpoint['epoch'])) - print('num params', num_params(conditioning_model)) - - print('avg formality prob according to model', avg_formality(pred, conditioning_model, tokenizer, device=args.device)) - diff --git a/spaces/ECCV2022/bytetrack/tutorials/transtrack/engine_track.py b/spaces/ECCV2022/bytetrack/tutorials/transtrack/engine_track.py deleted file mode 100644 index 925a4f2ba1b6ee4e7daaed7c0a901362ee223ddb..0000000000000000000000000000000000000000 --- a/spaces/ECCV2022/bytetrack/tutorials/transtrack/engine_track.py +++ /dev/null @@ -1,277 +0,0 @@ -# Modified by Peize Sun, Rufeng Zhang -# ------------------------------------------------------------------------ -# Deformable DETR -# Copyright (c) 2020 SenseTime. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# Modified from DETR (https://github.com/facebookresearch/detr) -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -# ------------------------------------------------------------------------ -""" -Train and eval functions used in main.py -""" -import math -import os -import sys -from typing import Iterable - -import torch -import util.misc as utils -from datasets.coco_eval import CocoEvaluator -from datasets.panoptic_eval import PanopticEvaluator -from datasets.data_prefetcher import data_prefetcher -from mot_online.byte_tracker import BYTETracker - - -def train_one_epoch(model: torch.nn.Module, criterion: torch.nn.Module, - data_loader: Iterable, optimizer: torch.optim.Optimizer, - device: torch.device, epoch: int, max_norm: float = 0): - model.train() - criterion.train() - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('lr', utils.SmoothedValue(window_size=1, fmt='{value:.6f}')) - metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) - metric_logger.add_meter('grad_norm', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) - header = 'Epoch: [{}]'.format(epoch) - print_freq = 10 - - prefetcher = data_prefetcher(data_loader, device, prefetch=True) - samples, targets = prefetcher.next() - - # for samples, targets in metric_logger.log_every(data_loader, print_freq, header): - for _ in metric_logger.log_every(range(len(data_loader)), print_freq, header): - outputs, pre_outputs, pre_targets = model([samples, targets]) - loss_dict = criterion(outputs, targets, pre_outputs, pre_targets) - weight_dict = criterion.weight_dict - losses = sum(loss_dict[k] * weight_dict[k] for k in loss_dict.keys() if k in weight_dict) - - # reduce losses over all GPUs for logging purposes - loss_dict_reduced = utils.reduce_dict(loss_dict) - loss_dict_reduced_unscaled = {f'{k}_unscaled': v - for k, v in loss_dict_reduced.items()} - loss_dict_reduced_scaled = {k: v * weight_dict[k] - for k, v in loss_dict_reduced.items() if k in weight_dict} - losses_reduced_scaled = sum(loss_dict_reduced_scaled.values()) - - loss_value = losses_reduced_scaled.item() - - if not math.isfinite(loss_value): - print("Loss is {}, stopping training".format(loss_value)) - print(loss_dict_reduced) - sys.exit(1) - - optimizer.zero_grad() - losses.backward() - if max_norm > 0: - grad_total_norm = torch.nn.utils.clip_grad_norm_(model.parameters(), max_norm) - 
else: - grad_total_norm = utils.get_total_grad_norm(model.parameters(), max_norm) - optimizer.step() - - metric_logger.update(loss=loss_value, **loss_dict_reduced_scaled, **loss_dict_reduced_unscaled) - metric_logger.update(class_error=loss_dict_reduced['class_error']) - metric_logger.update(lr=optimizer.param_groups[0]["lr"]) - metric_logger.update(grad_norm=grad_total_norm) - - samples, targets = prefetcher.next() - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger) - return {k: meter.global_avg for k, meter in metric_logger.meters.items()} - - -@torch.no_grad() -def evaluate(model, criterion, postprocessors, data_loader, base_ds, device, output_dir, tracker=None, - phase='train', det_val=False): - model.eval() - criterion.eval() - - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) - header = 'Test:' - - iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) - coco_evaluator = CocoEvaluator(base_ds, iou_types) - # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] - - panoptic_evaluator = None - if 'panoptic' in postprocessors.keys(): - panoptic_evaluator = PanopticEvaluator( - data_loader.dataset.ann_file, - data_loader.dataset.ann_folder, - output_dir=os.path.join(output_dir, "panoptic_eval"), - ) - - res_tracks = dict() - pre_embed = None - for samples, targets in metric_logger.log_every(data_loader, 10, header): - # pre process for track. - if tracker is not None: - if phase != 'train': - assert samples.tensors.shape[0] == 1, "Now only support inference of batchsize 1." - frame_id = targets[0].get("frame_id", None) - assert frame_id is not None - frame_id = frame_id.item() - if frame_id == 1: - tracker.reset_all() - pre_embed = None - - samples = samples.to(device) - targets = [{k: v.to(device) for k, v in t.items()} for t in targets] - - if det_val: - outputs = model(samples) - else: - outputs, pre_embed = model(samples, pre_embed) - loss_dict = criterion(outputs, targets) - weight_dict = criterion.weight_dict - -# reduce losses over all GPUs for logging purposes - loss_dict_reduced = utils.reduce_dict(loss_dict) - loss_dict_reduced_scaled = {k: v * weight_dict[k] - for k, v in loss_dict_reduced.items() if k in weight_dict} - loss_dict_reduced_unscaled = {f'{k}_unscaled': v - for k, v in loss_dict_reduced.items()} - metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), - **loss_dict_reduced_scaled, - **loss_dict_reduced_unscaled) - metric_logger.update(class_error=loss_dict_reduced['class_error']) - - orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) - results = postprocessors['bbox'](outputs, orig_target_sizes) - - if 'segm' in postprocessors.keys(): - target_sizes = torch.stack([t["size"] for t in targets], dim=0) - results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) - res = {target['image_id'].item(): output for target, output in zip(targets, results)} - - # post process for track. 
- if tracker is not None: - if frame_id == 1: - res_track = tracker.init_track(results[0]) - else: - res_track = tracker.step(results[0]) - res_tracks[targets[0]['image_id'].item()] = res_track - - if coco_evaluator is not None: - coco_evaluator.update(res) - - if panoptic_evaluator is not None: - res_pano = postprocessors["panoptic"](outputs, target_sizes, orig_target_sizes) - for i, target in enumerate(targets): - image_id = target["image_id"].item() - file_name = f"{image_id:012d}.png" - res_pano[i]["image_id"] = image_id - res_pano[i]["file_name"] = file_name - - panoptic_evaluator.update(res_pano) - - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger) - if coco_evaluator is not None: - coco_evaluator.synchronize_between_processes() - if panoptic_evaluator is not None: - panoptic_evaluator.synchronize_between_processes() - - # accumulate predictions from all images - if coco_evaluator is not None: - coco_evaluator.accumulate() - coco_evaluator.summarize() - panoptic_res = None - if panoptic_evaluator is not None: - panoptic_res = panoptic_evaluator.summarize() - stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} - if coco_evaluator is not None: - if 'bbox' in postprocessors.keys(): - stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() - if 'segm' in postprocessors.keys(): - stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() - if panoptic_res is not None: - stats['PQ_all'] = panoptic_res["All"] - stats['PQ_th'] = panoptic_res["Things"] - stats['PQ_st'] = panoptic_res["Stuff"] - return stats, coco_evaluator, res_tracks - - -@torch.no_grad() -def evaluate_track(args, model, criterion, postprocessors, data_loader, base_ds, device, output_dir, tracker=None, - phase='train', det_val=False): - model.eval() - criterion.eval() - - metric_logger = utils.MetricLogger(delimiter=" ") - metric_logger.add_meter('class_error', utils.SmoothedValue(window_size=1, fmt='{value:.2f}')) - header = 'Test:' - - iou_types = tuple(k for k in ('segm', 'bbox') if k in postprocessors.keys()) - coco_evaluator = CocoEvaluator(base_ds, iou_types) - # coco_evaluator.coco_eval[iou_types[0]].params.iouThrs = [0, 0.1, 0.5, 0.75] - - res_tracks = dict() - pre_embed = None - for samples, targets in metric_logger.log_every(data_loader, 50, header): - # pre process for track. 
- if tracker is not None: - frame_id = targets[0].get("frame_id", None) - assert frame_id is not None - frame_id = frame_id.item() - if frame_id == 1: - tracker = BYTETracker(args) - pre_embed = None - - samples = samples.to(device) - targets = [{k: v.to(device) for k, v in t.items()} for t in targets] - - if det_val: - outputs = model(samples) - else: - outputs, pre_embed = model(samples, pre_embed) - loss_dict = criterion(outputs, targets) - weight_dict = criterion.weight_dict - -# reduce losses over all GPUs for logging purposes - loss_dict_reduced = utils.reduce_dict(loss_dict) - loss_dict_reduced_scaled = {k: v * weight_dict[k] - for k, v in loss_dict_reduced.items() if k in weight_dict} - loss_dict_reduced_unscaled = {f'{k}_unscaled': v - for k, v in loss_dict_reduced.items()} - metric_logger.update(loss=sum(loss_dict_reduced_scaled.values()), - **loss_dict_reduced_scaled, - **loss_dict_reduced_unscaled) - metric_logger.update(class_error=loss_dict_reduced['class_error']) - - orig_target_sizes = torch.stack([t["orig_size"] for t in targets], dim=0) - results = postprocessors['bbox'](outputs, orig_target_sizes) - - if 'segm' in postprocessors.keys(): - target_sizes = torch.stack([t["size"] for t in targets], dim=0) - results = postprocessors['segm'](results, outputs, orig_target_sizes, target_sizes) - res = {target['image_id'].item(): output for target, output in zip(targets, results)} - - # post process for track. - if tracker is not None: - res_track = tracker.update(results[0]) - res_tracks[targets[0]['image_id'].item()] = res_track - - if coco_evaluator is not None: - coco_evaluator.update(res) - - # gather the stats from all processes - metric_logger.synchronize_between_processes() - print("Averaged stats:", metric_logger) - if coco_evaluator is not None: - coco_evaluator.synchronize_between_processes() - - # accumulate predictions from all images - if coco_evaluator is not None: - coco_evaluator.accumulate() - coco_evaluator.summarize() - - stats = {k: meter.global_avg for k, meter in metric_logger.meters.items()} - if coco_evaluator is not None: - if 'bbox' in postprocessors.keys(): - stats['coco_eval_bbox'] = coco_evaluator.coco_eval['bbox'].stats.tolist() - if 'segm' in postprocessors.keys(): - stats['coco_eval_masks'] = coco_evaluator.coco_eval['segm'].stats.tolist() - return stats, coco_evaluator, res_tracks \ No newline at end of file diff --git a/spaces/EPFL-VILAB/MultiMAE/multimae/input_adapters.py b/spaces/EPFL-VILAB/MultiMAE/multimae/input_adapters.py deleted file mode 100644 index 594292630944117d78eac9a62b2d11986203e909..0000000000000000000000000000000000000000 --- a/spaces/EPFL-VILAB/MultiMAE/multimae/input_adapters.py +++ /dev/null @@ -1,241 +0,0 @@ -# Copyright (c) EPFL VILAB. -# All rights reserved. - -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. 
-# -------------------------------------------------------- -# Based on timm, DeiT, DINO, MoCo-v3, BEiT, MAE-priv and MAE code bases -# https://github.com/rwightman/pytorch-image-models/tree/master/timm -# https://github.com/facebookresearch/deit -# https://github.com/facebookresearch/dino -# https://github.com/facebookresearch/moco-v3 -# https://github.com/microsoft/unilm/tree/master/beit -# https://github.com/BUPT-PRIV/MAE-priv -# https://github.com/facebookresearch/mae -# -------------------------------------------------------- - -from typing import Dict, List, Optional, Tuple, Union - -import torch -import torch.nn as nn -import torch.nn.functional as F -from einops import rearrange, repeat - -from .multimae_utils import build_2d_sincos_posemb, pair, trunc_normal_ - - -class PatchedInputAdapter(nn.Module): - """Adapter for spatial inputs, like images or feature maps. - Creates tokens from patches over the image. - - :param num_channels: Number of input channels of the image/feature map - :param stride_level: Stride level compared to the full-sized image. - E.g. 4 for 1/4th the size of the image. - :param patch_size_full: Int or tuple of the patch size over the full image size. - Patch size for smaller inputs will be computed accordingly. - :param dim_tokens: Dimension of output tokens. Can be set using init method. - :param sincos_pos_emb: Set to True (default) to use fixed 2D sin-cos positional embeddings - :param learnable_pos_emb: Set to True to learn positional embeddings instead - :param image_size: Default image size. Used to initialize size of positional embeddings. - """ - def __init__(self, - num_channels: int, - stride_level: int, - patch_size_full: Union[int, Tuple[int,int]], - dim_tokens: Optional[int] = None, - sincos_pos_emb: bool = True, - learnable_pos_emb: bool = False, - image_size: Union[int, Tuple[int]] = 224): - - super().__init__() - self.num_channels = num_channels - self.stride_level = stride_level - self.patch_size_full = pair(patch_size_full) - self.dim_tokens = dim_tokens - self.sincos_pos_emb = sincos_pos_emb - self.learnable_pos_emb = learnable_pos_emb - self.image_size = pair(image_size) - self.num_patches = (self.image_size[0] // patch_size_full) * (self.image_size[1] // patch_size_full) - - # Actual patch height and width, taking into account stride of input - self.P_H = max(1, self.patch_size_full[0] // stride_level) - self.P_W = max(1, self.patch_size_full[1] // stride_level) - - if self.dim_tokens is not None: - self.init(dim_tokens=dim_tokens) - - def init(self, dim_tokens: int = 768): - """ - Initialize parts of encoder that are dependent on dimension of tokens. - Should be called when setting up MultiMAE. - - :param dim_tokens: Dimension of tokens - """ - self.dim_tokens = dim_tokens - - # Task embedding identifying from which task a given token comes from - # Fixed-size positional embeddings. 
Can be interpolated to different input sizes - h_posemb = self.image_size[0] // (self.stride_level * self.P_H) - w_posemb = self.image_size[1] // (self.stride_level * self.P_W) - if self.sincos_pos_emb: - self.pos_emb = build_2d_sincos_posemb(h=h_posemb, w=w_posemb, embed_dim=self.dim_tokens) - self.pos_emb = nn.Parameter(self.pos_emb, requires_grad=self.learnable_pos_emb) - else: - self.pos_emb = nn.Parameter(torch.zeros(1, self.dim_tokens, h_posemb, w_posemb)) - trunc_normal_(self.pos_emb, std=0.02) - - # Image -> tokens projection - self.proj = nn.Conv2d( - in_channels=self.num_channels, out_channels=self.dim_tokens, - kernel_size=(self.P_H, self.P_W), stride=(self.P_H, self.P_W) - ) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_emb'} - - def forward(self, x): - """ - Forward pass through input adapter, transforming image to sequence of tokens. - Adds task and positional encodings. - - :param x: Input image tensor - """ - B, C, H, W = x.shape - assert self.dim_tokens is not None, 'Need to call init(dim_tokens) function first' - assert (H % self.P_H == 0) and (W % self.P_W == 0), f'Image sizes {H}x{W} must be divisible by patch sizes {self.P_H}x{self.P_W}' - N_H, N_W = H // self.P_H, W // self.P_W # Number of patches in height and width - - # Create patches [B, C, H, W] -> [B, (H*W), C] - x_patch = rearrange(self.proj(x), 'b d nh nw -> b (nh nw) d') - - # Create positional embedding - x_pos_emb = F.interpolate(self.pos_emb, size=(N_H, N_W), mode='bicubic', align_corners=False) - x_pos_emb = rearrange(x_pos_emb, 'b d nh nw -> b (nh nw) d') - - # Add patches and positional embeddings - x = x_patch + x_pos_emb - - return x - - -class SemSegInputAdapter(nn.Module): - """ - Adapter for spatial inputs, like images or feature maps. - Creates tokens from patches over the image. - - :param num_classes: Number of input semantic classes - :param stride_level: Stride level compared to the full-sized image. - E.g. 4 for 1/4th the size of the image. - :param patch_size_full: Int or tuple of the patch size over the full image size. - Patch size for smaller inputs will be computed accordingly. - :param dim_tokens: Dimension of output tokens. Can be set using init method. - :param sincos_pos_emb: Set to True (default) to use fixed 2D sin-cos positional embeddings - :param learnable_pos_emb: Set to True to learn positional embeddings instead - :param image_size: Default image size. Used to initialize size of positional embeddings. - :param dim_class_emb: Dimension of learned class embedding - :param interpolate_class_emb: Set to True to average pool class embeddings of each patch - :param emb_padding_idx: Padding index (e.g. 
image border), default is None - """ - - def __init__(self, - num_classes: int, - stride_level: int, - patch_size_full: Union[int, Tuple[int, int]], - dim_tokens: Optional[int] = None, - sincos_pos_emb: int = True, - learnable_pos_emb: int = False, - image_size: Union[int, Tuple[int]] = 224, - dim_class_emb: int = 64, - interpolate_class_emb: bool = False, - emb_padding_idx: int = None - ): - super().__init__() - self.num_classes = num_classes - self.stride_level = stride_level - self.patch_size_full = pair(patch_size_full) - self.dim_tokens = dim_tokens - self.sincos_pos_emb = sincos_pos_emb - self.learnable_pos_emb = learnable_pos_emb - self.image_size = pair(image_size) - self.dim_class_emb = dim_class_emb - self.interpolate_class_emb = interpolate_class_emb - self.emb_padding_idx = emb_padding_idx - if self.emb_padding_idx is not None: - self.num_classes += 1 - - # Actual patch height and width, taking into account stride of input - self.P_H = max(1, self.patch_size_full[0] // stride_level) - self.P_W = max(1, self.patch_size_full[1] // stride_level) - - if self.dim_tokens is not None: - self.init(dim_tokens=dim_tokens) - - def init(self, dim_tokens: int = 768): - ''' - Initialize parts of encoder that are dependent on dimension of tokens. - Should be called when setting up MultiMAE. - - :param dim_tokens: Dimension of tokens - ''' - self.dim_tokens = dim_tokens - - # Task embedding identifying from which task a given token comes from - # Fixed-size positional embeddings. Can be interpolated to different input sizes - h_posemb = self.image_size[0] // (self.stride_level * self.P_H) - w_posemb = self.image_size[1] // (self.stride_level * self.P_W) - if self.sincos_pos_emb: - self.pos_emb = build_2d_sincos_posemb(h=h_posemb, w=w_posemb, embed_dim=self.dim_tokens) - self.pos_emb = nn.Parameter(self.pos_emb, requires_grad=self.learnable_pos_emb) - else: - self.pos_emb = nn.Parameter(torch.zeros(1, self.dim_tokens, h_posemb, w_posemb)) - trunc_normal_(self.pos_emb, std=0.02) - - # Image -> tokens projection - self.class_emb = nn.Embedding(num_embeddings=self.num_classes, embedding_dim=self.dim_class_emb, padding_idx=self.emb_padding_idx) - trunc_normal_(self.class_emb.weight, std=0.02) - - if self.interpolate_class_emb: - self.proj = nn.Sequential( - nn.Upsample(scale_factor=(1 / self.P_H, 1 / self.P_W), - mode='bilinear'), # Actually a downsample operation - nn.Conv2d(in_channels=self.dim_class_emb, out_channels=self.dim_tokens, - kernel_size=1, stride=1), - ) - else: - self.proj = nn.Conv2d( - in_channels=self.dim_class_emb, out_channels=self.dim_tokens, - kernel_size=(self.P_H, self.P_W), stride=(self.P_H, self.P_W) - ) - - @torch.jit.ignore - def no_weight_decay(self): - return {'pos_emb', 'class_emb'} - - def forward(self, x): - ''' - Forward pass through input adapter, transforming image to sequence of tokens. - Adds task and positional encodings. 
- - :param x: Input image tensor - ''' - B, H, W = x.shape - assert self.dim_tokens is not None, 'Need to call init(dim_tokens) function first' - assert (H % self.P_H == 0) and ( - W % self.P_W == 0), f'Image sizes {H}x{W} must be divisible by patch sizes {self.P_H}x{self.P_W}' - N_H, N_W = H // self.P_H, W // self.P_W # Number of patches in height and width - - # Map to embedding - x = rearrange(self.class_emb(x), 'b nh nw c -> b c nh nw') - - # Create patches [B, C, H, W] -> [B, (H*W), C] - x_patch = rearrange(self.proj(x), 'b d nh nw -> b (nh nw) d') - - # Create positional embedding - x_pos_emb = F.interpolate(self.pos_emb, size=(N_H, N_W), mode='bilinear') - x_pos_emb = rearrange(x_pos_emb, 'b d nh nw -> b (nh nw) d') - - # Add patches and positional embeddings - x = x_patch + x_pos_emb - - return x diff --git a/spaces/EuroPython2022/mmocr-demo/configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py b/spaces/EuroPython2022/mmocr-demo/configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py deleted file mode 100644 index d4a9c642307466c86f667d64bbeb4057db571b66..0000000000000000000000000000000000000000 --- a/spaces/EuroPython2022/mmocr-demo/configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py +++ /dev/null @@ -1,33 +0,0 @@ -_base_ = [ - '../../_base_/default_runtime.py', - '../../_base_/schedules/schedule_sgd_1500e.py', - '../../_base_/det_models/fcenet_r50_fpn.py', - '../../_base_/det_datasets/icdar2015.py', - '../../_base_/det_pipelines/fcenet_pipeline.py' -] - -train_list = {{_base_.train_list}} -test_list = {{_base_.test_list}} - -train_pipeline_icdar2015 = {{_base_.train_pipeline_icdar2015}} -test_pipeline_icdar2015 = {{_base_.test_pipeline_icdar2015}} - -data = dict( - samples_per_gpu=8, - workers_per_gpu=2, - val_dataloader=dict(samples_per_gpu=1), - test_dataloader=dict(samples_per_gpu=1), - train=dict( - type='UniformConcatDataset', - datasets=train_list, - pipeline=train_pipeline_icdar2015), - val=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline_icdar2015), - test=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline_icdar2015)) - -evaluation = dict(interval=10, metric='hmean-iou') diff --git a/spaces/Ezi/ModelCardsAnalysis/README.md b/spaces/Ezi/ModelCardsAnalysis/README.md deleted file mode 100644 index 2455df68de64fcb3aed56190a029a458ddc6806c..0000000000000000000000000000000000000000 --- a/spaces/Ezi/ModelCardsAnalysis/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: ModelCardsAnalysis -emoji: 📊 -colorFrom: blue -colorTo: indigo -sdk: streamlit -sdk_version: 1.2.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Felix123456/bingo/src/components/voice.tsx b/spaces/Felix123456/bingo/src/components/voice.tsx deleted file mode 100644 index 074d0e145229947282a472bd84f6578cf0b3c71c..0000000000000000000000000000000000000000 --- a/spaces/Felix123456/bingo/src/components/voice.tsx +++ /dev/null @@ -1,52 +0,0 @@ -import React, { useEffect } from 'react' -import { useSetAtom } from 'jotai' -import { useBing } from '@/lib/hooks/use-bing' -import Image from 'next/image' -import VoiceIcon from '@/assets/images/voice.svg' -import VoiceButton from './ui/voice' -import { SR } from '@/lib/bots/bing/sr' -import { voiceListenAtom } from '@/state' - -const sr = new SR(['发送', '清空', '退出']) - -const Voice = ({ setInput, input, sendMessage, isSpeaking }: Pick, 'setInput' | 'sendMessage' | 'input' | 'isSpeaking'>) => { - 
const setListen = useSetAtom(voiceListenAtom) - useEffect(() => { - if (sr.listening) return - sr.transcript = !isSpeaking - }, [isSpeaking]) - - useEffect(() => { - sr.onchange = (msg: string, command?: string) => { - switch (command) { - case '退出': - sr.stop() - break; - case '发送': - sendMessage(input) - case '清空': - setInput('') - break; - default: - setInput(input + msg) - } - } - }, [input]) - - const switchSR = (enable: boolean = false) => { - setListen(enable) - if (enable) { - sr.start() - } else { - sr.stop() - } - } - - return sr.listening ? ( - switchSR(false)} /> - ) : ( - start voice switchSR(true)} /> - ) -}; - -export default Voice; diff --git a/spaces/Fernando22/freegpt-webui/client/css/global.css b/spaces/Fernando22/freegpt-webui/client/css/global.css deleted file mode 100644 index 8de755e9df1b2c4ee74d18f00ce717b22c69db4b..0000000000000000000000000000000000000000 --- a/spaces/Fernando22/freegpt-webui/client/css/global.css +++ /dev/null @@ -1,70 +0,0 @@ -@import url("https://fonts.googleapis.com/css2?family=Inter:wght@100;200;300;400;500;600;700;800;900&display=swap"); -* { - --font-1: "Inter", sans-serif; - --section-gap: 24px; - --border-radius-1: 8px; - margin: 0; - padding: 0; - box-sizing: border-box; - position: relative; - font-family: var(--font-1); -} - -.theme-light { - --colour-1: #f5f5f5; - --colour-2: #000000; - --colour-3: #474747; - --colour-4: #949494; - --colour-5: #ebebeb; - --colour-6: #dadada; - - --accent: #3a3a3a; - --blur-bg: #ffffff; - --blur-border: #dbdbdb; - --user-input: #282828; - --conversations: #666666; -} - -.theme-dark { - --colour-1: #181818; - --colour-2: #ccc; - --colour-3: #dadada; - --colour-4: #f0f0f0; - --colour-5: #181818; - --colour-6: #242424; - - --accent: #151718; - --blur-bg: #242627; - --blur-border: #242627; - --user-input: #f5f5f5; - --conversations: #555555; -} - -html, -body { - background: var(--colour-1); - color: var(--colour-3); -} - -ol, -ul { - padding-left: 20px; -} - -.shown { - display: flex !important; -} - -a:-webkit-any-link { - color: var(--accent); -} - -pre { - white-space: pre-wrap; -} - -@media screen and (max-height: 720px) { - :root { - --section-gap: 16px; - } -} diff --git a/spaces/Fiacre/projectmanagerideator/README.md b/spaces/Fiacre/projectmanagerideator/README.md deleted file mode 100644 index 5b3580fa9047e3a9b4734f7bf65ca6695b88abf8..0000000000000000000000000000000000000000 --- a/spaces/Fiacre/projectmanagerideator/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Projectmanagerideator -emoji: 🏃 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.44.4 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/modules.py b/spaces/FrankZxShen/vits-fast-finetuning-umamusume/modules.py deleted file mode 100644 index 9c7fd9cd6eb8b7e0ec0e08957e970744a374a924..0000000000000000000000000000000000000000 --- a/spaces/FrankZxShen/vits-fast-finetuning-umamusume/modules.py +++ /dev/null @@ -1,390 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -import commons -from commons import init_weights, get_padding -from transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def 
__init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__(self, in_channels, hidden_channels, out_channels, kernel_size, n_layers, p_dropout): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." - - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append(nn.Conv1d(in_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential( - nn.ReLU(), - nn.Dropout(p_dropout)) - for _ in range(n_layers-1): - self.conv_layers.append(nn.Conv1d(hidden_channels, hidden_channels, kernel_size, padding=kernel_size//2)) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size ** i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append(nn.Conv1d(channels, channels, kernel_size, - groups=channels, dilation=dilation, padding=padding - )) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__(self, hidden_channels, kernel_size, dilation_rate, n_layers, gin_channels=0, p_dropout=0): - super(WN, self).__init__() - assert(kernel_size % 2 == 1) - self.hidden_channels =hidden_channels - self.kernel_size = kernel_size, - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d(gin_channels, 2*hidden_channels*n_layers, 1) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name='weight') - - for i in range(n_layers): - dilation = dilation_rate ** i - padding = int((kernel_size 
* dilation - dilation) / 2) - in_layer = torch.nn.Conv1d(hidden_channels, 2*hidden_channels, kernel_size, - dilation=dilation, padding=padding) - in_layer = torch.nn.utils.weight_norm(in_layer, name='weight') - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name='weight') - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:,cond_offset:cond_offset+2*self.hidden_channels,:] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply( - x_in, - g_l, - n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:,:self.hidden_channels,:] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:,self.hidden_channels:,:] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]))) - ]) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=1, - padding=get_padding(kernel_size, 1))) - ]) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList([ - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]))), - weight_norm(Conv1d(channels, channels, kernel_size, 1, dilation=dilation[1], - padding=get_padding(kernel_size, 
dilation[1]))) - ]) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels,1)) - self.logs = nn.Parameter(torch.zeros(channels,1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1,2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__(self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN(hidden_channels, kernel_size, dilation_rate, n_layers, p_dropout=p_dropout, gin_channels=gin_channels) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels]*2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1,2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - -class ConvFlow(nn.Module): - def __init__(self, in_channels, filter_channels, kernel_size, n_layers, num_bins=10, tail_bound=5.0): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.) 
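# The 1x1 projection defined next emits, for every frame and each of the
# half_channels being transformed, the unnormalized parameters of a monotonic
# piecewise rational-quadratic spline: num_bins bin widths, num_bins bin
# heights, and num_bins - 1 interior knot derivatives, i.e. 3 * num_bins - 1
# values per channel (29 with the default num_bins=10). forward() splits these
# back out along the last axis before calling
# piecewise_rational_quadratic_transform with linear tails, and the
# zero-initialized weight and bias below let the flow start out close to an
# identity transform.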
- self.proj = nn.Conv1d(filter_channels, self.half_channels * (num_bins * 3 - 1), 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels]*2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] - - unnormalized_widths = h[..., :self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins:2*self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_derivatives = h[..., 2 * self.num_bins:] - - x1, logabsdet = piecewise_rational_quadratic_transform(x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails='linear', - tail_bound=self.tail_bound - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1,2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py b/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py deleted file mode 100644 index b0add92c398b62aa8fd2141f595cf0941f55d421..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/configs/guided_anchoring/ga_faster_r50_caffe_fpn_1x_coco.py +++ /dev/null @@ -1,65 +0,0 @@ -_base_ = '../faster_rcnn/faster_rcnn_r50_caffe_fpn_1x_coco.py' -model = dict( - rpn_head=dict( - _delete_=True, - type='GARPNHead', - in_channels=256, - feat_channels=256, - approx_anchor_generator=dict( - type='AnchorGenerator', - octave_base_scale=8, - scales_per_octave=3, - ratios=[0.5, 1.0, 2.0], - strides=[4, 8, 16, 32, 64]), - square_anchor_generator=dict( - type='AnchorGenerator', - ratios=[1.0], - scales=[8], - strides=[4, 8, 16, 32, 64]), - anchor_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.14, 0.14]), - bbox_coder=dict( - type='DeltaXYWHBBoxCoder', - target_means=[.0, .0, .0, .0], - target_stds=[0.07, 0.07, 0.11, 0.11]), - loc_filter_thr=0.01, - loss_loc=dict( - type='FocalLoss', - use_sigmoid=True, - gamma=2.0, - alpha=0.25, - loss_weight=1.0), - loss_shape=dict(type='BoundedIoULoss', beta=0.2, loss_weight=1.0), - loss_cls=dict( - type='CrossEntropyLoss', use_sigmoid=True, loss_weight=1.0), - loss_bbox=dict(type='SmoothL1Loss', beta=1.0, loss_weight=1.0)), - roi_head=dict( - bbox_head=dict(bbox_coder=dict(target_stds=[0.05, 0.05, 0.1, 0.1]))), - # model training and testing settings - train_cfg=dict( - rpn=dict( - ga_assigner=dict( - type='ApproxMaxIoUAssigner', - pos_iou_thr=0.7, - neg_iou_thr=0.3, - min_pos_iou=0.3, - ignore_iof_thr=-1), - ga_sampler=dict( - type='RandomSampler', - num=256, - pos_fraction=0.5, - neg_pos_ub=-1, - add_gt_as_proposals=False), - allowed_border=-1, - center_ratio=0.2, - ignore_ratio=0.5), - rpn_proposal=dict(nms_post=1000, max_per_img=300), - rcnn=dict( - assigner=dict(pos_iou_thr=0.6, neg_iou_thr=0.6, min_pos_iou=0.6), - sampler=dict(type='RandomSampler', num=256))), - test_cfg=dict( - rpn=dict(nms_post=1000, max_per_img=300), rcnn=dict(score_thr=1e-3))) -optimizer_config = dict( - _delete_=True, grad_clip=dict(max_norm=35, norm_type=2)) diff --git a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/post_processing/bbox_nms.py 
b/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/post_processing/bbox_nms.py deleted file mode 100644 index 966d3a6ac86637a6be90edc3aab9b6863fb87764..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_detection/mmdet/core/post_processing/bbox_nms.py +++ /dev/null @@ -1,168 +0,0 @@ -import torch -from mmcv.ops.nms import batched_nms - -from mmdet.core.bbox.iou_calculators import bbox_overlaps - - -def multiclass_nms(multi_bboxes, - multi_scores, - score_thr, - nms_cfg, - max_num=-1, - score_factors=None, - return_inds=False): - """NMS for multi-class bboxes. - - Args: - multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) - multi_scores (Tensor): shape (n, #class), where the last column - contains scores of the background class, but this will be ignored. - score_thr (float): bbox threshold, bboxes with scores lower than it - will not be considered. - nms_thr (float): NMS IoU threshold - max_num (int, optional): if there are more than max_num bboxes after - NMS, only top max_num will be kept. Default to -1. - score_factors (Tensor, optional): The factors multiplied to scores - before applying NMS. Default to None. - return_inds (bool, optional): Whether return the indices of kept - bboxes. Default to False. - - Returns: - tuple: (bboxes, labels, indices (optional)), tensors of shape (k, 5), - (k), and (k). Labels are 0-based. - """ - num_classes = multi_scores.size(1) - 1 - # exclude background category - if multi_bboxes.shape[1] > 4: - bboxes = multi_bboxes.view(multi_scores.size(0), -1, 4) - else: - bboxes = multi_bboxes[:, None].expand( - multi_scores.size(0), num_classes, 4) - - scores = multi_scores[:, :-1] - - labels = torch.arange(num_classes, dtype=torch.long) - labels = labels.view(1, -1).expand_as(scores) - - bboxes = bboxes.reshape(-1, 4) - scores = scores.reshape(-1) - labels = labels.reshape(-1) - - if not torch.onnx.is_in_onnx_export(): - # NonZero not supported in TensorRT - # remove low scoring boxes - valid_mask = scores > score_thr - # multiply score_factor after threshold to preserve more bboxes, improve - # mAP by 1% for YOLOv3 - if score_factors is not None: - # expand the shape to match original shape of score - score_factors = score_factors.view(-1, 1).expand( - multi_scores.size(0), num_classes) - score_factors = score_factors.reshape(-1) - scores = scores * score_factors - - if not torch.onnx.is_in_onnx_export(): - # NonZero not supported in TensorRT - inds = valid_mask.nonzero(as_tuple=False).squeeze(1) - bboxes, scores, labels = bboxes[inds], scores[inds], labels[inds] - else: - # TensorRT NMS plugin has invalid output filled with -1 - # add dummy data to make detection output correct. - bboxes = torch.cat([bboxes, bboxes.new_zeros(1, 4)], dim=0) - scores = torch.cat([scores, scores.new_zeros(1)], dim=0) - labels = torch.cat([labels, labels.new_zeros(1)], dim=0) - - if bboxes.numel() == 0: - if torch.onnx.is_in_onnx_export(): - raise RuntimeError('[ONNX Error] Can not record NMS ' - 'as it has not been executed this time') - if return_inds: - return bboxes, labels, inds - else: - return bboxes, labels - - dets, keep = batched_nms(bboxes, scores, labels, nms_cfg) - - if max_num > 0: - dets = dets[:max_num] - keep = keep[:max_num] - - if return_inds: - return dets, labels[keep], keep - else: - return dets, labels[keep] - - -def fast_nms(multi_bboxes, - multi_scores, - multi_coeffs, - score_thr, - iou_thr, - top_k, - max_num=-1): - """Fast NMS in `YOLACT `_. 
- - Fast NMS allows already-removed detections to suppress other detections so - that every instance can be decided to be kept or discarded in parallel, - which is not possible in traditional NMS. This relaxation allows us to - implement Fast NMS entirely in standard GPU-accelerated matrix operations. - - Args: - multi_bboxes (Tensor): shape (n, #class*4) or (n, 4) - multi_scores (Tensor): shape (n, #class+1), where the last column - contains scores of the background class, but this will be ignored. - multi_coeffs (Tensor): shape (n, #class*coeffs_dim). - score_thr (float): bbox threshold, bboxes with scores lower than it - will not be considered. - iou_thr (float): IoU threshold to be considered as conflicted. - top_k (int): if there are more than top_k bboxes before NMS, - only top top_k will be kept. - max_num (int): if there are more than max_num bboxes after NMS, - only top max_num will be kept. If -1, keep all the bboxes. - Default: -1. - - Returns: - tuple: (bboxes, labels, coefficients), tensors of shape (k, 5), (k, 1), - and (k, coeffs_dim). Labels are 0-based. - """ - - scores = multi_scores[:, :-1].t() # [#class, n] - scores, idx = scores.sort(1, descending=True) - - idx = idx[:, :top_k].contiguous() - scores = scores[:, :top_k] # [#class, topk] - num_classes, num_dets = idx.size() - boxes = multi_bboxes[idx.view(-1), :].view(num_classes, num_dets, 4) - coeffs = multi_coeffs[idx.view(-1), :].view(num_classes, num_dets, -1) - - iou = bbox_overlaps(boxes, boxes) # [#class, topk, topk] - iou.triu_(diagonal=1) - iou_max, _ = iou.max(dim=1) - - # Now just filter out the ones higher than the threshold - keep = iou_max <= iou_thr - - # Second thresholding introduces 0.2 mAP gain at negligible time cost - keep *= scores > score_thr - - # Assign each kept detection to its corresponding class - classes = torch.arange( - num_classes, device=boxes.device)[:, None].expand_as(keep) - classes = classes[keep] - - boxes = boxes[keep] - coeffs = coeffs[keep] - scores = scores[keep] - - # Only keep the top max_num highest scores across all classes - scores, idx = scores.sort(0, descending=True) - if max_num > 0: - idx = idx[:max_num] - scores = scores[:max_num] - - classes = classes[idx] - boxes = boxes[idx] - coeffs = coeffs[idx] - - cls_dets = torch.cat([boxes, scores[:, None]], dim=1) - return cls_dets, classes, coeffs diff --git a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py b/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py deleted file mode 100644 index 1f9a917fa4223bd2428f2b2d10eac446f7ecc71a..0000000000000000000000000000000000000000 --- a/spaces/Gradio-Blocks/uniformer_image_segmentation/configs/dmnet/dmnet_r50-d8_512x1024_40k_cityscapes.py +++ /dev/null @@ -1,4 +0,0 @@ -_base_ = [ - '../_base_/models/dmnet_r50-d8.py', '../_base_/datasets/cityscapes.py', - '../_base_/default_runtime.py', '../_base_/schedules/schedule_40k.py' -] diff --git a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/common_utils/temp_utils.py b/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/common_utils/temp_utils.py deleted file mode 100644 index d1e0367e979c8b9fea65472c373916d956ad5aaa..0000000000000000000000000000000000000000 --- a/spaces/GrandaddyShmax/MusicGen_Plus_hfv2/tests/common_utils/temp_utils.py +++ /dev/null @@ -1,56 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. 
-# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import os -import tempfile - - -class TempDirMixin: - """Mixin to provide easy access to temp dir. - """ - - temp_dir_ = None - - @classmethod - def get_base_temp_dir(cls): - # If AUDIOCRAFT_TEST_DIR is set, use it instead of temporary directory. - # this is handy for debugging. - key = "AUDIOCRAFT_TEST_DIR" - if key in os.environ: - return os.environ[key] - if cls.temp_dir_ is None: - cls.temp_dir_ = tempfile.TemporaryDirectory() - return cls.temp_dir_.name - - @classmethod - def tearDownClass(cls): - if cls.temp_dir_ is not None: - try: - cls.temp_dir_.cleanup() - cls.temp_dir_ = None - except PermissionError: - # On Windows there is a know issue with `shutil.rmtree`, - # which fails intermittenly. - # https://github.com/python/cpython/issues/74168 - # Following the above thread, we ignore it. - pass - super().tearDownClass() - - @property - def id(self): - return self.__class__.__name__ - - def get_temp_path(self, *paths): - temp_dir = os.path.join(self.get_base_temp_dir(), self.id) - path = os.path.join(temp_dir, *paths) - os.makedirs(os.path.dirname(path), exist_ok=True) - return path - - def get_temp_dir(self, *paths): - temp_dir = os.path.join(self.get_base_temp_dir(), self.id) - path = os.path.join(temp_dir, *paths) - os.makedirs(path, exist_ok=True) - return path diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/dpt/__init__.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/dpt/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/lib/spvcnn_utils.py b/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/lib/spvcnn_utils.py deleted file mode 100644 index 6d6d1ce388d51933a8c34c541eaa7bc58e3014bf..0000000000000000000000000000000000000000 --- a/spaces/HCMUT-GraduateThesis-HNTThinh/rgbdsod-multimae-demo/lib/spvcnn_utils.py +++ /dev/null @@ -1,105 +0,0 @@ -import torchsparse.nn.functional as spf -from torchsparse.point_tensor import PointTensor -from torchsparse.utils.kernel_region import * -from torchsparse.utils.helpers import * - - -__all__ = ['initial_voxelize', 'point_to_voxel', 'voxel_to_point'] - - -# z: PointTensor -# return: SparseTensor -def initial_voxelize(z, init_res, after_res): - new_float_coord = torch.cat( - [(z.C[:, :3] * init_res) / after_res, z.C[:, -1].view(-1, 1)], 1) - - pc_hash = spf.sphash(torch.floor(new_float_coord).int()) - sparse_hash = torch.unique(pc_hash) - idx_query = spf.sphashquery(pc_hash, sparse_hash) - counts = spf.spcount(idx_query.int(), len(sparse_hash)) - - inserted_coords = spf.spvoxelize(torch.floor(new_float_coord), idx_query, - counts) - inserted_coords = torch.round(inserted_coords).int() - inserted_feat = spf.spvoxelize(z.F, idx_query, counts) - - new_tensor = SparseTensor(inserted_feat, inserted_coords, 1) - new_tensor.check() - z.additional_features['idx_query'][1] = idx_query - z.additional_features['counts'][1] = counts - z.C = new_float_coord - - return new_tensor - - -# x: SparseTensor, z: PointTensor -# return: SparseTensor -def point_to_voxel(x, z): - if z.additional_features is None or z.additional_features.get('idx_query') is None\ - or z.additional_features['idx_query'].get(x.s) is None: - #pc_hash = hash_gpu(torch.floor(z.C).int()) - pc_hash = spf.sphash( - torch.cat([ - torch.floor(z.C[:, :3] / x.s).int() * x.s, - 
z.C[:, -1].int().view(-1, 1) - ], 1)) - sparse_hash = spf.sphash(x.C) - idx_query = spf.sphashquery(pc_hash, sparse_hash) - counts = spf.spcount(idx_query.int(), x.C.shape[0]) - z.additional_features['idx_query'][x.s] = idx_query - z.additional_features['counts'][x.s] = counts - else: - idx_query = z.additional_features['idx_query'][x.s] - counts = z.additional_features['counts'][x.s] - - inserted_feat = spf.spvoxelize(z.F, idx_query, counts) - new_tensor = SparseTensor(inserted_feat, x.C, x.s) - new_tensor.coord_maps = x.coord_maps - new_tensor.kernel_maps = x.kernel_maps - - return new_tensor - - -# x: SparseTensor, z: PointTensor -# return: PointTensor -def voxel_to_point(x, z, nearest=False): - if z.idx_query is None or z.weights is None or z.idx_query.get( - x.s) is None or z.weights.get(x.s) is None: - kr = KernelRegion(2, x.s, 1) - off = kr.get_kernel_offset().to(z.F.device) - #old_hash = kernel_hash_gpu(torch.floor(z.C).int(), off) - old_hash = spf.sphash( - torch.cat([ - torch.floor(z.C[:, :3] / x.s).int() * x.s, - z.C[:, -1].int().view(-1, 1) - ], 1), off) - pc_hash = spf.sphash(x.C.to(z.F.device)) - idx_query = spf.sphashquery(old_hash, pc_hash) - weights = spf.calc_ti_weights(z.C, idx_query, - scale=x.s).transpose(0, 1).contiguous() - idx_query = idx_query.transpose(0, 1).contiguous() - if nearest: - weights[:, 1:] = 0. - idx_query[:, 1:] = -1 - new_feat = spf.spdevoxelize(x.F, idx_query, weights) - new_tensor = PointTensor(new_feat, - z.C, - idx_query=z.idx_query, - weights=z.weights) - new_tensor.additional_features = z.additional_features - new_tensor.idx_query[x.s] = idx_query - new_tensor.weights[x.s] = weights - z.idx_query[x.s] = idx_query - z.weights[x.s] = weights - - else: - new_feat = spf.spdevoxelize(x.F, z.idx_query.get(x.s), z.weights.get(x.s)) - new_tensor = PointTensor(new_feat, - z.C, - idx_query=z.idx_query, - weights=z.weights) - new_tensor.additional_features = z.additional_features - - return new_tensor - - diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/transformer.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/transformer.py deleted file mode 100644 index 6b330ef1b7f7a506e7e8176f20a0e722b5fd5149..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/model_parallel/models/transformer.py +++ /dev/null @@ -1,121 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging - -import torch.nn as nn -from fairseq.model_parallel.modules import ( - ModelParallelTransformerDecoderLayer, - ModelParallelTransformerEncoderLayer, -) -from fairseq.models import register_model -from fairseq.models.transformer import ( - TransformerDecoder, - TransformerEncoder, - TransformerModel, -) - - -try: - from fairseq.model_parallel.megatron.mpu import ( - copy_to_model_parallel_region, - gather_from_model_parallel_region, - VocabParallelEmbedding, - ) - - has_megatron_submodule = True -except (ImportError, ModuleNotFoundError): - has_megatron_submodule = False - - -logger = logging.getLogger(__name__) - - -@register_model("model_parallel_transformer") -class ModelParallelTransformerModel(TransformerModel): - """ - Model parallel Transformer model. 
- """ - - @classmethod - def build_embedding(cls, args, dictionary, embed_dim, path=None): - if not has_megatron_submodule: - raise ImportError( - "\n\nPlease install the megatron submodule:" - "\n\n git submodule update --init " - "fairseq/model_parallel/megatron" - ) - dictionary.pad_to_multiple_(args.model_parallel_size * 8) - num_embeddings = len(dictionary) - padding_idx = dictionary.pad() - - def _vocab_init(tensor, **kwargs): - nn.init.normal_(tensor, mean=0, std=num_embeddings ** -0.5) - nn.init.constant_(tensor[1], 0) - - emb = VocabParallelEmbedding( - num_embeddings, embed_dim, padding_idx, init_method=_vocab_init - ) - # if provided, load from preloaded dictionaries - if path: - raise NotImplementedError( - "Loading of embedding from path is not supported for model parallel" - ) - return emb - - @classmethod - def build_encoder(cls, args, src_dict, embed_tokens): - return ModelParallelTransformerEncoder(args, src_dict, embed_tokens) - - @classmethod - def build_decoder(cls, args, tgt_dict, embed_tokens): - return ModelParallelTransformerDecoder( - args, - tgt_dict, - embed_tokens, - no_encoder_attn=getattr(args, "no_cross_attention", False), - ) - - -class ModelParallelTransformerEncoder(TransformerEncoder): - """ - Model parallel Transformer encoder consisting of *args.encoder_layers* layers. Each layer - is a :class:`ModelParallelTransformerEncoderLayer`. - """ - - def __init__(self, args, dictionary, embed_tokens): - super().__init__(args, dictionary, embed_tokens) - - if args.no_final_layer_norm: - self.layer_norm = None - - def build_encoder_layer(self, args): - return ModelParallelTransformerEncoderLayer(args) - - -class ModelParallelTransformerDecoder(TransformerDecoder): - """ - Model Parallel Transformer decoder consisting of *args.decoder_layers* layers. Each layer - is a :class:`ModelParallelTransformerDecoderLayer`. - """ - - def build_decoder_layer(self, args, no_encoder_attn=False): - return ModelParallelTransformerDecoderLayer(args, no_encoder_attn) - - def output_layer(self, features, **kwargs): - """Project features to the vocabulary size.""" - if not self.share_input_output_embed: - raise NotImplementedError( - "Model parallel training currently requires --share-decoder-input-output-embed" - ) - - features = copy_to_model_parallel_region(features) - - # project back to size of vocabulary - x = self.output_projection(features) - - if getattr(self.args, "criterion") != "vocab_parallel_cross_entropy": - x = gather_from_model_parallel_region(x).contiguous() - return x diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/fairseq_model.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/fairseq_model.py deleted file mode 100644 index e55c7ba1ad90f4e2f12db6c814d04a90c4e3b77c..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/fairseq/models/fairseq_model.py +++ /dev/null @@ -1,569 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. -""" -Base classes for various fairseq models. 
-""" - -import logging -from argparse import Namespace -from typing import Dict, List, Optional, Tuple - -import torch -import torch.nn as nn -import torch.nn.functional as F -from fairseq import utils -from fairseq.data import Dictionary -from fairseq.dataclass.utils import ( - convert_namespace_to_omegaconf, - gen_parser_from_dataclass, -) -from fairseq.models import FairseqDecoder, FairseqEncoder -from omegaconf import DictConfig -from torch import Tensor - - -logger = logging.getLogger(__name__) - - -def check_type(module, expected_type): - if hasattr(module, "unwrapped_module"): - assert isinstance(module.unwrapped_module, expected_type), \ - f"{type(module.unwrapped_module)} != {expected_type}" - else: - assert isinstance(module, expected_type), f"{type(module)} != {expected_type}" - - -class BaseFairseqModel(nn.Module): - """Base class for fairseq models.""" - - def __init__(self): - super().__init__() - self._is_generation_fast = False - - @classmethod - def add_args(cls, parser): - """Add model-specific arguments to the parser.""" - dc = getattr(cls, "__dataclass", None) - if dc is not None: - # do not set defaults so that settings defaults from various architectures still works - gen_parser_from_dataclass(parser, dc(), delete_default=True) - - @classmethod - def build_model(cls, args, task): - """Build a new model instance.""" - raise NotImplementedError("Model must implement the build_model method") - - def get_targets(self, sample, net_output): - """Get targets from either the sample or the net's output.""" - return sample["target"] - - def get_normalized_probs( - self, - net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], - log_probs: bool, - sample: Optional[Dict[str, Tensor]] = None, - ): - """Get normalized probabilities (or log probs) from a net's output.""" - return self.get_normalized_probs_scriptable(net_output, log_probs, sample) - - # TorchScript doesn't support super() method so that the scriptable Subclass - # can't access the base class model in Torchscript. - # Current workaround is to add a helper function with different name and - # call the helper function from scriptable Subclass. - def get_normalized_probs_scriptable( - self, - net_output: Tuple[Tensor, Optional[Dict[str, List[Optional[Tensor]]]]], - log_probs: bool, - sample: Optional[Dict[str, Tensor]] = None, - ): - """Scriptable helper function for get_normalized_probs in ~BaseFairseqModel""" - if hasattr(self, "decoder"): - return self.decoder.get_normalized_probs(net_output, log_probs, sample) - elif torch.is_tensor(net_output): - # syntactic sugar for simple models which don't have a decoder - # (e.g., the classification tutorial) - logits = net_output.float() - if log_probs: - return F.log_softmax(logits, dim=-1) - else: - return F.softmax(logits, dim=-1) - raise NotImplementedError - - def extract_features(self, *args, **kwargs): - """Similar to *forward* but only return features.""" - return self(*args, **kwargs) - - def max_positions(self): - """Maximum length supported by the model.""" - return None - - def load_state_dict( - self, - state_dict, - strict=True, - model_cfg: Optional[DictConfig] = None, - args: Optional[Namespace] = None, - ): - """Copies parameters and buffers from *state_dict* into this module and - its descendants. - - Overrides the method in :class:`nn.Module`. Compared with that method - this additionally "upgrades" *state_dicts* from old checkpoints. 
- """ - - if model_cfg is None and args is not None: - logger.warn("using 'args' is deprecated, please update your code to use dataclass config") - model_cfg = convert_namespace_to_omegaconf(args).model - - self.upgrade_state_dict(state_dict) - - from fairseq.checkpoint_utils import prune_state_dict - - new_state_dict = prune_state_dict(state_dict, model_cfg) - return super().load_state_dict(new_state_dict, strict) - - def upgrade_state_dict(self, state_dict): - """Upgrade old state dicts to work with newer code.""" - self.upgrade_state_dict_named(state_dict, "") - - def upgrade_state_dict_named(self, state_dict, name): - """Upgrade old state dicts to work with newer code. - - Args: - state_dict (dict): state dictionary to upgrade, in place - name (str): the state dict key corresponding to the current module - """ - assert state_dict is not None - - def do_upgrade(m, prefix): - if len(prefix) > 0: - prefix += "." - - for n, c in m.named_children(): - name = prefix + n - if hasattr(c, "upgrade_state_dict_named"): - c.upgrade_state_dict_named(state_dict, name) - elif hasattr(c, "upgrade_state_dict"): - c.upgrade_state_dict(state_dict) - do_upgrade(c, name) - - do_upgrade(self, name) - - def set_num_updates(self, num_updates): - """State from trainer to pass along to model at every update.""" - for m in self.modules(): - if hasattr(m, "set_num_updates") and m != self: - m.set_num_updates(num_updates) - - def prepare_for_inference_(self, cfg: DictConfig): - """Prepare model for inference.""" - kwargs = {} - kwargs["beamable_mm_beam_size"] = ( - None - if getattr(cfg.generation, "no_beamable_mm", False) - else getattr(cfg.generation, "beam", 5) - ) - kwargs["need_attn"] = getattr(cfg.generation, "print_alignment", False) - if getattr(cfg.generation, "retain_dropout", False): - kwargs["retain_dropout"] = cfg.generation.retain_dropout - kwargs["retain_dropout_modules"] = cfg.generation.retain_dropout_modules - self.make_generation_fast_(**kwargs) - - def make_generation_fast_(self, **kwargs): - """ - Legacy entry point to optimize model for faster generation. - Prefer prepare_for_inference_. - """ - if self._is_generation_fast: - return # only apply once - self._is_generation_fast = True - - # remove weight norm from all modules in the network - def apply_remove_weight_norm(module): - try: - nn.utils.remove_weight_norm(module) - except (AttributeError, ValueError): # this module didn't have weight norm - return - - self.apply(apply_remove_weight_norm) - - def apply_make_generation_fast_(module, prefix): - if len(prefix) > 0: - prefix += "." 
- - base_func = BaseFairseqModel.make_generation_fast_ - for n, m in module.named_modules(): - if ( - m != self - and hasattr(m, "make_generation_fast_") - # don't call this implementation again, e.g., if - # children modules also inherit from BaseFairseqModel - and m.make_generation_fast_.__func__ is not base_func - ): - name = prefix + n - m.make_generation_fast_(name=name, **kwargs) - - apply_make_generation_fast_(self, "") - - def train(mode=True): - if mode: - raise RuntimeError("cannot train after make_generation_fast") - - # this model should no longer be used for training - self.eval() - self.train = train - - def prepare_for_onnx_export_(self, **kwargs): - """Make model exportable via ONNX trace.""" - seen = set() - - def apply_prepare_for_onnx_export_(module): - if ( - module != self - and hasattr(module, "prepare_for_onnx_export_") - and module not in seen - ): - seen.add(module) - module.prepare_for_onnx_export_(**kwargs) - - self.apply(apply_prepare_for_onnx_export_) - - @classmethod - def from_pretrained( - cls, - model_name_or_path, - checkpoint_file="model.pt", - data_name_or_path=".", - **kwargs, - ): - """ - Load a :class:`~fairseq.models.FairseqModel` from a pre-trained model - file. Downloads and caches the pre-trained model file if needed. - - The base implementation returns a - :class:`~fairseq.hub_utils.GeneratorHubInterface`, which can be used to - generate translations or sample from language models. The underlying - :class:`~fairseq.models.FairseqModel` can be accessed via the - *generator.models* attribute. - - Other models may override this to implement custom hub interfaces. - - Args: - model_name_or_path (str): either the name of a pre-trained model to - load or a path/URL to a pre-trained model state dict - checkpoint_file (str, optional): colon-separated list of checkpoint - files in the model archive to ensemble (default: 'model.pt') - data_name_or_path (str, optional): point args.data to the archive - at the given path/URL. Can start with '.' or './' to reuse the - model archive path. - """ - from fairseq import hub_utils - - x = hub_utils.from_pretrained( - model_name_or_path, - checkpoint_file, - data_name_or_path, - archive_map=cls.hub_models(), - **kwargs, - ) - logger.info(x["args"]) - return hub_utils.GeneratorHubInterface(x["args"], x["task"], x["models"]) - - @classmethod - def hub_models(cls): - return {} - - -class FairseqEncoderDecoderModel(BaseFairseqModel): - """Base class for encoder-decoder models. - - Args: - encoder (FairseqEncoder): the encoder - decoder (FairseqDecoder): the decoder - """ - - def __init__(self, encoder, decoder): - super().__init__() - - self.encoder = encoder - self.decoder = decoder - - check_type(self.encoder, FairseqEncoder) - check_type(self.decoder, FairseqDecoder) - - def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): - """ - Run the forward pass for an encoder-decoder model. - - First feed a batch of source tokens through the encoder. 
Then, feed the - encoder output and previous decoder outputs (i.e., teacher forcing) to - the decoder to produce the next outputs:: - - encoder_out = self.encoder(src_tokens, src_lengths) - return self.decoder(prev_output_tokens, encoder_out) - - Args: - src_tokens (LongTensor): tokens in the source language of shape - `(batch, src_len)` - src_lengths (LongTensor): source sentence lengths of shape `(batch)` - prev_output_tokens (LongTensor): previous decoder outputs of shape - `(batch, tgt_len)`, for teacher forcing - - Returns: - tuple: - - the decoder's output of shape `(batch, tgt_len, vocab)` - - a dictionary with any model-specific outputs - """ - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - decoder_out = self.decoder( - prev_output_tokens, encoder_out=encoder_out, **kwargs - ) - return decoder_out - - def forward_decoder(self, prev_output_tokens, **kwargs): - return self.decoder(prev_output_tokens, **kwargs) - - def extract_features(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): - """ - Similar to *forward* but only return features. - - Returns: - tuple: - - the decoder's features of shape `(batch, tgt_len, embed_dim)` - - a dictionary with any model-specific outputs - """ - encoder_out = self.encoder(src_tokens, src_lengths=src_lengths, **kwargs) - features = self.decoder.extract_features( - prev_output_tokens, encoder_out=encoder_out, **kwargs - ) - return features - - def output_layer(self, features, **kwargs): - """Project features to the default output size (typically vocabulary size).""" - return self.decoder.output_layer(features, **kwargs) - - def max_positions(self): - """Maximum length supported by the model.""" - return (self.encoder.max_positions(), self.decoder.max_positions()) - - def max_decoder_positions(self): - """Maximum length supported by the decoder.""" - return self.decoder.max_positions() - - -class FairseqModel(FairseqEncoderDecoderModel): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - utils.deprecation_warning( - "FairseqModel is deprecated, please use FairseqEncoderDecoderModel " - "or BaseFairseqModel instead", - stacklevel=4, - ) - - -class FairseqMultiModel(BaseFairseqModel): - """Base class for combining multiple encoder-decoder models.""" - - def __init__(self, encoders, decoders): - super().__init__() - assert encoders.keys() == decoders.keys() - self.keys = list(encoders.keys()) - for key in self.keys: - check_type(encoders[key], FairseqEncoder) - check_type(decoders[key], FairseqDecoder) - - self.models = nn.ModuleDict( - { - key: FairseqEncoderDecoderModel(encoders[key], decoders[key]) - for key in self.keys - } - ) - - @staticmethod - def build_shared_embeddings( - dicts: Dict[str, Dictionary], - langs: List[str], - embed_dim: int, - build_embedding: callable, - pretrained_embed_path: Optional[str] = None, - ): - """ - Helper function to build shared embeddings for a set of languages after - checking that all dicts corresponding to those languages are equivalent. 
- - Args: - dicts: Dict of lang_id to its corresponding Dictionary - langs: languages that we want to share embeddings for - embed_dim: embedding dimension - build_embedding: callable function to actually build the embedding - pretrained_embed_path: Optional path to load pretrained embeddings - """ - shared_dict = dicts[langs[0]] - if any(dicts[lang] != shared_dict for lang in langs): - raise ValueError( - "--share-*-embeddings requires a joined dictionary: " - "--share-encoder-embeddings requires a joined source " - "dictionary, --share-decoder-embeddings requires a joined " - "target dictionary, and --share-all-embeddings requires a " - "joint source + target dictionary." - ) - return build_embedding(shared_dict, embed_dim, pretrained_embed_path) - - def forward(self, src_tokens, src_lengths, prev_output_tokens, **kwargs): - raise NotImplementedError - - def max_positions(self): - """Maximum length supported by the model.""" - return { - key: ( - self.models[key].encoder.max_positions(), - self.models[key].decoder.max_positions(), - ) - for key in self.keys - } - - def max_decoder_positions(self): - """Maximum length supported by the decoder.""" - return min(model.decoder.max_positions() for model in self.models.values()) - - @property - def encoder(self): - return self.models[self.keys[0]].encoder - - @property - def decoder(self): - return self.models[self.keys[0]].decoder - - def forward_decoder(self, prev_output_tokens, **kwargs): - return self.decoder(prev_output_tokens, **kwargs) - - def load_state_dict( - self, - state_dict, - strict=True, - model_cfg=None, - args: Optional[Namespace] = None, - ): - """Copies parameters and buffers from *state_dict* into this module and - its descendants. - - Overrides the method in :class:`nn.Module`. Compared with that method - this additionally "upgrades" *state_dicts* from old checkpoints. - """ - - if model_cfg is None and args is not None: - logger.warn("using 'args' is deprecated, please update your code to use dataclass config") - model_cfg = convert_namespace_to_omegaconf(args).model - - self.upgrade_state_dict(state_dict) - - from fairseq.checkpoint_utils import prune_state_dict - - new_state_dict = prune_state_dict(state_dict, model_cfg) - return super().load_state_dict(new_state_dict, strict) - - -class FairseqLanguageModel(BaseFairseqModel): - """Base class for decoder-only models. - - Args: - decoder (FairseqDecoder): the decoder - """ - - def __init__(self, decoder): - super().__init__() - self.decoder = decoder - check_type(self.decoder, FairseqDecoder) - - def forward(self, src_tokens, **kwargs): - """ - Run the forward pass for a decoder-only model. - - Feeds a batch of tokens through the decoder to predict the next tokens. - - Args: - src_tokens (LongTensor): tokens on which to condition the decoder, - of shape `(batch, tgt_len)` - src_lengths (LongTensor): source sentence lengths of shape `(batch)` - - Returns: - tuple: - - the decoder's output of shape `(batch, seq_len, vocab)` - - a dictionary with any model-specific outputs - """ - return self.decoder(src_tokens, **kwargs) - - def forward_decoder(self, prev_output_tokens, **kwargs): - return self.decoder(prev_output_tokens, **kwargs) - - def extract_features(self, src_tokens, **kwargs): - """ - Similar to *forward* but only return features. 
- - Returns: - tuple: - - the decoder's features of shape `(batch, seq_len, embed_dim)` - - a dictionary with any model-specific outputs - """ - return self.decoder.extract_features(src_tokens, **kwargs) - - def output_layer(self, features, **kwargs): - """Project features to the default output size (typically vocabulary size).""" - return self.decoder.output_layer(features, **kwargs) - - def max_positions(self): - """Maximum length supported by the model.""" - return self.decoder.max_positions() - - def max_decoder_positions(self): - """Maximum length supported by the decoder.""" - return self.decoder.max_positions() - - @property - def supported_targets(self): - return {"future"} - - -class FairseqEncoderModel(BaseFairseqModel): - """Base class for encoder-only models. - - Args: - encoder (FairseqEncoder): the encoder - """ - - def __init__(self, encoder): - super().__init__() - self.encoder = encoder - check_type(self.encoder, FairseqEncoder) - - def forward(self, src_tokens, src_lengths, **kwargs): - """ - Run the forward pass for a encoder-only model. - - Feeds a batch of tokens through the encoder to generate features. - - Args: - src_tokens (LongTensor): input tokens of shape `(batch, src_len)` - src_lengths (LongTensor): source sentence lengths of shape `(batch)` - - Returns: - the encoder's output, typically of shape `(batch, src_len, features)` - """ - return self.encoder(src_tokens, src_lengths, **kwargs) - - def get_normalized_probs(self, net_output, log_probs, sample=None): - """Get normalized probabilities (or log probs) from a net's output.""" - encoder_out = net_output["encoder_out"] - if torch.is_tensor(encoder_out): - logits = encoder_out.float() - if log_probs: - return F.log_softmax(logits, dim=-1) - else: - return F.softmax(logits, dim=-1) - raise NotImplementedError - - def max_positions(self): - """Maximum length supported by the model.""" - return self.encoder.max_positions() diff --git a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_iterators.py b/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_iterators.py deleted file mode 100644 index 7b3dd4848553357e5e8326ed3a31cf5d68ceea94..0000000000000000000000000000000000000000 --- a/spaces/HarryLee/eCommerceImageCaptioning/fairseq/tests/test_iterators.py +++ /dev/null @@ -1,137 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
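For context on the encoder-only base class above, here is a minimal sketch of how FairseqEncoderModel is typically subclassed. The ToyEncoder, its embedding size, and the assumption that the task exposes a source_dictionary are illustrative rather than taken from this diff; the only contract exercised is that the encoder's forward() returns a dict whose "encoder_out" tensor the inherited get_normalized_probs() can (log-)softmax.

import torch.nn as nn
from fairseq.models import FairseqEncoder, FairseqEncoderModel


class ToyEncoder(FairseqEncoder):
    """Tiny encoder that emits per-token vocabulary logits."""

    def __init__(self, dictionary, embed_dim=32):
        super().__init__(dictionary)
        self.embed = nn.Embedding(len(dictionary), embed_dim, padding_idx=dictionary.pad())
        self.proj = nn.Linear(embed_dim, len(dictionary))

    def forward(self, src_tokens, src_lengths=None, **kwargs):
        # shape (batch, src_len, vocab); keyed the way FairseqEncoderModel expects
        return {"encoder_out": self.proj(self.embed(src_tokens))}


class ToyEncoderModel(FairseqEncoderModel):
    @classmethod
    def build_model(cls, args, task):
        # assumes the task provides a source_dictionary, as fairseq tasks do
        return cls(ToyEncoder(task.source_dictionary))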
- -import unittest - -from fairseq.data import iterators - - -class TestIterators(unittest.TestCase): - def test_counting_iterator_index(self, ref=None, itr=None): - # Test the indexing functionality of CountingIterator - if ref is None: - assert itr is None - ref = list(range(10)) - itr = iterators.CountingIterator(ref) - else: - assert len(ref) == 10 - assert itr is not None - - self.assertTrue(itr.has_next()) - self.assertEqual(itr.n, 0) - self.assertEqual(next(itr), ref[0]) - self.assertEqual(itr.n, 1) - self.assertEqual(next(itr), ref[1]) - self.assertEqual(itr.n, 2) - itr.skip(3) - self.assertEqual(itr.n, 5) - self.assertEqual(next(itr), ref[5]) - itr.skip(2) - self.assertEqual(itr.n, 8) - self.assertEqual(list(itr), [ref[8], ref[9]]) - self.assertFalse(itr.has_next()) - - def test_counting_iterator_length_mismatch(self): - ref = list(range(10)) - # When the underlying iterable is longer than the CountingIterator, - # the remaining items in the iterable should be ignored - itr = iterators.CountingIterator(ref, total=8) - self.assertEqual(list(itr), ref[:8]) - # When the underlying iterable is shorter than the CountingIterator, - # raise an IndexError when the underlying iterable is exhausted - itr = iterators.CountingIterator(ref, total=12) - self.assertRaises(IndexError, list, itr) - - def test_counting_iterator_take(self): - # Test the "take" method of CountingIterator - ref = list(range(10)) - itr = iterators.CountingIterator(ref) - itr.take(5) - self.assertEqual(len(itr), len(list(iter(itr)))) - self.assertEqual(len(itr), 5) - - itr = iterators.CountingIterator(ref) - itr.take(5) - self.assertEqual(next(itr), ref[0]) - self.assertEqual(next(itr), ref[1]) - itr.skip(2) - self.assertEqual(next(itr), ref[4]) - self.assertFalse(itr.has_next()) - - def test_grouped_iterator(self): - # test correctness - x = list(range(10)) - itr = iterators.GroupedIterator(x, 1) - self.assertEqual(list(itr), [[0], [1], [2], [3], [4], [5], [6], [7], [8], [9]]) - itr = iterators.GroupedIterator(x, 4) - self.assertEqual(list(itr), [[0, 1, 2, 3], [4, 5, 6, 7], [8, 9]]) - itr = iterators.GroupedIterator(x, 5) - self.assertEqual(list(itr), [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]) - - # test the GroupIterator also works correctly as a CountingIterator - x = list(range(30)) - ref = list(iterators.GroupedIterator(x, 3)) - itr = iterators.GroupedIterator(x, 3) - self.test_counting_iterator_index(ref, itr) - - def test_sharded_iterator(self): - # test correctness - x = list(range(10)) - itr = iterators.ShardedIterator(x, num_shards=1, shard_id=0) - self.assertEqual(list(itr), x) - itr = iterators.ShardedIterator(x, num_shards=2, shard_id=0) - self.assertEqual(list(itr), [0, 2, 4, 6, 8]) - itr = iterators.ShardedIterator(x, num_shards=2, shard_id=1) - self.assertEqual(list(itr), [1, 3, 5, 7, 9]) - itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0) - self.assertEqual(list(itr), [0, 3, 6, 9]) - itr = iterators.ShardedIterator(x, num_shards=3, shard_id=1) - self.assertEqual(list(itr), [1, 4, 7, None]) - itr = iterators.ShardedIterator(x, num_shards=3, shard_id=2) - self.assertEqual(list(itr), [2, 5, 8, None]) - - # test CountingIterator functionality - x = list(range(30)) - ref = list(iterators.ShardedIterator(x, num_shards=3, shard_id=0)) - itr = iterators.ShardedIterator(x, num_shards=3, shard_id=0) - self.test_counting_iterator_index(ref, itr) - - def test_counting_iterator_buffered_iterator_take(self): - ref = list(range(10)) - buffered_itr = iterators.BufferedIterator(2, ref) - itr = 
iterators.CountingIterator(buffered_itr) - itr.take(5) - self.assertEqual(len(itr), len(list(iter(itr)))) - self.assertEqual(len(itr), 5) - - buffered_itr = iterators.BufferedIterator(2, ref) - itr = iterators.CountingIterator(buffered_itr) - itr.take(5) - self.assertEqual(len(buffered_itr), 5) - self.assertEqual(len(list(iter(buffered_itr))), 5) - - buffered_itr = iterators.BufferedIterator(2, ref) - itr = iterators.CountingIterator(buffered_itr) - itr.take(5) - self.assertEqual(next(itr), ref[0]) - self.assertEqual(next(itr), ref[1]) - itr.skip(2) - self.assertEqual(next(itr), ref[4]) - self.assertFalse(itr.has_next()) - self.assertRaises(StopIteration, next, buffered_itr) - - ref = list(range(4, 10)) - buffered_itr = iterators.BufferedIterator(2, ref) - itr = iterators.CountingIterator(buffered_itr, start=4) - itr.take(5) - self.assertEqual(len(itr), 5) - self.assertEqual(len(buffered_itr), 1) - self.assertEqual(next(itr), ref[0]) - self.assertFalse(itr.has_next()) - self.assertRaises(StopIteration, next, buffered_itr) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/Hellisotherpeople/HF-SHAP/README.md b/spaces/Hellisotherpeople/HF-SHAP/README.md deleted file mode 100644 index 1b317c1af119e375364c39a70fa2a7bd3f30c07b..0000000000000000000000000000000000000000 --- a/spaces/Hellisotherpeople/HF-SHAP/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: HF SHAP -emoji: 🤷 ➡️ 🤗 -colorFrom: red -colorTo: gray -sdk: streamlit -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Hexamind/GDOC/src/tools/doc_tools.py b/spaces/Hexamind/GDOC/src/tools/doc_tools.py deleted file mode 100644 index ad8266e9de89f0d0a6893d11ca72cf6fb72d1104..0000000000000000000000000000000000000000 --- a/spaces/Hexamind/GDOC/src/tools/doc_tools.py +++ /dev/null @@ -1,42 +0,0 @@ -from PIL import Image -import os - -def get_positions(xml_file): - i = 0 - width = xml_file.split('cx="') - height = xml_file.split('cy="') - while(i < len(width)): - temp = width[i].split('"')[0] - if(temp.isnumeric()): - width = temp - break - else: - i+=1 - i = 0 - while(i < len(height)): - temp = height[i].split('"')[0] - if(temp.isnumeric()): - height = temp - break - else: - i+=1 - return width, height - -def convert_to_png(imageslist): - for image in imageslist: - if(image.endswith('.png')): - continue - im = Image.open(image) - im.save(image.split('.')[0]+'.png') - imageslist[imageslist.index(image)] = image.split('.')[0]+'.png' - os.remove(image) - return imageslist - - -def get_difference_with_template(styles_used_in_doc, template): - styles_used_in_template = template.styles.names - different_styles = [] - for style in styles_used_in_doc: - if style not in styles_used_in_template: - different_styles.append(style) - return different_styles \ No newline at end of file diff --git a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_general_metadata/text_duplicates/text_duplicates.html b/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_general_metadata/text_duplicates/text_duplicates.html deleted file mode 100644 index 0829d026a4b7c4ceb3e5382c5f3f1bd3b5d8c4f0..0000000000000000000000000000000000000000 --- 
a/spaces/HuggingFaceM4/IDEFICS_Data_Measurement_Tool/cache_dir/HuggingFaceM4/OBELICS_opt_out_docs_removed_2023_07_12_train_general_metadata/text_duplicates/text_duplicates.html +++ /dev/null @@ -1 +0,0 @@ -
    duplicate_fraction 0.0
    duplicates_dict
    \ No newline at end of file diff --git a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/criterions/ASG_loss.py b/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/criterions/ASG_loss.py deleted file mode 100644 index 41f50bbd70388ce723f2d316d4e9776bcd6be3c9..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/examples/speech_recognition/criterions/ASG_loss.py +++ /dev/null @@ -1,170 +0,0 @@ -#!/usr/bin/env python3 - -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from examples.speech_recognition.data.replabels import pack_replabels -from fairseq import utils -from fairseq.criterions import FairseqCriterion, register_criterion - - -@register_criterion("asg_loss") -class ASGCriterion(FairseqCriterion): - @staticmethod - def add_args(parser): - group = parser.add_argument_group("ASG Loss") - group.add_argument( - "--asg-transitions-init", - help="initial diagonal value of transition matrix", - type=float, - default=0.0, - ) - group.add_argument( - "--max-replabel", help="maximum # of replabels", type=int, default=2 - ) - group.add_argument( - "--linseg-updates", - help="# of training updates to use LinSeg initialization", - type=int, - default=0, - ) - group.add_argument( - "--hide-linseg-messages", - help="hide messages about LinSeg initialization", - action="store_true", - ) - - def __init__( - self, - task, - silence_token, - asg_transitions_init, - max_replabel, - linseg_updates, - hide_linseg_messages, - ): - from flashlight.lib.sequence.criterion import ASGLoss, CriterionScaleMode - - super().__init__(task) - self.tgt_dict = task.target_dictionary - self.eos = self.tgt_dict.eos() - self.silence = ( - self.tgt_dict.index(silence_token) - if silence_token in self.tgt_dict - else None - ) - self.max_replabel = max_replabel - - num_labels = len(self.tgt_dict) - self.asg = ASGLoss(num_labels, scale_mode=CriterionScaleMode.TARGET_SZ_SQRT) - self.asg.trans = torch.nn.Parameter( - asg_transitions_init * torch.eye(num_labels), requires_grad=True - ) - - self.linseg_progress = torch.nn.Parameter( - torch.tensor([0], dtype=torch.int), requires_grad=False - ) - self.linseg_maximum = linseg_updates - self.linseg_message_state = "none" if hide_linseg_messages else "start" - - @classmethod - def build_criterion(cls, args, task): - return cls( - task, - args.silence_token, - args.asg_transitions_init, - args.max_replabel, - args.linseg_updates, - args.hide_linseg_messages, - ) - - def linseg_step(self): - if not self.training: - return False - if self.linseg_progress.item() < self.linseg_maximum: - if self.linseg_message_state == "start": - print("| using LinSeg to initialize ASG") - self.linseg_message_state = "finish" - self.linseg_progress.add_(1) - return True - elif self.linseg_message_state == "finish": - print("| finished LinSeg initialization") - self.linseg_message_state = "none" - return False - - def replace_eos_with_silence(self, tgt): - if tgt[-1] != self.eos: - return tgt - elif self.silence is None or (len(tgt) > 1 and tgt[-2] == self.silence): - return tgt[:-1] - else: - return tgt[:-1] + [self.silence] - - def forward(self, model, sample, reduce=True): - """Compute the loss for the given sample. 
- - Returns a tuple with three elements: - 1) the loss - 2) the sample size, which is used as the denominator for the gradient - 3) logging outputs to display while training - """ - - net_output = model(**sample["net_input"]) - emissions = net_output["encoder_out"].transpose(0, 1).contiguous() - B = emissions.size(0) - T = emissions.size(1) - device = emissions.device - - target = torch.IntTensor(B, T) - target_size = torch.IntTensor(B) - using_linseg = self.linseg_step() - - for b in range(B): - initial_target_size = sample["target_lengths"][b].item() - if initial_target_size == 0: - raise ValueError("target size cannot be zero") - - tgt = sample["target"][b, :initial_target_size].tolist() - tgt = self.replace_eos_with_silence(tgt) - tgt = pack_replabels(tgt, self.tgt_dict, self.max_replabel) - tgt = tgt[:T] - - if using_linseg: - tgt = [tgt[t * len(tgt) // T] for t in range(T)] - - target[b][: len(tgt)] = torch.IntTensor(tgt) - target_size[b] = len(tgt) - - loss = self.asg.forward(emissions, target.to(device), target_size.to(device)) - - if reduce: - loss = torch.sum(loss) - - sample_size = ( - sample["target"].size(0) if self.args.sentence_avg else sample["ntokens"] - ) - logging_output = { - "loss": utils.item(loss.data) if reduce else loss.data, - "ntokens": sample["ntokens"], - "nsentences": sample["target"].size(0), - "sample_size": sample_size, - } - return loss, sample_size, logging_output - - @staticmethod - def aggregate_logging_outputs(logging_outputs): - """Aggregate logging outputs from data parallel training.""" - loss_sum = sum(log.get("loss", 0) for log in logging_outputs) - ntokens = sum(log.get("ntokens", 0) for log in logging_outputs) - nsentences = sum(log.get("nsentences", 0) for log in logging_outputs) - sample_size = sum(log.get("sample_size", 0) for log in logging_outputs) - agg_output = { - "loss": loss_sum / nsentences, - "ntokens": ntokens, - "nsentences": nsentences, - "sample_size": sample_size, - } - return agg_output diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/criterions/ctc.py b/spaces/ICML2022/OFA/fairseq/fairseq/criterions/ctc.py deleted file mode 100644 index 10e3618382c86a84466cb4264d62f31537980251..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/criterions/ctc.py +++ /dev/null @@ -1,295 +0,0 @@ -# All rights reserved. -# -# This source code is licensed under the license found in the LICENSE file in -# the root directory of this source tree. An additional grant of patent rights -# can be found in the PATENTS file in the same directory. - -import math -from argparse import Namespace -from dataclasses import dataclass, field -from omegaconf import II -from typing import Optional - -import torch -import torch.nn.functional as F -from fairseq import metrics, utils -from fairseq.criterions import FairseqCriterion, register_criterion -from fairseq.dataclass import FairseqDataclass -from fairseq.data.data_utils import post_process -from fairseq.tasks import FairseqTask -from fairseq.logging.meters import safe_round - - -@dataclass -class CtcCriterionConfig(FairseqDataclass): - zero_infinity: bool = field( - default=False, - metadata={"help": "zero inf loss when source length <= target length"}, - ) - sentence_avg: bool = II("optimization.sentence_avg") - post_process: str = field( - default="letter", - metadata={ - "help": "how to post process predictions into words. can be letter, " - "wordpiece, BPE symbols, etc. 
" - "See fairseq.data.data_utils.post_process() for full list of options" - }, - ) - wer_kenlm_model: Optional[str] = field( - default=None, - metadata={ - "help": "if this is provided, use kenlm to compute wer (along with other wer_* args)" - }, - ) - wer_lexicon: Optional[str] = field( - default=None, - metadata={"help": "lexicon to use with wer_kenlm_model"}, - ) - wer_lm_weight: float = field( - default=2.0, - metadata={"help": "lm weight to use with wer_kenlm_model"}, - ) - wer_word_score: float = field( - default=-1.0, - metadata={"help": "lm word score to use with wer_kenlm_model"}, - ) - - wer_args: Optional[str] = field( - default=None, - metadata={ - "help": "DEPRECATED: tuple of (wer_kenlm_model, wer_lexicon, wer_lm_weight, wer_word_score)" - }, - ) - - -@register_criterion("ctc", dataclass=CtcCriterionConfig) -class CtcCriterion(FairseqCriterion): - def __init__(self, cfg: CtcCriterionConfig, task: FairseqTask): - super().__init__(task) - self.blank_idx = ( - task.target_dictionary.index(task.blank_symbol) - if hasattr(task, "blank_symbol") - else 0 - ) - self.pad_idx = task.target_dictionary.pad() - self.eos_idx = task.target_dictionary.eos() - self.post_process = cfg.post_process - - if cfg.wer_args is not None: - ( - cfg.wer_kenlm_model, - cfg.wer_lexicon, - cfg.wer_lm_weight, - cfg.wer_word_score, - ) = eval(cfg.wer_args) - - if cfg.wer_kenlm_model is not None: - from examples.speech_recognition.w2l_decoder import W2lKenLMDecoder - - dec_args = Namespace() - dec_args.nbest = 1 - dec_args.criterion = "ctc" - dec_args.kenlm_model = cfg.wer_kenlm_model - dec_args.lexicon = cfg.wer_lexicon - dec_args.beam = 50 - dec_args.beam_size_token = min(50, len(task.target_dictionary)) - dec_args.beam_threshold = min(50, len(task.target_dictionary)) - dec_args.lm_weight = cfg.wer_lm_weight - dec_args.word_score = cfg.wer_word_score - dec_args.unk_weight = -math.inf - dec_args.sil_weight = 0 - - self.w2l_decoder = W2lKenLMDecoder(dec_args, task.target_dictionary) - else: - self.w2l_decoder = None - - self.zero_infinity = cfg.zero_infinity - self.sentence_avg = cfg.sentence_avg - - def forward(self, model, sample, reduce=True): - net_output = model(**sample["net_input"]) - lprobs = model.get_normalized_probs( - net_output, log_probs=True - ).contiguous() # (T, B, C) from the encoder - - if "src_lengths" in sample["net_input"]: - input_lengths = sample["net_input"]["src_lengths"] - else: - if net_output["padding_mask"] is not None: - non_padding_mask = ~net_output["padding_mask"] - input_lengths = non_padding_mask.long().sum(-1) - else: - input_lengths = lprobs.new_full( - (lprobs.size(1),), lprobs.size(0), dtype=torch.long - ) - - pad_mask = (sample["target"] != self.pad_idx) & ( - sample["target"] != self.eos_idx - ) - targets_flat = sample["target"].masked_select(pad_mask) - if "target_lengths" in sample: - target_lengths = sample["target_lengths"] - else: - target_lengths = pad_mask.sum(-1) - - with torch.backends.cudnn.flags(enabled=False): - loss = F.ctc_loss( - lprobs, - targets_flat, - input_lengths, - target_lengths, - blank=self.blank_idx, - reduction="sum", - zero_infinity=self.zero_infinity, - ) - - ntokens = ( - sample["ntokens"] if "ntokens" in sample else target_lengths.sum().item() - ) - - sample_size = sample["target"].size(0) if self.sentence_avg else ntokens - logging_output = { - "loss": utils.item(loss.data), # * sample['ntokens'], - "ntokens": ntokens, - "nsentences": sample["id"].numel(), - "sample_size": sample_size, - } - - if not model.training: - import 
editdistance - - with torch.no_grad(): - lprobs_t = lprobs.transpose(0, 1).float().contiguous().cpu() - - c_err = 0 - c_len = 0 - w_errs = 0 - w_len = 0 - wv_errs = 0 - for lp, t, inp_l in zip( - lprobs_t, - sample["target_label"] - if "target_label" in sample - else sample["target"], - input_lengths, - ): - lp = lp[:inp_l].unsqueeze(0) - - decoded = None - if self.w2l_decoder is not None: - decoded = self.w2l_decoder.decode(lp) - if len(decoded) < 1: - decoded = None - else: - decoded = decoded[0] - if len(decoded) < 1: - decoded = None - else: - decoded = decoded[0] - - p = (t != self.task.target_dictionary.pad()) & ( - t != self.task.target_dictionary.eos() - ) - targ = t[p] - targ_units = self.task.target_dictionary.string(targ) - targ_units_arr = targ.tolist() - - toks = lp.argmax(dim=-1).unique_consecutive() - pred_units_arr = toks[toks != self.blank_idx].tolist() - - c_err += editdistance.eval(pred_units_arr, targ_units_arr) - c_len += len(targ_units_arr) - - targ_words = post_process(targ_units, self.post_process).split() - - pred_units = self.task.target_dictionary.string(pred_units_arr) - pred_words_raw = post_process(pred_units, self.post_process).split() - - if decoded is not None and "words" in decoded: - pred_words = decoded["words"] - w_errs += editdistance.eval(pred_words, targ_words) - wv_errs += editdistance.eval(pred_words_raw, targ_words) - else: - dist = editdistance.eval(pred_words_raw, targ_words) - w_errs += dist - wv_errs += dist - - w_len += len(targ_words) - - logging_output["wv_errors"] = wv_errs - logging_output["w_errors"] = w_errs - logging_output["w_total"] = w_len - logging_output["c_errors"] = c_err - logging_output["c_total"] = c_len - - return loss, sample_size, logging_output - - @staticmethod - def reduce_metrics(logging_outputs) -> None: - """Aggregate logging outputs from data parallel training.""" - - loss_sum = utils.item(sum(log.get("loss", 0) for log in logging_outputs)) - ntokens = utils.item(sum(log.get("ntokens", 0) for log in logging_outputs)) - nsentences = utils.item( - sum(log.get("nsentences", 0) for log in logging_outputs) - ) - sample_size = utils.item( - sum(log.get("sample_size", 0) for log in logging_outputs) - ) - - metrics.log_scalar( - "loss", loss_sum / sample_size / math.log(2), sample_size, round=3 - ) - metrics.log_scalar("ntokens", ntokens) - metrics.log_scalar("nsentences", nsentences) - if sample_size != ntokens: - metrics.log_scalar( - "nll_loss", loss_sum / ntokens / math.log(2), ntokens, round=3 - ) - - c_errors = sum(log.get("c_errors", 0) for log in logging_outputs) - metrics.log_scalar("_c_errors", c_errors) - c_total = sum(log.get("c_total", 0) for log in logging_outputs) - metrics.log_scalar("_c_total", c_total) - w_errors = sum(log.get("w_errors", 0) for log in logging_outputs) - metrics.log_scalar("_w_errors", w_errors) - wv_errors = sum(log.get("wv_errors", 0) for log in logging_outputs) - metrics.log_scalar("_wv_errors", wv_errors) - w_total = sum(log.get("w_total", 0) for log in logging_outputs) - metrics.log_scalar("_w_total", w_total) - - if c_total > 0: - metrics.log_derived( - "uer", - lambda meters: safe_round( - meters["_c_errors"].sum * 100.0 / meters["_c_total"].sum, 3 - ) - if meters["_c_total"].sum > 0 - else float("nan"), - ) - if w_total > 0: - metrics.log_derived( - "wer", - lambda meters: safe_round( - meters["_w_errors"].sum * 100.0 / meters["_w_total"].sum, 3 - ) - if meters["_w_total"].sum > 0 - else float("nan"), - ) - metrics.log_derived( - "raw_wer", - lambda meters: safe_round( - 
meters["_wv_errors"].sum * 100.0 / meters["_w_total"].sum, 3 - ) - if meters["_w_total"].sum > 0 - else float("nan"), - ) - - @staticmethod - def logging_outputs_can_be_summed() -> bool: - """ - Whether the logging outputs returned by `forward` can be summed - across workers prior to calling `reduce_metrics`. Setting this - to True will improves distributed training speed. - """ - return True diff --git a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py b/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py deleted file mode 100644 index 83788c6f71fd41e61fd115681a22d53ce8b8362c..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/OFA/fairseq/fairseq/modules/quantization/scalar/modules/qconv.py +++ /dev/null @@ -1,149 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -import torch.nn.functional as F -from torch.nn.modules.conv import _ConvNd -from torch.nn.modules.utils import _pair - -from ..ops import emulate_int - - -class IntConv2d(_ConvNd): - """ - Quantized counterpart of the nn.Conv2d module that applies QuantNoise during training. - - Args: - - standard nn.Conv2d parameters - - p: amount of noise to inject (0 = no quantization, 1 = quantize all the weights) - - bits: number of bits - - method: choose among {"tensor", "histogram", "channel"} - - update_step: recompute scale and zero_point every update_steps iterations - - Remarks: - - We use the straight-thgourh estimator so that the gradients - back-propagate nicely in the network, this is implemented with - the detach() trick - - Parameters scale and zero_point are recomputed every update_step - forward pass to reduce the overhead - - At test time, the weights are fully quantized - """ - - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - padding=0, - dilation=1, - groups=1, - bias=True, - padding_mode="zeros", - p=0, - bits=8, - method="histogram", - update_step=1000, - ): - kernel_size = _pair(kernel_size) - stride = _pair(stride) - padding = _pair(padding) - dilation = _pair(dilation) - super(IntConv2d, self).__init__( - in_channels, - out_channels, - kernel_size, - stride, - padding, - dilation, - False, - _pair(0), - groups, - bias, - padding_mode, - ) - - # quantization parameters - self.p = p - self.bits = bits - self.method = method - self.update_step = update_step - self.counter = 0 - - def _conv_forward(self, input, weight): - if self.padding_mode != "zeros": - return F.conv2d( - F.pad(input, self._padding_repeated_twice, mode=self.padding_mode), - weight, - self.bias, - self.stride, - _pair(0), - self.dilation, - self.groups, - ) - return F.conv2d( - input, - weight, - self.bias, - self.stride, - self.padding, - self.dilation, - self.groups, - ) - - def forward(self, input): - # train with QuantNoise and evaluate the fully quantized network - p = self.p if self.training else 1 - - # update parameters every 100 iterations - if self.counter % self.update_step == 0: - self.scale = None - self.zero_point = None - self.counter += 1 - - # quantize weight - weight_quantized, self.scale, self.zero_point = emulate_int( - self.weight.detach(), - bits=self.bits, - method=self.method, - scale=self.scale, - zero_point=self.zero_point, - ) - - # mask to apply noise - mask = torch.zeros_like(self.weight) - mask.bernoulli_(1 - p) - noise = (weight_quantized - 
self.weight).masked_fill(mask.bool(), 0) - - # using straight-through estimator (STE) - clamp_low = -self.scale * self.zero_point - clamp_high = self.scale * (2 ** self.bits - 1 - self.zero_point) - weight = ( - torch.clamp(self.weight, clamp_low.item(), clamp_high.item()) - + noise.detach() - ) - - # return output - output = self._conv_forward(input, weight) - return output - - def extra_repr(self): - return ( - "in_channels={}, out_channels={}, kernel_size={}, stride={}, " - "padding={}, dilation={}, groups={}, bias={}, quant_noise={}, " - "bits={}, method={}".format( - self.in_channels, - self.out_channels, - self.kernel_size, - self.stride, - self.padding, - self.dilation, - self.groups, - self.bias is not None, - self.p, - self.bits, - self.method, - ) - ) diff --git a/spaces/ICML2022/resefa/utils/image_utils.py b/spaces/ICML2022/resefa/utils/image_utils.py deleted file mode 100644 index c640ac5ef977e3a7824dabbc43e5d56e733d0d76..0000000000000000000000000000000000000000 --- a/spaces/ICML2022/resefa/utils/image_utils.py +++ /dev/null @@ -1,332 +0,0 @@ -# python3.7 -"""Contains utility functions for image processing. - -The module is primarily built on `cv2`. But, differently, we assume all colorful -images are with `RGB` channel order by default. Also, we assume all gray-scale -images to be with shape [height, width, 1]. -""" - -import os -import cv2 -import numpy as np - -from .misc import IMAGE_EXTENSIONS -from .misc import check_file_ext - -__all__ = [ - 'get_blank_image', 'load_image', 'save_image', 'resize_image', - 'add_text_to_image', 'preprocess_image', 'postprocess_image', - 'parse_image_size', 'get_grid_shape', 'list_images_from_dir' -] - - -def _check_2d_image(image): - """Checks whether a given image is valid. - - A valid image is expected to be with dtype `uint8`. Also, it should have - shape like: - - (1) (height, width, 1) # gray-scale image. - (2) (height, width, 3) # colorful image. - (3) (height, width, 4) # colorful image with transparency (RGBA) - """ - assert isinstance(image, np.ndarray) - assert image.dtype == np.uint8 - assert image.ndim == 3 and image.shape[2] in [1, 3, 4] - - -def get_blank_image(height, width, channels=3, use_black=True): - """Gets a blank image, either white of black. - - NOTE: This function will always return an image with `RGB` channel order for - color image and pixel range [0, 255]. - - Args: - height: Height of the returned image. - width: Width of the returned image. - channels: Number of channels. (default: 3) - use_black: Whether to return a black image. (default: True) - """ - shape = (height, width, channels) - if use_black: - return np.zeros(shape, dtype=np.uint8) - return np.ones(shape, dtype=np.uint8) * 255 - - -def load_image(path): - """Loads an image from disk. - - NOTE: This function will always return an image with `RGB` channel order for - color image and pixel range [0, 255]. - - Args: - path: Path to load the image from. - - Returns: - An image with dtype `np.ndarray`, or `None` if `path` does not exist. - """ - image = cv2.imread(path, cv2.IMREAD_UNCHANGED) - if image is None: - return None - - if image.ndim == 2: - image = image[:, :, np.newaxis] - _check_2d_image(image) - if image.shape[2] == 3: - return cv2.cvtColor(image, cv2.COLOR_BGR2RGB) - if image.shape[2] == 4: - return cv2.cvtColor(image, cv2.COLOR_BGRA2RGBA) - return image - - -def save_image(path, image): - """Saves an image to disk. - - NOTE: The input image (if colorful) is assumed to be with `RGB` channel - order and pixel range [0, 255]. 
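# -- Editorial aside (sketch, not part of the original files) --
# IntConv2d.forward() shown earlier applies "quant noise": only a random fraction p of
# the weights sees its quantized value, and detach() gives a straight-through gradient.
# A minimal stand-in, using a hypothetical fake_quant() in place of fairseq's
# emulate_int() (the clamp to the representable range is omitted here):
import torch

def _quant_noise_weight(weight, p, fake_quant):
    w_q = fake_quant(weight.detach())                  # quantized copy, no gradient
    mask = torch.zeros_like(weight).bernoulli_(1 - p)  # 1 = keep the float weight
    noise = (w_q - weight).masked_fill(mask.bool(), 0)
    return weight + noise.detach()                     # forward quantized, backward identity
# -- end aside --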
- - Args: - path: Path to save the image to. - image: Image to save. - """ - if image is None: - return - - _check_2d_image(image) - if image.shape[2] == 1: - cv2.imwrite(path, image) - elif image.shape[2] == 3: - cv2.imwrite(path, cv2.cvtColor(image, cv2.COLOR_RGB2BGR)) - elif image.shape[2] == 4: - cv2.imwrite(path, cv2.cvtColor(image, cv2.COLOR_RGBA2BGRA)) - - -def resize_image(image, *args, **kwargs): - """Resizes image. - - This is a wrap of `cv2.resize()`. - - NOTE: The channel order of the input image will not be changed. - - Args: - image: Image to resize. - *args: Additional positional arguments. - **kwargs: Additional keyword arguments. - - Returns: - An image with dtype `np.ndarray`, or `None` if `image` is empty. - """ - if image is None: - return None - - _check_2d_image(image) - if image.shape[2] == 1: # Re-expand the squeezed dim of gray-scale image. - return cv2.resize(image, *args, **kwargs)[:, :, np.newaxis] - return cv2.resize(image, *args, **kwargs) - - -def add_text_to_image(image, - text='', - position=None, - font=cv2.FONT_HERSHEY_TRIPLEX, - font_size=1.0, - line_type=cv2.LINE_8, - line_width=1, - color=(255, 255, 255)): - """Overlays text on given image. - - NOTE: The input image is assumed to be with `RGB` channel order. - - Args: - image: The image to overlay text on. - text: Text content to overlay on the image. (default: empty) - position: Target position (bottom-left corner) to add text. If not set, - center of the image will be used by default. (default: None) - font: Font of the text added. (default: cv2.FONT_HERSHEY_TRIPLEX) - font_size: Font size of the text added. (default: 1.0) - line_type: Line type used to depict the text. (default: cv2.LINE_8) - line_width: Line width used to depict the text. (default: 1) - color: Color of the text added in `RGB` channel order. (default: - (255, 255, 255)) - - Returns: - An image with target text overlaid on. - """ - if image is None or not text: - return image - - _check_2d_image(image) - cv2.putText(img=image, - text=text, - org=position, - fontFace=font, - fontScale=font_size, - color=color, - thickness=line_width, - lineType=line_type, - bottomLeftOrigin=False) - return image - - -def preprocess_image(image, min_val=-1.0, max_val=1.0): - """Pre-processes image by adjusting the pixel range and to dtype `float32`. - - This function is particularly used to convert an image or a batch of images - to `NCHW` format, which matches the data type commonly used in deep models. - - NOTE: The input image is assumed to be with pixel range [0, 255] and with - format `HWC` or `NHWC`. The returned image will be always be with format - `NCHW`. - - Args: - image: The input image for pre-processing. - min_val: Minimum value of the output image. - max_val: Maximum value of the output image. - - Returns: - The pre-processed image. - """ - assert isinstance(image, np.ndarray) - - image = image.astype(np.float64) - image = image / 255.0 * (max_val - min_val) + min_val - - if image.ndim == 3: - image = image[np.newaxis] - assert image.ndim == 4 and image.shape[3] in [1, 3, 4] - return image.transpose(0, 3, 1, 2) - - -def postprocess_image(image, min_val=-1.0, max_val=1.0): - """Post-processes image to pixel range [0, 255] with dtype `uint8`. - - This function is particularly used to handle the results produced by deep - models. - - NOTE: The input image is assumed to be with format `NCHW`, and the returned - image will always be with format `NHWC`. - - Args: - image: The input image for post-processing. 
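# -- Editorial aside (usage sketch, not part of the original module) --
# preprocess_image() above turns a uint8 HWC (or NHWC) array into the NCHW float
# layout deep models usually expect, e.g.:
import numpy as np

img = np.zeros((32, 32, 3), dtype=np.uint8)   # all-black RGB image
batch = preprocess_image(img)                 # default output range is [-1.0, 1.0]
assert batch.shape == (1, 3, 32, 32)
assert batch.min() == batch.max() == -1.0
# -- end aside --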
- min_val: Expected minimum value of the input image. - max_val: Expected maximum value of the input image. - - Returns: - The post-processed image. - """ - assert isinstance(image, np.ndarray) - - image = image.astype(np.float64) - image = (image - min_val) / (max_val - min_val) * 255 - image = np.clip(image + 0.5, 0, 255).astype(np.uint8) - - assert image.ndim == 4 and image.shape[1] in [1, 3, 4] - return image.transpose(0, 2, 3, 1) - - -def parse_image_size(obj): - """Parses an object to a pair of image size, i.e., (height, width). - - Args: - obj: The input object to parse image size from. - - Returns: - A two-element tuple, indicating image height and width respectively. - - Raises: - If the input is invalid, i.e., neither a list or tuple, nor a string. - """ - if obj is None or obj == '': - height = 0 - width = 0 - elif isinstance(obj, int): - height = obj - width = obj - elif isinstance(obj, (list, tuple, str, np.ndarray)): - if isinstance(obj, str): - splits = obj.replace(' ', '').split(',') - numbers = tuple(map(int, splits)) - else: - numbers = tuple(obj) - if len(numbers) == 0: - height = 0 - width = 0 - elif len(numbers) == 1: - height = int(numbers[0]) - width = int(numbers[0]) - elif len(numbers) == 2: - height = int(numbers[0]) - width = int(numbers[1]) - else: - raise ValueError('At most two elements for image size.') - else: - raise ValueError(f'Invalid type of input: `{type(obj)}`!') - - return (max(0, height), max(0, width)) - - -def get_grid_shape(size, height=0, width=0, is_portrait=False): - """Gets the shape of a grid based on the size. - - This function makes greatest effort on making the output grid square if - neither `height` nor `width` is set. If `is_portrait` is set as `False`, the - height will always be equal to or smaller than the width. For example, if - input `size = 16`, output shape will be `(4, 4)`; if input `size = 15`, - output shape will be (3, 5). Otherwise, the height will always be equal to - or larger than the width. - - Args: - size: Size (height * width) of the target grid. - height: Expected height. If `size % height != 0`, this field will be - ignored. (default: 0) - width: Expected width. If `size % width != 0`, this field will be - ignored. (default: 0) - is_portrait: Whether to return a portrait size of a landscape size. - (default: False) - - Returns: - A two-element tuple, representing height and width respectively. - """ - assert isinstance(size, int) - assert isinstance(height, int) - assert isinstance(width, int) - if size <= 0: - return (0, 0) - - if height > 0 and width > 0 and height * width != size: - height = 0 - width = 0 - - if height > 0 and width > 0 and height * width == size: - return (height, width) - if height > 0 and size % height == 0: - return (height, size // height) - if width > 0 and size % width == 0: - return (size // width, width) - - height = int(np.sqrt(size)) - while height > 0: - if size % height == 0: - width = size // height - break - height = height - 1 - - return (width, height) if is_portrait else (height, width) - - -def list_images_from_dir(directory): - """Lists all images from the given directory. - - NOTE: Do NOT support finding images recursively. - - Args: - directory: The directory to find images from. - - Returns: - A list of sorted filenames, with the directory as prefix. 
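# -- Editorial aside (sanity checks, not part of the original module) --
# Expected behaviour of the two helpers above, following their docstrings:
assert parse_image_size('256,128') == (256, 128)
assert parse_image_size(512) == (512, 512)
assert get_grid_shape(16) == (4, 4)                     # square grid when possible
assert get_grid_shape(15) == (3, 5)                     # landscape by default
assert get_grid_shape(15, is_portrait=True) == (5, 3)   # portrait when requested
# -- end aside --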
- """ - image_list = [] - for filename in os.listdir(directory): - if check_file_ext(filename, *IMAGE_EXTENSIONS): - image_list.append(os.path.join(directory, filename)) - return sorted(image_list) diff --git a/spaces/Iceclear/StableSR/StableSR/basicsr/models/swinir_model.py b/spaces/Iceclear/StableSR/StableSR/basicsr/models/swinir_model.py deleted file mode 100644 index 5ac182f23b4a300aff14b2b45fcdca8c00da90c1..0000000000000000000000000000000000000000 --- a/spaces/Iceclear/StableSR/StableSR/basicsr/models/swinir_model.py +++ /dev/null @@ -1,33 +0,0 @@ -import torch -from torch.nn import functional as F - -from basicsr.utils.registry import MODEL_REGISTRY -from .sr_model import SRModel - - -@MODEL_REGISTRY.register() -class SwinIRModel(SRModel): - - def test(self): - # pad to multiplication of window_size - window_size = self.opt['network_g']['window_size'] - scale = self.opt.get('scale', 1) - mod_pad_h, mod_pad_w = 0, 0 - _, _, h, w = self.lq.size() - if h % window_size != 0: - mod_pad_h = window_size - h % window_size - if w % window_size != 0: - mod_pad_w = window_size - w % window_size - img = F.pad(self.lq, (0, mod_pad_w, 0, mod_pad_h), 'reflect') - if hasattr(self, 'net_g_ema'): - self.net_g_ema.eval() - with torch.no_grad(): - self.output = self.net_g_ema(img) - else: - self.net_g.eval() - with torch.no_grad(): - self.output = self.net_g(img) - self.net_g.train() - - _, _, h, w = self.output.size() - self.output = self.output[:, :, 0:h - mod_pad_h * scale, 0:w - mod_pad_w * scale] diff --git a/spaces/IkechukwuAbuah/PDF_GPT/README.md b/spaces/IkechukwuAbuah/PDF_GPT/README.md deleted file mode 100644 index 81ec81bb298e581f2ecd3bd3eff0e31a38e5f21f..0000000000000000000000000000000000000000 --- a/spaces/IkechukwuAbuah/PDF_GPT/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: PDF_GPT -emoji: 📚 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.28.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference \ No newline at end of file diff --git a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/filter_sharded_dataset.py b/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/filter_sharded_dataset.py deleted file mode 100644 index b3c2b490e88bb3b55c6bb717e08f97f7a396d5fa..0000000000000000000000000000000000000000 --- a/spaces/InpaintAI/Inpaint-Anything/third_party/lama/bin/filter_sharded_dataset.py +++ /dev/null @@ -1,69 +0,0 @@ -#!/usr/bin/env python3 - - -import math -import os -import random - -import braceexpand -import webdataset as wds - -DEFAULT_CATS_FILE = os.path.join(os.path.dirname(__file__), '..', 'configs', 'places2-categories_157.txt') - -def is_good_key(key, cats): - return any(c in key for c in cats) - - -def main(args): - if args.categories == 'nofilter': - good_categories = None - else: - with open(args.categories, 'r') as f: - good_categories = set(line.strip().split(' ')[0] for line in f if line.strip()) - - all_input_files = list(braceexpand.braceexpand(args.infile)) - chunk_size = int(math.ceil(len(all_input_files) / args.n_read_streams)) - - input_iterators = [iter(wds.Dataset(all_input_files[start : start + chunk_size]).shuffle(args.shuffle_buffer)) - for start in range(0, len(all_input_files), chunk_size)] - output_datasets = [wds.ShardWriter(args.outpattern.format(i)) for i in range(args.n_write_streams)] - - good_readers = list(range(len(input_iterators))) - step_i = 0 - good_samples = 0 - bad_samples = 0 - while len(good_readers) > 0: - if step_i % args.print_freq 
== 0: - print(f'Iterations done {step_i}; readers alive {good_readers}; good samples {good_samples}; bad samples {bad_samples}') - - step_i += 1 - - ri = random.choice(good_readers) - try: - sample = next(input_iterators[ri]) - except StopIteration: - good_readers = list(set(good_readers) - {ri}) - continue - - if good_categories is not None and not is_good_key(sample['__key__'], good_categories): - bad_samples += 1 - continue - - wi = random.randint(0, args.n_write_streams - 1) - output_datasets[wi].write(sample) - good_samples += 1 - - -if __name__ == '__main__': - import argparse - - aparser = argparse.ArgumentParser() - aparser.add_argument('--categories', type=str, default=DEFAULT_CATS_FILE) - aparser.add_argument('--shuffle-buffer', type=int, default=10000) - aparser.add_argument('--n-read-streams', type=int, default=10) - aparser.add_argument('--n-write-streams', type=int, default=10) - aparser.add_argument('--print-freq', type=int, default=1000) - aparser.add_argument('infile', type=str) - aparser.add_argument('outpattern', type=str) - - main(aparser.parse_args()) diff --git a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/gradio_css.py b/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/gradio_css.py deleted file mode 100644 index 71d79b4a4b5a7ad84b8822d99e1740e77bc1f7a8..0000000000000000000000000000000000000000 --- a/spaces/Intel/NeuralChat-ICX-INT4/fastchat/serve/gradio_css.py +++ /dev/null @@ -1,71 +0,0 @@ -code_highlight_css = """ -#chatbot .hll { background-color: #ffffcc } -#chatbot .c { color: #408080; font-style: italic } -#chatbot .err { border: 1px solid #FF0000 } -#chatbot .k { color: #008000; font-weight: bold } -#chatbot .o { color: #666666 } -#chatbot .ch { color: #408080; font-style: italic } -#chatbot .cm { color: #408080; font-style: italic } -#chatbot .cp { color: #BC7A00 } -#chatbot .cpf { color: #408080; font-style: italic } -#chatbot .c1 { color: #408080; font-style: italic } -#chatbot .cs { color: #408080; font-style: italic } -#chatbot .gd { color: #A00000 } -#chatbot .ge { font-style: italic } -#chatbot .gr { color: #FF0000 } -#chatbot .gh { color: #000080; font-weight: bold } -#chatbot .gi { color: #00A000 } -#chatbot .go { color: #888888 } -#chatbot .gp { color: #000080; font-weight: bold } -#chatbot .gs { font-weight: bold } -#chatbot .gu { color: #800080; font-weight: bold } -#chatbot .gt { color: #0044DD } -#chatbot .kc { color: #008000; font-weight: bold } -#chatbot .kd { color: #008000; font-weight: bold } -#chatbot .kn { color: #008000; font-weight: bold } -#chatbot .kp { color: #008000 } -#chatbot .kr { color: #008000; font-weight: bold } -#chatbot .kt { color: #B00040 } -#chatbot .m { color: #666666 } -#chatbot .s { color: #BA2121 } -#chatbot .na { color: #7D9029 } -#chatbot .nb { color: #008000 } -#chatbot .nc { color: #0000FF; font-weight: bold } -#chatbot .no { color: #880000 } -#chatbot .nd { color: #AA22FF } -#chatbot .ni { color: #999999; font-weight: bold } -#chatbot .ne { color: #D2413A; font-weight: bold } -#chatbot .nf { color: #0000FF } -#chatbot .nl { color: #A0A000 } -#chatbot .nn { color: #0000FF; font-weight: bold } -#chatbot .nt { color: #008000; font-weight: bold } -#chatbot .nv { color: #19177C } -#chatbot .ow { color: #AA22FF; font-weight: bold } -#chatbot .w { color: #bbbbbb } -#chatbot .mb { color: #666666 } -#chatbot .mf { color: #666666 } -#chatbot .mh { color: #666666 } -#chatbot .mi { color: #666666 } -#chatbot .mo { color: #666666 } -#chatbot .sa { color: #BA2121 } -#chatbot .sb { color: #BA2121 } -#chatbot .sc { color: 
#BA2121 } -#chatbot .dl { color: #BA2121 } -#chatbot .sd { color: #BA2121; font-style: italic } -#chatbot .s2 { color: #BA2121 } -#chatbot .se { color: #BB6622; font-weight: bold } -#chatbot .sh { color: #BA2121 } -#chatbot .si { color: #BB6688; font-weight: bold } -#chatbot .sx { color: #008000 } -#chatbot .sr { color: #BB6688 } -#chatbot .s1 { color: #BA2121 } -#chatbot .ss { color: #19177C } -#chatbot .bp { color: #008000 } -#chatbot .fm { color: #0000FF } -#chatbot .vc { color: #19177C } -#chatbot .vg { color: #19177C } -#chatbot .vi { color: #19177C } -#chatbot .vm { color: #19177C } -#chatbot .il { color: #666666 } -""" -# .highlight { background: #f8f8f8; } diff --git a/spaces/JackBAI/master_wlb_index/README.md b/spaces/JackBAI/master_wlb_index/README.md deleted file mode 100644 index f6cbb7c372ce5573bc77df10165945ecc922c9cb..0000000000000000000000000000000000000000 --- a/spaces/JackBAI/master_wlb_index/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Master Wlb Index -emoji: 🏆 -colorFrom: indigo -colorTo: indigo -sdk: gradio -sdk_version: 3.44.3 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_karras_ve.py b/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_karras_ve.py deleted file mode 100644 index 41a73b3ac36e8985a3e1cf781afc06b0e6f6ed48..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_karras_ve.py +++ /dev/null @@ -1,232 +0,0 @@ -# Copyright 2022 NVIDIA and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - - -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput -from .scheduling_utils import SchedulerMixin - - -@dataclass -class KarrasVeOutput(BaseOutput): - """ - Output class for the scheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - derivative (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Derivative of predicted original image sample (x_0). - pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - The predicted denoised sample (x_{0}) based on the model output from the current timestep. - `pred_original_sample` can be used to preview progress or for guidance. - """ - - prev_sample: torch.FloatTensor - derivative: torch.FloatTensor - pred_original_sample: Optional[torch.FloatTensor] = None - - -class KarrasVeScheduler(SchedulerMixin, ConfigMixin): - """ - Stochastic sampling from Karras et al. 
[1] tailored to the Variance-Expanding (VE) models [2]. Use Algorithm 2 and - the VE column of Table 1 from [1] for reference. - - [1] Karras, Tero, et al. "Elucidating the Design Space of Diffusion-Based Generative Models." - https://arxiv.org/abs/2206.00364 [2] Song, Yang, et al. "Score-based generative modeling through stochastic - differential equations." https://arxiv.org/abs/2011.13456 - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - For more details on the parameters, see the original paper's Appendix E.: "Elucidating the Design Space of - Diffusion-Based Generative Models." https://arxiv.org/abs/2206.00364. The grid search values used to find the - optimal {s_noise, s_churn, s_min, s_max} for a specific model are described in Table 5 of the paper. - - Args: - sigma_min (`float`): minimum noise magnitude - sigma_max (`float`): maximum noise magnitude - s_noise (`float`): the amount of additional noise to counteract loss of detail during sampling. - A reasonable range is [1.000, 1.011]. - s_churn (`float`): the parameter controlling the overall amount of stochasticity. - A reasonable range is [0, 100]. - s_min (`float`): the start value of the sigma range where we add noise (enable stochasticity). - A reasonable range is [0, 10]. - s_max (`float`): the end value of the sigma range where we add noise. - A reasonable range is [0.2, 80]. - - """ - - order = 2 - - @register_to_config - def __init__( - self, - sigma_min: float = 0.02, - sigma_max: float = 100, - s_noise: float = 1.007, - s_churn: float = 80, - s_min: float = 0.05, - s_max: float = 50, - ): - # standard deviation of the initial noise distribution - self.init_noise_sigma = sigma_max - - # setable values - self.num_inference_steps: int = None - self.timesteps: np.IntTensor = None - self.schedule: torch.FloatTensor = None # sigma(t_i) - - def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - sample (`torch.FloatTensor`): input sample - timestep (`int`, optional): current timestep - - Returns: - `torch.FloatTensor`: scaled input sample - """ - return sample - - def set_timesteps(self, num_inference_steps: int, device: Union[str, torch.device] = None): - """ - Sets the continuous timesteps used for the diffusion chain. Supporting function to be run before inference. - - Args: - num_inference_steps (`int`): - the number of diffusion steps used when generating samples with a pre-trained model. 
- - """ - self.num_inference_steps = num_inference_steps - timesteps = np.arange(0, self.num_inference_steps)[::-1].copy() - self.timesteps = torch.from_numpy(timesteps).to(device) - schedule = [ - ( - self.config.sigma_max**2 - * (self.config.sigma_min**2 / self.config.sigma_max**2) ** (i / (num_inference_steps - 1)) - ) - for i in self.timesteps - ] - self.schedule = torch.tensor(schedule, dtype=torch.float32, device=device) - - def add_noise_to_input( - self, sample: torch.FloatTensor, sigma: float, generator: Optional[torch.Generator] = None - ) -> Tuple[torch.FloatTensor, float]: - """ - Explicit Langevin-like "churn" step of adding noise to the sample according to a factor gamma_i ≥ 0 to reach a - higher noise level sigma_hat = sigma_i + gamma_i*sigma_i. - - TODO Args: - """ - if self.config.s_min <= sigma <= self.config.s_max: - gamma = min(self.config.s_churn / self.num_inference_steps, 2**0.5 - 1) - else: - gamma = 0 - - # sample eps ~ N(0, S_noise^2 * I) - eps = self.config.s_noise * torch.randn(sample.shape, generator=generator).to(sample.device) - sigma_hat = sigma + gamma * sigma - sample_hat = sample + ((sigma_hat**2 - sigma**2) ** 0.5 * eps) - - return sample_hat, sigma_hat - - def step( - self, - model_output: torch.FloatTensor, - sigma_hat: float, - sigma_prev: float, - sample_hat: torch.FloatTensor, - return_dict: bool = True, - ) -> Union[KarrasVeOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - sigma_hat (`float`): TODO - sigma_prev (`float`): TODO - sample_hat (`torch.FloatTensor`): TODO - return_dict (`bool`): option for returning tuple rather than KarrasVeOutput class - - KarrasVeOutput: updated sample in the diffusion chain and derivative (TODO double check). - Returns: - [`~schedulers.scheduling_karras_ve.KarrasVeOutput`] or `tuple`: - [`~schedulers.scheduling_karras_ve.KarrasVeOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - - """ - - pred_original_sample = sample_hat + sigma_hat * model_output - derivative = (sample_hat - pred_original_sample) / sigma_hat - sample_prev = sample_hat + (sigma_prev - sigma_hat) * derivative - - if not return_dict: - return (sample_prev, derivative) - - return KarrasVeOutput( - prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample - ) - - def step_correct( - self, - model_output: torch.FloatTensor, - sigma_hat: float, - sigma_prev: float, - sample_hat: torch.FloatTensor, - sample_prev: torch.FloatTensor, - derivative: torch.FloatTensor, - return_dict: bool = True, - ) -> Union[KarrasVeOutput, Tuple]: - """ - Correct the predicted sample based on the output model_output of the network. TODO complete description - - Args: - model_output (`torch.FloatTensor`): direct output from learned diffusion model. - sigma_hat (`float`): TODO - sigma_prev (`float`): TODO - sample_hat (`torch.FloatTensor`): TODO - sample_prev (`torch.FloatTensor`): TODO - derivative (`torch.FloatTensor`): TODO - return_dict (`bool`): option for returning tuple rather than KarrasVeOutput class - - Returns: - prev_sample (TODO): updated sample in the diffusion chain. 
derivative (TODO): TODO - - """ - pred_original_sample = sample_prev + sigma_prev * model_output - derivative_corr = (sample_prev - pred_original_sample) / sigma_prev - sample_prev = sample_hat + (sigma_prev - sigma_hat) * (0.5 * derivative + 0.5 * derivative_corr) - - if not return_dict: - return (sample_prev, derivative) - - return KarrasVeOutput( - prev_sample=sample_prev, derivative=derivative, pred_original_sample=pred_original_sample - ) - - def add_noise(self, original_samples, noise, timesteps): - raise NotImplementedError() diff --git a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_repaint.py b/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_repaint.py deleted file mode 100644 index 0b80181f438903a00fd20496d17a44a46c3cec46..0000000000000000000000000000000000000000 --- a/spaces/Jackflack09/diffuse-custom/diffusers/schedulers/scheduling_repaint.py +++ /dev/null @@ -1,324 +0,0 @@ -# Copyright 2022 ETH Zurich Computer Vision Lab and The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import math -from dataclasses import dataclass -from typing import Optional, Tuple, Union - -import numpy as np -import torch - -from ..configuration_utils import ConfigMixin, register_to_config -from ..utils import BaseOutput -from .scheduling_utils import SchedulerMixin - - -@dataclass -class RePaintSchedulerOutput(BaseOutput): - """ - Output class for the scheduler's step function output. - - Args: - prev_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - Computed sample (x_{t-1}) of previous timestep. `prev_sample` should be used as next model input in the - denoising loop. - pred_original_sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` for images): - The predicted denoised sample (x_{0}) based on the model output from - the current timestep. `pred_original_sample` can be used to preview progress or for guidance. - """ - - prev_sample: torch.FloatTensor - pred_original_sample: torch.FloatTensor - - -def betas_for_alpha_bar(num_diffusion_timesteps, max_beta=0.999): - """ - Create a beta schedule that discretizes the given alpha_t_bar function, which defines the cumulative product of - (1-beta) over time from t = [0,1]. - - Contains a function alpha_bar that takes an argument t and transforms it to the cumulative product of (1-beta) up - to that part of the diffusion process. - - - Args: - num_diffusion_timesteps (`int`): the number of betas to produce. - max_beta (`float`): the maximum beta to use; use values lower than 1 to - prevent singularities. 
- - Returns: - betas (`np.ndarray`): the betas used by the scheduler to step the model outputs - """ - - def alpha_bar(time_step): - return math.cos((time_step + 0.008) / 1.008 * math.pi / 2) ** 2 - - betas = [] - for i in range(num_diffusion_timesteps): - t1 = i / num_diffusion_timesteps - t2 = (i + 1) / num_diffusion_timesteps - betas.append(min(1 - alpha_bar(t2) / alpha_bar(t1), max_beta)) - return torch.tensor(betas, dtype=torch.float32) - - -class RePaintScheduler(SchedulerMixin, ConfigMixin): - """ - RePaint is a schedule for DDPM inpainting inside a given mask. - - [`~ConfigMixin`] takes care of storing all config attributes that are passed in the scheduler's `__init__` - function, such as `num_train_timesteps`. They can be accessed via `scheduler.config.num_train_timesteps`. - [`SchedulerMixin`] provides general loading and saving functionality via the [`SchedulerMixin.save_pretrained`] and - [`~SchedulerMixin.from_pretrained`] functions. - - For more details, see the original paper: https://arxiv.org/pdf/2201.09865.pdf - - Args: - num_train_timesteps (`int`): number of diffusion steps used to train the model. - beta_start (`float`): the starting `beta` value of inference. - beta_end (`float`): the final `beta` value. - beta_schedule (`str`): - the beta schedule, a mapping from a beta range to a sequence of betas for stepping the model. Choose from - `linear`, `scaled_linear`, or `squaredcos_cap_v2`. - eta (`float`): - The weight of noise for added noise in a diffusion step. Its value is between 0.0 and 1.0 -0.0 is DDIM and - 1.0 is DDPM scheduler respectively. - trained_betas (`np.ndarray`, optional): - option to pass an array of betas directly to the constructor to bypass `beta_start`, `beta_end` etc. - variance_type (`str`): - options to clip the variance used when adding noise to the denoised sample. Choose from `fixed_small`, - `fixed_small_log`, `fixed_large`, `fixed_large_log`, `learned` or `learned_range`. - clip_sample (`bool`, default `True`): - option to clip predicted sample between -1 and 1 for numerical stability. - - """ - - order = 1 - - @register_to_config - def __init__( - self, - num_train_timesteps: int = 1000, - beta_start: float = 0.0001, - beta_end: float = 0.02, - beta_schedule: str = "linear", - eta: float = 0.0, - trained_betas: Optional[np.ndarray] = None, - clip_sample: bool = True, - ): - if trained_betas is not None: - self.betas = torch.from_numpy(trained_betas) - elif beta_schedule == "linear": - self.betas = torch.linspace(beta_start, beta_end, num_train_timesteps, dtype=torch.float32) - elif beta_schedule == "scaled_linear": - # this schedule is very specific to the latent diffusion model. 
- self.betas = ( - torch.linspace(beta_start**0.5, beta_end**0.5, num_train_timesteps, dtype=torch.float32) ** 2 - ) - elif beta_schedule == "squaredcos_cap_v2": - # Glide cosine schedule - self.betas = betas_for_alpha_bar(num_train_timesteps) - elif beta_schedule == "sigmoid": - # GeoDiff sigmoid schedule - betas = torch.linspace(-6, 6, num_train_timesteps) - self.betas = torch.sigmoid(betas) * (beta_end - beta_start) + beta_start - else: - raise NotImplementedError(f"{beta_schedule} does is not implemented for {self.__class__}") - - self.alphas = 1.0 - self.betas - self.alphas_cumprod = torch.cumprod(self.alphas, dim=0) - self.one = torch.tensor(1.0) - - self.final_alpha_cumprod = torch.tensor(1.0) - - # standard deviation of the initial noise distribution - self.init_noise_sigma = 1.0 - - # setable values - self.num_inference_steps = None - self.timesteps = torch.from_numpy(np.arange(0, num_train_timesteps)[::-1].copy()) - - self.eta = eta - - def scale_model_input(self, sample: torch.FloatTensor, timestep: Optional[int] = None) -> torch.FloatTensor: - """ - Ensures interchangeability with schedulers that need to scale the denoising model input depending on the - current timestep. - - Args: - sample (`torch.FloatTensor`): input sample - timestep (`int`, optional): current timestep - - Returns: - `torch.FloatTensor`: scaled input sample - """ - return sample - - def set_timesteps( - self, - num_inference_steps: int, - jump_length: int = 10, - jump_n_sample: int = 10, - device: Union[str, torch.device] = None, - ): - num_inference_steps = min(self.config.num_train_timesteps, num_inference_steps) - self.num_inference_steps = num_inference_steps - - timesteps = [] - - jumps = {} - for j in range(0, num_inference_steps - jump_length, jump_length): - jumps[j] = jump_n_sample - 1 - - t = num_inference_steps - while t >= 1: - t = t - 1 - timesteps.append(t) - - if jumps.get(t, 0) > 0: - jumps[t] = jumps[t] - 1 - for _ in range(jump_length): - t = t + 1 - timesteps.append(t) - - timesteps = np.array(timesteps) * (self.config.num_train_timesteps // self.num_inference_steps) - self.timesteps = torch.from_numpy(timesteps).to(device) - - def _get_variance(self, t): - prev_timestep = t - self.config.num_train_timesteps // self.num_inference_steps - - alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod - beta_prod_t = 1 - alpha_prod_t - beta_prod_t_prev = 1 - alpha_prod_t_prev - - # For t > 0, compute predicted variance βt (see formula (6) and (7) from - # https://arxiv.org/pdf/2006.11239.pdf) and sample from it to get - # previous sample x_{t-1} ~ N(pred_prev_sample, variance) == add - # variance to pred_sample - # Is equivalent to formula (16) in https://arxiv.org/pdf/2010.02502.pdf - # without eta. - # variance = (1 - alpha_prod_t_prev) / (1 - alpha_prod_t) * self.betas[t] - variance = (beta_prod_t_prev / beta_prod_t) * (1 - alpha_prod_t / alpha_prod_t_prev) - - return variance - - def step( - self, - model_output: torch.FloatTensor, - timestep: int, - sample: torch.FloatTensor, - original_image: torch.FloatTensor, - mask: torch.FloatTensor, - generator: Optional[torch.Generator] = None, - return_dict: bool = True, - ) -> Union[RePaintSchedulerOutput, Tuple]: - """ - Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion - process from the learned model outputs (most often the predicted noise). 
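# -- Editorial aside (sketch, not part of the original scheduler) --
# set_timesteps() above interleaves reverse steps with forward "jumps" (resampling),
# so the raw index sequence repeatedly walks down, back up, and down again. The same
# index pattern in isolation (before scaling by train_steps // inference_steps):
def _repaint_indices(n, jump_length, jump_n_sample):
    jumps = {j: jump_n_sample - 1 for j in range(0, n - jump_length, jump_length)}
    t, out = n, []
    while t >= 1:
        t -= 1
        out.append(t)
        if jumps.get(t, 0) > 0:
            jumps[t] -= 1
            for _ in range(jump_length):
                t += 1
                out.append(t)
    return out

assert _repaint_indices(10, jump_length=3, jump_n_sample=2)[:10] == [9, 8, 7, 6, 7, 8, 9, 8, 7, 6]
# -- end aside --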
- - Args: - model_output (`torch.FloatTensor`): direct output from learned - diffusion model. - timestep (`int`): current discrete timestep in the diffusion chain. - sample (`torch.FloatTensor`): - current instance of sample being created by diffusion process. - original_image (`torch.FloatTensor`): - the original image to inpaint on. - mask (`torch.FloatTensor`): - the mask where 0.0 values define which part of the original image to inpaint (change). - generator (`torch.Generator`, *optional*): random number generator. - return_dict (`bool`): option for returning tuple rather than - DDPMSchedulerOutput class - - Returns: - [`~schedulers.scheduling_utils.RePaintSchedulerOutput`] or `tuple`: - [`~schedulers.scheduling_utils.RePaintSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - - """ - t = timestep - prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps - - # 1. compute alphas, betas - alpha_prod_t = self.alphas_cumprod[t] - alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod - beta_prod_t = 1 - alpha_prod_t - - # 2. compute predicted original sample from predicted noise also called - # "predicted x_0" of formula (15) from https://arxiv.org/pdf/2006.11239.pdf - pred_original_sample = (sample - beta_prod_t**0.5 * model_output) / alpha_prod_t**0.5 - - # 3. Clip "predicted x_0" - if self.config.clip_sample: - pred_original_sample = torch.clamp(pred_original_sample, -1, 1) - - # We choose to follow RePaint Algorithm 1 to get x_{t-1}, however we - # substitute formula (7) in the algorithm coming from DDPM paper - # (formula (4) Algorithm 2 - Sampling) with formula (12) from DDIM paper. - # DDIM schedule gives the same results as DDPM with eta = 1.0 - # Noise is being reused in 7. and 8., but no impact on quality has - # been observed. - - # 5. Add noise - noise = torch.randn( - model_output.shape, dtype=model_output.dtype, generator=generator, device=model_output.device - ) - std_dev_t = self.eta * self._get_variance(timestep) ** 0.5 - - variance = 0 - if t > 0 and self.eta > 0: - variance = std_dev_t * noise - - # 6. compute "direction pointing to x_t" of formula (12) - # from https://arxiv.org/pdf/2010.02502.pdf - pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** 0.5 * model_output - - # 7. compute x_{t-1} of formula (12) from https://arxiv.org/pdf/2010.02502.pdf - prev_unknown_part = alpha_prod_t_prev**0.5 * pred_original_sample + pred_sample_direction + variance - - # 8. Algorithm 1 Line 5 https://arxiv.org/pdf/2201.09865.pdf - prev_known_part = (alpha_prod_t**0.5) * original_image + ((1 - alpha_prod_t) ** 0.5) * noise - - # 9. Algorithm 1 Line 8 https://arxiv.org/pdf/2201.09865.pdf - pred_prev_sample = mask * prev_known_part + (1.0 - mask) * prev_unknown_part - - if not return_dict: - return ( - pred_prev_sample, - pred_original_sample, - ) - - return RePaintSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample) - - def undo_step(self, sample, timestep, generator=None): - n = self.config.num_train_timesteps // self.num_inference_steps - - for i in range(n): - beta = self.betas[timestep + i] - noise = torch.randn(sample.shape, generator=generator, device=sample.device) - - # 10. 
Algorithm 1 Line 10 https://arxiv.org/pdf/2201.09865.pdf - sample = (1 - beta) ** 0.5 * sample + beta**0.5 * noise - - return sample - - def add_noise( - self, - original_samples: torch.FloatTensor, - noise: torch.FloatTensor, - timesteps: torch.IntTensor, - ) -> torch.FloatTensor: - raise NotImplementedError("Use `DDPMScheduler.add_noise()` to train for sampling with RePaint.") - - def __len__(self): - return self.config.num_train_timesteps diff --git a/spaces/Jamel887/Rvc-tio887/config.py b/spaces/Jamel887/Rvc-tio887/config.py deleted file mode 100644 index 6797dd748da4a2ddf57a97fd80ce9776d98ac82e..0000000000000000000000000000000000000000 --- a/spaces/Jamel887/Rvc-tio887/config.py +++ /dev/null @@ -1,99 +0,0 @@ -import argparse -import sys -import torch -from multiprocessing import cpu_count - -class Config: - def __init__(self): - self.device = "cuda:0" - self.is_half = True - self.n_cpu = 0 - self.gpu_name = None - self.gpu_mem = None - ( - self.colab, - self.api, - self.unsupported - ) = self.arg_parse() - self.x_pad, self.x_query, self.x_center, self.x_max = self.device_config() - - @staticmethod - def arg_parse() -> tuple: - parser = argparse.ArgumentParser() - parser.add_argument("--colab", action="store_true", help="Launch in colab") - parser.add_argument("--api", action="store_true", help="Launch with api") - parser.add_argument("--unsupported", action="store_true", help="Enable unsupported feature") - cmd_opts = parser.parse_args() - - return ( - cmd_opts.colab, - cmd_opts.api, - cmd_opts.unsupported - ) - - # has_mps is only available in nightly pytorch (for now) and MasOS 12.3+. - # check `getattr` and try it for compatibility - @staticmethod - def has_mps() -> bool: - if not torch.backends.mps.is_available(): - return False - try: - torch.zeros(1).to(torch.device("mps")) - return True - except Exception: - return False - - def device_config(self) -> tuple: - if torch.cuda.is_available(): - i_device = int(self.device.split(":")[-1]) - self.gpu_name = torch.cuda.get_device_name(i_device) - if ( - ("16" in self.gpu_name and "V100" not in self.gpu_name.upper()) - or "P40" in self.gpu_name.upper() - or "1060" in self.gpu_name - or "1070" in self.gpu_name - or "1080" in self.gpu_name - ): - print("INFO: Found GPU", self.gpu_name, ", force to fp32") - self.is_half = False - else: - print("INFO: Found GPU", self.gpu_name) - self.gpu_mem = int( - torch.cuda.get_device_properties(i_device).total_memory - / 1024 - / 1024 - / 1024 - + 0.4 - ) - elif self.has_mps(): - print("INFO: No supported Nvidia GPU found, use MPS instead") - self.device = "mps" - self.is_half = False - else: - print("INFO: No supported Nvidia GPU found, use CPU instead") - self.device = "cpu" - self.is_half = False - - if self.n_cpu == 0: - self.n_cpu = cpu_count() - - if self.is_half: - # 6G显存配置 - x_pad = 3 - x_query = 10 - x_center = 60 - x_max = 65 - else: - # 5G显存配置 - x_pad = 1 - x_query = 6 - x_center = 38 - x_max = 41 - - if self.gpu_mem != None and self.gpu_mem <= 4: - x_pad = 1 - x_query = 5 - x_center = 30 - x_max = 32 - - return x_pad, x_query, x_center, x_max diff --git a/spaces/Jamkonams/AutoGPT/autogpt/cli.py b/spaces/Jamkonams/AutoGPT/autogpt/cli.py deleted file mode 100644 index a2e99cb421cad005528cb160e948ce59ccfcdb66..0000000000000000000000000000000000000000 --- a/spaces/Jamkonams/AutoGPT/autogpt/cli.py +++ /dev/null @@ -1,145 +0,0 @@ -"""Main script for the autogpt package.""" -import click - - -@click.group(invoke_without_command=True) -@click.option("-c", "--continuous", is_flag=True, 
help="Enable Continuous Mode") -@click.option( - "--skip-reprompt", - "-y", - is_flag=True, - help="Skips the re-prompting messages at the beginning of the script", -) -@click.option( - "--ai-settings", - "-C", - help="Specifies which ai_settings.yaml file to use, will also automatically skip the re-prompt.", -) -@click.option( - "-l", - "--continuous-limit", - type=int, - help="Defines the number of times to run in continuous mode", -) -@click.option("--speak", is_flag=True, help="Enable Speak Mode") -@click.option("--debug", is_flag=True, help="Enable Debug Mode") -@click.option("--gpt3only", is_flag=True, help="Enable GPT3.5 Only Mode") -@click.option("--gpt4only", is_flag=True, help="Enable GPT4 Only Mode") -@click.option( - "--use-memory", - "-m", - "memory_type", - type=str, - help="Defines which Memory backend to use", -) -@click.option( - "-b", - "--browser-name", - help="Specifies which web-browser to use when using selenium to scrape the web.", -) -@click.option( - "--allow-downloads", - is_flag=True, - help="Dangerous: Allows Auto-GPT to download files natively.", -) -@click.option( - "--skip-news", - is_flag=True, - help="Specifies whether to suppress the output of latest news on startup.", -) -@click.pass_context -def main( - ctx: click.Context, - continuous: bool, - continuous_limit: int, - ai_settings: str, - skip_reprompt: bool, - speak: bool, - debug: bool, - gpt3only: bool, - gpt4only: bool, - memory_type: str, - browser_name: str, - allow_downloads: bool, - skip_news: bool, -) -> None: - """ - Welcome to AutoGPT an experimental open-source application showcasing the capabilities of the GPT-4 pushing the boundaries of AI. - - Start an Auto-GPT assistant. - """ - # Put imports inside function to avoid importing everything when starting the CLI - import logging - - from colorama import Fore - - from autogpt.agent.agent import Agent - from autogpt.config import Config, check_openai_api_key - from autogpt.configurator import create_config - from autogpt.logs import logger - from autogpt.memory import get_memory - from autogpt.prompt import construct_prompt - from autogpt.utils import get_current_git_branch, get_latest_bulletin - - if ctx.invoked_subcommand is None: - cfg = Config() - # TODO: fill in llm values here - check_openai_api_key() - create_config( - continuous, - continuous_limit, - ai_settings, - skip_reprompt, - speak, - debug, - gpt3only, - gpt4only, - memory_type, - browser_name, - allow_downloads, - skip_news, - ) - logger.set_level(logging.DEBUG if cfg.debug_mode else logging.INFO) - ai_name = "" - if not cfg.skip_news: - motd = get_latest_bulletin() - if motd: - logger.typewriter_log("NEWS: ", Fore.GREEN, motd) - git_branch = get_current_git_branch() - if git_branch and git_branch != "stable": - logger.typewriter_log( - "WARNING: ", - Fore.RED, - f"You are running on `{git_branch}` branch " - "- this is not a supported branch.", - ) - system_prompt = construct_prompt() - # print(prompt) - # Initialize variables - full_message_history = [] - next_action_count = 0 - # Make a constant: - triggering_prompt = ( - "Determine which next command to use, and respond using the" - " format specified above:" - ) - # Initialize memory and make sure it is empty. 
- # this is particularly important for indexing and referencing pinecone memory - memory = get_memory(cfg, init=True) - logger.typewriter_log( - "Using memory of type:", Fore.GREEN, f"{memory.__class__.__name__}" - ) - logger.typewriter_log("Using Browser:", Fore.GREEN, cfg.selenium_web_browser) - agent = Agent( - ai_name=ai_name, - memory=memory, - full_message_history=full_message_history, - next_action_count=next_action_count, - system_prompt=system_prompt, - triggering_prompt=triggering_prompt, - ) - agent.start_interaction_loop() - - -if __name__ == "__main__": - main() diff --git a/spaces/JanDalhuysen/ChatPDF/app.py b/spaces/JanDalhuysen/ChatPDF/app.py deleted file mode 100644 index 4002b11a521b8d3a1f3d08444c5c14cd0db63780..0000000000000000000000000000000000000000 --- a/spaces/JanDalhuysen/ChatPDF/app.py +++ /dev/null @@ -1,178 +0,0 @@ -import requests -import json -import gradio as gr -# from concurrent.futures import ThreadPoolExecutor -import pdfplumber -import pandas as pd -import time -from cnocr import CnOcr -from sentence_transformers import SentenceTransformer, models, util -word_embedding_model = models.Transformer('uer/sbert-base-chinese-nli', do_lower_case=True) -pooling_model = models.Pooling(word_embedding_model.get_word_embedding_dimension(), pooling_mode='cls') -embedder = SentenceTransformer(modules=[word_embedding_model, pooling_model]) -ocr = CnOcr() -# chat_url = 'https://souljoy-my-api.hf.space/sale' -chat_url = 'https://souljoy-my-api.hf.space/chatpdf' -headers = { - 'Content-Type': 'application/json', -} -# thread_pool_executor = ThreadPoolExecutor(max_workers=4) -history_max_len = 500 -all_max_len = 3000 - - -def get_emb(text): - emb_url = 'https://souljoy-my-api.hf.space/embeddings' - data = {"content": text} - try: - result = requests.post(url=emb_url, - data=json.dumps(data), - headers=headers - ) - return result.json()['data'][0]['embedding'] - except Exception as e: - print('data', data, 'result json', result.json()) - - -def doc_emb(doc: str): - texts = doc.split('\n') - # futures = [] - emb_list = embedder.encode(texts) - # for text in texts: - # futures.append(thread_pool_executor.submit(get_emb, text)) - # for f in futures: - # emb_list.append(f.result()) - print('\n'.join(texts)) - return texts, emb_list, gr.Textbox.update(visible=True), gr.Button.update(visible=True), gr.Markdown.update( - # value="""操作说明 step 3:PDF解析提交成功! 🙋 可以开始对话啦~"""), gr.Chatbot.update(visible=True) - value="""Step 3: PDF analysis and submission successful! 
🙋 You can start the conversation"""), gr.Chatbot.update(visible=True) - - -def get_response(msg, bot, doc_text_list, doc_embeddings): - # future = thread_pool_executor.submit(get_emb, msg) - now_len = len(msg) - req_json = {'question': msg} - his_bg = -1 - for i in range(len(bot) - 1, -1, -1): - if now_len + len(bot[i][0]) + len(bot[i][1]) > history_max_len: - break - now_len += len(bot[i][0]) + len(bot[i][1]) - his_bg = i - req_json['history'] = [] if his_bg == -1 else bot[his_bg:] - # query_embedding = future.result() - query_embedding = embedder.encode([msg]) - cos_scores = util.cos_sim(query_embedding, doc_embeddings)[0] - score_index = [[score, index] for score, index in zip(cos_scores, [i for i in range(len(cos_scores))])] - score_index.sort(key=lambda x: x[0], reverse=True) - print('score_index:\n', score_index) - index_set, sub_doc_list = set(), [] - for s_i in score_index: - doc = doc_text_list[s_i[1]] - if now_len + len(doc) > all_max_len: - break - index_set.add(s_i[1]) - now_len += len(doc) - # 可能段落截断错误,所以把上下段也加入进来 - # Maybe the paragraph is truncated wrong, so add the upper and lower paragraphs - if s_i[1] > 0 and s_i[1] -1 not in index_set: - doc = doc_text_list[s_i[1]-1] - if now_len + len(doc) > all_max_len: - break - index_set.add(s_i[1]-1) - now_len += len(doc) - if s_i[1] + 1 < len(doc_text_list) and s_i[1] + 1 not in index_set: - doc = doc_text_list[s_i[1]+1] - if now_len + len(doc) > all_max_len: - break - index_set.add(s_i[1]+1) - now_len += len(doc) - - index_list = list(index_set) - index_list.sort() - for i in index_list: - sub_doc_list.append(doc_text_list[i]) - req_json['doc'] = '' if len(sub_doc_list) == 0 else '\n'.join(sub_doc_list) - data = {"content": json.dumps(req_json)} - print('data:\n', req_json) - result = requests.post(url=chat_url, - data=json.dumps(data), - headers=headers - ) - res = result.json()['content'] - bot.append([msg, res]) - return bot[max(0, len(bot) - 3):] - - -def up_file(files): - doc_text_list = [] - for idx, file in enumerate(files): - print(file.name) - with pdfplumber.open(file.name) as pdf: - for i in range(len(pdf.pages)): - # 读取PDF文档第i+1页 - # Read page i+1 of PDF document - page = pdf.pages[i] - res_list = page.extract_text().split('\n')[:-1] - - for j in range(len(page.images)): - # 获取图片的二进制流 - # Get the binary stream of the image - img = page.images[j] - file_name = '{}-{}-{}.png'.format(str(time.time()), str(i), str(j)) - with open(file_name, mode='wb') as f: - f.write(img['stream'].get_data()) - try: - res = ocr.ocr(file_name) - except Exception as e: - res = [] - if len(res) > 0: - res_list.append(' '.join([re['text'] for re in res])) - - tables = page.extract_tables() - for table in tables: - # 第一列当成表头: - # The first column is used as the header: - df = pd.DataFrame(table[1:], columns=table[0]) - try: - records = json.loads(df.to_json(orient="records", force_ascii=False)) - for rec in records: - res_list.append(json.dumps(rec, ensure_ascii=False)) - except Exception as e: - res_list.append(str(df)) - - doc_text_list += res_list - doc_text_list = [str(text).strip() for text in doc_text_list if len(str(text).strip()) > 0] - print(doc_text_list) - return gr.Textbox.update(value='\n'.join(doc_text_list), visible=True), gr.Button.update( - visible=True), gr.Markdown.update( - # value="操作说明 step 2:确认PDF解析结果(可修正),点击“提交解析结果”,随后进行对话") - value="Step 2: Confirm the PDF analysis result (can be revised), click “Submit analysis result”, and then chat") - - -with gr.Blocks() as demo: - with gr.Row(): - with gr.Column(): - # file = 
gr.File(file_types=['.pdf'], label='点击上传PDF,进行解析(支持多文档、表格、OCR)', file_count='multiple') - file = gr.File(file_types=['.pdf'], label='Click to upload PDF and analyze it (support multiple documents, forms, OCR)', file_count='multiple') - # doc_bu = gr.Button(value='提交解析结果', visible=False) - doc_bu = gr.Button(value='Submit analysis results', visible=False) - # txt = gr.Textbox(label='PDF解析结果', visible=False) - txt = gr.Textbox(label='PDF analysis result', visible=False) - doc_text_state = gr.State([]) - doc_emb_state = gr.State([]) - with gr.Column(): - # md = gr.Markdown("""操作说明 step 1:点击左侧区域,上传PDF,进行解析""") - md = gr.Markdown("""Step 1: Click on the area on the left, upload the PDF and analyze it""") - chat_bot = gr.Chatbot(visible=False) - # msg_txt = gr.Textbox(label='消息框', placeholder='输入消息,点击发送', visible=False) - msg_txt = gr.Textbox(label='message box', placeholder='enter message and click to send', visible=False) - # chat_bu = gr.Button(value='发送', visible=False) - chat_bu = gr.Button(value='send', visible=False) - - file.change(up_file, [file], [txt, doc_bu, md]) - doc_bu.click(doc_emb, [txt], [doc_text_state, doc_emb_state, msg_txt, chat_bu, md, chat_bot]) - chat_bu.click(get_response, [msg_txt, chat_bot, doc_text_state, doc_emb_state], [chat_bot]) - -if __name__ == "__main__": - demo.queue().launch() - # demo.queue().launch(share=False, server_name='172.22.2.54', server_port=9191) \ No newline at end of file diff --git a/spaces/Jeff2323/ai-comic-factory/src/components/ui/use-toast.ts b/spaces/Jeff2323/ai-comic-factory/src/components/ui/use-toast.ts deleted file mode 100644 index 90d8959bf3136de29eec362bf9d089b705c4ed3b..0000000000000000000000000000000000000000 --- a/spaces/Jeff2323/ai-comic-factory/src/components/ui/use-toast.ts +++ /dev/null @@ -1,192 +0,0 @@ -// Inspired by react-hot-toast library -import * as React from "react" - -import type { - ToastActionElement, - ToastProps, -} from "@/components/ui/toast" - -const TOAST_LIMIT = 1 -const TOAST_REMOVE_DELAY = 1000000 - -type ToasterToast = ToastProps & { - id: string - title?: React.ReactNode - description?: React.ReactNode - action?: ToastActionElement -} - -const actionTypes = { - ADD_TOAST: "ADD_TOAST", - UPDATE_TOAST: "UPDATE_TOAST", - DISMISS_TOAST: "DISMISS_TOAST", - REMOVE_TOAST: "REMOVE_TOAST", -} as const - -let count = 0 - -function genId() { - count = (count + 1) % Number.MAX_VALUE - return count.toString() -} - -type ActionType = typeof actionTypes - -type Action = - | { - type: ActionType["ADD_TOAST"] - toast: ToasterToast - } - | { - type: ActionType["UPDATE_TOAST"] - toast: Partial - } - | { - type: ActionType["DISMISS_TOAST"] - toastId?: ToasterToast["id"] - } - | { - type: ActionType["REMOVE_TOAST"] - toastId?: ToasterToast["id"] - } - -interface State { - toasts: ToasterToast[] -} - -const toastTimeouts = new Map>() - -const addToRemoveQueue = (toastId: string) => { - if (toastTimeouts.has(toastId)) { - return - } - - const timeout = setTimeout(() => { - toastTimeouts.delete(toastId) - dispatch({ - type: "REMOVE_TOAST", - toastId: toastId, - }) - }, TOAST_REMOVE_DELAY) - - toastTimeouts.set(toastId, timeout) -} - -export const reducer = (state: State, action: Action): State => { - switch (action.type) { - case "ADD_TOAST": - return { - ...state, - toasts: [action.toast, ...state.toasts].slice(0, TOAST_LIMIT), - } - - case "UPDATE_TOAST": - return { - ...state, - toasts: state.toasts.map((t) => - t.id === action.toast.id ? 
{ ...t, ...action.toast } : t - ), - } - - case "DISMISS_TOAST": { - const { toastId } = action - - // ! Side effects ! - This could be extracted into a dismissToast() action, - // but I'll keep it here for simplicity - if (toastId) { - addToRemoveQueue(toastId) - } else { - state.toasts.forEach((toast) => { - addToRemoveQueue(toast.id) - }) - } - - return { - ...state, - toasts: state.toasts.map((t) => - t.id === toastId || toastId === undefined - ? { - ...t, - open: false, - } - : t - ), - } - } - case "REMOVE_TOAST": - if (action.toastId === undefined) { - return { - ...state, - toasts: [], - } - } - return { - ...state, - toasts: state.toasts.filter((t) => t.id !== action.toastId), - } - } -} - -const listeners: Array<(state: State) => void> = [] - -let memoryState: State = { toasts: [] } - -function dispatch(action: Action) { - memoryState = reducer(memoryState, action) - listeners.forEach((listener) => { - listener(memoryState) - }) -} - -type Toast = Omit - -function toast({ ...props }: Toast) { - const id = genId() - - const update = (props: ToasterToast) => - dispatch({ - type: "UPDATE_TOAST", - toast: { ...props, id }, - }) - const dismiss = () => dispatch({ type: "DISMISS_TOAST", toastId: id }) - - dispatch({ - type: "ADD_TOAST", - toast: { - ...props, - id, - open: true, - onOpenChange: (open) => { - if (!open) dismiss() - }, - }, - }) - - return { - id: id, - dismiss, - update, - } -} - -function useToast() { - const [state, setState] = React.useState(memoryState) - - React.useEffect(() => { - listeners.push(setState) - return () => { - const index = listeners.indexOf(setState) - if (index > -1) { - listeners.splice(index, 1) - } - } - }, [state]) - - return { - ...state, - toast, - dismiss: (toastId?: string) => dispatch({ type: "DISMISS_TOAST", toastId }), - } -} - -export { useToast, toast } diff --git a/spaces/Jerimee/HelloWorld/app.py b/spaces/Jerimee/HelloWorld/app.py deleted file mode 100644 index 7dc88466056544eaa962b0bb581c5c1054f0da2c..0000000000000000000000000000000000000000 --- a/spaces/Jerimee/HelloWorld/app.py +++ /dev/null @@ -1,31 +0,0 @@ -import gradio as gr -from transformers import pipeline - -sentiment = pipeline("sentiment-analysis") - -def get_sentiment(input_text): - return sentiment(input_text) - -if __name__ == "__main__": - iface = gr.Interface( - fn = get_sentiment, - inputs = "text", - outputs = ['text'], - title = 'Sentiment Analysis', - description = 'Determine how negative or positive a given sentiment is. Input a sentence or two and see what the model "thinks."', - btn = gr.Button("Run"), - css="app.css", - examples=[ - ["better than nothing, I guess... I guess you get what you pay for :("], - ["This is better than nothing. You get what you pay for!"], - ["the price reasonable"], - ["Is this price considered reasonable?"], - ["This is better than a kick in the face. Guess you can't look a gift horse in the mouth."], - ["They seem to have a bias as all the people working at the reception look exactly the same."], - ["All the people working at the reception look exactly the same!"], - ["this was expected, clean towels and room cleaned every day"], - ["The top of the window was covered by a dirty blind. 
It was pretty gross."], - ["The helpful staff were consistently cheap and comfortable."] - ] - ) - iface.launch(inline = False) \ No newline at end of file diff --git a/spaces/JohnnyPittt/audio-styling/deepafx_st/models/efficient_net/model.py b/spaces/JohnnyPittt/audio-styling/deepafx_st/models/efficient_net/model.py deleted file mode 100644 index ce850cd61391ed159122a49df2c8390cfc944aac..0000000000000000000000000000000000000000 --- a/spaces/JohnnyPittt/audio-styling/deepafx_st/models/efficient_net/model.py +++ /dev/null @@ -1,419 +0,0 @@ -"""model.py - Model and module class for EfficientNet. - They are built to mirror those in the official TensorFlow implementation. -""" - -# Author: lukemelas (github username) -# Github repo: https://github.com/lukemelas/EfficientNet-PyTorch -# With adjustments and added comments by workingcoder (github username). - -import torch -from torch import nn -from torch.nn import functional as F -from .utils import ( - round_filters, - round_repeats, - drop_connect, - get_same_padding_conv2d, - get_model_params, - efficientnet_params, - load_pretrained_weights, - Swish, - MemoryEfficientSwish, - calculate_output_image_size -) - - -VALID_MODELS = ( - 'efficientnet-b0', 'efficientnet-b1', 'efficientnet-b2', 'efficientnet-b3', - 'efficientnet-b4', 'efficientnet-b5', 'efficientnet-b6', 'efficientnet-b7', - 'efficientnet-b8', - - # Support the construction of 'efficientnet-l2' without pretrained weights - 'efficientnet-l2' -) - - -class MBConvBlock(nn.Module): - """Mobile Inverted Residual Bottleneck Block. - - Args: - block_args (namedtuple): BlockArgs, defined in utils.py. - global_params (namedtuple): GlobalParam, defined in utils.py. - image_size (tuple or list): [image_height, image_width]. - - References: - [1] https://arxiv.org/abs/1704.04861 (MobileNet v1) - [2] https://arxiv.org/abs/1801.04381 (MobileNet v2) - [3] https://arxiv.org/abs/1905.02244 (MobileNet v3) - """ - - def __init__(self, block_args, global_params, image_size=None): - super().__init__() - self._block_args = block_args - self._bn_mom = 1 - global_params.batch_norm_momentum # pytorch's difference from tensorflow - self._bn_eps = global_params.batch_norm_epsilon - self.has_se = (self._block_args.se_ratio is not None) and (0 < self._block_args.se_ratio <= 1) - self.id_skip = block_args.id_skip # whether to use skip connection and drop connect - - # Expansion phase (Inverted Bottleneck) - inp = self._block_args.input_filters # number of input channels - oup = self._block_args.input_filters * self._block_args.expand_ratio # number of output channels - if self._block_args.expand_ratio != 1: - Conv2d = get_same_padding_conv2d(image_size=image_size) - self._expand_conv = Conv2d(in_channels=inp, out_channels=oup, kernel_size=1, bias=False) - self._bn0 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) - # image_size = calculate_output_image_size(image_size, 1) <-- this wouldn't modify image_size - - # Depthwise convolution phase - k = self._block_args.kernel_size - s = self._block_args.stride - Conv2d = get_same_padding_conv2d(image_size=image_size) - self._depthwise_conv = Conv2d( - in_channels=oup, out_channels=oup, groups=oup, # groups makes it depthwise - kernel_size=k, stride=s, bias=False) - self._bn1 = nn.BatchNorm2d(num_features=oup, momentum=self._bn_mom, eps=self._bn_eps) - image_size = calculate_output_image_size(image_size, s) - - # Squeeze and Excitation layer, if desired - if self.has_se: - Conv2d = get_same_padding_conv2d(image_size=(1, 1)) - 
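            # The SE branch global-average-pools the feature map to 1x1, bottlenecks the
            # channels to about se_ratio * input_filters with a 1x1 conv, expands back to
            # `oup`, and in forward() gates the block output with a sigmoid of that result.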
num_squeezed_channels = max(1, int(self._block_args.input_filters * self._block_args.se_ratio)) - self._se_reduce = Conv2d(in_channels=oup, out_channels=num_squeezed_channels, kernel_size=1) - self._se_expand = Conv2d(in_channels=num_squeezed_channels, out_channels=oup, kernel_size=1) - - # Pointwise convolution phase - final_oup = self._block_args.output_filters - Conv2d = get_same_padding_conv2d(image_size=image_size) - self._project_conv = Conv2d(in_channels=oup, out_channels=final_oup, kernel_size=1, bias=False) - self._bn2 = nn.BatchNorm2d(num_features=final_oup, momentum=self._bn_mom, eps=self._bn_eps) - self._swish = MemoryEfficientSwish() - - def forward(self, inputs, drop_connect_rate=None): - """MBConvBlock's forward function. - - Args: - inputs (tensor): Input tensor. - drop_connect_rate (bool): Drop connect rate (float, between 0 and 1). - - Returns: - Output of this block after processing. - """ - - # Expansion and Depthwise Convolution - x = inputs - if self._block_args.expand_ratio != 1: - x = self._expand_conv(inputs) - x = self._bn0(x) - x = self._swish(x) - - x = self._depthwise_conv(x) - x = self._bn1(x) - x = self._swish(x) - - # Squeeze and Excitation - if self.has_se: - x_squeezed = F.adaptive_avg_pool2d(x, 1) - x_squeezed = self._se_reduce(x_squeezed) - x_squeezed = self._swish(x_squeezed) - x_squeezed = self._se_expand(x_squeezed) - x = torch.sigmoid(x_squeezed) * x - - # Pointwise Convolution - x = self._project_conv(x) - x = self._bn2(x) - - # Skip connection and drop connect - input_filters, output_filters = self._block_args.input_filters, self._block_args.output_filters - if self.id_skip and self._block_args.stride == 1 and input_filters == output_filters: - # The combination of skip connection and drop connect brings about stochastic depth. - if drop_connect_rate: - x = drop_connect(x, p=drop_connect_rate, training=self.training) - x = x + inputs # skip connection - return x - - def set_swish(self, memory_efficient=True): - """Sets swish function as memory efficient (for training) or standard (for export). - - Args: - memory_efficient (bool): Whether to use memory-efficient version of swish. - """ - self._swish = MemoryEfficientSwish() if memory_efficient else Swish() - - -class EfficientNet(nn.Module): - """EfficientNet model. - Most easily loaded with the .from_name or .from_pretrained methods. - - Args: - blocks_args (list[namedtuple]): A list of BlockArgs to construct blocks. - global_params (namedtuple): A set of GlobalParams shared between blocks. 
- - References: - [1] https://arxiv.org/abs/1905.11946 (EfficientNet) - - Example: - >>> import torch - >>> from efficientnet.model import EfficientNet - >>> inputs = torch.rand(1, 3, 224, 224) - >>> model = EfficientNet.from_pretrained('efficientnet-b0') - >>> model.eval() - >>> outputs = model(inputs) - """ - - def __init__(self, blocks_args=None, global_params=None): - super().__init__() - assert isinstance(blocks_args, list), 'blocks_args should be a list' - assert len(blocks_args) > 0, 'block args must be greater than 0' - self._global_params = global_params - self._blocks_args = blocks_args - - # Batch norm parameters - bn_mom = 1 - self._global_params.batch_norm_momentum - bn_eps = self._global_params.batch_norm_epsilon - - # Get stem static or dynamic convolution depending on image size - image_size = global_params.image_size - Conv2d = get_same_padding_conv2d(image_size=image_size) - - # Stem - in_channels = 3 # rgb - out_channels = round_filters(32, self._global_params) # number of output channels - self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False) - self._bn0 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps) - image_size = calculate_output_image_size(image_size, 2) - - # Build blocks - self._blocks = nn.ModuleList([]) - for block_args in self._blocks_args: - - # Update block input and output filters based on depth multiplier. - block_args = block_args._replace( - input_filters=round_filters(block_args.input_filters, self._global_params), - output_filters=round_filters(block_args.output_filters, self._global_params), - num_repeat=round_repeats(block_args.num_repeat, self._global_params) - ) - - # The first block needs to take care of stride and filter size increase. - self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size)) - image_size = calculate_output_image_size(image_size, block_args.stride) - if block_args.num_repeat > 1: # modify block_args to keep same output size - block_args = block_args._replace(input_filters=block_args.output_filters, stride=1) - for _ in range(block_args.num_repeat - 1): - self._blocks.append(MBConvBlock(block_args, self._global_params, image_size=image_size)) - # image_size = calculate_output_image_size(image_size, block_args.stride) # stride = 1 - - # Head - in_channels = block_args.output_filters # output of final block - out_channels = round_filters(1280, self._global_params) - Conv2d = get_same_padding_conv2d(image_size=image_size) - self._conv_head = Conv2d(in_channels, out_channels, kernel_size=1, bias=False) - self._bn1 = nn.BatchNorm2d(num_features=out_channels, momentum=bn_mom, eps=bn_eps) - - # Final linear layer - self._avg_pooling = nn.AdaptiveAvgPool2d(1) - if self._global_params.include_top: - self._dropout = nn.Dropout(self._global_params.dropout_rate) - self._fc = nn.Linear(out_channels, self._global_params.num_classes) - - # set activation to memory efficient swish by default - self._swish = MemoryEfficientSwish() - - def set_swish(self, memory_efficient=True): - """Sets swish function as memory efficient (for training) or standard (for export). - - Args: - memory_efficient (bool): Whether to use memory-efficient version of swish. - """ - self._swish = MemoryEfficientSwish() if memory_efficient else Swish() - for block in self._blocks: - block.set_swish(memory_efficient) - - def extract_endpoints(self, inputs): - """Use convolution layer to extract features - from reduction levels i in [1, 2, 3, 4, 5]. 
- - Args: - inputs (tensor): Input tensor. - - Returns: - Dictionary of last intermediate features - with reduction levels i in [1, 2, 3, 4, 5]. - Example: - >>> import torch - >>> from efficientnet.model import EfficientNet - >>> inputs = torch.rand(1, 3, 224, 224) - >>> model = EfficientNet.from_pretrained('efficientnet-b0') - >>> endpoints = model.extract_endpoints(inputs) - >>> print(endpoints['reduction_1'].shape) # torch.Size([1, 16, 112, 112]) - >>> print(endpoints['reduction_2'].shape) # torch.Size([1, 24, 56, 56]) - >>> print(endpoints['reduction_3'].shape) # torch.Size([1, 40, 28, 28]) - >>> print(endpoints['reduction_4'].shape) # torch.Size([1, 112, 14, 14]) - >>> print(endpoints['reduction_5'].shape) # torch.Size([1, 320, 7, 7]) - >>> print(endpoints['reduction_6'].shape) # torch.Size([1, 1280, 7, 7]) - """ - endpoints = dict() - - # Stem - x = self._swish(self._bn0(self._conv_stem(inputs))) - prev_x = x - - # Blocks - for idx, block in enumerate(self._blocks): - drop_connect_rate = self._global_params.drop_connect_rate - if drop_connect_rate: - drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate - x = block(x, drop_connect_rate=drop_connect_rate) - if prev_x.size(2) > x.size(2): - endpoints['reduction_{}'.format(len(endpoints) + 1)] = prev_x - elif idx == len(self._blocks) - 1: - endpoints['reduction_{}'.format(len(endpoints) + 1)] = x - prev_x = x - - # Head - x = self._swish(self._bn1(self._conv_head(x))) - endpoints['reduction_{}'.format(len(endpoints) + 1)] = x - - return endpoints - - def extract_features(self, inputs): - """use convolution layer to extract feature . - - Args: - inputs (tensor): Input tensor. - - Returns: - Output of the final convolution - layer in the efficientnet model. - """ - # Stem - x = self._swish(self._bn0(self._conv_stem(inputs))) - - # Blocks - for idx, block in enumerate(self._blocks): - drop_connect_rate = self._global_params.drop_connect_rate - if drop_connect_rate: - drop_connect_rate *= float(idx) / len(self._blocks) # scale drop connect_rate - x = block(x, drop_connect_rate=drop_connect_rate) - - # Head - x = self._swish(self._bn1(self._conv_head(x))) - - return x - - def forward(self, inputs): - """EfficientNet's forward function. - Calls extract_features to extract features, applies final linear layer, and returns logits. - - Args: - inputs (tensor): Input tensor. - - Returns: - Output of this model after processing. - """ - # Convolution layers - x = self.extract_features(inputs) - # Pooling and final linear layer - x = self._avg_pooling(x) - if self._global_params.include_top: - x = x.flatten(start_dim=1) - x = self._dropout(x) - x = self._fc(x) - return x - - @classmethod - def from_name(cls, model_name, in_channels=3, **override_params): - """Create an efficientnet model according to name. - - Args: - model_name (str): Name for efficientnet. - in_channels (int): Input data's channel number. - override_params (other key word params): - Params to override model's global_params. - Optional key: - 'width_coefficient', 'depth_coefficient', - 'image_size', 'dropout_rate', - 'num_classes', 'batch_norm_momentum', - 'batch_norm_epsilon', 'drop_connect_rate', - 'depth_divisor', 'min_depth' - - Returns: - An efficientnet model. 
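        Example (illustrative):
            >>> from efficientnet.model import EfficientNet
            >>> model = EfficientNet.from_name('efficientnet-b0', num_classes=10)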
- """ - cls._check_model_name_is_valid(model_name) - blocks_args, global_params = get_model_params(model_name, override_params) - model = cls(blocks_args, global_params) - model._change_in_channels(in_channels) - return model - - @classmethod - def from_pretrained(cls, model_name, weights_path=None, advprop=False, - in_channels=3, num_classes=1000, **override_params): - """Create an efficientnet model according to name. - - Args: - model_name (str): Name for efficientnet. - weights_path (None or str): - str: path to pretrained weights file on the local disk. - None: use pretrained weights downloaded from the Internet. - advprop (bool): - Whether to load pretrained weights - trained with advprop (valid when weights_path is None). - in_channels (int): Input data's channel number. - num_classes (int): - Number of categories for classification. - It controls the output size for final linear layer. - override_params (other key word params): - Params to override model's global_params. - Optional key: - 'width_coefficient', 'depth_coefficient', - 'image_size', 'dropout_rate', - 'batch_norm_momentum', - 'batch_norm_epsilon', 'drop_connect_rate', - 'depth_divisor', 'min_depth' - - Returns: - A pretrained efficientnet model. - """ - model = cls.from_name(model_name, num_classes=num_classes, **override_params) - load_pretrained_weights(model, model_name, weights_path=weights_path, - load_fc=(num_classes == 1000), advprop=advprop) - model._change_in_channels(in_channels) - return model - - @classmethod - def get_image_size(cls, model_name): - """Get the input image size for a given efficientnet model. - - Args: - model_name (str): Name for efficientnet. - - Returns: - Input image size (resolution). - """ - cls._check_model_name_is_valid(model_name) - _, _, res, _ = efficientnet_params(model_name) - return res - - @classmethod - def _check_model_name_is_valid(cls, model_name): - """Validates model name. - - Args: - model_name (str): Name for efficientnet. - - Returns: - bool: Is a valid name or not. - """ - if model_name not in VALID_MODELS: - raise ValueError('model_name should be one of: ' + ', '.join(VALID_MODELS)) - - def _change_in_channels(self, in_channels): - """Adjust model's first convolution layer to in_channels, if in_channels not equals 3. - - Args: - in_channels (int): Input data's channel number. - """ - if in_channels != 3: - Conv2d = get_same_padding_conv2d(image_size=self._global_params.image_size) - out_channels = round_filters(32, self._global_params) - self._conv_stem = Conv2d(in_channels, out_channels, kernel_size=3, stride=2, bias=False) diff --git a/spaces/Jonni/05-QandA-from-textfile/app.py b/spaces/Jonni/05-QandA-from-textfile/app.py deleted file mode 100644 index c66d3925b6805866e5bead78cee8fdfacd2c9638..0000000000000000000000000000000000000000 --- a/spaces/Jonni/05-QandA-from-textfile/app.py +++ /dev/null @@ -1,20 +0,0 @@ -import gradio as gr -import os - -context = "This could be any large text corpus to use as subject matter to ask questions about. You can load it as well from text file to isolate it from code changes like in the next line" - -with open('Context.txt', 'r') as file: - context = file.read() - -question = "What should be documented in a care plan?" 
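
The Interface.load call below wires the hosted deepset/roberta-base-squad2 model to this context/question pair through the HF Inference API. As a rough local sketch (assuming the transformers library is installed, which this Space itself does not require), the same extractive question answering could be run directly on the context and question defined above:

    from transformers import pipeline

    # Illustrative only: load the same extractive QA model locally instead of
    # calling the hosted Inference API endpoint.
    qa = pipeline("question-answering", model="deepset/roberta-base-squad2")
    result = qa(question=question, context=context)
    print(result["answer"], result["score"])
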
- -API_KEY = os.environ.get("HF_TOKEN") -gr.Interface.load( - "huggingface/deepset/roberta-base-squad2", - api_key=API_KEY, - theme="default", - css=".footer{display:none !important}", - inputs=[gr.inputs.Textbox(lines=12, default=context, label="Context paragraph"), gr.inputs.Textbox(lines=3, default=question, label="Question")], - outputs=[gr.outputs.Textbox(label="Answer"), gr.outputs.Textbox(label="Score")], - title=None, - description="Provide your own paragraph and ask any question about the text. How well does the model answer?").launch() \ No newline at end of file diff --git a/spaces/Kaori1707/Depth-estimation/dpt/transforms.py b/spaces/Kaori1707/Depth-estimation/dpt/transforms.py deleted file mode 100644 index 399adbcdad096ae3fb8a190ecd3ec5483a897251..0000000000000000000000000000000000000000 --- a/spaces/Kaori1707/Depth-estimation/dpt/transforms.py +++ /dev/null @@ -1,231 +0,0 @@ -import numpy as np -import cv2 -import math - - -def apply_min_size(sample, size, image_interpolation_method=cv2.INTER_AREA): - """Rezise the sample to ensure the given size. Keeps aspect ratio. - - Args: - sample (dict): sample - size (tuple): image size - - Returns: - tuple: new size - """ - shape = list(sample["disparity"].shape) - - if shape[0] >= size[0] and shape[1] >= size[1]: - return sample - - scale = [0, 0] - scale[0] = size[0] / shape[0] - scale[1] = size[1] / shape[1] - - scale = max(scale) - - shape[0] = math.ceil(scale * shape[0]) - shape[1] = math.ceil(scale * shape[1]) - - # resize - sample["image"] = cv2.resize( - sample["image"], tuple(shape[::-1]), interpolation=image_interpolation_method - ) - - sample["disparity"] = cv2.resize( - sample["disparity"], tuple(shape[::-1]), interpolation=cv2.INTER_NEAREST - ) - sample["mask"] = cv2.resize( - sample["mask"].astype(np.float32), - tuple(shape[::-1]), - interpolation=cv2.INTER_NEAREST, - ) - sample["mask"] = sample["mask"].astype(bool) - - return tuple(shape) - - -class Resize(object): - """Resize sample to given size (width, height).""" - - def __init__( - self, - width, - height, - resize_target=True, - keep_aspect_ratio=False, - ensure_multiple_of=1, - resize_method="lower_bound", - image_interpolation_method=cv2.INTER_AREA, - ): - """Init. - - Args: - width (int): desired output width - height (int): desired output height - resize_target (bool, optional): - True: Resize the full sample (image, mask, target). - False: Resize image only. - Defaults to True. - keep_aspect_ratio (bool, optional): - True: Keep the aspect ratio of the input sample. - Output sample might not have the given width and height, and - resize behaviour depends on the parameter 'resize_method'. - Defaults to False. - ensure_multiple_of (int, optional): - Output width and height is constrained to be multiple of this parameter. - Defaults to 1. - resize_method (str, optional): - "lower_bound": Output will be at least as large as the given size. - "upper_bound": Output will be at max as large as the given size. (Output size might be smaller than given size.) - "minimal": Scale as least as possible. (Output size might be smaller than given size.) - Defaults to "lower_bound". 
- """ - self.__width = width - self.__height = height - - self.__resize_target = resize_target - self.__keep_aspect_ratio = keep_aspect_ratio - self.__multiple_of = ensure_multiple_of - self.__resize_method = resize_method - self.__image_interpolation_method = image_interpolation_method - - def constrain_to_multiple_of(self, x, min_val=0, max_val=None): - y = (np.round(x / self.__multiple_of) * self.__multiple_of).astype(int) - - if max_val is not None and y > max_val: - y = (np.floor(x / self.__multiple_of) * self.__multiple_of).astype(int) - - if y < min_val: - y = (np.ceil(x / self.__multiple_of) * self.__multiple_of).astype(int) - - return y - - def get_size(self, width, height): - # determine new height and width - scale_height = self.__height / height - scale_width = self.__width / width - - if self.__keep_aspect_ratio: - if self.__resize_method == "lower_bound": - # scale such that output size is lower bound - if scale_width > scale_height: - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - elif self.__resize_method == "upper_bound": - # scale such that output size is upper bound - if scale_width < scale_height: - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - elif self.__resize_method == "minimal": - # scale as least as possbile - if abs(1 - scale_width) < abs(1 - scale_height): - # fit width - scale_height = scale_width - else: - # fit height - scale_width = scale_height - else: - raise ValueError( - f"resize_method {self.__resize_method} not implemented" - ) - - if self.__resize_method == "lower_bound": - new_height = self.constrain_to_multiple_of( - scale_height * height, min_val=self.__height - ) - new_width = self.constrain_to_multiple_of( - scale_width * width, min_val=self.__width - ) - elif self.__resize_method == "upper_bound": - new_height = self.constrain_to_multiple_of( - scale_height * height, max_val=self.__height - ) - new_width = self.constrain_to_multiple_of( - scale_width * width, max_val=self.__width - ) - elif self.__resize_method == "minimal": - new_height = self.constrain_to_multiple_of(scale_height * height) - new_width = self.constrain_to_multiple_of(scale_width * width) - else: - raise ValueError(f"resize_method {self.__resize_method} not implemented") - - return (new_width, new_height) - - def __call__(self, sample): - width, height = self.get_size( - sample["image"].shape[1], sample["image"].shape[0] - ) - - # resize sample - sample["image"] = cv2.resize( - sample["image"], - (width, height), - interpolation=self.__image_interpolation_method, - ) - - if self.__resize_target: - if "disparity" in sample: - sample["disparity"] = cv2.resize( - sample["disparity"], - (width, height), - interpolation=cv2.INTER_NEAREST, - ) - - if "depth" in sample: - sample["depth"] = cv2.resize( - sample["depth"], (width, height), interpolation=cv2.INTER_NEAREST - ) - - sample["mask"] = cv2.resize( - sample["mask"].astype(np.float32), - (width, height), - interpolation=cv2.INTER_NEAREST, - ) - sample["mask"] = sample["mask"].astype(bool) - - return sample - - -class NormalizeImage(object): - """Normlize image by given mean and std.""" - - def __init__(self, mean, std): - self.__mean = mean - self.__std = std - - def __call__(self, sample): - sample["image"] = (sample["image"] - self.__mean) / self.__std - - return sample - - -class PrepareForNet(object): - """Prepare sample for usage as network input.""" - - def __init__(self): - pass - - def __call__(self, sample): - image = 
np.transpose(sample["image"], (2, 0, 1)) - sample["image"] = np.ascontiguousarray(image).astype(np.float32) - - if "mask" in sample: - sample["mask"] = sample["mask"].astype(np.float32) - sample["mask"] = np.ascontiguousarray(sample["mask"]) - - if "disparity" in sample: - disparity = sample["disparity"].astype(np.float32) - sample["disparity"] = np.ascontiguousarray(disparity) - - if "depth" in sample: - depth = sample["depth"].astype(np.float32) - sample["depth"] = np.ascontiguousarray(depth) - - return sample diff --git a/spaces/Kayson/InstructDiffusion/dataset/seg/refcoco.py b/spaces/Kayson/InstructDiffusion/dataset/seg/refcoco.py deleted file mode 100644 index 6600ac5a94ece2239114c57e0918aa2e7826dbf9..0000000000000000000000000000000000000000 --- a/spaces/Kayson/InstructDiffusion/dataset/seg/refcoco.py +++ /dev/null @@ -1,354 +0,0 @@ -__author__ = 'licheng' - -""" -This interface provides access to four datasets: -1) refclef -2) refcoco -3) refcoco+ -4) refcocog -split by unc and google - -The following API functions are defined: -REFER - REFER api class -getRefIds - get ref ids that satisfy given filter conditions. -getAnnIds - get ann ids that satisfy given filter conditions. -getImgIds - get image ids that satisfy given filter conditions. -getCatIds - get category ids that satisfy given filter conditions. -loadRefs - load refs with the specified ref ids. -loadAnns - load anns with the specified ann ids. -loadImgs - load images with the specified image ids. -loadCats - load category names with the specified category ids. -getRefBox - get ref's bounding box [x, y, w, h] given the ref_id -showRef - show image, segmentation or box of the referred object with the ref -getMask - get mask and area of the referred object given ref -showMask - show mask of the referred object given ref -""" - -import sys -sys.path.append("./dataset") -import os.path as osp -import json -import pickle -import time -import itertools -import skimage.io as io -import matplotlib.pyplot as plt -from matplotlib.collections import PatchCollection -from matplotlib.patches import Polygon, Rectangle -from pprint import pprint -import numpy as np -from pycocotools import mask -# import cv2 -# from skimage.measure import label, regionprops - -class REFER: - - def __init__(self, data_root, dataset='refcoco', splitBy='unc'): - # provide data_root folder which contains refclef, refcoco, refcoco+ and refcocog - # also provide dataset name and splitBy information - # e.g., dataset = 'refcoco', splitBy = 'unc' - print('loading dataset %s into memory...' 
% dataset) - self.ROOT_DIR = osp.abspath(osp.dirname(__file__)) - self.DATA_DIR = osp.join(data_root, dataset) - if dataset in ['refcoco', 'refcoco+', 'refcocog']: - self.IMAGE_DIR = osp.join(data_root, 'images/mscoco/images/train2014') - elif dataset == 'refclef': - self.IMAGE_DIR = osp.join(data_root, 'images/saiapr_tc-12') - else: - print('No refer dataset is called [%s]' % dataset) - sys.exit() - - # load refs from data/dataset/refs(dataset).json - tic = time.time() - ref_file = osp.join(self.DATA_DIR, 'refs('+splitBy+').p') - self.data = {} - self.data['dataset'] = dataset - self.data['refs'] = pickle.load(open(ref_file, 'rb'),fix_imports=True) - - # load annotations from data/dataset/instances.json - instances_file = osp.join(self.DATA_DIR, 'instances.json') - instances = json.load(open(instances_file, 'r')) - self.data['images'] = instances['images'] - self.data['annotations'] = instances['annotations'] - self.data['categories'] = instances['categories'] - - # create index - self.createIndex() - print('DONE (t=%.2fs)' % (time.time()-tic)) - - def createIndex(self): - # create sets of mapping - # 1) Refs: {ref_id: ref} - # 2) Anns: {ann_id: ann} - # 3) Imgs: {image_id: image} - # 4) Cats: {category_id: category_name} - # 5) Sents: {sent_id: sent} - # 6) imgToRefs: {image_id: refs} - # 7) imgToAnns: {image_id: anns} - # 8) refToAnn: {ref_id: ann} - # 9) annToRef: {ann_id: ref} - # 10) catToRefs: {category_id: refs} - # 11) sentToRef: {sent_id: ref} - # 12) sentToTokens: {sent_id: tokens} - print('creating index...') - # fetch info from instances - Anns, Imgs, Cats, imgToAnns = {}, {}, {}, {} - for ann in self.data['annotations']: - Anns[ann['id']] = ann - imgToAnns[ann['image_id']] = imgToAnns.get(ann['image_id'], []) + [ann] - for img in self.data['images']: - Imgs[img['id']] = img - for cat in self.data['categories']: - Cats[cat['id']] = cat['name'] - - # fetch info from refs - Refs, imgToRefs, refToAnn, annToRef, catToRefs = {}, {}, {}, {}, {} - Sents, sentToRef, sentToTokens = {}, {}, {} - for ref in self.data['refs']: - # ids - ref_id = ref['ref_id'] - ann_id = ref['ann_id'] - category_id = ref['category_id'] - image_id = ref['image_id'] - - # add mapping related to ref - Refs[ref_id] = ref - imgToRefs[image_id] = imgToRefs.get(image_id, []) + [ref] - catToRefs[category_id] = catToRefs.get(category_id, []) + [ref] - refToAnn[ref_id] = Anns[ann_id] - annToRef[ann_id] = ref - - # add mapping of sent - for sent in ref['sentences']: - Sents[sent['sent_id']] = sent - sentToRef[sent['sent_id']] = ref - sentToTokens[sent['sent_id']] = sent['tokens'] - - # create class members - self.Refs = Refs - self.Anns = Anns - self.Imgs = Imgs - self.Cats = Cats - self.Sents = Sents - self.imgToRefs = imgToRefs - self.imgToAnns = imgToAnns - self.refToAnn = refToAnn - self.annToRef = annToRef - self.catToRefs = catToRefs - self.sentToRef = sentToRef - self.sentToTokens = sentToTokens - print('index created.') - - def getRefIds(self, image_ids=[], cat_ids=[], ref_ids=[], split=''): - image_ids = image_ids if type(image_ids) == list else [image_ids] - cat_ids = cat_ids if type(cat_ids) == list else [cat_ids] - ref_ids = ref_ids if type(ref_ids) == list else [ref_ids] - - if len(image_ids)==len(cat_ids)==len(ref_ids)==len(split)==0: - refs = self.data['refs'] - else: - if not len(image_ids) == 0: - refs = [self.imgToRefs[image_id] for image_id in image_ids] - else: - refs = self.data['refs'] - if not len(cat_ids) == 0: - refs = [ref for ref in refs if ref['category_id'] in cat_ids] - if not 
len(ref_ids) == 0: - refs = [ref for ref in refs if ref['ref_id'] in ref_ids] - if not len(split) == 0: - if split in ['testA', 'testB', 'testC']: - refs = [ref for ref in refs if split[-1] in ref['split']] # we also consider testAB, testBC, ... - elif split in ['testAB', 'testBC', 'testAC']: - refs = [ref for ref in refs if ref['split'] == split] # rarely used I guess... - elif split == 'test': - refs = [ref for ref in refs if 'test' in ref['split']] - elif split == 'train' or split == 'val': - refs = [ref for ref in refs if ref['split'] == split] - else: - print('No such split [%s]' % split) - sys.exit() - ref_ids = [ref['ref_id'] for ref in refs] - return ref_ids - - def getAnnIds(self, image_ids=[], cat_ids=[], ref_ids=[]): - image_ids = image_ids if type(image_ids) == list else [image_ids] - cat_ids = cat_ids if type(cat_ids) == list else [cat_ids] - ref_ids = ref_ids if type(ref_ids) == list else [ref_ids] - - if len(image_ids) == len(cat_ids) == len(ref_ids) == 0: - ann_ids = [ann['id'] for ann in self.data['annotations']] - else: - if not len(image_ids) == 0: - lists = [self.imgToAnns[image_id] for image_id in image_ids if image_id in self.imgToAnns] # list of [anns] - anns = list(itertools.chain.from_iterable(lists)) - else: - anns = self.data['annotations'] - if not len(cat_ids) == 0: - anns = [ann for ann in anns if ann['category_id'] in cat_ids] - ann_ids = [ann['id'] for ann in anns] - if not len(ref_ids) == 0: - ids = set(ann_ids).intersection(set([self.Refs[ref_id]['ann_id'] for ref_id in ref_ids])) - return ann_ids - - def getImgIds(self, ref_ids=[]): - ref_ids = ref_ids if type(ref_ids) == list else [ref_ids] - - if not len(ref_ids) == 0: - image_ids = list(set([self.Refs[ref_id]['image_id'] for ref_id in ref_ids])) - else: - image_ids = self.Imgs.keys() - return image_ids - - def getCatIds(self): - return self.Cats.keys() - - def loadRefs(self, ref_ids=[]): - if type(ref_ids) == list: - return [self.Refs[ref_id] for ref_id in ref_ids] - elif type(ref_ids) == int: - return [self.Refs[ref_ids]] - - def loadAnns(self, ann_ids=[]): - if type(ann_ids) == list: - return [self.Anns[ann_id] for ann_id in ann_ids] - elif type(ann_ids) == int or type(ann_ids) == unicode: - return [self.Anns[ann_ids]] - - def loadImgs(self, image_ids=[]): - if type(image_ids) == list: - return [self.Imgs[image_id] for image_id in image_ids] - elif type(image_ids) == int: - return [self.Imgs[image_ids]] - - def loadCats(self, cat_ids=[]): - if type(cat_ids) == list: - return [self.Cats[cat_id] for cat_id in cat_ids] - elif type(cat_ids) == int: - return [self.Cats[cat_ids]] - - def getRefBox(self, ref_id): - ref = self.Refs[ref_id] - ann = self.refToAnn[ref_id] - return ann['bbox'] # [x, y, w, h] - - def showRef(self, ref, seg_box='seg'): - ax = plt.gca() - # show image - image = self.Imgs[ref['image_id']] - I = io.imread(osp.join(self.IMAGE_DIR, image['file_name'])) - ax.imshow(I) - # show refer expression - for sid, sent in enumerate(ref['sentences']): - print('%s. 
%s' % (sid+1, sent['sent'])) - # show segmentations - if seg_box == 'seg': - ann_id = ref['ann_id'] - ann = self.Anns[ann_id] - polygons = [] - color = [] - c = 'none' - if type(ann['segmentation'][0]) == list: - # polygon used for refcoco* - for seg in ann['segmentation']: - poly = np.array(seg).reshape((len(seg)/2, 2)) - polygons.append(Polygon(poly, True, alpha=0.4)) - color.append(c) - p = PatchCollection(polygons, facecolors=color, edgecolors=(1,1,0,0), linewidths=3, alpha=1) - ax.add_collection(p) # thick yellow polygon - p = PatchCollection(polygons, facecolors=color, edgecolors=(1,0,0,0), linewidths=1, alpha=1) - ax.add_collection(p) # thin red polygon - else: - # mask used for refclef - rle = ann['segmentation'] - m = mask.decode(rle) - img = np.ones( (m.shape[0], m.shape[1], 3) ) - color_mask = np.array([2.0,166.0,101.0])/255 - for i in range(3): - img[:,:,i] = color_mask[i] - ax.imshow(np.dstack( (img, m*0.5) )) - # show bounding-box - elif seg_box == 'box': - ann_id = ref['ann_id'] - ann = self.Anns[ann_id] - bbox = self.getRefBox(ref['ref_id']) - box_plot = Rectangle((bbox[0], bbox[1]), bbox[2], bbox[3], fill=False, edgecolor='green', linewidth=3) - ax.add_patch(box_plot) - - def getMask(self, ref): - # return mask, area and mask-center - ann = self.refToAnn[ref['ref_id']] - image = self.Imgs[ref['image_id']] - if type(ann['segmentation'][0]) == list: # polygon - rle = mask.frPyObjects(ann['segmentation'], image['height'], image['width']) - else: - rle = ann['segmentation'] - m = mask.decode(rle) - m = np.sum(m, axis=2) # sometimes there are multiple binary map (corresponding to multiple segs) - m = m.astype(np.uint8) # convert to np.uint8 - # compute area - area = sum(mask.area(rle)) # should be close to ann['area'] - return {'mask': m, 'area': area} - # # position - # position_x = np.mean(np.where(m==1)[1]) # [1] means columns (matlab style) -> x (c style) - # position_y = np.mean(np.where(m==1)[0]) # [0] means rows (matlab style) -> y (c style) - # # mass position (if there were multiple regions, we use the largest one.) - # label_m = label(m, connectivity=m.ndim) - # regions = regionprops(label_m) - # if len(regions) > 0: - # largest_id = np.argmax(np.array([props.filled_area for props in regions])) - # largest_props = regions[largest_id] - # mass_y, mass_x = largest_props.centroid - # else: - # mass_x, mass_y = position_x, position_y - # # if centroid is not in mask, we find the closest point to it from mask - # if m[mass_y, mass_x] != 1: - # print 'Finding closes mask point ...' 
- # kernel = np.ones((10, 10),np.uint8) - # me = cv2.erode(m, kernel, iterations = 1) - # points = zip(np.where(me == 1)[0].tolist(), np.where(me == 1)[1].tolist()) # row, col style - # points = np.array(points) - # dist = np.sum((points - (mass_y, mass_x))**2, axis=1) - # id = np.argsort(dist)[0] - # mass_y, mass_x = points[id] - # # return - # return {'mask': m, 'area': area, 'position_x': position_x, 'position_y': position_y, 'mass_x': mass_x, 'mass_y': mass_y} - # # show image and mask - # I = io.imread(osp.join(self.IMAGE_DIR, image['file_name'])) - # plt.figure() - # plt.imshow(I) - # ax = plt.gca() - # img = np.ones( (m.shape[0], m.shape[1], 3) ) - # color_mask = np.array([2.0,166.0,101.0])/255 - # for i in range(3): - # img[:,:,i] = color_mask[i] - # ax.imshow(np.dstack( (img, m*0.5) )) - # plt.show() - - def showMask(self, ref): - M = self.getMask(ref) - msk = M['mask'] - ax = plt.gca() - ax.imshow(msk) - - -if __name__ == '__main__': - refer = REFER(dataset='refcocog', splitBy='google') - ref_ids = refer.getRefIds() - print(len(ref_ids)) - - print(len(refer.Imgs)) - print(len(refer.imgToRefs)) - - ref_ids = refer.getRefIds(split='train') - print('There are %s training referred objects.' % len(ref_ids)) - - for ref_id in ref_ids: - ref = refer.loadRefs(ref_id)[0] - if len(ref['sentences']) < 2: - continue - - pprint(ref) - print('The label is %s.' % refer.Cats[ref['category_id']]) - plt.figure() - refer.showRef(ref, seg_box='box') - plt.show() \ No newline at end of file diff --git a/spaces/Kevin676/AutoGPT/autogpt/utils.py b/spaces/Kevin676/AutoGPT/autogpt/utils.py deleted file mode 100644 index e93d5ac740097ee144d1809aea31c0f7fb242fa5..0000000000000000000000000000000000000000 --- a/spaces/Kevin676/AutoGPT/autogpt/utils.py +++ /dev/null @@ -1,77 +0,0 @@ -import os - -import requests -import yaml -from colorama import Fore -from git import Repo - - -def clean_input(prompt: str = ""): - try: - return input(prompt) - except KeyboardInterrupt: - print("You interrupted Auto-GPT") - print("Quitting...") - exit(0) - - -def validate_yaml_file(file: str): - try: - with open(file, encoding="utf-8") as fp: - yaml.load(fp.read(), Loader=yaml.FullLoader) - except FileNotFoundError: - return (False, f"The file {Fore.CYAN}`{file}`{Fore.RESET} wasn't found") - except yaml.YAMLError as e: - return ( - False, - f"There was an issue while trying to read with your AI Settings file: {e}", - ) - - return (True, f"Successfully validated {Fore.CYAN}`{file}`{Fore.RESET}!") - - -def readable_file_size(size, decimal_places=2): - """Converts the given size in bytes to a readable format. 
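    For example (illustrative), readable_file_size(1536) returns "1.50 KB".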
- Args: - size: Size in bytes - decimal_places (int): Number of decimal places to display - """ - for unit in ["B", "KB", "MB", "GB", "TB"]: - if size < 1024.0: - break - size /= 1024.0 - return f"{size:.{decimal_places}f} {unit}" - - -def get_bulletin_from_web() -> str: - try: - response = requests.get( - "https://raw.githubusercontent.com/Significant-Gravitas/Auto-GPT/master/BULLETIN.md" - ) - if response.status_code == 200: - return response.text - except: - return "" - - -def get_current_git_branch() -> str: - try: - repo = Repo(search_parent_directories=True) - branch = repo.active_branch - return branch.name - except: - return "" - - -def get_latest_bulletin() -> str: - exists = os.path.exists("CURRENT_BULLETIN.md") - current_bulletin = "" - if exists: - current_bulletin = open("CURRENT_BULLETIN.md", "r", encoding="utf-8").read() - new_bulletin = get_bulletin_from_web() - is_new_news = new_bulletin != current_bulletin - - if new_bulletin and is_new_news: - open("CURRENT_BULLETIN.md", "w", encoding="utf-8").write(new_bulletin) - return f" {Fore.RED}::UPDATED:: {Fore.CYAN}{new_bulletin}{Fore.RESET}" - return current_bulletin diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/layers/dropblock.py b/spaces/KyanChen/RSPrompter/mmdet/models/layers/dropblock.py deleted file mode 100644 index 7938199b761d637afdb1b2c62dbca01d1bf629eb..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/layers/dropblock.py +++ /dev/null @@ -1,86 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch -import torch.nn as nn -import torch.nn.functional as F - -from mmdet.registry import MODELS - -eps = 1e-6 - - -@MODELS.register_module() -class DropBlock(nn.Module): - """Randomly drop some regions of feature maps. - - Please refer to the method proposed in `DropBlock - `_ for details. - - Args: - drop_prob (float): The probability of dropping each block. - block_size (int): The size of dropped blocks. - warmup_iters (int): The drop probability will linearly increase - from `0` to `drop_prob` during the first `warmup_iters` iterations. - Default: 2000. - """ - - def __init__(self, drop_prob, block_size, warmup_iters=2000, **kwargs): - super(DropBlock, self).__init__() - assert block_size % 2 == 1 - assert 0 < drop_prob <= 1 - assert warmup_iters >= 0 - self.drop_prob = drop_prob - self.block_size = block_size - self.warmup_iters = warmup_iters - self.iter_cnt = 0 - - def forward(self, x): - """ - Args: - x (Tensor): Input feature map on which some areas will be randomly - dropped. - - Returns: - Tensor: The tensor after DropBlock layer. - """ - if not self.training: - return x - self.iter_cnt += 1 - N, C, H, W = list(x.shape) - gamma = self._compute_gamma((H, W)) - mask_shape = (N, C, H - self.block_size + 1, W - self.block_size + 1) - mask = torch.bernoulli(torch.full(mask_shape, gamma, device=x.device)) - - mask = F.pad(mask, [self.block_size // 2] * 4, value=0) - mask = F.max_pool2d( - input=mask, - stride=(1, 1), - kernel_size=(self.block_size, self.block_size), - padding=self.block_size // 2) - mask = 1 - mask - x = x * mask * mask.numel() / (eps + mask.sum()) - return x - - def _compute_gamma(self, feat_size): - """Compute the value of gamma according to paper. gamma is the - parameter of bernoulli distribution, which controls the number of - features to drop. - - gamma = (drop_prob * fm_area) / (drop_area * keep_area) - - Args: - feat_size (tuple[int, int]): The height and width of feature map. - - Returns: - float: The value of gamma. 
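        For example (illustrative): with drop_prob=0.1, block_size=3 and a 32x32
        feature map, gamma = 0.1 * 32 * 32 / (30 * 30) / 3**2, which is approximately
        0.0126 before the warmup factor is applied.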
- """ - gamma = (self.drop_prob * feat_size[0] * feat_size[1]) - gamma /= ((feat_size[0] - self.block_size + 1) * - (feat_size[1] - self.block_size + 1)) - gamma /= (self.block_size**2) - factor = (1.0 if self.iter_cnt > self.warmup_iters else self.iter_cnt / - self.warmup_iters) - return gamma * factor - - def extra_repr(self): - return (f'drop_prob={self.drop_prob}, block_size={self.block_size}, ' - f'warmup_iters={self.warmup_iters}') diff --git a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/test_mixins.py b/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/test_mixins.py deleted file mode 100644 index 940490454d9cf1fde4d69c1f890c173b92d522a1..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/models/roi_heads/test_mixins.py +++ /dev/null @@ -1,171 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -# TODO: delete this file after refactor -import sys - -import torch - -from mmdet.models.layers import multiclass_nms -from mmdet.models.test_time_augs import merge_aug_bboxes, merge_aug_masks -from mmdet.structures.bbox import bbox2roi, bbox_mapping - -if sys.version_info >= (3, 7): - from mmdet.utils.contextmanagers import completed - - -class BBoxTestMixin: - - if sys.version_info >= (3, 7): - # TODO: Currently not supported - async def async_test_bboxes(self, - x, - img_metas, - proposals, - rcnn_test_cfg, - rescale=False, - **kwargs): - """Asynchronized test for box head without augmentation.""" - rois = bbox2roi(proposals) - roi_feats = self.bbox_roi_extractor( - x[:len(self.bbox_roi_extractor.featmap_strides)], rois) - if self.with_shared_head: - roi_feats = self.shared_head(roi_feats) - sleep_interval = rcnn_test_cfg.get('async_sleep_interval', 0.017) - - async with completed( - __name__, 'bbox_head_forward', - sleep_interval=sleep_interval): - cls_score, bbox_pred = self.bbox_head(roi_feats) - - img_shape = img_metas[0]['img_shape'] - scale_factor = img_metas[0]['scale_factor'] - det_bboxes, det_labels = self.bbox_head.get_bboxes( - rois, - cls_score, - bbox_pred, - img_shape, - scale_factor, - rescale=rescale, - cfg=rcnn_test_cfg) - return det_bboxes, det_labels - - # TODO: Currently not supported - def aug_test_bboxes(self, feats, img_metas, rpn_results_list, - rcnn_test_cfg): - """Test det bboxes with test time augmentation.""" - aug_bboxes = [] - aug_scores = [] - for x, img_meta in zip(feats, img_metas): - # only one image in the batch - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - # TODO more flexible - proposals = bbox_mapping(rpn_results_list[0][:, :4], img_shape, - scale_factor, flip, flip_direction) - rois = bbox2roi([proposals]) - bbox_results = self.bbox_forward(x, rois) - bboxes, scores = self.bbox_head.get_bboxes( - rois, - bbox_results['cls_score'], - bbox_results['bbox_pred'], - img_shape, - scale_factor, - rescale=False, - cfg=None) - aug_bboxes.append(bboxes) - aug_scores.append(scores) - # after merging, bboxes will be rescaled to the original image size - merged_bboxes, merged_scores = merge_aug_bboxes( - aug_bboxes, aug_scores, img_metas, rcnn_test_cfg) - if merged_bboxes.shape[0] == 0: - # There is no proposal in the single image - det_bboxes = merged_bboxes.new_zeros(0, 5) - det_labels = merged_bboxes.new_zeros((0, ), dtype=torch.long) - else: - det_bboxes, det_labels = multiclass_nms(merged_bboxes, - merged_scores, - rcnn_test_cfg.score_thr, - rcnn_test_cfg.nms, - rcnn_test_cfg.max_per_img) - return 
det_bboxes, det_labels - - -class MaskTestMixin: - - if sys.version_info >= (3, 7): - # TODO: Currently not supported - async def async_test_mask(self, - x, - img_metas, - det_bboxes, - det_labels, - rescale=False, - mask_test_cfg=None): - """Asynchronized test for mask head without augmentation.""" - # image shape of the first image in the batch (only one) - ori_shape = img_metas[0]['ori_shape'] - scale_factor = img_metas[0]['scale_factor'] - if det_bboxes.shape[0] == 0: - segm_result = [[] for _ in range(self.mask_head.num_classes)] - else: - if rescale and not isinstance(scale_factor, - (float, torch.Tensor)): - scale_factor = det_bboxes.new_tensor(scale_factor) - _bboxes = ( - det_bboxes[:, :4] * - scale_factor if rescale else det_bboxes) - mask_rois = bbox2roi([_bboxes]) - mask_feats = self.mask_roi_extractor( - x[:len(self.mask_roi_extractor.featmap_strides)], - mask_rois) - - if self.with_shared_head: - mask_feats = self.shared_head(mask_feats) - if mask_test_cfg and \ - mask_test_cfg.get('async_sleep_interval'): - sleep_interval = mask_test_cfg['async_sleep_interval'] - else: - sleep_interval = 0.035 - async with completed( - __name__, - 'mask_head_forward', - sleep_interval=sleep_interval): - mask_pred = self.mask_head(mask_feats) - segm_result = self.mask_head.get_results( - mask_pred, _bboxes, det_labels, self.test_cfg, ori_shape, - scale_factor, rescale) - return segm_result - - # TODO: Currently not supported - def aug_test_mask(self, feats, img_metas, det_bboxes, det_labels): - """Test for mask head with test time augmentation.""" - if det_bboxes.shape[0] == 0: - segm_result = [[] for _ in range(self.mask_head.num_classes)] - else: - aug_masks = [] - for x, img_meta in zip(feats, img_metas): - img_shape = img_meta[0]['img_shape'] - scale_factor = img_meta[0]['scale_factor'] - flip = img_meta[0]['flip'] - flip_direction = img_meta[0]['flip_direction'] - _bboxes = bbox_mapping(det_bboxes[:, :4], img_shape, - scale_factor, flip, flip_direction) - mask_rois = bbox2roi([_bboxes]) - mask_results = self._mask_forward(x, mask_rois) - # convert to numpy array to save memory - aug_masks.append( - mask_results['mask_pred'].sigmoid().cpu().numpy()) - merged_masks = merge_aug_masks(aug_masks, img_metas, self.test_cfg) - - ori_shape = img_metas[0][0]['ori_shape'] - scale_factor = det_bboxes.new_ones(4) - segm_result = self.mask_head.get_results( - merged_masks, - det_bboxes, - det_labels, - self.test_cfg, - ori_shape, - scale_factor=scale_factor, - rescale=False) - return segm_result diff --git a/spaces/KyanChen/RSPrompter/mmdet/utils/profiling.py b/spaces/KyanChen/RSPrompter/mmdet/utils/profiling.py deleted file mode 100644 index 2f53f456c72db57bfa69a8d022c92d153580209e..0000000000000000000000000000000000000000 --- a/spaces/KyanChen/RSPrompter/mmdet/utils/profiling.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import contextlib -import sys -import time - -import torch - -if sys.version_info >= (3, 7): - - @contextlib.contextmanager - def profile_time(trace_name, - name, - enabled=True, - stream=None, - end_stream=None): - """Print time spent by CPU and GPU. - - Useful as a temporary context manager to find sweet spots of code - suitable for async implementation. 
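        Example (illustrative; `model` and `inputs` are placeholders):
            with profile_time('detector', 'forward', enabled=True):
                outputs = model(inputs)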
- """ - if (not enabled) or not torch.cuda.is_available(): - yield - return - stream = stream if stream else torch.cuda.current_stream() - end_stream = end_stream if end_stream else stream - start = torch.cuda.Event(enable_timing=True) - end = torch.cuda.Event(enable_timing=True) - stream.record_event(start) - try: - cpu_start = time.monotonic() - yield - finally: - cpu_end = time.monotonic() - end_stream.record_event(end) - end.synchronize() - cpu_time = (cpu_end - cpu_start) * 1000 - gpu_time = start.elapsed_time(end) - msg = f'{trace_name} {name} cpu_time {cpu_time:.2f} ms ' - msg += f'gpu_time {gpu_time:.2f} ms stream {stream}' - print(msg, end_stream) diff --git a/spaces/LZRi/LZR-Bert-VITS2/text/chinese.py b/spaces/LZRi/LZR-Bert-VITS2/text/chinese.py deleted file mode 100644 index 276753880b73de2e8889dcb2101cd98c09e0710b..0000000000000000000000000000000000000000 --- a/spaces/LZRi/LZR-Bert-VITS2/text/chinese.py +++ /dev/null @@ -1,193 +0,0 @@ -import os -import re - -import cn2an -from pypinyin import lazy_pinyin, Style - -from text import symbols -from text.symbols import punctuation -from text.tone_sandhi import ToneSandhi - -current_file_path = os.path.dirname(__file__) -pinyin_to_symbol_map = {line.split("\t")[0]: line.strip().split("\t")[1] for line in - open(os.path.join(current_file_path, 'opencpop-strict.txt')).readlines()} - -import jieba.posseg as psg - - -rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - '...': '…', - '$': '.', - '“': "'", - '”': "'", - '‘': "'", - '’': "'", - '(': "'", - ')': "'", - '(': "'", - ')': "'", - '《': "'", - '》': "'", - '【': "'", - '】': "'", - '[': "'", - ']': "'", - '—': "-", - '~': "-", - '~': "-", - '「': "'", - '」': "'", - -} - -tone_modifier = ToneSandhi() - -def replace_punctuation(text): - text = text.replace("嗯", "恩").replace("呣","母") - pattern = re.compile('|'.join(re.escape(p) for p in rep_map.keys())) - - replaced_text = pattern.sub(lambda x: rep_map[x.group()], text) - - replaced_text = re.sub(r'[^\u4e00-\u9fa5'+"".join(punctuation)+r']+', '', replaced_text) - - return replaced_text - -def g2p(text): - pattern = r'(?<=[{0}])\s*'.format(''.join(punctuation)) - sentences = [i for i in re.split(pattern, text) if i.strip()!=''] - phones, tones, word2ph = _g2p(sentences) - assert sum(word2ph) == len(phones) - assert len(word2ph) == len(text) #Sometimes it will crash,you can add a try-catch. 
- phones = ['_'] + phones + ["_"] - tones = [0] + tones + [0] - word2ph = [1] + word2ph + [1] - return phones, tones, word2ph - - -def _get_initials_finals(word): - initials = [] - finals = [] - orig_initials = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.INITIALS) - orig_finals = lazy_pinyin( - word, neutral_tone_with_five=True, style=Style.FINALS_TONE3) - for c, v in zip(orig_initials, orig_finals): - initials.append(c) - finals.append(v) - return initials, finals - - -def _g2p(segments): - phones_list = [] - tones_list = [] - word2ph = [] - for seg in segments: - pinyins = [] - # Replace all English words in the sentence - seg = re.sub('[a-zA-Z]+', '', seg) - seg_cut = psg.lcut(seg) - initials = [] - finals = [] - seg_cut = tone_modifier.pre_merge_for_modify(seg_cut) - for word, pos in seg_cut: - if pos == 'eng': - continue - sub_initials, sub_finals = _get_initials_finals(word) - sub_finals = tone_modifier.modified_tone(word, pos, - sub_finals) - initials.append(sub_initials) - finals.append(sub_finals) - - # assert len(sub_initials) == len(sub_finals) == len(word) - initials = sum(initials, []) - finals = sum(finals, []) - # - for c, v in zip(initials, finals): - raw_pinyin = c+v - # NOTE: post process for pypinyin outputs - # we discriminate i, ii and iii - if c == v: - assert c in punctuation - phone = [c] - tone = '0' - word2ph.append(1) - else: - v_without_tone = v[:-1] - tone = v[-1] - - pinyin = c+v_without_tone - assert tone in '12345' - - if c: - # 多音节 - v_rep_map = { - "uei": 'ui', - 'iou': 'iu', - 'uen': 'un', - } - if v_without_tone in v_rep_map.keys(): - pinyin = c+v_rep_map[v_without_tone] - else: - # 单音节 - pinyin_rep_map = { - 'ing': 'ying', - 'i': 'yi', - 'in': 'yin', - 'u': 'wu', - } - if pinyin in pinyin_rep_map.keys(): - pinyin = pinyin_rep_map[pinyin] - else: - single_rep_map = { - 'v': 'yu', - 'e': 'e', - 'i': 'y', - 'u': 'w', - } - if pinyin[0] in single_rep_map.keys(): - pinyin = single_rep_map[pinyin[0]]+pinyin[1:] - - assert pinyin in pinyin_to_symbol_map.keys(), (pinyin, seg, raw_pinyin) - phone = pinyin_to_symbol_map[pinyin].split(' ') - word2ph.append(len(phone)) - - phones_list += phone - tones_list += [int(tone)] * len(phone) - return phones_list, tones_list, word2ph - - - -def text_normalize(text): - numbers = re.findall(r'\d+(?:\.?\d+)?', text) - for number in numbers: - text = text.replace(number, cn2an.an2cn(number), 1) - text = replace_punctuation(text) - return text - -def get_bert_feature(text, word2ph): - from text import chinese_bert - return chinese_bert.get_bert_feature(text, word2ph) - -if __name__ == '__main__': - from text.chinese_bert import get_bert_feature - text = "啊!但是《原神》是由,米哈\游自主, [研发]的一款全.新开放世界.冒险游戏" - text = text_normalize(text) - print(text) - phones, tones, word2ph = g2p(text) - bert = get_bert_feature(text, word2ph) - - print(phones, tones, word2ph, bert.shape) - - -# # 示例用法 -# text = "这是一个示例文本:,你好!这是一个测试...." 
-# print(g2p_paddle(text)) # 输出: 这是一个示例文本你好这是一个测试 diff --git a/spaces/Lbin123/Lbingo/tests/parse.ts b/spaces/Lbin123/Lbingo/tests/parse.ts deleted file mode 100644 index 92940fe6315f1d7cb2b267ba5e5a7e26460a1de3..0000000000000000000000000000000000000000 --- a/spaces/Lbin123/Lbingo/tests/parse.ts +++ /dev/null @@ -1,13 +0,0 @@ -import { promises as fs } from 'fs' -import { join } from 'path' -import { parseHeadersFromCurl } from '@/lib/utils' - -(async () => { - const content = await fs.readFile(join(__dirname, './fixtures/curl.txt'), 'utf-8') - const headers = parseHeadersFromCurl(content) - console.log(headers) - - const cmdContent = await fs.readFile(join(__dirname, './fixtures/cmd.txt'), 'utf-8') - const cmdHeaders = parseHeadersFromCurl(cmdContent) - console.log(cmdHeaders) -})() diff --git a/spaces/LeeHotmen/webui-docker/Dockerfile b/spaces/LeeHotmen/webui-docker/Dockerfile deleted file mode 100644 index 9392835533090607c53b836d20e9b933fbc6a206..0000000000000000000000000000000000000000 --- a/spaces/LeeHotmen/webui-docker/Dockerfile +++ /dev/null @@ -1,46 +0,0 @@ -# Dockerfile Private A10G - -# https://gitlab.com/nvidia/container-images/cuda/-/blob/master/dist/11.7.1/ubuntu2204/devel/cudnn8/Dockerfile -FROM nvidia/cuda:11.7.1-cudnn8-devel-ubuntu22.04 -ENV DEBIAN_FRONTEND noninteractive - -WORKDIR /content - -RUN apt-get update -y && apt-get upgrade -y && apt-get install -y libgl1 libglib2.0-0 wget git git-lfs python3-pip python-is-python3 && pip3 install --upgrade pip -RUN pip install https://github.com/camenduru/stable-diffusion-webui-colab/releases/download/0.0.16/xformers-0.0.16+814314d.d20230119.A10G-cp310-cp310-linux_x86_64.whl -RUN pip install --pre triton -RUN pip install numexpr - -RUN git clone -b v1.6 https://github.com/camenduru/stable-diffusion-webui -RUN sed -i -e '''/prepare_environment()/a\ os.system\(f\"""sed -i -e ''\"s/dict()))/dict())).cuda()/g\"'' /content/stable-diffusion-webui/repositories/stable-diffusion-stability-ai/ldm/util.py""")''' /content/stable-diffusion-webui/launch.py -RUN sed -i -e 's/ start()/ #start()/g' /content/stable-diffusion-webui/launch.py -RUN cd stable-diffusion-webui && python launch.py --skip-torch-cuda-test - -# ----------------------------Delete this block if you don't want to see the extra header---------------------------- -ADD https://github.com/camenduru/webui-docker/raw/main/env_patch.py /content/env_patch.py -RUN sed -i -e '/import image_from_url_text/r /content/env_patch.py' /content/stable-diffusion-webui/modules/ui.py -ADD https://github.com/camenduru/webui-docker/raw/main/header_patch.py /content/header_patch.py -RUN sed -i -e '/demo:/r /content/header_patch.py' /content/stable-diffusion-webui/modules/ui.py -# ------------------------------------------------------------------------------------------------------------------- - -ADD https://raw.githubusercontent.com/camenduru/stable-diffusion-webui-scripts/main/run_n_times.py /content/stable-diffusion-webui/scripts/run_n_times.py -RUN git clone https://github.com/camenduru/deforum-for-automatic1111-webui /content/stable-diffusion-webui/extensions/deforum-for-automatic1111-webui -RUN git clone https://github.com/yfszzx/stable-diffusion-webui-images-browser /content/stable-diffusion-webui/extensions/stable-diffusion-webui-images-browser -RUN git clone https://github.com/camenduru/stable-diffusion-webui-huggingface /content/stable-diffusion-webui/extensions/stable-diffusion-webui-huggingface -RUN git clone https://github.com/camenduru/sd-civitai-browser 
/content/stable-diffusion-webui/extensions/sd-civitai-browser -RUN git clone https://github.com/camenduru/sd-webui-additional-networks /content/stable-diffusion-webui/extensions/sd-webui-additional-networks - -COPY config.json /content/config.json -COPY ui-config.json /content/ui-config.json - -ADD https://huggingface.co/andite/anything-v4.0/resolve/main/anything-v4.5-pruned.ckpt /content/stable-diffusion-webui/models/Stable-diffusion/anything-v4.5-pruned.ckpt -ADD https://huggingface.co/andite/anything-v4.0/resolve/main/anything-v4.0.vae.pt /content/stable-diffusion-webui/models/Stable-diffusion/anything-v4.5-pruned.vae.pt - -RUN adduser --disabled-password --gecos '' user -RUN chown -R user:user /content -RUN chmod -R 777 /content -USER user - -EXPOSE 7860 - -CMD cd /content/stable-diffusion-webui && python webui.py --xformers --listen --disable-console-progressbars --ui-config-file /content/ui-config.json --ui-settings-file /content/config.json \ No newline at end of file diff --git a/spaces/Lianjd/stock_dashboard/backtrader/functions.py b/spaces/Lianjd/stock_dashboard/backtrader/functions.py deleted file mode 100644 index cd3884999da6651f4c1523c15f04b3d94650e561..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/functions.py +++ /dev/null @@ -1,288 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -import functools -import math - -from .linebuffer import LineActions -from .utils.py3 import cmp, range - - -# Generate a List equivalent which uses "is" for contains -class List(list): - def __contains__(self, other): - return any(x.__hash__() == other.__hash__() for x in self) - - -class Logic(LineActions): - def __init__(self, *args): - super(Logic, self).__init__() - self.args = [self.arrayize(arg) for arg in args] - - -class DivByZero(Logic): - '''This operation is a Lines object and fills it values by executing a - division on the numerator / denominator arguments and avoiding a division - by zero exception by checking the denominator - - Params: - - a: numerator (numeric or iterable object ... mostly a Lines object) - - b: denominator (numeric or iterable object ... 
mostly a Lines object) - - zero (def: 0.0): value to apply if division by zero would be raised - - ''' - def __init__(self, a, b, zero=0.0): - super(DivByZero, self).__init__(a, b) - self.a = a - self.b = b - self.zero = zero - - def next(self): - b = self.b[0] - self[0] = self.a[0] / b if b else self.zero - - def once(self, start, end): - # cache python dictionary lookups - dst = self.array - srca = self.a.array - srcb = self.b.array - zero = self.zero - - for i in range(start, end): - b = srcb[i] - dst[i] = srca[i] / b if b else zero - - -class DivZeroByZero(Logic): - '''This operation is a Lines object and fills it values by executing a - division on the numerator / denominator arguments and avoiding a division - by zero exception or an indetermination by checking the - denominator/numerator pair - - Params: - - a: numerator (numeric or iterable object ... mostly a Lines object) - - b: denominator (numeric or iterable object ... mostly a Lines object) - - single (def: +inf): value to apply if division is x / 0 - - dual (def: 0.0): value to apply if division is 0 / 0 - ''' - def __init__(self, a, b, single=float('inf'), dual=0.0): - super(DivZeroByZero, self).__init__(a, b) - self.a = a - self.b = b - self.single = single - self.dual = dual - - def next(self): - b = self.b[0] - a = self.a[0] - if b == 0.0: - self[0] = self.dual if a == 0.0 else self.single - else: - self[0] = self.a[0] / b - - def once(self, start, end): - # cache python dictionary lookups - dst = self.array - srca = self.a.array - srcb = self.b.array - single = self.single - dual = self.dual - - for i in range(start, end): - b = srcb[i] - a = srca[i] - if b == 0.0: - dst[i] = dual if a == 0.0 else single - else: - dst[i] = a / b - - -class Cmp(Logic): - def __init__(self, a, b): - super(Cmp, self).__init__(a, b) - self.a = self.args[0] - self.b = self.args[1] - - def next(self): - self[0] = cmp(self.a[0], self.b[0]) - - def once(self, start, end): - # cache python dictionary lookups - dst = self.array - srca = self.a.array - srcb = self.b.array - - for i in range(start, end): - dst[i] = cmp(srca[i], srcb[i]) - - -class CmpEx(Logic): - def __init__(self, a, b, r1, r2, r3): - super(CmpEx, self).__init__(a, b, r1, r2, r3) - self.a = self.args[0] - self.b = self.args[1] - self.r1 = self.args[2] - self.r2 = self.args[3] - self.r3 = self.args[4] - - def next(self): - self[0] = cmp(self.a[0], self.b[0]) - - def once(self, start, end): - # cache python dictionary lookups - dst = self.array - srca = self.a.array - srcb = self.b.array - r1 = self.r1.array - r2 = self.r2.array - r3 = self.r3.array - - for i in range(start, end): - ai = srca[i] - bi = srcb[i] - - if ai < bi: - dst[i] = r1[i] - elif ai > bi: - dst[i] = r3[i] - else: - dst[i] = r2[i] - - -class If(Logic): - def __init__(self, cond, a, b): - super(If, self).__init__(a, b) - self.a = self.args[0] - self.b = self.args[1] - self.cond = self.arrayize(cond) - - def next(self): - self[0] = self.a[0] if self.cond[0] else self.b[0] - - def once(self, start, end): - # cache python dictionary lookups - dst = self.array - srca = self.a.array - srcb = self.b.array - cond = self.cond.array - - for i in range(start, end): - dst[i] = srca[i] if cond[i] else srcb[i] - - -class MultiLogic(Logic): - def next(self): - self[0] = self.flogic([arg[0] for arg in self.args]) - - def once(self, start, end): - # cache python dictionary lookups - dst = self.array - arrays = [arg.array for arg in self.args] - flogic = self.flogic - - for i in range(start, end): - dst[i] = flogic([arr[i] for arr 
in arrays]) - -class SingleLogic(Logic): - def next(self): - self[0] = self.flogic(self.args[0]) - - def once(self, start, end): - # cache python dictionary lookups - dst = self.array - flogic = self.flogic - - for i in range(start, end): - dst[i] = flogic(self.args[0].array[i]) - - -class MultiLogicReduce(MultiLogic): - def __init__(self, *args, **kwargs): - super(MultiLogicReduce, self).__init__(*args) - if 'initializer' not in kwargs: - self.flogic = functools.partial(functools.reduce, self.flogic) - else: - self.flogic = functools.partial(functools.reduce, self.flogic, - initializer=kwargs['initializer']) - - -class Reduce(MultiLogicReduce): - def __init__(self, flogic, *args, **kwargs): - self.flogic = flogic - super(Reduce, self).__init__(*args, **kwargs) - - - - -# The _xxxlogic functions are defined at module scope to make them -# pickable and therefore compatible with multiprocessing -def _andlogic(x, y): - return bool(x and y) - - -class And(MultiLogicReduce): - flogic = staticmethod(_andlogic) - - -def _orlogic(x, y): - return bool(x or y) - - -class Or(MultiLogicReduce): - flogic = staticmethod(_orlogic) - - -class Max(MultiLogic): - flogic = max - - -class Min(MultiLogic): - flogic = min - - -class Sum(MultiLogic): - flogic = math.fsum - - -class Any(MultiLogic): - flogic = any - - -class All(MultiLogic): - flogic = all - - -class Log(SingleLogic): - flogic = math.log10 - - -class Ceiling(SingleLogic): - flogic = math.ceil - - -class Floor(SingleLogic): - flogic = math.floor - - -class Abs(SingleLogic): - flogic = math.fabs \ No newline at end of file diff --git a/spaces/Lianjd/stock_dashboard/backtrader/strategies/__init__.py b/spaces/Lianjd/stock_dashboard/backtrader/strategies/__init__.py deleted file mode 100644 index ec7c205ad7afc53aa464f72144d96a9e0e89f667..0000000000000000000000000000000000000000 --- a/spaces/Lianjd/stock_dashboard/backtrader/strategies/__init__.py +++ /dev/null @@ -1,24 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8; py-indent-offset:4 -*- -############################################################################### -# -# Copyright (C) 2015-2020 Daniel Rodriguez -# -# This program is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# This program is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with this program. If not, see . -# -############################################################################### -from __future__ import (absolute_import, division, print_function, - unicode_literals) - -from .sma_crossover import * diff --git a/spaces/Liu-LAB/GPT-academic/request_llm/README.md b/spaces/Liu-LAB/GPT-academic/request_llm/README.md deleted file mode 100644 index 545bc1ffba8b79a49d994cfedcc2a787475181b2..0000000000000000000000000000000000000000 --- a/spaces/Liu-LAB/GPT-academic/request_llm/README.md +++ /dev/null @@ -1,79 +0,0 @@ -# 如何使用其他大语言模型 - -## ChatGLM - -- 安装依赖 `pip install -r request_llm/requirements_chatglm.txt` -- 修改配置,在config.py中将LLM_MODEL的值改为"chatglm" - -``` sh -LLM_MODEL = "chatglm" -``` -- 运行! 
-``` sh -`python main.py` -``` - -## Claude-Stack - -- 请参考此教程获取 https://zhuanlan.zhihu.com/p/627485689 - - 1、SLACK_CLAUDE_BOT_ID - - 2、SLACK_CLAUDE_USER_TOKEN - -- 把token加入config.py - -## Newbing - -- 使用cookie editor获取cookie(json) -- 把cookie(json)加入config.py (NEWBING_COOKIES) - -## Moss -- 使用docker-compose - -## RWKV -- 使用docker-compose - -## LLAMA -- 使用docker-compose - -## 盘古 -- 使用docker-compose - - ---- -## Text-Generation-UI (TGUI,调试中,暂不可用) - -### 1. 部署TGUI -``` sh -# 1 下载模型 -git clone https://github.com/oobabooga/text-generation-webui.git -# 2 这个仓库的最新代码有问题,回滚到几周之前 -git reset --hard fcda3f87767e642d1c0411776e549e1d3894843d -# 3 切换路径 -cd text-generation-webui -# 4 安装text-generation的额外依赖 -pip install accelerate bitsandbytes flexgen gradio llamacpp markdown numpy peft requests rwkv safetensors sentencepiece tqdm datasets git+https://github.com/huggingface/transformers -# 5 下载模型 -python download-model.py facebook/galactica-1.3b -# 其他可选如 facebook/opt-1.3b -# facebook/galactica-1.3b -# facebook/galactica-6.7b -# facebook/galactica-120b -# facebook/pygmalion-1.3b 等 -# 详情见 https://github.com/oobabooga/text-generation-webui - -# 6 启动text-generation -python server.py --cpu --listen --listen-port 7865 --model facebook_galactica-1.3b -``` - -### 2. 修改config.py - -``` sh -# LLM_MODEL格式: tgui:[模型]@[ws地址]:[ws端口] , 端口要和上面给定的端口一致 -LLM_MODEL = "tgui:galactica-1.3b@localhost:7860" -``` - -### 3. 运行! -``` sh -cd chatgpt-academic -python main.py -``` diff --git a/spaces/Mellow-ai/PhotoAI_Mellow/ldm/models/diffusion/__init__.py b/spaces/Mellow-ai/PhotoAI_Mellow/ldm/models/diffusion/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/sar/sar_resnet31_parallel-decoder_5e_toy.py b/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/sar/sar_resnet31_parallel-decoder_5e_toy.py deleted file mode 100644 index 351f079d1cb42542a334ed8e574643286698795b..0000000000000000000000000000000000000000 --- a/spaces/Mountchicken/MAERec-Gradio/configs/textrecog/sar/sar_resnet31_parallel-decoder_5e_toy.py +++ /dev/null @@ -1,36 +0,0 @@ -_base_ = [ - '../_base_/datasets/toy_data.py', - '../_base_/default_runtime.py', - '../_base_/schedules/schedule_adam_step_5e.py', - '_base_sar_resnet31_parallel-decoder.py', -] - -# dataset settings -train_list = [_base_.toy_rec_train] -test_list = [_base_.toy_rec_test] -default_hooks = dict(logger=dict(type='LoggerHook', interval=1)) - -train_dataloader = dict( - batch_size=1, - num_workers=4, - persistent_workers=True, - sampler=dict(type='DefaultSampler', shuffle=True), - dataset=dict( - type='ConcatDataset', - datasets=train_list, - pipeline=_base_.train_pipeline)) - -val_dataloader = dict( - batch_size=1, - num_workers=4, - persistent_workers=True, - drop_last=False, - sampler=dict(type='DefaultSampler', shuffle=False), - dataset=dict( - type='ConcatDataset', - datasets=test_list, - pipeline=_base_.test_pipeline)) -test_dataloader = val_dataloader - -val_evaluator = dict(dataset_prefixes=['Toy']) -test_evaluator = val_evaluator diff --git a/spaces/NATSpeech/DiffSpeech/mfa_usr/install_mfa.sh b/spaces/NATSpeech/DiffSpeech/mfa_usr/install_mfa.sh deleted file mode 100644 index c694cf307b60cd96c254bc2089f0745d9dd602c2..0000000000000000000000000000000000000000 --- a/spaces/NATSpeech/DiffSpeech/mfa_usr/install_mfa.sh +++ /dev/null @@ -1,6 +0,0 @@ -#!/bin/bash -set -e -pip uninstall -y typing -pip install --ignore-requires-python 
git+https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner.git@v2.0.0b3 -mfa thirdparty download -sudo apt install -y libopenblas-base libsox-fmt-mp3 libfst8 libfst-tools \ No newline at end of file diff --git a/spaces/NN520/AI/src/components/ui/codeblock.tsx b/spaces/NN520/AI/src/components/ui/codeblock.tsx deleted file mode 100644 index aabda4e3b59f4e36b6ab79feb19d8d18b70e881b..0000000000000000000000000000000000000000 --- a/spaces/NN520/AI/src/components/ui/codeblock.tsx +++ /dev/null @@ -1,142 +0,0 @@ -'use client' - -import { FC, memo } from 'react' -import { Prism as SyntaxHighlighter } from 'react-syntax-highlighter' -import { coldarkDark } from 'react-syntax-highlighter/dist/cjs/styles/prism' - -import { useCopyToClipboard } from '@/lib/hooks/use-copy-to-clipboard' -import { IconCheck, IconCopy, IconDownload } from '@/components/ui/icons' -import { Button } from '@/components/ui/button' - -interface Props { - language: string - value: string -} - -interface languageMap { - [key: string]: string | undefined -} - -export const programmingLanguages: languageMap = { - javascript: '.js', - python: '.py', - java: '.java', - c: '.c', - cpp: '.cpp', - 'c++': '.cpp', - 'c#': '.cs', - ruby: '.rb', - php: '.php', - swift: '.swift', - 'objective-c': '.m', - kotlin: '.kt', - typescript: '.ts', - go: '.go', - perl: '.pl', - rust: '.rs', - scala: '.scala', - haskell: '.hs', - lua: '.lua', - shell: '.sh', - sql: '.sql', - html: '.html', - css: '.css' - // add more file extensions here, make sure the key is same as language prop in CodeBlock.tsx component -} - -export const generateRandomString = (length: number, lowercase = false) => { - const chars = 'ABCDEFGHJKLMNPQRSTUVWXY3456789' // excluding similar looking characters like Z, 2, I, 1, O, 0 - let result = '' - for (let i = 0; i < length; i++) { - result += chars.charAt(Math.floor(Math.random() * chars.length)) - } - return lowercase ? result.toLowerCase() : result -} - -const CodeBlock: FC = memo(({ language, value }) => { - const { isCopied, copyToClipboard } = useCopyToClipboard({ timeout: 2000 }) - - const downloadAsFile = () => { - if (typeof window === 'undefined') { - return - } - const fileExtension = programmingLanguages[language] || '.file' - const suggestedFileName = `file-${generateRandomString( - 3, - true - )}${fileExtension}` - const fileName = window.prompt('Enter file name' || '', suggestedFileName) - - if (!fileName) { - // User pressed cancel on prompt. - return - } - - const blob = new Blob([value], { type: 'text/plain' }) - const url = URL.createObjectURL(blob) - const link = document.createElement('a') - link.download = fileName - link.href = url - link.style.display = 'none' - document.body.appendChild(link) - link.click() - document.body.removeChild(link) - URL.revokeObjectURL(url) - } - - const onCopy = () => { - if (isCopied) return - copyToClipboard(value) - } - - return ( -
-    <div className="relative w-full font-sans codeblock bg-zinc-950">
-      <div className="flex items-center justify-between w-full px-6 py-2 pr-4 bg-zinc-800 text-zinc-100">
-        <span className="text-xs lowercase">{language}</span>
-        <div className="flex items-center space-x-1">
-          <Button variant="ghost" size="icon" onClick={downloadAsFile}>
-            <IconDownload />
-            <span className="sr-only">Download</span>
-          </Button>
-          <Button variant="ghost" size="icon" onClick={onCopy}>
-            {isCopied ? <IconCheck /> : <IconCopy />}
-            <span className="sr-only">Copy code</span>
-          </Button>
-        </div>
-      </div>
-      <SyntaxHighlighter
-        language={language}
-        style={coldarkDark}
-        PreTag="div"
-        showLineNumbers
-        customStyle={{ margin: 0, width: '100%', background: 'transparent', padding: '1.5rem 1rem' }}
-        codeTagProps={{ style: { fontSize: '0.9rem', fontFamily: 'var(--font-mono)' } }}
-      >
-        {value}
-      </SyntaxHighlighter>
-    </div>
- ) -}) -CodeBlock.displayName = 'CodeBlock' - -export { CodeBlock } diff --git a/spaces/Nahidabyer/img-to-music/utils.py b/spaces/Nahidabyer/img-to-music/utils.py deleted file mode 100644 index e4d5448735f516afa03c8a99be64fa5a2915706c..0000000000000000000000000000000000000000 --- a/spaces/Nahidabyer/img-to-music/utils.py +++ /dev/null @@ -1,36 +0,0 @@ -import json -import numpy as np -import httpx -import os - -from constants import MUBERT_TAGS, MUBERT_MODE, MUBERT_LICENSE - -def get_mubert_tags_embeddings(w2v_model): - return w2v_model.encode(MUBERT_TAGS) - - - - - -def find_similar(em, embeddings, method='cosine'): - scores = [] - for ref in embeddings: - if method == 'cosine': - scores.append(1 - np.dot(ref, em) / (np.linalg.norm(ref) * np.linalg.norm(em))) - if method == 'norm': - scores.append(np.linalg.norm(ref - em)) - return np.array(scores), np.argsort(scores) - - -def get_tags_for_prompts(w2v_model, mubert_tags_embeddings, prompts, top_n=3, debug=False): - prompts_embeddings = w2v_model.encode(prompts) - ret = [] - for i, pe in enumerate(prompts_embeddings): - scores, idxs = find_similar(pe, mubert_tags_embeddings) - top_tags = MUBERT_TAGS[idxs[:top_n]] - top_prob = 1 - scores[idxs[:top_n]] - if debug: - print(f"Prompt: {prompts[i]}\nTags: {', '.join(top_tags)}\nScores: {top_prob}\n\n\n") - ret.append((prompts[i], list(top_tags))) - print("ret: " + str(ret)) - return ret \ No newline at end of file diff --git a/spaces/Nomanalvi/PDF_Convertor/pdfconv.py b/spaces/Nomanalvi/PDF_Convertor/pdfconv.py deleted file mode 100644 index f0557e698ef622e0ea062f41cf2df2b7702770cf..0000000000000000000000000000000000000000 --- a/spaces/Nomanalvi/PDF_Convertor/pdfconv.py +++ /dev/null @@ -1,108 +0,0 @@ -import streamlit as st # data app development -import subprocess # process in the os -from subprocess import STDOUT, check_call # os process manipulation -import os # os process manipulation -import base64 # byte object into a pdf file -import camelot as cam # extracting tables from PDFs -import pandas as pd -from io import BytesIO -import ctypes -from ctypes.util import find_library -from pyxlsb import open_workbook as open_xlsb -find_library("".join(("gsdll", str(ctypes.sizeof(ctypes.c_voidp) * 8), ".dll"))) -# -# to run this only once and it's cached -@st.cache -def gh(): - """install ghostscript on the linux machine""" - proc = subprocess.Popen('apt-get install -y ghostscript', shell=True, stdin=None, stdout=open(os.devnull, "wb"), - stderr=STDOUT, executable="/bin/bash") - proc.wait() - - -gh() - -st.title("PDF Table Extractor") -st.subheader("for `Vara` Research GmbH") - -st.image("https://www.vararesearch.de/wp-content/uploads/2020/03/vara-research-konsensus-management-consensus.png", width=200) - -# file uploader on streamlit - -input_pdf = st.file_uploader(label="upload your pdf here", type='pdf') - -# Display only when a PDF is uploaded - -#if input_pdf is not None: -def show_pdf(file_path): - with open(file_path,"rb") as f: - base64_pdf = base64.b64encode(f.read()).decode('utf-8') - pdf_display = f'<iframe src="data:application/pdf;base64,{base64_pdf}" width="700" height="1000" type="application/pdf"></iframe>' - st.markdown(pdf_display, unsafe_allow_html=True) - - - # run this only when a PDF is uploaded -if input_pdf is not None: - # byte object into a PDF file - with open("input.pdf", "wb") as f: - base64_pdf = base64.b64encode(input_pdf.read()).decode('utf-8') - - f.write(base64.b64decode(base64_pdf)) - f.close() - show_pdf('input.pdf') - - st.markdown("### Page Number") - -#page_number = st.text_input("Enter the page # from where you want to extract the PDF eg: 3", value=1) - page_number = 
st.text_input("Enter the page # from where you want to extract the PDF eg: 3", value=2) - - - # read the pdf and parse it using stream - table = cam.read_pdf("input.pdf", pages=page_number, multiple_tables=True ,flavor ='stream', split_text=True, edge_tol=200) #,, edge_tol=50 , flavor ='stream',table_areas=['10, 740, 580, 10'] - - st.markdown("### Number of Tables") - - # display the output after parsing - st.write(table) - - # display the table - - - if len(table) > 0: - # extract the index value of the table. - - option = st.selectbox(label="Select the Table to be displayed", options=range(len(table) + 1)) - - st.markdown('### Output Table') - - # Function that Covert the data into Excel - def to_excel(df): - output = BytesIO() - writer = pd.ExcelWriter(output, engine='xlsxwriter') - df.to_excel(writer, index=False ,sheet_name='Sheet1') - workbook = writer.book - worksheet = writer.sheets['Sheet1'] - format1 = workbook.add_format({'num_format': '0.00'}) - worksheet.set_column('A:A', None, format1) - writer.save() - processed_data = output.getvalue() - return processed_data - #print(option) - - - - # display the dataframe - - op_df = table[int(option) - 1].df - st.dataframe(op_df) - df_xlsx= to_excel(op_df) - - - st.download_button("📁 Download csv File ⬇️", - op_df.to_csv(index= False), - file_name='Output_Table.csv', - mime = 'text/csv') - st.download_button(label='📥 Download Excel File ⬇️', - data=df_xlsx , - file_name= 'Output_Table.xlsx') - \ No newline at end of file diff --git a/spaces/Noobian/SplunkGPT/README.md b/spaces/Noobian/SplunkGPT/README.md deleted file mode 100644 index 655eaac22fd2c8ee5efa450da5dd4a5a1cd52b4e..0000000000000000000000000000000000000000 --- a/spaces/Noobian/SplunkGPT/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: SplunkGPT -emoji: 💻 -colorFrom: pink -colorTo: red -sdk: gradio -sdk_version: 3.36.1 -app_file: app.py -pinned: false -license: creativeml-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py b/spaces/OFA-Sys/OFA-Generic_Interface/fairseq/examples/textless_nlp/gslm/speech2unit/clustering/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/source.py b/spaces/Olivier-Truong/faster-whisper-webui-v2/src/source.py deleted file mode 100644 index e304e278bfae8ef289c999fc76311ce01b547991..0000000000000000000000000000000000000000 --- a/spaces/Olivier-Truong/faster-whisper-webui-v2/src/source.py +++ /dev/null @@ -1,80 +0,0 @@ -# Gradio seems to truncate files without keeping the extension, so we need to truncate the file prefix ourself -import os -import pathlib -from typing import List -import zipfile - -import ffmpeg -from more_itertools import unzip - -from src.download import ExceededMaximumDuration, download_url - -MAX_FILE_PREFIX_LENGTH = 17 - -class AudioSource: - def __init__(self, source_path, source_name = None, audio_duration = None): - self.source_path = source_path - self.source_name = source_name - self._audio_duration = audio_duration - - # Load source name if not provided - if (self.source_name is None): - file_path = pathlib.Path(self.source_path) - self.source_name = file_path.name - - def get_audio_duration(self): - if self._audio_duration is None: - self._audio_duration = 
float(ffmpeg.probe(self.source_path)["format"]["duration"]) - - return self._audio_duration - - def get_full_name(self): - return self.source_name - - def get_short_name(self, max_length: int = MAX_FILE_PREFIX_LENGTH): - file_path = pathlib.Path(self.source_name) - short_name = file_path.stem[:max_length] + file_path.suffix - - return short_name - - def __str__(self) -> str: - return self.source_path - -class AudioSourceCollection: - def __init__(self, sources: List[AudioSource]): - self.sources = sources - - def __iter__(self): - return iter(self.sources) - -def get_audio_source_collection(urlData: str, multipleFiles: List, microphoneData: str, input_audio_max_duration: float = -1) -> List[AudioSource]: - output: List[AudioSource] = [] - - if urlData: - # Download from YouTube. This could also be a playlist or a channel. - output.extend([ AudioSource(x) for x in download_url(urlData, input_audio_max_duration, playlistItems=None) ]) - else: - # Add input files - if (multipleFiles is not None): - output.extend([ AudioSource(x.name) for x in multipleFiles ]) - if (microphoneData is not None): - output.append(AudioSource(microphoneData)) - - total_duration = 0 - - # Calculate total audio length. We do this even if input_audio_max_duration - # is disabled to ensure that all the audio files are valid. - for source in output: - audioDuration = ffmpeg.probe(source.source_path)["format"]["duration"] - total_duration += float(audioDuration) - - # Save audio duration - source._audio_duration = float(audioDuration) - - # Ensure the total duration of the audio is not too long - if input_audio_max_duration > 0: - if float(total_duration) > input_audio_max_duration: - raise ExceededMaximumDuration(videoDuration=total_duration, maxDuration=input_audio_max_duration, message="Video(s) is too long") - - # Return a list of audio sources - return output \ No newline at end of file diff --git a/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/utils/model_list.py b/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/utils/model_list.py deleted file mode 100644 index c1bb9b1d8be48ceb76d1e2fd72981cc1e9400ec5..0000000000000000000000000000000000000000 --- a/spaces/Omnibus/Video-Diffusion-WebUI/video_diffusion/utils/model_list.py +++ /dev/null @@ -1,6 +0,0 @@ -stable_model_list = [ - "runwayml/stable-diffusion-v1-5", - "stabilityai/stable-diffusion-2-1", - # "prompthero/openjourney-v4", - "cerspense/zeroscope_v2_576w" -] diff --git a/spaces/PKUWilliamYang/StyleGANEX/webUI/styleganex_model.py b/spaces/PKUWilliamYang/StyleGANEX/webUI/styleganex_model.py deleted file mode 100644 index 0b4450c89945da352439f16e0bc059777f6e0fc5..0000000000000000000000000000000000000000 --- a/spaces/PKUWilliamYang/StyleGANEX/webUI/styleganex_model.py +++ /dev/null @@ -1,492 +0,0 @@ -from __future__ import annotations -import numpy as np -import gradio as gr - -import os -import pathlib -import gc -import torch -import dlib -import cv2 -import PIL -from tqdm import tqdm -import numpy as np -import torch.nn.functional as F -import torchvision -from torchvision import transforms, utils -from argparse import Namespace -from datasets import augmentations -from huggingface_hub import hf_hub_download -from scripts.align_all_parallel import align_face -from latent_optimization import latent_optimization -from utils.inference_utils import save_image, load_image, visualize, get_video_crop_parameter, tensor2cv2, tensor2label, labelcolormap -from models.psp import pSp -from models.bisenet.model import BiSeNet -from models.stylegan2.model import 
Generator - -class Model(): - def __init__(self, device): - super().__init__() - - self.device = device - self.task_name = None - self.editing_w = None - self.pspex = None - self.landmarkpredictor = dlib.shape_predictor(hf_hub_download('PKUWilliamYang/StyleGANEX', 'pretrained_models/shape_predictor_68_face_landmarks.dat')) - self.transform = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize(mean=[0.5, 0.5, 0.5],std=[0.5,0.5,0.5]), - ]) - self.to_tensor = transforms.Compose([ - transforms.ToTensor(), - transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)), - ]) - self.maskpredictor = BiSeNet(n_classes=19) - self.maskpredictor.load_state_dict(torch.load(hf_hub_download('PKUWilliamYang/VToonify', 'models/faceparsing.pth'), map_location='cpu')) - self.maskpredictor.to(self.device).eval() - self.parameters = {} - self.parameters['inversion'] = {'path':'pretrained_models/styleganex_inversion.pt', 'image_path':'./data/ILip77SbmOE.png'} - self.parameters['sr-32'] = {'path':'pretrained_models/styleganex_sr32.pt', 'image_path':'./data/pexels-daniel-xavier-1239291.jpg'} - self.parameters['sr'] = {'path':'pretrained_models/styleganex_sr.pt', 'image_path':'./data/pexels-daniel-xavier-1239291.jpg'} - self.parameters['sketch2face'] = {'path':'pretrained_models/styleganex_sketch2face.pt', 'image_path':'./data/234_sketch.jpg'} - self.parameters['mask2face'] = {'path':'pretrained_models/styleganex_mask2face.pt', 'image_path':'./data/540.jpg'} - self.parameters['edit_age'] = {'path':'pretrained_models/styleganex_edit_age.pt', 'image_path':'./data/390.mp4'} - self.parameters['edit_hair'] = {'path':'pretrained_models/styleganex_edit_hair.pt', 'image_path':'./data/390.mp4'} - self.parameters['toonify_pixar'] = {'path':'pretrained_models/styleganex_toonify_pixar.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'} - self.parameters['toonify_cartoon'] = {'path':'pretrained_models/styleganex_toonify_cartoon.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'} - self.parameters['toonify_arcane'] = {'path':'pretrained_models/styleganex_toonify_arcane.pt', 'image_path':'./data/pexels-anthony-shkraba-production-8136210.mp4'} - self.print_log = True - self.editing_dicts = torch.load(hf_hub_download('PKUWilliamYang/StyleGANEX', 'direction_dics.pt')) - self.generator = Generator(1024, 512, 8) - self.model_type = None - self.error_info = 'Error: no face detected! \ - StyleGANEX uses dlib.get_frontal_face_detector but sometimes it fails to detect a face. \ - You can try several times or use other images until a face is detected, \ - then switch back to the original image.' 
- - def load_model(self, task_name: str) -> None: - if task_name == self.task_name: - return - if self.pspex is not None: - del self.pspex - torch.cuda.empty_cache() - gc.collect() - path = self.parameters[task_name]['path'] - local_path = hf_hub_download('PKUWilliamYang/StyleGANEX', path) - ckpt = torch.load(local_path, map_location='cpu') - opts = ckpt['opts'] - opts['checkpoint_path'] = local_path - opts['device'] = self.device - opts = Namespace(**opts) - self.pspex = pSp(opts, ckpt).to(self.device).eval() - self.pspex.latent_avg = self.pspex.latent_avg.to(self.device) - if 'editing_w' in ckpt.keys(): - self.editing_w = ckpt['editing_w'].clone().to(self.device) - self.task_name = task_name - torch.cuda.empty_cache() - gc.collect() - - def load_G_model(self, model_type: str) -> None: - if model_type == self.model_type: - return - torch.cuda.empty_cache() - gc.collect() - local_path = hf_hub_download('rinong/stylegan-nada-models', model_type+'.pt') - self.generator.load_state_dict(torch.load(local_path, map_location='cpu')['g_ema'], strict=False) - self.generator.to(self.device).eval() - self.model_type = model_type - torch.cuda.empty_cache() - gc.collect() - - def tensor2np(self, img): - tmp = ((img.cpu().numpy().transpose(1, 2, 0) + 1.0) * 127.5).astype(np.uint8) - return tmp - - def process_sr(self, input_image: str, resize_scale: int, model: str) -> list[np.ndarray]: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the photo.' - - if input_image is None: - #return [false_image, false_image], 'Error: fail to load empty file.' - raise gr.Error("Error: fail to load empty file.") - frame = cv2.imread(input_image) - if frame is None: - #return [false_image, false_image], 'Error: fail to load the image.' - raise gr.Error("Error: fail to load the image.") - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if model is None or model == 'SR for 32x': - task_name = 'sr-32' - resize_scale = 32 - else: - task_name = 'sr' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - #return [false_image, false_image], info - raise gr.Error(self.error_info) - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = PIL.Image.fromarray(np.uint8(frame)) - x1 = augmentations.BilinearResize(factors=[resize_scale//4])(x1) - x1_up = x1.resize((W, H)) - x2_up = align_face(np.array(x1_up), self.landmarkpredictor) - if x2_up is None: - #return [false_image, false_image], 'Error: no face detected! Please retry or change the photo.' 
- raise gr.Error(self.error_info) - x1_up = transforms.ToTensor()(x1_up).unsqueeze(dim=0).to(self.device) * 2 - 1 - x2_up = self.transform(x2_up).unsqueeze(dim=0).to(self.device) - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - y_hat = torch.clamp(self.pspex(x1=x1_up, x2=x2_up, use_skip=self.pspex.opts.use_skip, resize=False), -1, 1) - - return [self.tensor2np(x1_up[0]), self.tensor2np(y_hat[0])] - - - def process_s2f(self, input_image: str, seed: int) -> np.ndarray: - task_name = 'sketch2face' - with torch.no_grad(): - x1 = transforms.ToTensor()(PIL.Image.open(input_image)).unsqueeze(0).to(self.device) - if x1.shape[2] > 513: - x1 = x1[:,:,(x1.shape[2]//2-256)//8*8:(x1.shape[2]//2+256)//8*8] - if x1.shape[3] > 513: - x1 = x1[:,:,:,(x1.shape[3]//2-256)//8*8:(x1.shape[3]//2+256)//8*8] - x1 = x1[:,0:1] # uploaded files will be transformed to 3-channel RGB image! - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - self.pspex.train() - torch.manual_seed(seed) - y_hat = self.pspex(x1=x1, resize=False, latent_mask=[8,9,10,11,12,13,14,15,16,17], use_skip=self.pspex.opts.use_skip, - inject_latent= self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,18,1) * 0.7) - y_hat = torch.clamp(y_hat, -1, 1) - self.pspex.eval() - return self.tensor2np(y_hat[0]) - - def process_m2f(self, input_image: str, input_type: str, seed: int) -> list[np.ndarray]: - #false_image = np.zeros((256,256,3), np.uint8) - if input_image is None: - raise gr.Error('Error: fail to load empty file.' ) - #return [false_image, false_image], 'Error: fail to load empty file.' - task_name = 'mask2face' - with torch.no_grad(): - if input_type == 'parsing mask': - x1 = PIL.Image.open(input_image).getchannel(0) # uploaded files will be transformed to 3-channel RGB image! - x1 = augmentations.ToOneHot(19)(x1) - x1 = transforms.ToTensor()(x1).unsqueeze(dim=0).float().to(self.device) - #print(x1.shape) - else: - frame = cv2.imread(input_image) - if frame is None: - #return [false_image, false_image], 'Error: fail to load the image.' - raise gr.Error('Error: fail to load the image.' ) - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - #return [false_image, false_image], 'Error: no face detected! Please retry or change the photo.' 
- raise gr.Error(self.error_info) - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - # convert face image to segmentation mask - x1 = self.to_tensor(frame).unsqueeze(0).to(self.device) - # upsample image for precise segmentation - x1 = F.interpolate(x1, scale_factor=2, mode='bilinear') - x1 = self.maskpredictor(x1)[0] - x1 = F.interpolate(x1, scale_factor=0.5).argmax(dim=1) - x1 = F.one_hot(x1, num_classes=19).permute(0, 3, 1, 2).float().to(self.device) - - if x1.shape[2] > 513: - x1 = x1[:,:,(x1.shape[2]//2-256)//8*8:(x1.shape[2]//2+256)//8*8] - if x1.shape[3] > 513: - x1 = x1[:,:,:,(x1.shape[3]//2-256)//8*8:(x1.shape[3]//2+256)//8*8] - - x1_viz = (tensor2label(x1[0], 19) / 192 * 256).astype(np.uint8) - - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - self.pspex.train() - torch.manual_seed(seed) - y_hat = self.pspex(x1=x1, resize=False, latent_mask=[8,9,10,11,12,13,14,15,16,17], use_skip=self.pspex.opts.use_skip, - inject_latent= self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,18,1) * 0.7) - y_hat = torch.clamp(y_hat, -1, 1) - self.pspex.eval() - return [x1_viz, self.tensor2np(y_hat[0])] - - - def process_editing(self, input_image: str, scale_factor: float, model_type: str) -> np.ndarray: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the photo.' - - if input_image is None: - #return false_image, false_image, 'Error: fail to load empty file.' - raise gr.Error('Error: fail to load empty file.') - frame = cv2.imread(input_image) - if frame is None: - #return false_image, false_image, 'Error: fail to load the image.' - raise gr.Error('Error: fail to load the image.') - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if model_type is None or model_type == 'reduce age': - task_name = 'edit_age' - else: - task_name = 'edit_hair' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - #return false_image, false_image, info - raise gr.Error(self.error_info) - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - #return false_image, 'Error: no face detected! Please retry or change the photo.' - raise gr.Error(self.error_info) - x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, - resize=False, editing_w= - scale_factor* self.editing_w[0:1]) - y_hat = torch.clamp(y_hat, -1, 1) - - return self.tensor2np(y_hat[0]) - - def process_vediting(self, input_video: str, scale_factor: float, model_type: str, frame_num: int) -> tuple[list[np.ndarray], str]: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the video.' - - if input_video is None: - #return [false_image], 'default.mp4', 'Error: fail to load empty file.' 
- raise gr.Error('Error: fail to load empty file.') - video_cap = cv2.VideoCapture(input_video) - success, frame = video_cap.read() - if success is False: - #return [false_image], 'default.mp4', 'Error: fail to load the video.' - raise gr.Error('Error: fail to load the video.') - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if model_type is None or model_type == 'reduce age': - task_name = 'edit_age' - else: - task_name = 'edit_hair' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - #return [false_image], 'default.mp4', info - raise gr.Error(self.error_info) - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - #return [false_image], 'default.mp4', info - raise gr.Error(self.error_info) - x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('first frame loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - videoWriter = cv2.VideoWriter('output.mp4', fourcc, video_cap.get(5), (4*W, 4*H)) - - viz_frames = [] - for i in range(frame_num): - if i > 0: - success, frame = video_cap.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, - resize=False, editing_w= - scale_factor * self.editing_w[0:1]) - y_hat = torch.clamp(y_hat, -1, 1) - videoWriter.write(tensor2cv2(y_hat[0].cpu())) - if i < min(frame_num, 4): - viz_frames += [self.tensor2np(y_hat[0])] - - videoWriter.release() - - return viz_frames, 'output.mp4' - - - def process_toonify(self, input_image: str, style_type: str) -> np.ndarray: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the photo.' - - if input_image is None: - raise gr.Error('Error: fail to load empty file.') - #return false_image, false_image, 'Error: fail to load empty file.' - frame = cv2.imread(input_image) - if frame is None: - raise gr.Error('Error: fail to load the image.') - #return false_image, false_image, 'Error: fail to load the image.' - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if style_type is None or style_type == 'Pixar': - task_name = 'toonify_pixar' - elif style_type == 'Cartoon': - task_name = 'toonify_cartoon' - else: - task_name = 'toonify_arcane' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - raise gr.Error(self.error_info) - #return false_image, false_image, info - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - raise gr.Error(self.error_info) - #return false_image, 'Error: no face detected! Please retry or change the photo.' 
- x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('image loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, resize=False) - y_hat = torch.clamp(y_hat, -1, 1) - - return self.tensor2np(y_hat[0]) - - - def process_vtoonify(self, input_video: str, style_type: str, frame_num: int) -> tuple[list[np.ndarray], str]: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the video.' - - if input_video is None: - raise gr.Error('Error: fail to load empty file.') - #return [false_image], 'default.mp4', 'Error: fail to load empty file.' - video_cap = cv2.VideoCapture(input_video) - success, frame = video_cap.read() - if success is False: - raise gr.Error('Error: fail to load the video.') - #return [false_image], 'default.mp4', 'Error: fail to load the video.' - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - if style_type is None or style_type == 'Pixar': - task_name = 'toonify_pixar' - elif style_type == 'Cartoon': - task_name = 'toonify_cartoon' - else: - task_name = 'toonify_arcane' - - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - raise gr.Error(self.error_info) - #return [false_image], 'default.mp4', info - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - raise gr.Error(self.error_info) - #return [false_image], 'default.mp4', info - x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('first frame loaded') - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - - fourcc = cv2.VideoWriter_fourcc(*'mp4v') - videoWriter = cv2.VideoWriter('output.mp4', fourcc, video_cap.get(5), (4*W, 4*H)) - - viz_frames = [] - for i in range(frame_num): - if i > 0: - success, frame = video_cap.read() - frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - y_hat = self.pspex(x1=x1, x2=x2, use_skip=self.pspex.opts.use_skip, zero_noise=True, resize=False) - y_hat = torch.clamp(y_hat, -1, 1) - videoWriter.write(tensor2cv2(y_hat[0].cpu())) - if i < min(frame_num, 4): - viz_frames += [self.tensor2np(y_hat[0])] - - videoWriter.release() - - return viz_frames, 'output.mp4' - - - def process_inversion(self, input_image: str, optimize: str, input_latent: file-object, editing_options: str, - scale_factor: float, seed: int) -> tuple[np.ndarray, np.ndarray]: - #false_image = np.zeros((256,256,3), np.uint8) - #info = 'Error: no face detected! Please retry or change the photo.' - - if input_image is None: - raise gr.Error('Error: fail to load empty file.') - #return false_image, false_image, 'Error: fail to load empty file.' - frame = cv2.imread(input_image) - if frame is None: - raise gr.Error('Error: fail to load the image.') - #return false_image, false_image, 'Error: fail to load the image.' 
- frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB) - - task_name = 'inversion' - self.load_model(task_name) - if self.print_log: print('model %s loaded'%(task_name)) - if input_latent is not None: - if '.pt' not in input_latent.name: - raise gr.Error('Error: the latent format is wrong') - #return false_image, false_image, 'Error: the latent format is wrong' - latents = torch.load(input_latent.name) - if 'wplus' not in latents.keys() or 'f' not in latents.keys(): - raise gr.Error('Error: the latent format is wrong') - #return false_image, false_image, 'Error: the latent format is wrong' - wplus = latents['wplus'].to(self.device) # w+ - f = [latents['f'][0].to(self.device)] # f - elif optimize == 'Latent optimization': - wplus, f, _, _, _ = latent_optimization(frame, self.pspex, self.landmarkpredictor, - step=500, device=self.device) - else: - with torch.no_grad(): - paras = get_video_crop_parameter(frame, self.landmarkpredictor) - if paras is None: - raise gr.Error(self.error_info) - #return false_image, false_image, info - h,w,top,bottom,left,right,scale = paras - H, W = int(bottom-top), int(right-left) - frame = cv2.resize(frame, (w, h))[top:bottom, left:right] - x1 = self.transform(frame).unsqueeze(0).to(self.device) - x2 = align_face(frame, self.landmarkpredictor) - if x2 is None: - raise gr.Error(self.error_info) - #return false_image, false_image, 'Error: no face detected! Please retry or change the photo.' - x2 = self.transform(x2).unsqueeze(dim=0).to(self.device) - if self.print_log: print('image loaded') - wplus = self.pspex.encoder(x2) + self.pspex.latent_avg.unsqueeze(0) - _, f = self.pspex.encoder(x1, return_feat=True) - - with torch.no_grad(): - y_hat, _ = self.pspex.decoder([wplus], input_is_latent=True, first_layer_feature=f) - y_hat = torch.clamp(y_hat, -1, 1) - - if 'Style Mixing' in editing_options: - torch.manual_seed(seed) - wplus[:, 8:] = self.pspex.decoder.style(torch.randn(1, 512).to(self.device)).unsqueeze(1).repeat(1,10,1) * 0.7 - y_hat_edit, _ = self.pspex.decoder([wplus], input_is_latent=True, first_layer_feature=f) - elif 'Attribute Editing' in editing_options: - editing_w = self.editing_dicts[editing_options[19:]].to(self.device) - y_hat_edit, _ = self.pspex.decoder([wplus+scale_factor*editing_w], input_is_latent=True, first_layer_feature=f) - elif 'Domain Transfer' in editing_options: - self.load_G_model(editing_options[17:]) - if self.print_log: print('model %s loaded'%(editing_options[17:])) - y_hat_edit, _ = self.generator([wplus], input_is_latent=True, first_layer_feature=f) - else: - y_hat_edit = y_hat - y_hat_edit = torch.clamp(y_hat_edit, -1, 1) - - return self.tensor2np(y_hat[0]), self.tensor2np(y_hat_edit[0]) \ No newline at end of file diff --git a/spaces/PKaushik/humandetect/yolov6/data/datasets.py b/spaces/PKaushik/humandetect/yolov6/data/datasets.py deleted file mode 100644 index 1a6fe4db8fea7df27e36d410708b11f78c20b21a..0000000000000000000000000000000000000000 --- a/spaces/PKaushik/humandetect/yolov6/data/datasets.py +++ /dev/null @@ -1,550 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- - -import glob -import os -import os.path as osp -import random -import json -import time -import hashlib - -from multiprocessing.pool import Pool - -import cv2 -import numpy as np -import torch -from PIL import ExifTags, Image, ImageOps -from torch.utils.data import Dataset -from tqdm import tqdm - -from .data_augment import ( - augment_hsv, - letterbox, - mixup, - random_affine, - mosaic_augmentation, -) -from yolov6.utils.events import LOGGER - -# 
Parameters -IMG_FORMATS = ["bmp", "jpg", "jpeg", "png", "tif", "tiff", "dng", "webp", "mpo"] -# Get orientation exif tag -for k, v in ExifTags.TAGS.items(): - if v == "Orientation": - ORIENTATION = k - break - - -class TrainValDataset(Dataset): - # YOLOv6 train_loader/val_loader, loads images and labels for training and validation - def __init__( - self, - img_dir, - img_size=640, - batch_size=16, - augment=False, - hyp=None, - rect=False, - check_images=False, - check_labels=False, - stride=32, - pad=0.0, - rank=-1, - data_dict=None, - task="train", - ): - assert task.lower() in ("train", "val", "speed"), f"Not supported task: {task}" - t1 = time.time() - self.__dict__.update(locals()) - self.main_process = self.rank in (-1, 0) - self.task = self.task.capitalize() - self.class_names = data_dict["names"] - self.img_paths, self.labels = self.get_imgs_labels(self.img_dir) - if self.rect: - shapes = [self.img_info[p]["shape"] for p in self.img_paths] - self.shapes = np.array(shapes, dtype=np.float64) - self.batch_indices = np.floor( - np.arange(len(shapes)) / self.batch_size - ).astype( - np.int - ) # batch indices of each image - self.sort_files_shapes() - t2 = time.time() - if self.main_process: - LOGGER.info(f"%.1fs for dataset initialization." % (t2 - t1)) - - def __len__(self): - """Get the length of dataset""" - return len(self.img_paths) - - def __getitem__(self, index): - """Fetching a data sample for a given key. - This function applies mosaic and mixup augments during training. - During validation, letterbox augment is applied. - """ - # Mosaic Augmentation - if self.augment and random.random() < self.hyp["mosaic"]: - img, labels = self.get_mosaic(index) - shapes = None - - # MixUp augmentation - if random.random() < self.hyp["mixup"]: - img_other, labels_other = self.get_mosaic( - random.randint(0, len(self.img_paths) - 1) - ) - img, labels = mixup(img, labels, img_other, labels_other) - - else: - # Load image - img, (h0, w0), (h, w) = self.load_image(index) - - # Letterbox - shape = ( - self.batch_shapes[self.batch_indices[index]] - if self.rect - else self.img_size - ) # final letterboxed shape - img, ratio, pad = letterbox(img, shape, auto=False, scaleup=self.augment) - shapes = (h0, w0), ((h / h0, w / w0), pad) # for COCO mAP rescaling - - labels = self.labels[index].copy() - if labels.size: - w *= ratio - h *= ratio - # new boxes - boxes = np.copy(labels[:, 1:]) - boxes[:, 0] = ( - w * (labels[:, 1] - labels[:, 3] / 2) + pad[0] - ) # top left x - boxes[:, 1] = ( - h * (labels[:, 2] - labels[:, 4] / 2) + pad[1] - ) # top left y - boxes[:, 2] = ( - w * (labels[:, 1] + labels[:, 3] / 2) + pad[0] - ) # bottom right x - boxes[:, 3] = ( - h * (labels[:, 2] + labels[:, 4] / 2) + pad[1] - ) # bottom right y - labels[:, 1:] = boxes - - if self.augment: - img, labels = random_affine( - img, - labels, - degrees=self.hyp["degrees"], - translate=self.hyp["translate"], - scale=self.hyp["scale"], - shear=self.hyp["shear"], - new_shape=(self.img_size, self.img_size), - ) - - if len(labels): - h, w = img.shape[:2] - - labels[:, [1, 3]] = labels[:, [1, 3]].clip(0, w - 1e-3) # x1, x2 - labels[:, [2, 4]] = labels[:, [2, 4]].clip(0, h - 1e-3) # y1, y2 - - boxes = np.copy(labels[:, 1:]) - boxes[:, 0] = ((labels[:, 1] + labels[:, 3]) / 2) / w # x center - boxes[:, 1] = ((labels[:, 2] + labels[:, 4]) / 2) / h # y center - boxes[:, 2] = (labels[:, 3] - labels[:, 1]) / w # width - boxes[:, 3] = (labels[:, 4] - labels[:, 2]) / h # height - labels[:, 1:] = boxes - - if self.augment: - img, labels = 
self.general_augment(img, labels) - - labels_out = torch.zeros((len(labels), 6)) - if len(labels): - labels_out[:, 1:] = torch.from_numpy(labels) - - # Convert - img = img.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - img = np.ascontiguousarray(img) - - return torch.from_numpy(img), labels_out, self.img_paths[index], shapes - - def load_image(self, index): - """Load image. - This function loads image by cv2, resize original image to target shape(img_size) with keeping ratio. - - Returns: - Image, original shape of image, resized image shape - """ - path = self.img_paths[index] - im = cv2.imread(path) - assert im is not None, f"Image Not Found {path}, workdir: {os.getcwd()}" - - h0, w0 = im.shape[:2] # origin shape - r = self.img_size / max(h0, w0) - if r != 1: - im = cv2.resize( - im, - (int(w0 * r), int(h0 * r)), - interpolation=cv2.INTER_AREA - if r < 1 and not self.augment - else cv2.INTER_LINEAR, - ) - return im, (h0, w0), im.shape[:2] - - @staticmethod - def collate_fn(batch): - """Merges a list of samples to form a mini-batch of Tensor(s)""" - img, label, path, shapes = zip(*batch) - for i, l in enumerate(label): - l[:, 0] = i # add target image index for build_targets() - return torch.stack(img, 0), torch.cat(label, 0), path, shapes - - def get_imgs_labels(self, img_dir): - - assert osp.exists(img_dir), f"{img_dir} is an invalid directory path!" - valid_img_record = osp.join( - osp.dirname(img_dir), "." + osp.basename(img_dir) + ".json" - ) - NUM_THREADS = min(8, os.cpu_count()) - - img_paths = glob.glob(osp.join(img_dir, "*"), recursive=True) - img_paths = sorted( - p for p in img_paths if p.split(".")[-1].lower() in IMG_FORMATS - ) - assert img_paths, f"No images found in {img_dir}." - - img_hash = self.get_hash(img_paths) - if osp.exists(valid_img_record): - with open(valid_img_record, "r") as f: - cache_info = json.load(f) - if "image_hash" in cache_info and cache_info["image_hash"] == img_hash: - img_info = cache_info["information"] - else: - self.check_images = True - else: - self.check_images = True - - # check images - if self.check_images and self.main_process: - img_info = {} - nc, msgs = 0, [] # number corrupt, messages - LOGGER.info( - f"{self.task}: Checking formats of images with {NUM_THREADS} process(es): " - ) - with Pool(NUM_THREADS) as pool: - pbar = tqdm( - pool.imap(TrainValDataset.check_image, img_paths), - total=len(img_paths), - ) - for img_path, shape_per_img, nc_per_img, msg in pbar: - if nc_per_img == 0: # not corrupted - img_info[img_path] = {"shape": shape_per_img} - nc += nc_per_img - if msg: - msgs.append(msg) - pbar.desc = f"{nc} image(s) corrupted" - pbar.close() - if msgs: - LOGGER.info("\n".join(msgs)) - - cache_info = {"information": img_info, "image_hash": img_hash} - # save valid image paths. - with open(valid_img_record, "w") as f: - json.dump(cache_info, f) - - # check and load anns - label_dir = osp.join( - osp.dirname(osp.dirname(img_dir)), "labels", osp.basename(img_dir) - ) - assert osp.exists(label_dir), f"{label_dir} is an invalid directory path!" 
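# The label lookup below assumes the standard YOLO directory layout, in which the
# "images" directory has a sibling "labels" directory holding one .txt file per image,
# e.g. dataset/images/train/0001.jpg  <->  dataset/labels/train/0001.txt
# (label_dir above resolves to <dataset>/labels/<basename of img_dir>).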
- - img_paths = list(img_info.keys()) - label_paths = sorted( - osp.join(label_dir, osp.splitext(osp.basename(p))[0] + ".txt") - for p in img_paths - ) - label_hash = self.get_hash(label_paths) - if "label_hash" not in cache_info or cache_info["label_hash"] != label_hash: - self.check_labels = True - - if self.check_labels: - cache_info["label_hash"] = label_hash - nm, nf, ne, nc, msgs = 0, 0, 0, 0, [] # number corrupt, messages - LOGGER.info( - f"{self.task}: Checking formats of labels with {NUM_THREADS} process(es): " - ) - with Pool(NUM_THREADS) as pool: - pbar = pool.imap( - TrainValDataset.check_label_files, zip(img_paths, label_paths) - ) - pbar = tqdm(pbar, total=len(label_paths)) if self.main_process else pbar - for ( - img_path, - labels_per_file, - nc_per_file, - nm_per_file, - nf_per_file, - ne_per_file, - msg, - ) in pbar: - if nc_per_file == 0: - img_info[img_path]["labels"] = labels_per_file - else: - img_info.pop(img_path) - nc += nc_per_file - nm += nm_per_file - nf += nf_per_file - ne += ne_per_file - if msg: - msgs.append(msg) - if self.main_process: - pbar.desc = f"{nf} label(s) found, {nm} label(s) missing, {ne} label(s) empty, {nc} invalid label files" - if self.main_process: - pbar.close() - with open(valid_img_record, "w") as f: - json.dump(cache_info, f) - if msgs: - LOGGER.info("\n".join(msgs)) - if nf == 0: - LOGGER.warning( - f"WARNING: No labels found in {osp.dirname(self.img_paths[0])}. " - ) - - if self.task.lower() == "val": - if self.data_dict.get("is_coco", False): # use original json file when evaluating on coco dataset. - assert osp.exists(self.data_dict["anno_path"]), "Eval on coco dataset must provide valid path of the annotation file in config file: data/coco.yaml" - else: - assert ( - self.class_names - ), "Class names is required when converting labels to coco format for evaluating." - save_dir = osp.join(osp.dirname(osp.dirname(img_dir)), "annotations") - if not osp.exists(save_dir): - os.mkdir(save_dir) - save_path = osp.join( - save_dir, "instances_" + osp.basename(img_dir) + ".json" - ) - TrainValDataset.generate_coco_format_labels( - img_info, self.class_names, save_path - ) - - img_paths, labels = list( - zip( - *[ - ( - img_path, - np.array(info["labels"], dtype=np.float32) - if info["labels"] - else np.zeros((0, 5), dtype=np.float32), - ) - for img_path, info in img_info.items() - ] - ) - ) - self.img_info = img_info - LOGGER.info( - f"{self.task}: Final numbers of valid images: {len(img_paths)}/ labels: {len(labels)}. " - ) - return img_paths, labels - - def get_mosaic(self, index): - """Gets images and labels after mosaic augments""" - indices = [index] + random.choices( - range(0, len(self.img_paths)), k=3 - ) # 3 additional image indices - random.shuffle(indices) - imgs, hs, ws, labels = [], [], [], [] - for index in indices: - img, _, (h, w) = self.load_image(index) - labels_per_img = self.labels[index] - imgs.append(img) - hs.append(h) - ws.append(w) - labels.append(labels_per_img) - img, labels = mosaic_augmentation(self.img_size, imgs, hs, ws, labels, self.hyp) - return img, labels - - def general_augment(self, img, labels): - """Gets images and labels after general augment - This function applies hsv, random ud-flip and random lr-flips augments. 
- """ - nl = len(labels) - - # HSV color-space - augment_hsv( - img, - hgain=self.hyp["hsv_h"], - sgain=self.hyp["hsv_s"], - vgain=self.hyp["hsv_v"], - ) - - # Flip up-down - if random.random() < self.hyp["flipud"]: - img = np.flipud(img) - if nl: - labels[:, 2] = 1 - labels[:, 2] - - # Flip left-right - if random.random() < self.hyp["fliplr"]: - img = np.fliplr(img) - if nl: - labels[:, 1] = 1 - labels[:, 1] - - return img, labels - - def sort_files_shapes(self): - # Sort by aspect ratio - batch_num = self.batch_indices[-1] + 1 - s = self.shapes # wh - ar = s[:, 1] / s[:, 0] # aspect ratio - irect = ar.argsort() - self.img_paths = [self.img_paths[i] for i in irect] - self.labels = [self.labels[i] for i in irect] - self.shapes = s[irect] # wh - ar = ar[irect] - - # Set training image shapes - shapes = [[1, 1]] * batch_num - for i in range(batch_num): - ari = ar[self.batch_indices == i] - mini, maxi = ari.min(), ari.max() - if maxi < 1: - shapes[i] = [maxi, 1] - elif mini > 1: - shapes[i] = [1, 1 / mini] - self.batch_shapes = ( - np.ceil(np.array(shapes) * self.img_size / self.stride + self.pad).astype( - np.int - ) - * self.stride - ) - - @staticmethod - def check_image(im_file): - # verify an image. - nc, msg = 0, "" - try: - im = Image.open(im_file) - im.verify() # PIL verify - shape = im.size # (width, height) - im_exif = im._getexif() - if im_exif and ORIENTATION in im_exif: - rotation = im_exif[ORIENTATION] - if rotation in (6, 8): - shape = (shape[1], shape[0]) - - assert (shape[0] > 9) & (shape[1] > 9), f"image size {shape} <10 pixels" - assert im.format.lower() in IMG_FORMATS, f"invalid image format {im.format}" - if im.format.lower() in ("jpg", "jpeg"): - with open(im_file, "rb") as f: - f.seek(-2, 2) - if f.read() != b"\xff\xd9": # corrupt JPEG - ImageOps.exif_transpose(Image.open(im_file)).save( - im_file, "JPEG", subsampling=0, quality=100 - ) - msg += f"WARNING: {im_file}: corrupt JPEG restored and saved" - return im_file, shape, nc, msg - except Exception as e: - nc = 1 - msg = f"WARNING: {im_file}: ignoring corrupt image: {e}" - return im_file, None, nc, msg - - @staticmethod - def check_label_files(args): - img_path, lb_path = args - nm, nf, ne, nc, msg = 0, 0, 0, 0, "" # number (missing, found, empty, message - try: - if osp.exists(lb_path): - nf = 1 # label found - with open(lb_path, "r") as f: - labels = [ - x.split() for x in f.read().strip().splitlines() if len(x) - ] - labels = np.array(labels, dtype=np.float32) - if len(labels): - assert all( - len(l) == 5 for l in labels - ), f"{lb_path}: wrong label format." 
- assert ( - labels >= 0 - ).all(), f"{lb_path}: Label values error: all values in label file must > 0" - assert ( - labels[:, 1:] <= 1 - ).all(), f"{lb_path}: Label values error: all coordinates must be normalized" - - _, indices = np.unique(labels, axis=0, return_index=True) - if len(indices) < len(labels): # duplicate row check - labels = labels[indices] # remove duplicates - msg += f"WARNING: {lb_path}: {len(labels) - len(indices)} duplicate labels removed" - labels = labels.tolist() - else: - ne = 1 # label empty - labels = [] - else: - nm = 1 # label missing - labels = [] - - return img_path, labels, nc, nm, nf, ne, msg - except Exception as e: - nc = 1 - msg = f"WARNING: {lb_path}: ignoring invalid labels: {e}" - return img_path, None, nc, nm, nf, ne, msg - - @staticmethod - def generate_coco_format_labels(img_info, class_names, save_path): - # for evaluation with pycocotools - dataset = {"categories": [], "annotations": [], "images": []} - for i, class_name in enumerate(class_names): - dataset["categories"].append( - {"id": i, "name": class_name, "supercategory": ""} - ) - - ann_id = 0 - LOGGER.info(f"Convert to COCO format") - for i, (img_path, info) in enumerate(tqdm(img_info.items())): - labels = info["labels"] if info["labels"] else [] - img_id = osp.splitext(osp.basename(img_path))[0] - img_id = int(img_id) if img_id.isnumeric() else img_id - img_w, img_h = info["shape"] - dataset["images"].append( - { - "file_name": os.path.basename(img_path), - "id": img_id, - "width": img_w, - "height": img_h, - } - ) - if labels: - for label in labels: - c, x, y, w, h = label[:5] - # convert x,y,w,h to x1,y1,x2,y2 - x1 = (x - w / 2) * img_w - y1 = (y - h / 2) * img_h - x2 = (x + w / 2) * img_w - y2 = (y + h / 2) * img_h - # cls_id starts from 0 - cls_id = int(c) - w = max(0, x2 - x1) - h = max(0, y2 - y1) - dataset["annotations"].append( - { - "area": h * w, - "bbox": [x1, y1, w, h], - "category_id": cls_id, - "id": ann_id, - "image_id": img_id, - "iscrowd": 0, - # mask - "segmentation": [], - } - ) - ann_id += 1 - - with open(save_path, "w") as f: - json.dump(dataset, f) - LOGGER.info( - f"Convert to COCO format finished. Resutls saved in {save_path}" - ) - - @staticmethod - def get_hash(paths): - """Get the hash value of paths""" - assert isinstance(paths, list), "Only support list currently." 
- h = hashlib.md5("".join(paths).encode()) - return h.hexdigest() diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/cps/prune-bailouts.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/cps/prune-bailouts.go deleted file mode 100644 index ca2194e3c69cf9b789f8243a7683e9f62774fc8b..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/language/cps/prune-bailouts.go and /dev/null differ diff --git a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/oop/goops/simple.go b/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/oop/goops/simple.go deleted file mode 100644 index c0924c7e7dc1d3bb5068f27b2ee318acf499647f..0000000000000000000000000000000000000000 Binary files a/spaces/Pattr/DrumClassification/lilypond-2.24.2/lib/guile/2.2/ccache/oop/goops/simple.go and /dev/null differ diff --git a/spaces/Paulog731/SD-2.1-Img2Img/app.py b/spaces/Paulog731/SD-2.1-Img2Img/app.py deleted file mode 100644 index 4357cbf269e2ae263d1f1f73b4af385dce4bc678..0000000000000000000000000000000000000000 --- a/spaces/Paulog731/SD-2.1-Img2Img/app.py +++ /dev/null @@ -1,29 +0,0 @@ -import gradio as gr -import torch -import numpy as np -from PIL import Image -from datasets import load_dataset -from diffusers import StableDiffusionImg2ImgPipeline - -device = "cuda" if torch.cuda.is_available() else "cpu" -pipe = StableDiffusionImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-1", torch_dtype=torch.float16, revision="fp16") if torch.cuda.is_available() else StableDiffusionImg2ImgPipeline.from_pretrained("stabilityai/stable-diffusion-2-1") -pipe = pipe.to(device) - -def resize(value,img): - img = Image.open(img) - img = img.resize((value,value)) - return img - -def infer(source_img, prompt, negative_prompt, guide, steps, seed, Strength): - generator = torch.Generator(device).manual_seed(seed) - source_image = resize(768, source_img) - source_image.save('source.png') - image = pipe(prompt, negative_prompt=negative_prompt, init_image=source_image, strength=Strength, guidance_scale=guide, num_inference_steps=steps).images[0] - return image - -gr.Interface(fn=infer, inputs=[gr.Image(source="upload", type="filepath", label="Raw Image. Must Be .png"), gr.Textbox(label = 'Prompt Input Text. 77 Token (Keyword or Symbol) Maximum'), gr.Textbox(label='What you Do Not want the AI to generate.'), - gr.Slider(2, 15, value = 7, label = 'Guidance Scale'), - gr.Slider(1, 25, value = 10, step = 1, label = 'Number of Iterations'), - gr.Slider(label = "Seed", minimum = 0, maximum = 987654321987654321, step = 1, randomize = True), - gr.Slider(label='Strength', minimum = 0, maximum = 1, step = .05, value = .5)], - outputs='image', title = "Stable Diffusion 2.1 Image to Image Pipeline CPU", description = "For more information on Stable Diffusion 2.1 see https://github.com/Stability-AI/stablediffusion

    Upload an Image (MUST Be .PNG and 512x512 or 768x768) enter a Prompt, or let it just do its Thing, then click submit. 10 Iterations takes about ~900-1200 seconds currently. For more informationon about Stable Diffusion or Suggestions for prompts, keywords, artists or styles see https://github.com/Maks-s/sd-akashic", article = "Code Monkey: Manjushri").queue(max_size=5).launch() \ No newline at end of file diff --git a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/non_local.py b/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/non_local.py deleted file mode 100644 index 92d00155ef275c1201ea66bba30470a1785cc5d7..0000000000000000000000000000000000000000 --- a/spaces/Pie31415/control-animation/annotator/uniformer/mmcv/cnn/bricks/non_local.py +++ /dev/null @@ -1,306 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -from abc import ABCMeta - -import torch -import torch.nn as nn - -from ..utils import constant_init, normal_init -from .conv_module import ConvModule -from .registry import PLUGIN_LAYERS - - -class _NonLocalNd(nn.Module, metaclass=ABCMeta): - """Basic Non-local module. - - This module is proposed in - "Non-local Neural Networks" - Paper reference: https://arxiv.org/abs/1711.07971 - Code reference: https://github.com/AlexHex7/Non-local_pytorch - - Args: - in_channels (int): Channels of the input feature map. - reduction (int): Channel reduction ratio. Default: 2. - use_scale (bool): Whether to scale pairwise_weight by - `1/sqrt(inter_channels)` when the mode is `embedded_gaussian`. - Default: True. - conv_cfg (None | dict): The config dict for convolution layers. - If not specified, it will use `nn.Conv2d` for convolution layers. - Default: None. - norm_cfg (None | dict): The config dict for normalization layers. - Default: None. (This parameter is only applicable to conv_out.) - mode (str): Options are `gaussian`, `concatenation`, - `embedded_gaussian` and `dot_product`. Default: embedded_gaussian. - """ - - def __init__(self, - in_channels, - reduction=2, - use_scale=True, - conv_cfg=None, - norm_cfg=None, - mode='embedded_gaussian', - **kwargs): - super(_NonLocalNd, self).__init__() - self.in_channels = in_channels - self.reduction = reduction - self.use_scale = use_scale - self.inter_channels = max(in_channels // reduction, 1) - self.mode = mode - - if mode not in [ - 'gaussian', 'embedded_gaussian', 'dot_product', 'concatenation' - ]: - raise ValueError("Mode should be in 'gaussian', 'concatenation', " - f"'embedded_gaussian' or 'dot_product', but got " - f'{mode} instead.') - - # g, theta, phi are defaulted as `nn.ConvNd`. - # Here we use ConvModule for potential usage. 
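# For reference, the core computation these 1x1 ConvModules implement (in the
# default 'embedded_gaussian' mode with use_scale=True) is
#     y = softmax(theta(x)^T phi(x) / sqrt(C')) @ g(x),   out = x + conv_out(y).
# A minimal, self-contained PyTorch sketch of the 2D case, written with plain
# nn.Conv2d instead of ConvModule (illustrative only; the class name is hypothetical):
import torch
import torch.nn as nn

class TinyNonLocal2d(nn.Module):
    def __init__(self, channels, reduction=2):
        super().__init__()
        inter = max(channels // reduction, 1)
        self.theta = nn.Conv2d(channels, inter, kernel_size=1)
        self.phi = nn.Conv2d(channels, inter, kernel_size=1)
        self.g = nn.Conv2d(channels, inter, kernel_size=1)
        self.out = nn.Conv2d(inter, channels, kernel_size=1)
        self.inter = inter

    def forward(self, x):
        n = x.size(0)
        theta_x = self.theta(x).flatten(2).transpose(1, 2)      # [N, HW, C']
        phi_x = self.phi(x).flatten(2)                          # [N, C', HW]
        g_x = self.g(x).flatten(2).transpose(1, 2)              # [N, HW, C']
        weight = torch.softmax(theta_x @ phi_x / self.inter ** 0.5, dim=-1)
        y = (weight @ g_x).transpose(1, 2).reshape(n, self.inter, *x.shape[2:])
        return x + self.out(y)                                  # residual connection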
- self.g = ConvModule( - self.in_channels, - self.inter_channels, - kernel_size=1, - conv_cfg=conv_cfg, - act_cfg=None) - self.conv_out = ConvModule( - self.inter_channels, - self.in_channels, - kernel_size=1, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - if self.mode != 'gaussian': - self.theta = ConvModule( - self.in_channels, - self.inter_channels, - kernel_size=1, - conv_cfg=conv_cfg, - act_cfg=None) - self.phi = ConvModule( - self.in_channels, - self.inter_channels, - kernel_size=1, - conv_cfg=conv_cfg, - act_cfg=None) - - if self.mode == 'concatenation': - self.concat_project = ConvModule( - self.inter_channels * 2, - 1, - kernel_size=1, - stride=1, - padding=0, - bias=False, - act_cfg=dict(type='ReLU')) - - self.init_weights(**kwargs) - - def init_weights(self, std=0.01, zeros_init=True): - if self.mode != 'gaussian': - for m in [self.g, self.theta, self.phi]: - normal_init(m.conv, std=std) - else: - normal_init(self.g.conv, std=std) - if zeros_init: - if self.conv_out.norm_cfg is None: - constant_init(self.conv_out.conv, 0) - else: - constant_init(self.conv_out.norm, 0) - else: - if self.conv_out.norm_cfg is None: - normal_init(self.conv_out.conv, std=std) - else: - normal_init(self.conv_out.norm, std=std) - - def gaussian(self, theta_x, phi_x): - # NonLocal1d pairwise_weight: [N, H, H] - # NonLocal2d pairwise_weight: [N, HxW, HxW] - # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] - pairwise_weight = torch.matmul(theta_x, phi_x) - pairwise_weight = pairwise_weight.softmax(dim=-1) - return pairwise_weight - - def embedded_gaussian(self, theta_x, phi_x): - # NonLocal1d pairwise_weight: [N, H, H] - # NonLocal2d pairwise_weight: [N, HxW, HxW] - # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] - pairwise_weight = torch.matmul(theta_x, phi_x) - if self.use_scale: - # theta_x.shape[-1] is `self.inter_channels` - pairwise_weight /= theta_x.shape[-1]**0.5 - pairwise_weight = pairwise_weight.softmax(dim=-1) - return pairwise_weight - - def dot_product(self, theta_x, phi_x): - # NonLocal1d pairwise_weight: [N, H, H] - # NonLocal2d pairwise_weight: [N, HxW, HxW] - # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] - pairwise_weight = torch.matmul(theta_x, phi_x) - pairwise_weight /= pairwise_weight.shape[-1] - return pairwise_weight - - def concatenation(self, theta_x, phi_x): - # NonLocal1d pairwise_weight: [N, H, H] - # NonLocal2d pairwise_weight: [N, HxW, HxW] - # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] - h = theta_x.size(2) - w = phi_x.size(3) - theta_x = theta_x.repeat(1, 1, 1, w) - phi_x = phi_x.repeat(1, 1, h, 1) - - concat_feature = torch.cat([theta_x, phi_x], dim=1) - pairwise_weight = self.concat_project(concat_feature) - n, _, h, w = pairwise_weight.size() - pairwise_weight = pairwise_weight.view(n, h, w) - pairwise_weight /= pairwise_weight.shape[-1] - - return pairwise_weight - - def forward(self, x): - # Assume `reduction = 1`, then `inter_channels = C` - # or `inter_channels = C` when `mode="gaussian"` - - # NonLocal1d x: [N, C, H] - # NonLocal2d x: [N, C, H, W] - # NonLocal3d x: [N, C, T, H, W] - n = x.size(0) - - # NonLocal1d g_x: [N, H, C] - # NonLocal2d g_x: [N, HxW, C] - # NonLocal3d g_x: [N, TxHxW, C] - g_x = self.g(x).view(n, self.inter_channels, -1) - g_x = g_x.permute(0, 2, 1) - - # NonLocal1d theta_x: [N, H, C], phi_x: [N, C, H] - # NonLocal2d theta_x: [N, HxW, C], phi_x: [N, C, HxW] - # NonLocal3d theta_x: [N, TxHxW, C], phi_x: [N, C, TxHxW] - if self.mode == 'gaussian': - theta_x = x.view(n, self.in_channels, -1) - theta_x = theta_x.permute(0, 2, 1) 
- if self.sub_sample: - phi_x = self.phi(x).view(n, self.in_channels, -1) - else: - phi_x = x.view(n, self.in_channels, -1) - elif self.mode == 'concatenation': - theta_x = self.theta(x).view(n, self.inter_channels, -1, 1) - phi_x = self.phi(x).view(n, self.inter_channels, 1, -1) - else: - theta_x = self.theta(x).view(n, self.inter_channels, -1) - theta_x = theta_x.permute(0, 2, 1) - phi_x = self.phi(x).view(n, self.inter_channels, -1) - - pairwise_func = getattr(self, self.mode) - # NonLocal1d pairwise_weight: [N, H, H] - # NonLocal2d pairwise_weight: [N, HxW, HxW] - # NonLocal3d pairwise_weight: [N, TxHxW, TxHxW] - pairwise_weight = pairwise_func(theta_x, phi_x) - - # NonLocal1d y: [N, H, C] - # NonLocal2d y: [N, HxW, C] - # NonLocal3d y: [N, TxHxW, C] - y = torch.matmul(pairwise_weight, g_x) - # NonLocal1d y: [N, C, H] - # NonLocal2d y: [N, C, H, W] - # NonLocal3d y: [N, C, T, H, W] - y = y.permute(0, 2, 1).contiguous().reshape(n, self.inter_channels, - *x.size()[2:]) - - output = x + self.conv_out(y) - - return output - - -class NonLocal1d(_NonLocalNd): - """1D Non-local module. - - Args: - in_channels (int): Same as `NonLocalND`. - sub_sample (bool): Whether to apply max pooling after pairwise - function (Note that the `sub_sample` is applied on spatial only). - Default: False. - conv_cfg (None | dict): Same as `NonLocalND`. - Default: dict(type='Conv1d'). - """ - - def __init__(self, - in_channels, - sub_sample=False, - conv_cfg=dict(type='Conv1d'), - **kwargs): - super(NonLocal1d, self).__init__( - in_channels, conv_cfg=conv_cfg, **kwargs) - - self.sub_sample = sub_sample - - if sub_sample: - max_pool_layer = nn.MaxPool1d(kernel_size=2) - self.g = nn.Sequential(self.g, max_pool_layer) - if self.mode != 'gaussian': - self.phi = nn.Sequential(self.phi, max_pool_layer) - else: - self.phi = max_pool_layer - - -@PLUGIN_LAYERS.register_module() -class NonLocal2d(_NonLocalNd): - """2D Non-local module. - - Args: - in_channels (int): Same as `NonLocalND`. - sub_sample (bool): Whether to apply max pooling after pairwise - function (Note that the `sub_sample` is applied on spatial only). - Default: False. - conv_cfg (None | dict): Same as `NonLocalND`. - Default: dict(type='Conv2d'). - """ - - _abbr_ = 'nonlocal_block' - - def __init__(self, - in_channels, - sub_sample=False, - conv_cfg=dict(type='Conv2d'), - **kwargs): - super(NonLocal2d, self).__init__( - in_channels, conv_cfg=conv_cfg, **kwargs) - - self.sub_sample = sub_sample - - if sub_sample: - max_pool_layer = nn.MaxPool2d(kernel_size=(2, 2)) - self.g = nn.Sequential(self.g, max_pool_layer) - if self.mode != 'gaussian': - self.phi = nn.Sequential(self.phi, max_pool_layer) - else: - self.phi = max_pool_layer - - -class NonLocal3d(_NonLocalNd): - """3D Non-local module. - - Args: - in_channels (int): Same as `NonLocalND`. - sub_sample (bool): Whether to apply max pooling after pairwise - function (Note that the `sub_sample` is applied on spatial only). - Default: False. - conv_cfg (None | dict): Same as `NonLocalND`. - Default: dict(type='Conv3d'). 
- """ - - def __init__(self, - in_channels, - sub_sample=False, - conv_cfg=dict(type='Conv3d'), - **kwargs): - super(NonLocal3d, self).__init__( - in_channels, conv_cfg=conv_cfg, **kwargs) - self.sub_sample = sub_sample - - if sub_sample: - max_pool_layer = nn.MaxPool3d(kernel_size=(1, 2, 2)) - self.g = nn.Sequential(self.g, max_pool_layer) - if self.mode != 'gaussian': - self.phi = nn.Sequential(self.phi, max_pool_layer) - else: - self.phi = max_pool_layer diff --git a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/lvis/lvis_eval.py b/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/lvis/lvis_eval.py deleted file mode 100644 index 5d242ec4d61a8506fb3e971e79437842c413e883..0000000000000000000000000000000000000000 --- a/spaces/Pinwheel/GLIP-BLIP-Object-Detection-VQA/maskrcnn_benchmark/data/datasets/evaluation/lvis/lvis_eval.py +++ /dev/null @@ -1,998 +0,0 @@ -# Copyright (c) Aishwarya Kamath & Nicolas Carion. Licensed under the Apache License 2.0. All Rights Reserved -# Copyright (c) Facebook, Inc. and its affiliates. All Rights Reserved -import copy -import datetime -import json -import os -from collections import OrderedDict, defaultdict - -import numpy as np -import pycocotools.mask as mask_util -import torch -import torch._six - -import maskrcnn_benchmark.utils.mdetr_dist as dist - -from maskrcnn_benchmark.utils.mdetr_dist import all_gather - - -from .lvis import LVIS - -def merge(img_ids, eval_imgs): - all_img_ids = all_gather(img_ids) - all_eval_imgs = all_gather(eval_imgs) - - merged_img_ids = [] - for p in all_img_ids: - merged_img_ids.extend(p) - - merged_eval_imgs = [] - for p in all_eval_imgs: - merged_eval_imgs.append(p) - - merged_img_ids = np.array(merged_img_ids) - merged_eval_imgs = np.concatenate(merged_eval_imgs, 2) - - # keep only unique (and in sorted order) images - merged_img_ids, idx = np.unique(merged_img_ids, return_index=True) - merged_eval_imgs = merged_eval_imgs[..., idx] - - return merged_img_ids, merged_eval_imgs - - -################################################################# -# From LVIS, with following changes: -# * fixed LVISEval constructor to accept empty dt -# * Removed logger -# * LVIS results supports numpy inputs -################################################################# - - -class Params: - def __init__(self, iou_type): - """Params for LVIS evaluation API.""" - self.img_ids = [] - self.cat_ids = [] - # np.arange causes trouble. the data point on arange is slightly - # larger than the true value - self.iou_thrs = np.linspace(0.5, 0.95, int(np.round((0.95 - 0.5) / 0.05)) + 1, endpoint=True) - self.rec_thrs = np.linspace(0.0, 1.00, int(np.round((1.00 - 0.0) / 0.01)) + 1, endpoint=True) - self.max_dets = 300 - self.area_rng = [ - [0 ** 2, 1e5 ** 2], - [0 ** 2, 32 ** 2], - [32 ** 2, 96 ** 2], - [96 ** 2, 1e5 ** 2], - ] - self.area_rng_lbl = ["all", "small", "medium", "large"] - self.use_cats = 1 - # We bin categories in three bins based how many images of the training - # set the category is present in. - # r: Rare : < 10 - # c: Common : >= 10 and < 100 - # f: Frequent: >= 100 - self.img_count_lbl = ["r", "c", "f"] - self.iou_type = iou_type - - -class LVISResults(LVIS): - def __init__(self, lvis_gt, results, max_dets=300): - """Constructor for LVIS results. 
- Args: - lvis_gt (LVIS class instance, or str containing path of - annotation file) - results (str containing path of result file or a list of dicts) - max_dets (int): max number of detections per image. The official - value of max_dets for LVIS is 300. - """ - super(LVISResults, self).__init__() - assert isinstance(lvis_gt, LVIS) - self.dataset["images"] = [img for img in lvis_gt.dataset["images"]] - - if isinstance(results, str): - result_anns = self._load_json(results) - elif type(results) == np.ndarray: - result_anns = self.loadNumpyAnnotations(results) - else: - result_anns = results - - if max_dets >= 0: - result_anns = self.limit_dets_per_image(result_anns, max_dets) - - if len(result_anns) > 0 and "bbox" in result_anns[0]: - self.dataset["categories"] = copy.deepcopy(lvis_gt.dataset["categories"]) - for id, ann in enumerate(result_anns): - x1, y1, w, h = ann["bbox"] - x2 = x1 + w - y2 = y1 + h - - if "segmentation" not in ann: - ann["segmentation"] = [[x1, y1, x1, y2, x2, y2, x2, y1]] - - ann["area"] = w * h - ann["id"] = id + 1 - - elif len(result_anns) > 0 and "segmentation" in result_anns[0]: - self.dataset["categories"] = copy.deepcopy(lvis_gt.dataset["categories"]) - for id, ann in enumerate(result_anns): - # Only support compressed RLE format as segmentation results - ann["area"] = mask_util.area(ann["segmentation"]) - - if "bbox" not in ann: - ann["bbox"] = mask_util.toBbox(ann["segmentation"]) - - ann["id"] = id + 1 - - self.dataset["annotations"] = result_anns - self._create_index() - - # #FIXME: disabling this check for now - # img_ids_in_result = [ann["image_id"] for ann in result_anns] - - # assert set(img_ids_in_result) == ( - # set(img_ids_in_result) & set(self.get_img_ids()) - # ), "Results do not correspond to current LVIS set." - - def limit_dets_per_image(self, anns, max_dets): - img_ann = defaultdict(list) - for ann in anns: - img_ann[ann["image_id"]].append(ann) - - for img_id, _anns in img_ann.items(): - if len(_anns) <= max_dets: - continue - _anns = sorted(_anns, key=lambda ann: ann["score"], reverse=True) - img_ann[img_id] = _anns[:max_dets] - - return [ann for anns in img_ann.values() for ann in anns] - - def get_top_results(self, img_id, score_thrs): - ann_ids = self.get_ann_ids(img_ids=[img_id]) - anns = self.load_anns(ann_ids) - return list(filter(lambda ann: ann["score"] > score_thrs, anns)) - - -class LVISEval: - def __init__(self, lvis_gt, lvis_dt=None, iou_type="segm"): - """Constructor for LVISEval. 
- Args: - lvis_gt (LVIS class instance, or str containing path of annotation file) - lvis_dt (LVISResult class instance, or str containing path of result file, - or list of dict) - iou_type (str): segm or bbox evaluation - """ - - if iou_type not in ["bbox", "segm"]: - raise ValueError("iou_type: {} is not supported.".format(iou_type)) - - if isinstance(lvis_gt, LVIS): - self.lvis_gt = lvis_gt - elif isinstance(lvis_gt, str): - self.lvis_gt = LVIS(lvis_gt) - else: - raise TypeError("Unsupported type {} of lvis_gt.".format(lvis_gt)) - - if isinstance(lvis_dt, LVISResults): - self.lvis_dt = lvis_dt - elif isinstance(lvis_dt, (str, list)): - self.lvis_dt = LVISResults(self.lvis_gt, lvis_dt) - elif lvis_dt is not None: - raise TypeError("Unsupported type {} of lvis_dt.".format(lvis_dt)) - - # per-image per-category evaluation results - self.eval_imgs = defaultdict(list) - self.eval = {} # accumulated evaluation results - self._gts = defaultdict(list) # gt for evaluation - self._dts = defaultdict(list) # dt for evaluation - self.params = Params(iou_type=iou_type) # parameters - self.results = OrderedDict() - self.stats = [] - self.ious = {} # ious between all gts and dts - - self.params.img_ids = sorted(self.lvis_gt.get_img_ids()) - self.params.cat_ids = sorted(self.lvis_gt.get_cat_ids()) - - def _to_mask(self, anns, lvis): - for ann in anns: - rle = lvis.ann_to_rle(ann) - ann["segmentation"] = rle - - def _prepare(self): - """Prepare self._gts and self._dts for evaluation based on params.""" - - cat_ids = self.params.cat_ids if self.params.cat_ids else None - - gts = self.lvis_gt.load_anns(self.lvis_gt.get_ann_ids(img_ids=self.params.img_ids, cat_ids=cat_ids)) - dts = self.lvis_dt.load_anns(self.lvis_dt.get_ann_ids(img_ids=self.params.img_ids, cat_ids=cat_ids)) - # convert ground truth to mask if iou_type == 'segm' - if self.params.iou_type == "segm": - self._to_mask(gts, self.lvis_gt) - self._to_mask(dts, self.lvis_dt) - - # set ignore flag - for gt in gts: - if "ignore" not in gt: - gt["ignore"] = 0 - - for gt in gts: - self._gts[gt["image_id"], gt["category_id"]].append(gt) - - # For federated dataset evaluation we will filter out all dt for an - # image which belong to categories not present in gt and not present in - # the negative list for an image. In other words detector is not penalized - # for categories about which we don't have gt information about their - # presence or absence in an image. - img_data = self.lvis_gt.load_imgs(ids=self.params.img_ids) - # per image map of categories not present in image - img_nl = {d["id"]: d["neg_category_ids"] for d in img_data} - # per image list of categories present in image - img_pl = defaultdict(set) - for ann in gts: - img_pl[ann["image_id"]].add(ann["category_id"]) - # per image map of categoires which have missing gt. For these - # categories we don't penalize the detector for flase positives. 
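# In other words, LVIS is a federated dataset: each image is exhaustively annotated
# only for a subset of categories. A detection for (img_id, cat_id) is kept only when
# that category is either listed among the image's negative categories or actually
# present in its ground truth; everything else is dropped below, so the detector is
# never penalized for categories whose presence or absence was not annotated.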
- self.img_nel = {d["id"]: d["not_exhaustive_category_ids"] for d in img_data} - - for dt in dts: - img_id, cat_id = dt["image_id"], dt["category_id"] - if cat_id not in img_nl[img_id] and cat_id not in img_pl[img_id]: - continue - self._dts[img_id, cat_id].append(dt) - - self.freq_groups = self._prepare_freq_group() - - def _prepare_freq_group(self): - freq_groups = [[] for _ in self.params.img_count_lbl] - cat_data = self.lvis_gt.load_cats(self.params.cat_ids) - for idx, _cat_data in enumerate(cat_data): - frequency = _cat_data["frequency"] - freq_groups[self.params.img_count_lbl.index(frequency)].append(idx) - return freq_groups - - def evaluate(self): - """ - Run per image evaluation on given images and store results - (a list of dict) in self.eval_imgs. - """ - - self.params.img_ids = list(np.unique(self.params.img_ids)) - - if self.params.use_cats: - cat_ids = self.params.cat_ids - else: - cat_ids = [-1] - - self._prepare() - - self.ious = { - (img_id, cat_id): self.compute_iou(img_id, cat_id) for img_id in self.params.img_ids for cat_id in cat_ids - } - - # loop through images, area range, max detection number - self.eval_imgs = [ - self.evaluate_img(img_id, cat_id, area_rng) - for cat_id in cat_ids - for area_rng in self.params.area_rng - for img_id in self.params.img_ids - ] - - def _get_gt_dt(self, img_id, cat_id): - """Create gt, dt which are list of anns/dets. If use_cats is true - only anns/dets corresponding to tuple (img_id, cat_id) will be - used. Else, all anns/dets in image are used and cat_id is not used. - """ - if self.params.use_cats: - gt = self._gts[img_id, cat_id] - dt = self._dts[img_id, cat_id] - else: - gt = [_ann for _cat_id in self.params.cat_ids for _ann in self._gts[img_id, cat_id]] - dt = [_ann for _cat_id in self.params.cat_ids for _ann in self._dts[img_id, cat_id]] - return gt, dt - - def compute_iou(self, img_id, cat_id): - gt, dt = self._get_gt_dt(img_id, cat_id) - - if len(gt) == 0 and len(dt) == 0: - return [] - - # Sort detections in decreasing order of score. - idx = np.argsort([-d["score"] for d in dt], kind="mergesort") - dt = [dt[i] for i in idx] - - iscrowd = [int(False)] * len(gt) - - if self.params.iou_type == "segm": - ann_type = "segmentation" - elif self.params.iou_type == "bbox": - ann_type = "bbox" - else: - raise ValueError("Unknown iou_type for iou computation.") - gt = [g[ann_type] for g in gt] - dt = [d[ann_type] for d in dt] - - # compute iou between each dt and gt region - # will return array of shape len(dt), len(gt) - ious = mask_util.iou(dt, gt, iscrowd) - return ious - - def evaluate_img(self, img_id, cat_id, area_rng): - """Perform evaluation for single category and image.""" - gt, dt = self._get_gt_dt(img_id, cat_id) - - if len(gt) == 0 and len(dt) == 0: - return None - - # Add another filed _ignore to only consider anns based on area range. 
- for g in gt: - if g["ignore"] or (g["area"] < area_rng[0] or g["area"] > area_rng[1]): - g["_ignore"] = 1 - else: - g["_ignore"] = 0 - - # Sort gt ignore last - gt_idx = np.argsort([g["_ignore"] for g in gt], kind="mergesort") - gt = [gt[i] for i in gt_idx] - - # Sort dt highest score first - dt_idx = np.argsort([-d["score"] for d in dt], kind="mergesort") - dt = [dt[i] for i in dt_idx] - - # load computed ious - ious = self.ious[img_id, cat_id][:, gt_idx] if len(self.ious[img_id, cat_id]) > 0 else self.ious[img_id, cat_id] - - num_thrs = len(self.params.iou_thrs) - num_gt = len(gt) - num_dt = len(dt) - - # Array to store the "id" of the matched dt/gt - gt_m = np.zeros((num_thrs, num_gt)) - dt_m = np.zeros((num_thrs, num_dt)) - - gt_ig = np.array([g["_ignore"] for g in gt]) - dt_ig = np.zeros((num_thrs, num_dt)) - - for iou_thr_idx, iou_thr in enumerate(self.params.iou_thrs): - if len(ious) == 0: - break - - for dt_idx, _dt in enumerate(dt): - iou = min([iou_thr, 1 - 1e-10]) - # information about best match so far (m=-1 -> unmatched) - # store the gt_idx which matched for _dt - m = -1 - for gt_idx, _ in enumerate(gt): - # if this gt already matched continue - if gt_m[iou_thr_idx, gt_idx] > 0: - continue - # if _dt matched to reg gt, and on ignore gt, stop - if m > -1 and gt_ig[m] == 0 and gt_ig[gt_idx] == 1: - break - # continue to next gt unless better match made - if ious[dt_idx, gt_idx] < iou: - continue - # if match successful and best so far, store appropriately - iou = ious[dt_idx, gt_idx] - m = gt_idx - - # No match found for _dt, go to next _dt - if m == -1: - continue - - # if gt to ignore for some reason update dt_ig. - # Should not be used in evaluation. - dt_ig[iou_thr_idx, dt_idx] = gt_ig[m] - # _dt match found, update gt_m, and dt_m with "id" - dt_m[iou_thr_idx, dt_idx] = gt[m]["id"] - gt_m[iou_thr_idx, m] = _dt["id"] - - # For LVIS we will ignore any unmatched detection if that category was - # not exhaustively annotated in gt. - dt_ig_mask = [ - d["area"] < area_rng[0] or d["area"] > area_rng[1] or d["category_id"] in self.img_nel[d["image_id"]] - for d in dt - ] - dt_ig_mask = np.array(dt_ig_mask).reshape((1, num_dt)) # 1 X num_dt - dt_ig_mask = np.repeat(dt_ig_mask, num_thrs, 0) # num_thrs X num_dt - # Based on dt_ig_mask ignore any unmatched detection by updating dt_ig - dt_ig = np.logical_or(dt_ig, np.logical_and(dt_m == 0, dt_ig_mask)) - # store results for given image and category - return { - "image_id": img_id, - "category_id": cat_id, - "area_rng": area_rng, - "dt_ids": [d["id"] for d in dt], - "gt_ids": [g["id"] for g in gt], - "dt_matches": dt_m, - "gt_matches": gt_m, - "dt_scores": [d["score"] for d in dt], - "gt_ignore": gt_ig, - "dt_ignore": dt_ig, - } - - def accumulate(self): - """Accumulate per image evaluation results and store the result in - self.eval. 
- """ - - if not self.eval_imgs: - print("Warning: Please run evaluate first.") - - if self.params.use_cats: - cat_ids = self.params.cat_ids - else: - cat_ids = [-1] - - num_thrs = len(self.params.iou_thrs) - num_recalls = len(self.params.rec_thrs) - num_cats = len(cat_ids) - num_area_rngs = len(self.params.area_rng) - num_imgs = len(self.params.img_ids) - - # -1 for absent categories - precision = -np.ones((num_thrs, num_recalls, num_cats, num_area_rngs)) - recall = -np.ones((num_thrs, num_cats, num_area_rngs)) - - # Initialize dt_pointers - dt_pointers = {} - for cat_idx in range(num_cats): - dt_pointers[cat_idx] = {} - for area_idx in range(num_area_rngs): - dt_pointers[cat_idx][area_idx] = {} - - # Per category evaluation - for cat_idx in range(num_cats): - Nk = cat_idx * num_area_rngs * num_imgs - for area_idx in range(num_area_rngs): - Na = area_idx * num_imgs - E = [self.eval_imgs[Nk + Na + img_idx] for img_idx in range(num_imgs)] - # Remove elements which are None - E = [e for e in E if e is not None] - if len(E) == 0: - continue - - # Append all scores: shape (N,) - dt_scores = np.concatenate([e["dt_scores"] for e in E], axis=0) - dt_ids = np.concatenate([e["dt_ids"] for e in E], axis=0) - - dt_idx = np.argsort(-dt_scores, kind="mergesort") - dt_scores = dt_scores[dt_idx] - dt_ids = dt_ids[dt_idx] - - dt_m = np.concatenate([e["dt_matches"] for e in E], axis=1)[:, dt_idx] - dt_ig = np.concatenate([e["dt_ignore"] for e in E], axis=1)[:, dt_idx] - - gt_ig = np.concatenate([e["gt_ignore"] for e in E]) - # num gt anns to consider - num_gt = np.count_nonzero(gt_ig == 0) - - if num_gt == 0: - continue - - tps = np.logical_and(dt_m, np.logical_not(dt_ig)) - fps = np.logical_and(np.logical_not(dt_m), np.logical_not(dt_ig)) - - tp_sum = np.cumsum(tps, axis=1).astype(dtype=np.float) - fp_sum = np.cumsum(fps, axis=1).astype(dtype=np.float) - - dt_pointers[cat_idx][area_idx] = { - "dt_ids": dt_ids, - "tps": tps, - "fps": fps, - } - - for iou_thr_idx, (tp, fp) in enumerate(zip(tp_sum, fp_sum)): - tp = np.array(tp) - fp = np.array(fp) - num_tp = len(tp) - rc = tp / num_gt - if num_tp: - recall[iou_thr_idx, cat_idx, area_idx] = rc[-1] - else: - recall[iou_thr_idx, cat_idx, area_idx] = 0 - - # np.spacing(1) ~= eps - pr = tp / (fp + tp + np.spacing(1)) - pr = pr.tolist() - - # Replace each precision value with the maximum precision - # value to the right of that recall level. This ensures - # that the calculated AP value will be less suspectable - # to small variations in the ranking. 
- for i in range(num_tp - 1, 0, -1): - if pr[i] > pr[i - 1]: - pr[i - 1] = pr[i] - - rec_thrs_insert_idx = np.searchsorted(rc, self.params.rec_thrs, side="left") - - pr_at_recall = [0.0] * num_recalls - - try: - for _idx, pr_idx in enumerate(rec_thrs_insert_idx): - pr_at_recall[_idx] = pr[pr_idx] - except Exception: - pass - precision[iou_thr_idx, :, cat_idx, area_idx] = np.array(pr_at_recall) - - self.eval = { - "params": self.params, - "counts": [num_thrs, num_recalls, num_cats, num_area_rngs], - "date": datetime.datetime.now().strftime("%Y-%m-%d %H:%M:%S"), - "precision": precision, - "recall": recall, - "dt_pointers": dt_pointers, - } - - def _summarize(self, summary_type, iou_thr=None, area_rng="all", freq_group_idx=None): - aidx = [idx for idx, _area_rng in enumerate(self.params.area_rng_lbl) if _area_rng == area_rng] - - if summary_type == "ap": - s = self.eval["precision"] - if iou_thr is not None: - tidx = np.where(iou_thr == self.params.iou_thrs)[0] - s = s[tidx] - if freq_group_idx is not None: - s = s[:, :, self.freq_groups[freq_group_idx], aidx] - else: - s = s[:, :, :, aidx] - else: - s = self.eval["recall"] - if iou_thr is not None: - tidx = np.where(iou_thr == self.params.iou_thrs)[0] - s = s[tidx] - s = s[:, :, aidx] - - if len(s[s > -1]) == 0: - mean_s = -1 - else: - mean_s = np.mean(s[s > -1]) - return mean_s - - def summarize(self): - """Compute and display summary metrics for evaluation results.""" - if not self.eval: - raise RuntimeError("Please run accumulate() first.") - - max_dets = self.params.max_dets - - self.results["AP"] = self._summarize("ap") - self.results["AP50"] = self._summarize("ap", iou_thr=0.50) - self.results["AP75"] = self._summarize("ap", iou_thr=0.75) - self.results["APs"] = self._summarize("ap", area_rng="small") - self.results["APm"] = self._summarize("ap", area_rng="medium") - self.results["APl"] = self._summarize("ap", area_rng="large") - self.results["APr"] = self._summarize("ap", freq_group_idx=0) - self.results["APc"] = self._summarize("ap", freq_group_idx=1) - self.results["APf"] = self._summarize("ap", freq_group_idx=2) - - self.stats = np.zeros((9,)) - self.stats[0] = self._summarize("ap") - self.stats[1] = self._summarize("ap", iou_thr=0.50) - self.stats[2] = self._summarize("ap", iou_thr=0.75) - self.stats[3] = self._summarize("ap", area_rng="small") - self.stats[4] = self._summarize("ap", area_rng="medium") - self.stats[5] = self._summarize("ap", area_rng="large") - self.stats[6] = self._summarize("ap", freq_group_idx=0) - self.stats[7] = self._summarize("ap", freq_group_idx=1) - self.stats[8] = self._summarize("ap", freq_group_idx=2) - - key = "AR@{}".format(max_dets) - self.results[key] = self._summarize("ar") - - for area_rng in ["small", "medium", "large"]: - key = "AR{}@{}".format(area_rng[0], max_dets) - self.results[key] = self._summarize("ar", area_rng=area_rng) - _returned = self.print_results() - return _returned - - def run(self): - """Wrapper function which calculates the results.""" - self.evaluate() - self.accumulate() - self.summarize() - - def print_results(self): - template = " {:<18} {} @[ IoU={:<9} | area={:>6s} | maxDets={:>3d} catIds={:>3s}] = {:0.3f}" - out_strings = [] - for key, value in self.results.items(): - max_dets = self.params.max_dets - if "AP" in key: - title = "Average Precision" - _type = "(AP)" - else: - title = "Average Recall" - _type = "(AR)" - - if len(key) > 2 and key[2].isdigit(): - iou_thr = float(key[2:]) / 100 - iou = "{:0.2f}".format(iou_thr) - else: - iou = 
"{:0.2f}:{:0.2f}".format(self.params.iou_thrs[0], self.params.iou_thrs[-1]) - - if len(key) > 2 and key[2] in ["r", "c", "f"]: - cat_group_name = key[2] - else: - cat_group_name = "all" - - if len(key) > 2 and key[2] in ["s", "m", "l"]: - area_rng = key[2] - else: - area_rng = "all" - - print(template.format(title, _type, iou, area_rng, max_dets, cat_group_name, value)) - out_strings.append(template.format(title, _type, iou, area_rng, max_dets, cat_group_name, value)) - return out_strings - - def get_results(self): - if not self.results: - print("Warning: results is empty. Call run().") - return self.results - - -################################################################# -# end of straight copy from lvis, just fixing constructor -################################################################# - - -class LvisEvaluator(object): - def __init__(self, lvis_gt, iou_types): - assert isinstance(iou_types, (list, tuple)) - # lvis_gt = copy.deepcopy(lvis_gt) - self.lvis_gt = lvis_gt - - self.iou_types = iou_types - self.coco_eval = {} - for iou_type in iou_types: - self.coco_eval[iou_type] = LVISEval(lvis_gt, iou_type=iou_type) - - self.img_ids = [] - self.eval_imgs = {k: [] for k in iou_types} - - def update(self, predictions): - img_ids = list(np.unique(list(predictions.keys()))) - self.img_ids.extend(img_ids) - - for iou_type in self.iou_types: - results = self.prepare(predictions, iou_type) - lvis_dt = LVISResults(self.lvis_gt, results) - lvis_eval = self.coco_eval[iou_type] - - lvis_eval.lvis_dt = lvis_dt - lvis_eval.params.img_ids = list(img_ids) - lvis_eval.evaluate() - eval_imgs = lvis_eval.eval_imgs - eval_imgs = np.asarray(eval_imgs).reshape( - len(lvis_eval.params.cat_ids), len(lvis_eval.params.area_rng), len(lvis_eval.params.img_ids) - ) - - self.eval_imgs[iou_type].append(eval_imgs) - - def synchronize_between_processes(self): - for iou_type in self.iou_types: - self.eval_imgs[iou_type] = np.concatenate(self.eval_imgs[iou_type], 2) - create_common_lvis_eval(self.coco_eval[iou_type], self.img_ids, self.eval_imgs[iou_type]) - - def accumulate(self): - for lvis_eval in self.coco_eval.values(): - lvis_eval.accumulate() - - def summarize(self): - for iou_type, lvis_eval in self.coco_eval.items(): - print("IoU metric: {}".format(iou_type)) - lvis_eval.summarize() - - def prepare(self, predictions, iou_type): - if iou_type == "bbox": - return self.prepare_for_lvis_detection(predictions) - elif iou_type == "segm": - return self.prepare_for_lvis_segmentation(predictions) - elif iou_type == "keypoints": - return self.prepare_for_lvis_keypoint(predictions) - else: - raise ValueError("Unknown iou type {}".format(iou_type)) - - def prepare_for_lvis_detection(self, predictions): - lvis_results = [] - for original_id, prediction in predictions.items(): - if len(prediction) == 0: - continue - - boxes = prediction["boxes"] - boxes = convert_to_xywh(boxes).tolist() - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - - lvis_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - "bbox": box, - "score": scores[k], - } - for k, box in enumerate(boxes) - ] - ) - return lvis_results - - def prepare_for_lvis_segmentation(self, predictions): - lvis_results = [] - for original_id, prediction in predictions.items(): - if len(prediction) == 0: - continue - - scores = prediction["scores"] - labels = prediction["labels"] - masks = prediction["masks"] - - masks = masks > 0.5 - - scores = prediction["scores"].tolist() - labels = 
prediction["labels"].tolist() - - rles = [ - mask_util.encode(np.array(mask[0, :, :, np.newaxis], dtype=np.uint8, order="F"))[0] for mask in masks - ] - for rle in rles: - rle["counts"] = rle["counts"].decode("utf-8") - - lvis_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - "segmentation": rle, - "score": scores[k], - } - for k, rle in enumerate(rles) - ] - ) - return lvis_results - - -def _merge_lists(listA, listB, maxN, key): - result = [] - indA, indB = 0, 0 - while (indA < len(listA) or indB < len(listB)) and len(result) < maxN: - if (indB < len(listB)) and (indA >= len(listA) or key(listA[indA]) < key(listB[indB])): - result.append(listB[indB]) - indB += 1 - else: - result.append(listA[indA]) - indA += 1 - return result - - -# Adapted from https://github.com/achalddave/large-vocab-devil/blob/9aaddc15b00e6e0d370b16743233e40d973cd53f/scripts/evaluate_ap_fixed.py -class LvisEvaluatorFixedAP(object): - def __init__(self, gt: LVIS, topk=10000, fixed_ap=True): - - self.results = [] - self.by_cat = {} - self.gt = gt - self.topk = topk - self.fixed_ap = fixed_ap - - def update(self, predictions): - cur_results = self.prepare(predictions) - if self.fixed_ap: - by_cat = defaultdict(list) - for ann in cur_results: - by_cat[ann["category_id"]].append(ann) - - for cat, cat_anns in by_cat.items(): - if cat not in self.by_cat: - self.by_cat[cat] = [] - - cur = sorted(cat_anns, key=lambda x: x["score"], reverse=True)[: self.topk] - self.by_cat[cat] = _merge_lists(self.by_cat[cat], cur, self.topk, key=lambda x: x["score"]) - else: - by_id = defaultdict(list) - for ann in cur_results: - by_id[ann["image_id"]].append(ann) - - for id_anns in by_id.values(): - self.results.extend(sorted(id_anns, key=lambda x: x["score"], reverse=True)[:300]) - - def synchronize_between_processes(self): - if self.fixed_ap: - all_cats = dist.all_gather(self.by_cat) - self.by_cat = defaultdict(list) - for cats in all_cats: - for cat, cat_anns in cats.items(): - self.by_cat[cat].extend(cat_anns) - else: - self.results = sum(dist.all_gather(self.results), []) - - def prepare(self, predictions): - lvis_results = [] - for original_id, prediction in predictions: - if len(prediction) == 0: - continue - - boxes = prediction["boxes"] - boxes = convert_to_xywh(boxes).tolist() - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - - lvis_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - "bbox": box, - "score": scores[k], - } - for k, box in enumerate(boxes) - ] - ) - return lvis_results - - def summarize(self): - if not dist.is_main_process(): - return - - if self.fixed_ap: - return self._summarize_fixed() - else: - return self._summarize_standard() - - def _summarize_standard(self): - results = LVISResults(self.gt, self.results) - lvis_eval = LVISEval(self.gt, results, iou_type="bbox") - lvis_eval.run() - lvis_eval.print_results() - - def _summarize_fixed(self): - results = [] - - missing_dets_cats = set() - for cat, cat_anns in self.by_cat.items(): - if len(cat_anns) < self.topk: - missing_dets_cats.add(cat) - results.extend(sorted(cat_anns, key=lambda x: x["score"], reverse=True)[: self.topk]) - if missing_dets_cats: - print( - f"\n===\n" - f"{len(missing_dets_cats)} classes had less than {self.topk} detections!\n" - f"Outputting {self.topk} detections for each class will improve AP further.\n" - f"If using detectron2, please use the lvdevil/infer_topk.py script to " - f"output a results file with {self.topk} detections for each class.\n" - 
f"===" - ) - - results = LVISResults(self.gt, results, max_dets=-1) - lvis_eval = LVISEval(self.gt, results, iou_type="bbox") - params = lvis_eval.params - params.max_dets = -1 # No limit on detections per image. - lvis_eval.run() - scores = lvis_eval.print_results() - metrics = {k: v for k, v in lvis_eval.results.items() if k.startswith("AP")} - print("copypaste: %s,%s", ",".join(map(str, metrics.keys())), "path") - return scores - - -class LvisDumper(object): - def __init__(self, topk=10000, fixed_ap=True, out_path="lvis_eval"): - - self.results = [] - self.by_cat = {} - self.topk = topk - self.fixed_ap = fixed_ap - self.out_path = out_path - if dist.is_main_process(): - if not os.path.exists(self.out_path): - os.mkdir(self.out_path) - - def update(self, predictions): - cur_results = self.prepare(predictions) - if self.fixed_ap: - by_cat = defaultdict(list) - for ann in cur_results: - by_cat[ann["category_id"]].append(ann) - - for cat, cat_anns in by_cat.items(): - if cat not in self.by_cat: - self.by_cat[cat] = [] - - cur = sorted(cat_anns, key=lambda x: x["score"], reverse=True)[: self.topk] - self.by_cat[cat] = _merge_lists(self.by_cat[cat], cur, self.topk, key=lambda x: x["score"]) - else: - by_id = defaultdict(list) - for ann in cur_results: - by_id[ann["image_id"]].append(ann) - - for id_anns in by_id.values(): - self.results.extend(sorted(id_anns, key=lambda x: x["score"], reverse=True)[:300]) - - def synchronize_between_processes(self): - if self.fixed_ap: - all_cats = dist.all_gather(self.by_cat) - self.by_cat = defaultdict(list) - for cats in all_cats: - for cat, cat_anns in cats.items(): - self.by_cat[cat].extend(cat_anns) - else: - self.results = sum(dist.all_gather(self.results), []) - - def prepare(self, predictions): - lvis_results = [] - for original_id, prediction in predictions: - if len(prediction) == 0: - continue - - boxes = prediction["boxes"] - boxes = convert_to_xywh(boxes).tolist() - scores = prediction["scores"].tolist() - labels = prediction["labels"].tolist() - - lvis_results.extend( - [ - { - "image_id": original_id, - "category_id": labels[k], - "bbox": box, - "score": scores[k], - } - for k, box in enumerate(boxes) - ] - ) - return lvis_results - - def summarize(self): - if not dist.is_main_process(): - return - - if self.fixed_ap: - self._summarize_fixed() - else: - self._summarize_standard() - - def _summarize_standard(self): - json_path = os.path.join(self.out_path, "results.json") - print("dumping to ", json_path) - with open(json_path, "w") as f: - json.dump(self.results, f) - - print("dumped") - - def _summarize_fixed(self): - results = [] - - missing_dets_cats = set() - for cat, cat_anns in self.by_cat.items(): - if len(cat_anns) < self.topk: - missing_dets_cats.add(cat) - results.extend(sorted(cat_anns, key=lambda x: x["score"], reverse=True)[: self.topk]) - if missing_dets_cats: - print( - f"\n===\n" - f"{len(missing_dets_cats)} classes had less than {self.topk} detections!\n" - f"Outputting {self.topk} detections for each class will improve AP further.\n" - f"If using detectron2, please use the lvdevil/infer_topk.py script to " - f"output a results file with {self.topk} detections for each class.\n" - f"===" - ) - - json_path = os.path.join(self.out_path, "results.json") - print("dumping to ", json_path) - with open(json_path, "w") as f: - json.dump(results, f) - - print("dumped") - - -def convert_to_xywh(boxes): - xmin, ymin, xmax, ymax = boxes.unbind(1) - return torch.stack((xmin, ymin, xmax - xmin, ymax - ymin), dim=1) - - -def 
create_common_lvis_eval(lvis_eval, img_ids, eval_imgs): - img_ids, eval_imgs = merge(img_ids, eval_imgs) - img_ids = list(img_ids) - eval_imgs = list(eval_imgs.flatten()) - - lvis_eval.eval_imgs = eval_imgs - lvis_eval.params.img_ids = img_ids - -def lvis_evaluation(): - pass \ No newline at end of file diff --git a/spaces/Pushpak77/fastspeech2-TTS/README.md b/spaces/Pushpak77/fastspeech2-TTS/README.md deleted file mode 100644 index 402652f2668ed9803cb9d7a0cdac5c0cca6a09f2..0000000000000000000000000000000000000000 --- a/spaces/Pushpak77/fastspeech2-TTS/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Fastspeech2 TTS -emoji: 🚀 -colorFrom: pink -colorTo: gray -sdk: gradio -sdk_version: 2.8.13 -app_file: app.py -pinned: false -duplicated_from: StevenLimcorn/fastspeech2-TTS ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/RMXK/RVC_HFF/infer/lib/csvutil.py b/spaces/RMXK/RVC_HFF/infer/lib/csvutil.py deleted file mode 100644 index 79f432b6933f181d9194c50581656f2fd6e66c0c..0000000000000000000000000000000000000000 --- a/spaces/RMXK/RVC_HFF/infer/lib/csvutil.py +++ /dev/null @@ -1,41 +0,0 @@ - -import numpy as np - -# import praatio -# import praatio.praat_scripts -import os -import sys - -import random - -import csv - -# praatEXE = join('.',os.path.abspath(os.getcwd()) + r"\Praat.exe") - - -def CSVutil(file, rw, type, *args): - if type == "formanting": - if rw == "r": - with open(file) as fileCSVread: - csv_reader = list(csv.reader(fileCSVread)) - return ( - (csv_reader[0][0], csv_reader[0][1], csv_reader[0][2]) - if csv_reader is not None - else (lambda: exec('raise ValueError("No data")'))() - ) - else: - if args: - doformnt = args[0] - else: - doformnt = False - qfr = args[1] if len(args) > 1 else 1.0 - tmb = args[2] if len(args) > 2 else 1.0 - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([doformnt, qfr, tmb]) - elif type == "stop": - stop = args[0] if args else False - with open(file, rw, newline="") as fileCSVwrite: - csv_writer = csv.writer(fileCSVwrite, delimiter=",") - csv_writer.writerow([stop]) - diff --git a/spaces/RMeli/gnina-torch/html/protein.html b/spaces/RMeli/gnina-torch/html/protein.html deleted file mode 100644 index d8700cb935f953cbba3c328086e9dee4e6aaa916..0000000000000000000000000000000000000000 --- a/spaces/RMeli/gnina-torch/html/protein.html +++ /dev/null @@ -1,36 +0,0 @@ - - - - - - - - - - -
    - - - \ No newline at end of file diff --git a/spaces/RamAnanth1/videocrafter/gradio_t2v.py b/spaces/RamAnanth1/videocrafter/gradio_t2v.py deleted file mode 100644 index 7702611f495ad313e33a0a745e7d9a63c67fb3a7..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/videocrafter/gradio_t2v.py +++ /dev/null @@ -1,53 +0,0 @@ -import gradio as gr - -def create_demo(get_video): - block = gr.Blocks(css='style.css').queue() - with block: - with gr.Group(): - with gr.Box(): - with gr.Row(elem_id='prompt-container').style(equal_height=True): - prompt = gr.Text( - label='Prompt', - show_label=False, - max_lines=1, - placeholder='Enter your prompt', - elem_id='prompt-text-input').style(container=False) - run_button = gr.Button('Generate video').style( - full_width=False) - result = gr.Video(label='Result', show_label=False, elem_id='gallery') - with gr.Accordion('Advanced options', open=False): - seed = gr.Slider( - label='Seed', - minimum=-1, - maximum=1000000, - step=1, - value=-1, - info='If set to -1, a different seed will be used each time.') - sampling_steps = gr.Slider(label='Number of sampling steps', - minimum=10, - maximum=100, - step=5, - value=50) - - inputs = [ - prompt, - seed, - sampling_steps - # num_frames, - # num_inference_steps, - ] - gr.Examples(examples=[ - ["Astronaut riding a horse", 731, 50], - ["Cars running on the highway at night", 1000, 50], - ["close up of a clown fish swimming. 4K",865, 50] - - ], - inputs=inputs, - outputs=result, - fn=get_video, - cache_examples=True) - - prompt.submit(fn=get_video, inputs=inputs, outputs=result) - run_button.click(fn=get_video, inputs=inputs, outputs=result) - - return block \ No newline at end of file diff --git a/spaces/RamAnanth1/videocrafter/gradio_videocontrol.py b/spaces/RamAnanth1/videocrafter/gradio_videocontrol.py deleted file mode 100644 index 289741d360effd101bea8b57940279d31bc90b7f..0000000000000000000000000000000000000000 --- a/spaces/RamAnanth1/videocrafter/gradio_videocontrol.py +++ /dev/null @@ -1,49 +0,0 @@ -import gradio as gr - -def create_demo(get_video_control): - block = gr.Blocks(css='style.css').queue() - with block: - with gr.Group(): - - prompt = gr.Text( - label='Prompt', - show_label=False, - max_lines=1, - placeholder='Enter your prompt', - elem_id='prompt-text-input').style(container=False) - input_video = gr.Video(label='Input Video') - run_button = gr.Button('Generate video') - result = gr.Video(label='Result', show_label=False, elem_id='gallery') - with gr.Accordion('Advanced options', open=False): - seed = gr.Slider( - label='Seed', - minimum=-1, - maximum=1000000, - step=1, - value=-1, - info='If set to -1, a different seed will be used each time.') - sampling_steps = gr.Slider(label='Number of sampling steps', - minimum=10, - maximum=100, - step=5, - value=50) - - inputs = [ - prompt, - input_video, - seed, - sampling_steps - # num_frames, - # num_inference_steps, - ] - gr.Examples(examples=[ - ], - inputs=inputs, - outputs=result, - fn=get_video_control, - cache_examples=True) - - prompt.submit(fn=get_video_control, inputs=inputs, outputs=result) - run_button.click(fn=get_video_control, inputs=inputs, outputs=result) - - return block \ No newline at end of file diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_cell_widths.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_cell_widths.py deleted file mode 100644 index 36286df379e28ea997bea3ee1fd62cadebebbba9..0000000000000000000000000000000000000000 --- 
a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/pip/_vendor/rich/_cell_widths.py +++ /dev/null @@ -1,451 +0,0 @@ -# Auto generated by make_terminal_widths.py - -CELL_WIDTHS = [ - (0, 0, 0), - (1, 31, -1), - (127, 159, -1), - (768, 879, 0), - (1155, 1161, 0), - (1425, 1469, 0), - (1471, 1471, 0), - (1473, 1474, 0), - (1476, 1477, 0), - (1479, 1479, 0), - (1552, 1562, 0), - (1611, 1631, 0), - (1648, 1648, 0), - (1750, 1756, 0), - (1759, 1764, 0), - (1767, 1768, 0), - (1770, 1773, 0), - (1809, 1809, 0), - (1840, 1866, 0), - (1958, 1968, 0), - (2027, 2035, 0), - (2045, 2045, 0), - (2070, 2073, 0), - (2075, 2083, 0), - (2085, 2087, 0), - (2089, 2093, 0), - (2137, 2139, 0), - (2259, 2273, 0), - (2275, 2306, 0), - (2362, 2362, 0), - (2364, 2364, 0), - (2369, 2376, 0), - (2381, 2381, 0), - (2385, 2391, 0), - (2402, 2403, 0), - (2433, 2433, 0), - (2492, 2492, 0), - (2497, 2500, 0), - (2509, 2509, 0), - (2530, 2531, 0), - (2558, 2558, 0), - (2561, 2562, 0), - (2620, 2620, 0), - (2625, 2626, 0), - (2631, 2632, 0), - (2635, 2637, 0), - (2641, 2641, 0), - (2672, 2673, 0), - (2677, 2677, 0), - (2689, 2690, 0), - (2748, 2748, 0), - (2753, 2757, 0), - (2759, 2760, 0), - (2765, 2765, 0), - (2786, 2787, 0), - (2810, 2815, 0), - (2817, 2817, 0), - (2876, 2876, 0), - (2879, 2879, 0), - (2881, 2884, 0), - (2893, 2893, 0), - (2901, 2902, 0), - (2914, 2915, 0), - (2946, 2946, 0), - (3008, 3008, 0), - (3021, 3021, 0), - (3072, 3072, 0), - (3076, 3076, 0), - (3134, 3136, 0), - (3142, 3144, 0), - (3146, 3149, 0), - (3157, 3158, 0), - (3170, 3171, 0), - (3201, 3201, 0), - (3260, 3260, 0), - (3263, 3263, 0), - (3270, 3270, 0), - (3276, 3277, 0), - (3298, 3299, 0), - (3328, 3329, 0), - (3387, 3388, 0), - (3393, 3396, 0), - (3405, 3405, 0), - (3426, 3427, 0), - (3457, 3457, 0), - (3530, 3530, 0), - (3538, 3540, 0), - (3542, 3542, 0), - (3633, 3633, 0), - (3636, 3642, 0), - (3655, 3662, 0), - (3761, 3761, 0), - (3764, 3772, 0), - (3784, 3789, 0), - (3864, 3865, 0), - (3893, 3893, 0), - (3895, 3895, 0), - (3897, 3897, 0), - (3953, 3966, 0), - (3968, 3972, 0), - (3974, 3975, 0), - (3981, 3991, 0), - (3993, 4028, 0), - (4038, 4038, 0), - (4141, 4144, 0), - (4146, 4151, 0), - (4153, 4154, 0), - (4157, 4158, 0), - (4184, 4185, 0), - (4190, 4192, 0), - (4209, 4212, 0), - (4226, 4226, 0), - (4229, 4230, 0), - (4237, 4237, 0), - (4253, 4253, 0), - (4352, 4447, 2), - (4957, 4959, 0), - (5906, 5908, 0), - (5938, 5940, 0), - (5970, 5971, 0), - (6002, 6003, 0), - (6068, 6069, 0), - (6071, 6077, 0), - (6086, 6086, 0), - (6089, 6099, 0), - (6109, 6109, 0), - (6155, 6157, 0), - (6277, 6278, 0), - (6313, 6313, 0), - (6432, 6434, 0), - (6439, 6440, 0), - (6450, 6450, 0), - (6457, 6459, 0), - (6679, 6680, 0), - (6683, 6683, 0), - (6742, 6742, 0), - (6744, 6750, 0), - (6752, 6752, 0), - (6754, 6754, 0), - (6757, 6764, 0), - (6771, 6780, 0), - (6783, 6783, 0), - (6832, 6848, 0), - (6912, 6915, 0), - (6964, 6964, 0), - (6966, 6970, 0), - (6972, 6972, 0), - (6978, 6978, 0), - (7019, 7027, 0), - (7040, 7041, 0), - (7074, 7077, 0), - (7080, 7081, 0), - (7083, 7085, 0), - (7142, 7142, 0), - (7144, 7145, 0), - (7149, 7149, 0), - (7151, 7153, 0), - (7212, 7219, 0), - (7222, 7223, 0), - (7376, 7378, 0), - (7380, 7392, 0), - (7394, 7400, 0), - (7405, 7405, 0), - (7412, 7412, 0), - (7416, 7417, 0), - (7616, 7673, 0), - (7675, 7679, 0), - (8203, 8207, 0), - (8232, 8238, 0), - (8288, 8291, 0), - (8400, 8432, 0), - (8986, 8987, 2), - (9001, 9002, 2), - (9193, 9196, 2), - (9200, 9200, 2), - (9203, 9203, 2), - (9725, 9726, 2), - (9748, 9749, 
2), - (9800, 9811, 2), - (9855, 9855, 2), - (9875, 9875, 2), - (9889, 9889, 2), - (9898, 9899, 2), - (9917, 9918, 2), - (9924, 9925, 2), - (9934, 9934, 2), - (9940, 9940, 2), - (9962, 9962, 2), - (9970, 9971, 2), - (9973, 9973, 2), - (9978, 9978, 2), - (9981, 9981, 2), - (9989, 9989, 2), - (9994, 9995, 2), - (10024, 10024, 2), - (10060, 10060, 2), - (10062, 10062, 2), - (10067, 10069, 2), - (10071, 10071, 2), - (10133, 10135, 2), - (10160, 10160, 2), - (10175, 10175, 2), - (11035, 11036, 2), - (11088, 11088, 2), - (11093, 11093, 2), - (11503, 11505, 0), - (11647, 11647, 0), - (11744, 11775, 0), - (11904, 11929, 2), - (11931, 12019, 2), - (12032, 12245, 2), - (12272, 12283, 2), - (12288, 12329, 2), - (12330, 12333, 0), - (12334, 12350, 2), - (12353, 12438, 2), - (12441, 12442, 0), - (12443, 12543, 2), - (12549, 12591, 2), - (12593, 12686, 2), - (12688, 12771, 2), - (12784, 12830, 2), - (12832, 12871, 2), - (12880, 19903, 2), - (19968, 42124, 2), - (42128, 42182, 2), - (42607, 42610, 0), - (42612, 42621, 0), - (42654, 42655, 0), - (42736, 42737, 0), - (43010, 43010, 0), - (43014, 43014, 0), - (43019, 43019, 0), - (43045, 43046, 0), - (43052, 43052, 0), - (43204, 43205, 0), - (43232, 43249, 0), - (43263, 43263, 0), - (43302, 43309, 0), - (43335, 43345, 0), - (43360, 43388, 2), - (43392, 43394, 0), - (43443, 43443, 0), - (43446, 43449, 0), - (43452, 43453, 0), - (43493, 43493, 0), - (43561, 43566, 0), - (43569, 43570, 0), - (43573, 43574, 0), - (43587, 43587, 0), - (43596, 43596, 0), - (43644, 43644, 0), - (43696, 43696, 0), - (43698, 43700, 0), - (43703, 43704, 0), - (43710, 43711, 0), - (43713, 43713, 0), - (43756, 43757, 0), - (43766, 43766, 0), - (44005, 44005, 0), - (44008, 44008, 0), - (44013, 44013, 0), - (44032, 55203, 2), - (63744, 64255, 2), - (64286, 64286, 0), - (65024, 65039, 0), - (65040, 65049, 2), - (65056, 65071, 0), - (65072, 65106, 2), - (65108, 65126, 2), - (65128, 65131, 2), - (65281, 65376, 2), - (65504, 65510, 2), - (66045, 66045, 0), - (66272, 66272, 0), - (66422, 66426, 0), - (68097, 68099, 0), - (68101, 68102, 0), - (68108, 68111, 0), - (68152, 68154, 0), - (68159, 68159, 0), - (68325, 68326, 0), - (68900, 68903, 0), - (69291, 69292, 0), - (69446, 69456, 0), - (69633, 69633, 0), - (69688, 69702, 0), - (69759, 69761, 0), - (69811, 69814, 0), - (69817, 69818, 0), - (69888, 69890, 0), - (69927, 69931, 0), - (69933, 69940, 0), - (70003, 70003, 0), - (70016, 70017, 0), - (70070, 70078, 0), - (70089, 70092, 0), - (70095, 70095, 0), - (70191, 70193, 0), - (70196, 70196, 0), - (70198, 70199, 0), - (70206, 70206, 0), - (70367, 70367, 0), - (70371, 70378, 0), - (70400, 70401, 0), - (70459, 70460, 0), - (70464, 70464, 0), - (70502, 70508, 0), - (70512, 70516, 0), - (70712, 70719, 0), - (70722, 70724, 0), - (70726, 70726, 0), - (70750, 70750, 0), - (70835, 70840, 0), - (70842, 70842, 0), - (70847, 70848, 0), - (70850, 70851, 0), - (71090, 71093, 0), - (71100, 71101, 0), - (71103, 71104, 0), - (71132, 71133, 0), - (71219, 71226, 0), - (71229, 71229, 0), - (71231, 71232, 0), - (71339, 71339, 0), - (71341, 71341, 0), - (71344, 71349, 0), - (71351, 71351, 0), - (71453, 71455, 0), - (71458, 71461, 0), - (71463, 71467, 0), - (71727, 71735, 0), - (71737, 71738, 0), - (71995, 71996, 0), - (71998, 71998, 0), - (72003, 72003, 0), - (72148, 72151, 0), - (72154, 72155, 0), - (72160, 72160, 0), - (72193, 72202, 0), - (72243, 72248, 0), - (72251, 72254, 0), - (72263, 72263, 0), - (72273, 72278, 0), - (72281, 72283, 0), - (72330, 72342, 0), - (72344, 72345, 0), - (72752, 72758, 0), - (72760, 
72765, 0), - (72767, 72767, 0), - (72850, 72871, 0), - (72874, 72880, 0), - (72882, 72883, 0), - (72885, 72886, 0), - (73009, 73014, 0), - (73018, 73018, 0), - (73020, 73021, 0), - (73023, 73029, 0), - (73031, 73031, 0), - (73104, 73105, 0), - (73109, 73109, 0), - (73111, 73111, 0), - (73459, 73460, 0), - (92912, 92916, 0), - (92976, 92982, 0), - (94031, 94031, 0), - (94095, 94098, 0), - (94176, 94179, 2), - (94180, 94180, 0), - (94192, 94193, 2), - (94208, 100343, 2), - (100352, 101589, 2), - (101632, 101640, 2), - (110592, 110878, 2), - (110928, 110930, 2), - (110948, 110951, 2), - (110960, 111355, 2), - (113821, 113822, 0), - (119143, 119145, 0), - (119163, 119170, 0), - (119173, 119179, 0), - (119210, 119213, 0), - (119362, 119364, 0), - (121344, 121398, 0), - (121403, 121452, 0), - (121461, 121461, 0), - (121476, 121476, 0), - (121499, 121503, 0), - (121505, 121519, 0), - (122880, 122886, 0), - (122888, 122904, 0), - (122907, 122913, 0), - (122915, 122916, 0), - (122918, 122922, 0), - (123184, 123190, 0), - (123628, 123631, 0), - (125136, 125142, 0), - (125252, 125258, 0), - (126980, 126980, 2), - (127183, 127183, 2), - (127374, 127374, 2), - (127377, 127386, 2), - (127488, 127490, 2), - (127504, 127547, 2), - (127552, 127560, 2), - (127568, 127569, 2), - (127584, 127589, 2), - (127744, 127776, 2), - (127789, 127797, 2), - (127799, 127868, 2), - (127870, 127891, 2), - (127904, 127946, 2), - (127951, 127955, 2), - (127968, 127984, 2), - (127988, 127988, 2), - (127992, 128062, 2), - (128064, 128064, 2), - (128066, 128252, 2), - (128255, 128317, 2), - (128331, 128334, 2), - (128336, 128359, 2), - (128378, 128378, 2), - (128405, 128406, 2), - (128420, 128420, 2), - (128507, 128591, 2), - (128640, 128709, 2), - (128716, 128716, 2), - (128720, 128722, 2), - (128725, 128727, 2), - (128747, 128748, 2), - (128756, 128764, 2), - (128992, 129003, 2), - (129292, 129338, 2), - (129340, 129349, 2), - (129351, 129400, 2), - (129402, 129483, 2), - (129485, 129535, 2), - (129648, 129652, 2), - (129656, 129658, 2), - (129664, 129670, 2), - (129680, 129704, 2), - (129712, 129718, 2), - (129728, 129730, 2), - (129744, 129750, 2), - (131072, 196605, 2), - (196608, 262141, 2), - (917760, 917999, 0), -] diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/build_ext.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/build_ext.py deleted file mode 100644 index cbfe3ec1c28529aade613b000d5b051807287deb..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/command/build_ext.py +++ /dev/null @@ -1,383 +0,0 @@ -import os -import sys -import itertools -from importlib.machinery import EXTENSION_SUFFIXES -from importlib.util import cache_from_source as _compiled_file_name -from typing import Dict, Iterator, List, Tuple - -from distutils.command.build_ext import build_ext as _du_build_ext -from distutils.ccompiler import new_compiler -from distutils.sysconfig import customize_compiler, get_config_var -from distutils import log - -from setuptools.errors import BaseError -from setuptools.extension import Extension, Library - -try: - # Attempt to use Cython for building extensions, if available - from Cython.Distutils.build_ext import build_ext as _build_ext - # Additionally, assert that the compiler module will load - # also. Ref #1229. 
- __import__('Cython.Compiler.Main') -except ImportError: - _build_ext = _du_build_ext - -# make sure _config_vars is initialized -get_config_var("LDSHARED") -from distutils.sysconfig import _config_vars as _CONFIG_VARS # noqa - - -def _customize_compiler_for_shlib(compiler): - if sys.platform == "darwin": - # building .dylib requires additional compiler flags on OSX; here we - # temporarily substitute the pyconfig.h variables so that distutils' - # 'customize_compiler' uses them before we build the shared libraries. - tmp = _CONFIG_VARS.copy() - try: - # XXX Help! I don't have any idea whether these are right... - _CONFIG_VARS['LDSHARED'] = ( - "gcc -Wl,-x -dynamiclib -undefined dynamic_lookup") - _CONFIG_VARS['CCSHARED'] = " -dynamiclib" - _CONFIG_VARS['SO'] = ".dylib" - customize_compiler(compiler) - finally: - _CONFIG_VARS.clear() - _CONFIG_VARS.update(tmp) - else: - customize_compiler(compiler) - - -have_rtld = False -use_stubs = False -libtype = 'shared' - -if sys.platform == "darwin": - use_stubs = True -elif os.name != 'nt': - try: - import dl - use_stubs = have_rtld = hasattr(dl, 'RTLD_NOW') - except ImportError: - pass - - -def if_dl(s): - return s if have_rtld else '' - - -def get_abi3_suffix(): - """Return the file extension for an abi3-compliant Extension()""" - for suffix in EXTENSION_SUFFIXES: - if '.abi3' in suffix: # Unix - return suffix - elif suffix == '.pyd': # Windows - return suffix - - -class build_ext(_build_ext): - editable_mode: bool = False - inplace: bool = False - - def run(self): - """Build extensions in build directory, then copy if --inplace""" - old_inplace, self.inplace = self.inplace, 0 - _build_ext.run(self) - self.inplace = old_inplace - if old_inplace: - self.copy_extensions_to_source() - - def _get_inplace_equivalent(self, build_py, ext: Extension) -> Tuple[str, str]: - fullname = self.get_ext_fullname(ext.name) - filename = self.get_ext_filename(fullname) - modpath = fullname.split('.') - package = '.'.join(modpath[:-1]) - package_dir = build_py.get_package_dir(package) - inplace_file = os.path.join(package_dir, os.path.basename(filename)) - regular_file = os.path.join(self.build_lib, filename) - return (inplace_file, regular_file) - - def copy_extensions_to_source(self): - build_py = self.get_finalized_command('build_py') - for ext in self.extensions: - inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) - - # Always copy, even if source is older than destination, to ensure - # that the right extensions for the current Python/platform are - # used. 
- if os.path.exists(regular_file) or not ext.optional: - self.copy_file(regular_file, inplace_file, level=self.verbose) - - if ext._needs_stub: - inplace_stub = self._get_equivalent_stub(ext, inplace_file) - self._write_stub_file(inplace_stub, ext, compile=True) - # Always compile stub and remove the original (leave the cache behind) - # (this behaviour was observed in previous iterations of the code) - - def _get_equivalent_stub(self, ext: Extension, output_file: str) -> str: - dir_ = os.path.dirname(output_file) - _, _, name = ext.name.rpartition(".") - return f"{os.path.join(dir_, name)}.py" - - def _get_output_mapping(self) -> Iterator[Tuple[str, str]]: - if not self.inplace: - return - - build_py = self.get_finalized_command('build_py') - opt = self.get_finalized_command('install_lib').optimize or "" - - for ext in self.extensions: - inplace_file, regular_file = self._get_inplace_equivalent(build_py, ext) - yield (regular_file, inplace_file) - - if ext._needs_stub: - # This version of `build_ext` always builds artifacts in another dir, - # when "inplace=True" is given it just copies them back. - # This is done in the `copy_extensions_to_source` function, which - # always compile stub files via `_compile_and_remove_stub`. - # At the end of the process, a `.pyc` stub file is created without the - # corresponding `.py`. - - inplace_stub = self._get_equivalent_stub(ext, inplace_file) - regular_stub = self._get_equivalent_stub(ext, regular_file) - inplace_cache = _compiled_file_name(inplace_stub, optimization=opt) - output_cache = _compiled_file_name(regular_stub, optimization=opt) - yield (output_cache, inplace_cache) - - def get_ext_filename(self, fullname): - so_ext = os.getenv('SETUPTOOLS_EXT_SUFFIX') - if so_ext: - filename = os.path.join(*fullname.split('.')) + so_ext - else: - filename = _build_ext.get_ext_filename(self, fullname) - so_ext = get_config_var('EXT_SUFFIX') - - if fullname in self.ext_map: - ext = self.ext_map[fullname] - use_abi3 = getattr(ext, 'py_limited_api') and get_abi3_suffix() - if use_abi3: - filename = filename[:-len(so_ext)] - so_ext = get_abi3_suffix() - filename = filename + so_ext - if isinstance(ext, Library): - fn, ext = os.path.splitext(filename) - return self.shlib_compiler.library_filename(fn, libtype) - elif use_stubs and ext._links_to_dynamic: - d, fn = os.path.split(filename) - return os.path.join(d, 'dl-' + fn) - return filename - - def initialize_options(self): - _build_ext.initialize_options(self) - self.shlib_compiler = None - self.shlibs = [] - self.ext_map = {} - self.editable_mode = False - - def finalize_options(self): - _build_ext.finalize_options(self) - self.extensions = self.extensions or [] - self.check_extensions_list(self.extensions) - self.shlibs = [ext for ext in self.extensions - if isinstance(ext, Library)] - if self.shlibs: - self.setup_shlib_compiler() - for ext in self.extensions: - ext._full_name = self.get_ext_fullname(ext.name) - for ext in self.extensions: - fullname = ext._full_name - self.ext_map[fullname] = ext - - # distutils 3.1 will also ask for module names - # XXX what to do with conflicts? 
- self.ext_map[fullname.split('.')[-1]] = ext - - ltd = self.shlibs and self.links_to_dynamic(ext) or False - ns = ltd and use_stubs and not isinstance(ext, Library) - ext._links_to_dynamic = ltd - ext._needs_stub = ns - filename = ext._file_name = self.get_ext_filename(fullname) - libdir = os.path.dirname(os.path.join(self.build_lib, filename)) - if ltd and libdir not in ext.library_dirs: - ext.library_dirs.append(libdir) - if ltd and use_stubs and os.curdir not in ext.runtime_library_dirs: - ext.runtime_library_dirs.append(os.curdir) - - if self.editable_mode: - self.inplace = True - - def setup_shlib_compiler(self): - compiler = self.shlib_compiler = new_compiler( - compiler=self.compiler, dry_run=self.dry_run, force=self.force - ) - _customize_compiler_for_shlib(compiler) - - if self.include_dirs is not None: - compiler.set_include_dirs(self.include_dirs) - if self.define is not None: - # 'define' option is a list of (name,value) tuples - for (name, value) in self.define: - compiler.define_macro(name, value) - if self.undef is not None: - for macro in self.undef: - compiler.undefine_macro(macro) - if self.libraries is not None: - compiler.set_libraries(self.libraries) - if self.library_dirs is not None: - compiler.set_library_dirs(self.library_dirs) - if self.rpath is not None: - compiler.set_runtime_library_dirs(self.rpath) - if self.link_objects is not None: - compiler.set_link_objects(self.link_objects) - - # hack so distutils' build_extension() builds a library instead - compiler.link_shared_object = link_shared_object.__get__(compiler) - - def get_export_symbols(self, ext): - if isinstance(ext, Library): - return ext.export_symbols - return _build_ext.get_export_symbols(self, ext) - - def build_extension(self, ext): - ext._convert_pyx_sources_to_lang() - _compiler = self.compiler - try: - if isinstance(ext, Library): - self.compiler = self.shlib_compiler - _build_ext.build_extension(self, ext) - if ext._needs_stub: - build_lib = self.get_finalized_command('build_py').build_lib - self.write_stub(build_lib, ext) - finally: - self.compiler = _compiler - - def links_to_dynamic(self, ext): - """Return true if 'ext' links to a dynamic lib in the same package""" - # XXX this should check to ensure the lib is actually being built - # XXX as dynamic, and not just using a locally-found version or a - # XXX static-compiled version - libnames = dict.fromkeys([lib._full_name for lib in self.shlibs]) - pkg = '.'.join(ext._full_name.split('.')[:-1] + ['']) - return any(pkg + libname in libnames for libname in ext.libraries) - - def get_outputs(self) -> List[str]: - if self.inplace: - return list(self.get_output_mapping().keys()) - return sorted(_build_ext.get_outputs(self) + self.__get_stubs_outputs()) - - def get_output_mapping(self) -> Dict[str, str]: - """See :class:`setuptools.commands.build.SubCommand`""" - mapping = self._get_output_mapping() - return dict(sorted(mapping, key=lambda x: x[0])) - - def __get_stubs_outputs(self): - # assemble the base name for each extension that needs a stub - ns_ext_bases = ( - os.path.join(self.build_lib, *ext._full_name.split('.')) - for ext in self.extensions - if ext._needs_stub - ) - # pair each base with the extension - pairs = itertools.product(ns_ext_bases, self.__get_output_extensions()) - return list(base + fnext for base, fnext in pairs) - - def __get_output_extensions(self): - yield '.py' - yield '.pyc' - if self.get_finalized_command('build_py').optimize: - yield '.pyo' - - def write_stub(self, output_dir, ext, compile=False): - stub_file = 
os.path.join(output_dir, *ext._full_name.split('.')) + '.py' - self._write_stub_file(stub_file, ext, compile) - - def _write_stub_file(self, stub_file: str, ext: Extension, compile=False): - log.info("writing stub loader for %s to %s", ext._full_name, stub_file) - if compile and os.path.exists(stub_file): - raise BaseError(stub_file + " already exists! Please delete.") - if not self.dry_run: - f = open(stub_file, 'w') - f.write( - '\n'.join([ - "def __bootstrap__():", - " global __bootstrap__, __file__, __loader__", - " import sys, os, pkg_resources, importlib.util" + - if_dl(", dl"), - " __file__ = pkg_resources.resource_filename" - "(__name__,%r)" - % os.path.basename(ext._file_name), - " del __bootstrap__", - " if '__loader__' in globals():", - " del __loader__", - if_dl(" old_flags = sys.getdlopenflags()"), - " old_dir = os.getcwd()", - " try:", - " os.chdir(os.path.dirname(__file__))", - if_dl(" sys.setdlopenflags(dl.RTLD_NOW)"), - " spec = importlib.util.spec_from_file_location(", - " __name__, __file__)", - " mod = importlib.util.module_from_spec(spec)", - " spec.loader.exec_module(mod)", - " finally:", - if_dl(" sys.setdlopenflags(old_flags)"), - " os.chdir(old_dir)", - "__bootstrap__()", - "" # terminal \n - ]) - ) - f.close() - if compile: - self._compile_and_remove_stub(stub_file) - - def _compile_and_remove_stub(self, stub_file: str): - from distutils.util import byte_compile - - byte_compile([stub_file], optimize=0, - force=True, dry_run=self.dry_run) - optimize = self.get_finalized_command('install_lib').optimize - if optimize > 0: - byte_compile([stub_file], optimize=optimize, - force=True, dry_run=self.dry_run) - if os.path.exists(stub_file) and not self.dry_run: - os.unlink(stub_file) - - -if use_stubs or os.name == 'nt': - # Build shared libraries - # - def link_shared_object( - self, objects, output_libname, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, export_symbols=None, - debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, - target_lang=None): - self.link( - self.SHARED_LIBRARY, objects, output_libname, - output_dir, libraries, library_dirs, runtime_library_dirs, - export_symbols, debug, extra_preargs, extra_postargs, - build_temp, target_lang - ) -else: - # Build static libraries everywhere else - libtype = 'static' - - def link_shared_object( - self, objects, output_libname, output_dir=None, libraries=None, - library_dirs=None, runtime_library_dirs=None, export_symbols=None, - debug=0, extra_preargs=None, extra_postargs=None, build_temp=None, - target_lang=None): - # XXX we need to either disallow these attrs on Library instances, - # or warn/abort here if set, or something... 
- # libraries=None, library_dirs=None, runtime_library_dirs=None, - # export_symbols=None, extra_preargs=None, extra_postargs=None, - # build_temp=None - - assert output_dir is None # distutils build_ext doesn't pass this - output_dir, filename = os.path.split(output_libname) - basename, ext = os.path.splitext(filename) - if self.library_filename("x").startswith('lib'): - # strip 'lib' prefix; this is kludgy if some platform uses - # a different prefix - basename = basename[3:] - - self.create_static_lib( - objects, basename, output_dir, debug, target_lang - ) diff --git a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/depends.py b/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/depends.py deleted file mode 100644 index adffd12db8c8e0477ee6532cd3b84f2e0cde9632..0000000000000000000000000000000000000000 --- a/spaces/Raspberry-ai/main/.env/lib/python3.11/site-packages/setuptools/depends.py +++ /dev/null @@ -1,176 +0,0 @@ -import sys -import marshal -import contextlib -import dis - -from setuptools.extern.packaging import version - -from ._imp import find_module, PY_COMPILED, PY_FROZEN, PY_SOURCE -from . import _imp - - -__all__ = [ - 'Require', 'find_module', 'get_module_constant', 'extract_constant' -] - - -class Require: - """A prerequisite to building or installing a distribution""" - - def __init__( - self, name, requested_version, module, homepage='', - attribute=None, format=None): - - if format is None and requested_version is not None: - format = version.Version - - if format is not None: - requested_version = format(requested_version) - if attribute is None: - attribute = '__version__' - - self.__dict__.update(locals()) - del self.self - - def full_name(self): - """Return full package/distribution name, w/version""" - if self.requested_version is not None: - return '%s-%s' % (self.name, self.requested_version) - return self.name - - def version_ok(self, version): - """Is 'version' sufficiently up-to-date?""" - return self.attribute is None or self.format is None or \ - str(version) != "unknown" and self.format(version) >= self.requested_version - - def get_version(self, paths=None, default="unknown"): - """Get version number of installed module, 'None', or 'default' - - Search 'paths' for module. If not found, return 'None'. If found, - return the extracted version attribute, or 'default' if no version - attribute was specified, or the value cannot be determined without - importing the module. The version is formatted according to the - requirement's version format (if any), unless it is 'None' or the - supplied 'default'. 
- """ - - if self.attribute is None: - try: - f, p, i = find_module(self.module, paths) - if f: - f.close() - return default - except ImportError: - return None - - v = get_module_constant(self.module, self.attribute, default, paths) - - if v is not None and v is not default and self.format is not None: - return self.format(v) - - return v - - def is_present(self, paths=None): - """Return true if dependency is present on 'paths'""" - return self.get_version(paths) is not None - - def is_current(self, paths=None): - """Return true if dependency is present and up-to-date on 'paths'""" - version = self.get_version(paths) - if version is None: - return False - return self.version_ok(str(version)) - - -def maybe_close(f): - @contextlib.contextmanager - def empty(): - yield - return - if not f: - return empty() - - return contextlib.closing(f) - - -def get_module_constant(module, symbol, default=-1, paths=None): - """Find 'module' by searching 'paths', and extract 'symbol' - - Return 'None' if 'module' does not exist on 'paths', or it does not define - 'symbol'. If the module defines 'symbol' as a constant, return the - constant. Otherwise, return 'default'.""" - - try: - f, path, (suffix, mode, kind) = info = find_module(module, paths) - except ImportError: - # Module doesn't exist - return None - - with maybe_close(f): - if kind == PY_COMPILED: - f.read(8) # skip magic & date - code = marshal.load(f) - elif kind == PY_FROZEN: - code = _imp.get_frozen_object(module, paths) - elif kind == PY_SOURCE: - code = compile(f.read(), path, 'exec') - else: - # Not something we can parse; we'll have to import it. :( - imported = _imp.get_module(module, paths, info) - return getattr(imported, symbol, None) - - return extract_constant(code, symbol, default) - - -def extract_constant(code, symbol, default=-1): - """Extract the constant value of 'symbol' from 'code' - - If the name 'symbol' is bound to a constant value by the Python code - object 'code', return that value. If 'symbol' is bound to an expression, - return 'default'. Otherwise, return 'None'. - - Return value is based on the first assignment to 'symbol'. 'symbol' must - be a global, or at least a non-"fast" local in the code block. That is, - only 'STORE_NAME' and 'STORE_GLOBAL' opcodes are checked, and 'symbol' - must be present in 'code.co_names'. - """ - if symbol not in code.co_names: - # name's not there, can't possibly be an assignment - return None - - name_idx = list(code.co_names).index(symbol) - - STORE_NAME = 90 - STORE_GLOBAL = 97 - LOAD_CONST = 100 - - const = default - - for byte_code in dis.Bytecode(code): - op = byte_code.opcode - arg = byte_code.arg - - if op == LOAD_CONST: - const = code.co_consts[arg] - elif arg == name_idx and (op == STORE_NAME or op == STORE_GLOBAL): - return const - else: - const = default - - -def _update_globals(): - """ - Patch the globals to remove the objects not available on some platforms. - - XXX it'd be better to test assertions about bytecode instead. 
- """ - - if not sys.platform.startswith('java') and sys.platform != 'cli': - return - incompatible = 'extract_constant', 'get_module_constant' - for name in incompatible: - del globals()[name] - __all__.remove(name) - - -_update_globals() diff --git a/spaces/Realcat/image-matching-webui/hloc/matchers/roma.py b/spaces/Realcat/image-matching-webui/hloc/matchers/roma.py deleted file mode 100644 index 1fe913270978346a124f17b57ca3d22c53d43d72..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/hloc/matchers/roma.py +++ /dev/null @@ -1,91 +0,0 @@ -import sys -from pathlib import Path -import subprocess -import torch -from PIL import Image -from ..utils.base_model import BaseModel -from .. import logger - -roma_path = Path(__file__).parent / "../../third_party/Roma" -sys.path.append(str(roma_path)) - -from roma.models.model_zoo.roma_models import roma_model - -device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - -class Roma(BaseModel): - default_conf = { - "name": "two_view_pipeline", - "model_name": "roma_outdoor.pth", - "model_utils_name": "dinov2_vitl14_pretrain.pth", - "max_keypoints": 3000, - } - required_inputs = [ - "image0", - "image1", - ] - weight_urls = { - "roma": { - "roma_outdoor.pth": "https://github.com/Parskatt/storage/releases/download/roma/roma_outdoor.pth", - "roma_indoor.pth": "https://github.com/Parskatt/storage/releases/download/roma/roma_indoor.pth", - }, - "dinov2_vitl14_pretrain.pth": "https://dl.fbaipublicfiles.com/dinov2/dinov2_vitl14/dinov2_vitl14_pretrain.pth", - } - - # Initialize the line matcher - def _init(self, conf): - model_path = roma_path / "pretrained" / conf["model_name"] - dinov2_weights = roma_path / "pretrained" / conf["model_utils_name"] - - # Download the model. 
- if not model_path.exists(): - model_path.parent.mkdir(exist_ok=True) - link = self.weight_urls["roma"][conf["model_name"]] - cmd = ["wget", link, "-O", str(model_path)] - logger.info(f"Downloading the Roma model with `{cmd}`.") - subprocess.run(cmd, check=True) - - if not dinov2_weights.exists(): - dinov2_weights.parent.mkdir(exist_ok=True) - link = self.weight_urls[conf["model_utils_name"]] - cmd = ["wget", link, "-O", str(dinov2_weights)] - logger.info(f"Downloading the dinov2 model with `{cmd}`.") - subprocess.run(cmd, check=True) - - logger.info(f"Loading Roma model...") - # load the model - weights = torch.load(model_path, map_location="cpu") - dinov2_weights = torch.load(dinov2_weights, map_location="cpu") - - self.net = roma_model( - resolution=(14 * 8 * 6, 14 * 8 * 6), - upsample_preds=False, - weights=weights, - dinov2_weights=dinov2_weights, - device=device, - ) - logger.info(f"Load Roma model done.") - - def _forward(self, data): - img0 = data["image0"].cpu().numpy().squeeze() * 255 - img1 = data["image1"].cpu().numpy().squeeze() * 255 - img0 = img0.transpose(1, 2, 0) - img1 = img1.transpose(1, 2, 0) - img0 = Image.fromarray(img0.astype("uint8")) - img1 = Image.fromarray(img1.astype("uint8")) - W_A, H_A = img0.size - W_B, H_B = img1.size - - # Match - warp, certainty = self.net.match(img0, img1, device=device) - # Sample matches for estimation - matches, certainty = self.net.sample( - warp, certainty, num=self.conf["max_keypoints"] - ) - kpts1, kpts2 = self.net.to_pixel_coordinates( - matches, H_A, W_A, H_B, W_B - ) - pred = {} - pred["keypoints0"], pred["keypoints1"] = kpts1, kpts2 - pred["mconf"] = certainty - return pred diff --git a/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/lightning_trainer/data.py b/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/lightning_trainer/data.py deleted file mode 100644 index 95f6a5eeecf39a993b86674242eacb7b42f8a566..0000000000000000000000000000000000000000 --- a/spaces/Realcat/image-matching-webui/third_party/TopicFM/src/lightning_trainer/data.py +++ /dev/null @@ -1,399 +0,0 @@ -import os -import math -from collections import abc -from loguru import logger -from torch.utils.data.dataset import Dataset -from tqdm import tqdm -from os import path as osp -from pathlib import Path -from joblib import Parallel, delayed - -import pytorch_lightning as pl -from torch import distributed as dist -from torch.utils.data import ( - Dataset, - DataLoader, - ConcatDataset, - DistributedSampler, - RandomSampler, - dataloader, -) - -from src.utils.augment import build_augmentor -from src.utils.dataloader import get_local_split -from src.utils.misc import tqdm_joblib -from src.utils import comm -from src.datasets.megadepth import MegaDepthDataset -from src.datasets.scannet import ScanNetDataset -from src.datasets.sampler import RandomConcatSampler - - -class MultiSceneDataModule(pl.LightningDataModule): - """ - For distributed training, each training process is assigned - only a part of the training scenes to reduce memory overhead. - """ - - def __init__(self, args, config): - super().__init__() - - # 1. 
data config - # Train and Val should from the same data source - self.trainval_data_source = config.DATASET.TRAINVAL_DATA_SOURCE - self.test_data_source = config.DATASET.TEST_DATA_SOURCE - # training and validating - self.train_data_root = config.DATASET.TRAIN_DATA_ROOT - self.train_pose_root = config.DATASET.TRAIN_POSE_ROOT # (optional) - self.train_npz_root = config.DATASET.TRAIN_NPZ_ROOT - self.train_list_path = config.DATASET.TRAIN_LIST_PATH - self.train_intrinsic_path = config.DATASET.TRAIN_INTRINSIC_PATH - self.val_data_root = config.DATASET.VAL_DATA_ROOT - self.val_pose_root = config.DATASET.VAL_POSE_ROOT # (optional) - self.val_npz_root = config.DATASET.VAL_NPZ_ROOT - self.val_list_path = config.DATASET.VAL_LIST_PATH - self.val_intrinsic_path = config.DATASET.VAL_INTRINSIC_PATH - # testing - self.test_data_root = config.DATASET.TEST_DATA_ROOT - self.test_pose_root = config.DATASET.TEST_POSE_ROOT # (optional) - self.test_npz_root = config.DATASET.TEST_NPZ_ROOT - self.test_list_path = config.DATASET.TEST_LIST_PATH - self.test_intrinsic_path = config.DATASET.TEST_INTRINSIC_PATH - - # 2. dataset config - # general options - self.min_overlap_score_test = ( - config.DATASET.MIN_OVERLAP_SCORE_TEST - ) # 0.4, omit data with overlap_score < min_overlap_score - self.min_overlap_score_train = config.DATASET.MIN_OVERLAP_SCORE_TRAIN - self.augment_fn = build_augmentor( - config.DATASET.AUGMENTATION_TYPE - ) # None, options: [None, 'dark', 'mobile'] - - # MegaDepth options - self.mgdpt_img_resize = config.DATASET.MGDPT_IMG_RESIZE # 840 - self.mgdpt_img_pad = config.DATASET.MGDPT_IMG_PAD # True - self.mgdpt_depth_pad = config.DATASET.MGDPT_DEPTH_PAD # True - self.mgdpt_df = config.DATASET.MGDPT_DF # 8 - self.coarse_scale = 1 / config.MODEL.RESOLUTION[0] # 0.125. for training loftr. - - # 3.loader parameters - self.train_loader_params = { - "batch_size": args.batch_size, - "num_workers": args.num_workers, - "pin_memory": getattr(args, "pin_memory", True), - } - self.val_loader_params = { - "batch_size": 1, - "shuffle": False, - "num_workers": args.num_workers, - "pin_memory": getattr(args, "pin_memory", True), - } - self.test_loader_params = { - "batch_size": 1, - "shuffle": False, - "num_workers": args.num_workers, - "pin_memory": True, - } - - # 4. sampler - self.data_sampler = config.TRAINER.DATA_SAMPLER - self.n_samples_per_subset = config.TRAINER.N_SAMPLES_PER_SUBSET - self.subset_replacement = config.TRAINER.SB_SUBSET_SAMPLE_REPLACEMENT - self.shuffle = config.TRAINER.SB_SUBSET_SHUFFLE - self.repeat = config.TRAINER.SB_REPEAT - - # (optional) RandomSampler for debugging - - # misc configurations - self.parallel_load_data = getattr(args, "parallel_load_data", False) - self.seed = config.TRAINER.SEED # 66 - - def setup(self, stage=None): - """ - Setup train / val / test dataset. This method will be called by PL automatically. - Args: - stage (str): 'fit' in training phase, and 'test' in testing phase. 
- """ - - assert stage in ["fit", "test"], "stage must be either fit or test" - - try: - self.world_size = dist.get_world_size() - self.rank = dist.get_rank() - logger.info(f"[rank:{self.rank}] world_size: {self.world_size}") - except AssertionError as ae: - self.world_size = 1 - self.rank = 0 - logger.warning(str(ae) + " (set wolrd_size=1 and rank=0)") - - if stage == "fit": - self.train_dataset = self._setup_dataset( - self.train_data_root, - self.train_npz_root, - self.train_list_path, - self.train_intrinsic_path, - mode="train", - min_overlap_score=self.min_overlap_score_train, - pose_dir=self.train_pose_root, - ) - # setup multiple (optional) validation subsets - if isinstance(self.val_list_path, (list, tuple)): - self.val_dataset = [] - if not isinstance(self.val_npz_root, (list, tuple)): - self.val_npz_root = [ - self.val_npz_root for _ in range(len(self.val_list_path)) - ] - for npz_list, npz_root in zip(self.val_list_path, self.val_npz_root): - self.val_dataset.append( - self._setup_dataset( - self.val_data_root, - npz_root, - npz_list, - self.val_intrinsic_path, - mode="val", - min_overlap_score=self.min_overlap_score_test, - pose_dir=self.val_pose_root, - ) - ) - else: - self.val_dataset = self._setup_dataset( - self.val_data_root, - self.val_npz_root, - self.val_list_path, - self.val_intrinsic_path, - mode="val", - min_overlap_score=self.min_overlap_score_test, - pose_dir=self.val_pose_root, - ) - logger.info(f"[rank:{self.rank}] Train & Val Dataset loaded!") - else: # stage == 'test - self.test_dataset = self._setup_dataset( - self.test_data_root, - self.test_npz_root, - self.test_list_path, - self.test_intrinsic_path, - mode="test", - min_overlap_score=self.min_overlap_score_test, - pose_dir=self.test_pose_root, - ) - logger.info(f"[rank:{self.rank}]: Test Dataset loaded!") - - def _setup_dataset( - self, - data_root, - split_npz_root, - scene_list_path, - intri_path, - mode="train", - min_overlap_score=0.0, - pose_dir=None, - ): - """Setup train / val / test set""" - with open(scene_list_path, "r") as f: - npz_names = [name.split()[0] for name in f.readlines()] - - if mode == "train": - local_npz_names = get_local_split( - npz_names, self.world_size, self.rank, self.seed - ) - else: - local_npz_names = npz_names - logger.info(f"[rank {self.rank}]: {len(local_npz_names)} scene(s) assigned.") - - dataset_builder = ( - self._build_concat_dataset_parallel - if self.parallel_load_data - else self._build_concat_dataset - ) - return dataset_builder( - data_root, - local_npz_names, - split_npz_root, - intri_path, - mode=mode, - min_overlap_score=min_overlap_score, - pose_dir=pose_dir, - ) - - def _build_concat_dataset( - self, - data_root, - npz_names, - npz_dir, - intrinsic_path, - mode, - min_overlap_score=0.0, - pose_dir=None, - ): - datasets = [] - augment_fn = self.augment_fn if mode == "train" else None - data_source = ( - self.trainval_data_source - if mode in ["train", "val"] - else self.test_data_source - ) - if str(data_source).lower() == "megadepth": - npz_names = [f"{n}.npz" for n in npz_names] - for npz_name in tqdm( - npz_names, - desc=f"[rank:{self.rank}] loading {mode} datasets", - disable=int(self.rank) != 0, - ): - # `ScanNetDataset`/`MegaDepthDataset` load all data from npz_path when initialized, which might take time. 
- npz_path = osp.join(npz_dir, npz_name) - if data_source == "ScanNet": - datasets.append( - ScanNetDataset( - data_root, - npz_path, - intrinsic_path, - mode=mode, - min_overlap_score=min_overlap_score, - augment_fn=augment_fn, - pose_dir=pose_dir, - ) - ) - elif data_source == "MegaDepth": - datasets.append( - MegaDepthDataset( - data_root, - npz_path, - mode=mode, - min_overlap_score=min_overlap_score, - img_resize=self.mgdpt_img_resize, - df=self.mgdpt_df, - img_padding=self.mgdpt_img_pad, - depth_padding=self.mgdpt_depth_pad, - augment_fn=augment_fn, - coarse_scale=self.coarse_scale, - ) - ) - else: - raise NotImplementedError() - return ConcatDataset(datasets) - - def _build_concat_dataset_parallel( - self, - data_root, - npz_names, - npz_dir, - intrinsic_path, - mode, - min_overlap_score=0.0, - pose_dir=None, - ): - augment_fn = self.augment_fn if mode == "train" else None - data_source = ( - self.trainval_data_source - if mode in ["train", "val"] - else self.test_data_source - ) - if str(data_source).lower() == "megadepth": - npz_names = [f"{n}.npz" for n in npz_names] - with tqdm_joblib( - tqdm( - desc=f"[rank:{self.rank}] loading {mode} datasets", - total=len(npz_names), - disable=int(self.rank) != 0, - ) - ): - if data_source == "ScanNet": - datasets = Parallel( - n_jobs=math.floor( - len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size() - ) - )( - delayed( - lambda x: _build_dataset( - ScanNetDataset, - data_root, - osp.join(npz_dir, x), - intrinsic_path, - mode=mode, - min_overlap_score=min_overlap_score, - augment_fn=augment_fn, - pose_dir=pose_dir, - ) - )(name) - for name in npz_names - ) - elif data_source == "MegaDepth": - # TODO: _pickle.PicklingError: Could not pickle the task to send it to the workers. - raise NotImplementedError() - datasets = Parallel( - n_jobs=math.floor( - len(os.sched_getaffinity(0)) * 0.9 / comm.get_local_size() - ) - )( - delayed( - lambda x: _build_dataset( - MegaDepthDataset, - data_root, - osp.join(npz_dir, x), - mode=mode, - min_overlap_score=min_overlap_score, - img_resize=self.mgdpt_img_resize, - df=self.mgdpt_df, - img_padding=self.mgdpt_img_pad, - depth_padding=self.mgdpt_depth_pad, - augment_fn=augment_fn, - coarse_scale=self.coarse_scale, - ) - )(name) - for name in npz_names - ) - else: - raise ValueError(f"Unknown dataset: {data_source}") - return ConcatDataset(datasets) - - def train_dataloader(self): - """Build training dataloader for ScanNet / MegaDepth.""" - assert self.data_sampler in ["scene_balance"] - logger.info( - f"[rank:{self.rank}/{self.world_size}]: Train Sampler and DataLoader re-init (should not re-init between epochs!)." - ) - if self.data_sampler == "scene_balance": - sampler = RandomConcatSampler( - self.train_dataset, - self.n_samples_per_subset, - self.subset_replacement, - self.shuffle, - self.repeat, - self.seed, - ) - else: - sampler = None - dataloader = DataLoader( - self.train_dataset, sampler=sampler, **self.train_loader_params - ) - return dataloader - - def val_dataloader(self): - """Build validation dataloader for ScanNet / MegaDepth.""" - logger.info( - f"[rank:{self.rank}/{self.world_size}]: Val Sampler and DataLoader re-init." 
- ) - if not isinstance(self.val_dataset, abc.Sequence): - sampler = DistributedSampler(self.val_dataset, shuffle=False) - return DataLoader( - self.val_dataset, sampler=sampler, **self.val_loader_params - ) - else: - dataloaders = [] - for dataset in self.val_dataset: - sampler = DistributedSampler(dataset, shuffle=False) - dataloaders.append( - DataLoader(dataset, sampler=sampler, **self.val_loader_params) - ) - return dataloaders - - def test_dataloader(self, *args, **kwargs): - logger.info( - f"[rank:{self.rank}/{self.world_size}]: Test Sampler and DataLoader re-init." - ) - sampler = DistributedSampler(self.test_dataset, shuffle=False) - return DataLoader(self.test_dataset, sampler=sampler, **self.test_loader_params) - - -def _build_dataset(dataset: Dataset, *args, **kwargs): - return dataset(*args, **kwargs) diff --git a/spaces/Ritvik19/SudokuNet/README.md b/spaces/Ritvik19/SudokuNet/README.md deleted file mode 100644 index 9c6f0f1dc98a3071226ffb02fe3dda094b8b2dc5..0000000000000000000000000000000000000000 --- a/spaces/Ritvik19/SudokuNet/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: SudokuNet -emoji: 🚀 -colorFrom: red -colorTo: blue -sdk: streamlit -sdk_version: 1.9.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/Saurav21/Blog-Generation/app.py b/spaces/Saurav21/Blog-Generation/app.py deleted file mode 100644 index 4025fd211418d6397c9f05515dbda63e62642bfd..0000000000000000000000000000000000000000 --- a/spaces/Saurav21/Blog-Generation/app.py +++ /dev/null @@ -1,21 +0,0 @@ -import gradio as gr -from aitextgen import aitextgen - - -def text_gen(text): - - model_name = "EleutherAI/gpt-neo-125M" - model = aitextgen(model = model_name) - text_generated = model.generate_one(max_length=1000, prompt = text, no_repeat_ngram_size = 3) - - return text_generated - - -output_text = gr.outputs.Textbox() - -gr.Interface(text_gen, - inputs = gr.inputs.Textbox(label = "Input Text"), - outputs = gr.outputs.Textbox(), - title = "Blog Generator using AI", - description = "Create a 1000 word blog on any topic").launch() - diff --git a/spaces/Scakmak/Chatbot/app.py b/spaces/Scakmak/Chatbot/app.py deleted file mode 100644 index 0f0988771eecf969d589d8fcc92064b8d6843c9e..0000000000000000000000000000000000000000 --- a/spaces/Scakmak/Chatbot/app.py +++ /dev/null @@ -1,67 +0,0 @@ -import gradio as gr -import openai - - -import os -from dotenv import load_dotenv - -load_dotenv() - -openai.api_key = os.getenv("openai_api_key") - -# history = [] -character_name = "AI" -# chat_history = [] - -def openai_chat(prompt, ch_history): - - ch_history.append({"role": "user", "content": prompt}) - messagesFiltered = [ch_history[i] for i in range(len(ch_history)) if ((i % 3 !=0) | (i ==0)) ] - - response = openai.ChatCompletion.create( - model="gpt-3.5-turbo", - messages=messagesFiltered, - max_tokens=300, - temperature=0.6, - top_p=1, - frequency_penalty=0.5, - presence_penalty=0.0 - ) - print(response.choices[0].message["content"].strip()) - return response.choices[0].message["content"].strip() - - - - -def set_character_name(prompt_input, ch_history): - print(prompt_input) - character_name = prompt_input - #history.append({"role": "system", "content": f"You are {character_name}. You will talk and think like {character_name} from now on."}) - ch_history.append({"role": "system", "content": f"You are {character_name}. 
You will talk and think like {character_name} from now on."}) - return {msg: msg.update(visible=True), chatbot: chatbot.update(visible=True), char_selection : char_selection.update(visible=False), title: title.update( value = f"Chat with {character_name.upper()}",visible=True), state: ch_history} - -def respond(message, ch_history): - bot_message = openai_chat(message, ch_history) - #history.append({"role": "assistant", "content": bot_message}) - ch_history.append({"role": "assistant", "content": bot_message}) - ch_history.append((message, bot_message)) - chats = [ch_history[i] for i in range(len(ch_history)) if ((i % 3 == 0) & (i !=0)) ] - - return {msg: msg.update(value="", visible=True), chatbot: chatbot.update(value= chats,visible=True), state: ch_history} - - -with gr.Blocks() as demo: - state = gr.State([]) - - char_selection = gr.Textbox(lines=1 , label="Enter the character you want to talk to:") - title = gr.Markdown( visible=False) - chatbot = gr.Chatbot(visible=False) - msg = gr.Textbox(visible=False) - - char_selection.submit(set_character_name, [char_selection, state], [chatbot, msg, char_selection, title,state]) - - msg.submit(respond, [msg, state], [chatbot, msg,state]) - - - -demo.launch() diff --git a/spaces/Sky5408er/vits-uma-genshin-honkai/transforms.py b/spaces/Sky5408er/vits-uma-genshin-honkai/transforms.py deleted file mode 100644 index 4793d67ca5a5630e0ffe0f9fb29445c949e64dae..0000000000000000000000000000000000000000 --- a/spaces/Sky5408er/vits-uma-genshin-honkai/transforms.py +++ /dev/null @@ -1,193 +0,0 @@ -import torch -from torch.nn import functional as F - -import numpy as np - - -DEFAULT_MIN_BIN_WIDTH = 1e-3 -DEFAULT_MIN_BIN_HEIGHT = 1e-3 -DEFAULT_MIN_DERIVATIVE = 1e-3 - - -def piecewise_rational_quadratic_transform(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails=None, - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - - if tails is None: - spline_fn = rational_quadratic_spline - spline_kwargs = {} - else: - spline_fn = unconstrained_rational_quadratic_spline - spline_kwargs = { - 'tails': tails, - 'tail_bound': tail_bound - } - - outputs, logabsdet = spline_fn( - inputs=inputs, - unnormalized_widths=unnormalized_widths, - unnormalized_heights=unnormalized_heights, - unnormalized_derivatives=unnormalized_derivatives, - inverse=inverse, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative, - **spline_kwargs - ) - return outputs, logabsdet - - -def searchsorted(bin_locations, inputs, eps=1e-6): - bin_locations[..., -1] += eps - return torch.sum( - inputs[..., None] >= bin_locations, - dim=-1 - ) - 1 - - -def unconstrained_rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - tails='linear', - tail_bound=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - inside_interval_mask = (inputs >= -tail_bound) & (inputs <= tail_bound) - outside_interval_mask = ~inside_interval_mask - - outputs = torch.zeros_like(inputs) - logabsdet = torch.zeros_like(inputs) - - if tails == 'linear': - unnormalized_derivatives = F.pad(unnormalized_derivatives, pad=(1, 1)) - constant = np.log(np.exp(1 - min_derivative) - 1) - unnormalized_derivatives[..., 0] = constant - unnormalized_derivatives[..., -1] = constant - - outputs[outside_interval_mask] = 
inputs[outside_interval_mask] - logabsdet[outside_interval_mask] = 0 - else: - raise RuntimeError('{} tails are not implemented.'.format(tails)) - - outputs[inside_interval_mask], logabsdet[inside_interval_mask] = rational_quadratic_spline( - inputs=inputs[inside_interval_mask], - unnormalized_widths=unnormalized_widths[inside_interval_mask, :], - unnormalized_heights=unnormalized_heights[inside_interval_mask, :], - unnormalized_derivatives=unnormalized_derivatives[inside_interval_mask, :], - inverse=inverse, - left=-tail_bound, right=tail_bound, bottom=-tail_bound, top=tail_bound, - min_bin_width=min_bin_width, - min_bin_height=min_bin_height, - min_derivative=min_derivative - ) - - return outputs, logabsdet - -def rational_quadratic_spline(inputs, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=False, - left=0., right=1., bottom=0., top=1., - min_bin_width=DEFAULT_MIN_BIN_WIDTH, - min_bin_height=DEFAULT_MIN_BIN_HEIGHT, - min_derivative=DEFAULT_MIN_DERIVATIVE): - if torch.min(inputs) < left or torch.max(inputs) > right: - raise ValueError('Input to a transform is not within its domain') - - num_bins = unnormalized_widths.shape[-1] - - if min_bin_width * num_bins > 1.0: - raise ValueError('Minimal bin width too large for the number of bins') - if min_bin_height * num_bins > 1.0: - raise ValueError('Minimal bin height too large for the number of bins') - - widths = F.softmax(unnormalized_widths, dim=-1) - widths = min_bin_width + (1 - min_bin_width * num_bins) * widths - cumwidths = torch.cumsum(widths, dim=-1) - cumwidths = F.pad(cumwidths, pad=(1, 0), mode='constant', value=0.0) - cumwidths = (right - left) * cumwidths + left - cumwidths[..., 0] = left - cumwidths[..., -1] = right - widths = cumwidths[..., 1:] - cumwidths[..., :-1] - - derivatives = min_derivative + F.softplus(unnormalized_derivatives) - - heights = F.softmax(unnormalized_heights, dim=-1) - heights = min_bin_height + (1 - min_bin_height * num_bins) * heights - cumheights = torch.cumsum(heights, dim=-1) - cumheights = F.pad(cumheights, pad=(1, 0), mode='constant', value=0.0) - cumheights = (top - bottom) * cumheights + bottom - cumheights[..., 0] = bottom - cumheights[..., -1] = top - heights = cumheights[..., 1:] - cumheights[..., :-1] - - if inverse: - bin_idx = searchsorted(cumheights, inputs)[..., None] - else: - bin_idx = searchsorted(cumwidths, inputs)[..., None] - - input_cumwidths = cumwidths.gather(-1, bin_idx)[..., 0] - input_bin_widths = widths.gather(-1, bin_idx)[..., 0] - - input_cumheights = cumheights.gather(-1, bin_idx)[..., 0] - delta = heights / widths - input_delta = delta.gather(-1, bin_idx)[..., 0] - - input_derivatives = derivatives.gather(-1, bin_idx)[..., 0] - input_derivatives_plus_one = derivatives[..., 1:].gather(-1, bin_idx)[..., 0] - - input_heights = heights.gather(-1, bin_idx)[..., 0] - - if inverse: - a = (((inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta) - + input_heights * (input_delta - input_derivatives))) - b = (input_heights * input_derivatives - - (inputs - input_cumheights) * (input_derivatives - + input_derivatives_plus_one - - 2 * input_delta)) - c = - input_delta * (inputs - input_cumheights) - - discriminant = b.pow(2) - 4 * a * c - assert (discriminant >= 0).all() - - root = (2 * c) / (-b - torch.sqrt(discriminant)) - outputs = root * input_bin_widths + input_cumwidths - - theta_one_minus_theta = root * (1 - root) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one 
- 2 * input_delta) - * theta_one_minus_theta) - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * root.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - root).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, -logabsdet - else: - theta = (inputs - input_cumwidths) / input_bin_widths - theta_one_minus_theta = theta * (1 - theta) - - numerator = input_heights * (input_delta * theta.pow(2) - + input_derivatives * theta_one_minus_theta) - denominator = input_delta + ((input_derivatives + input_derivatives_plus_one - 2 * input_delta) - * theta_one_minus_theta) - outputs = input_cumheights + numerator / denominator - - derivative_numerator = input_delta.pow(2) * (input_derivatives_plus_one * theta.pow(2) - + 2 * input_delta * theta_one_minus_theta - + input_derivatives * (1 - theta).pow(2)) - logabsdet = torch.log(derivative_numerator) - 2 * torch.log(denominator) - - return outputs, logabsdet diff --git a/spaces/Spjkjlkkklj/dalle/index.html b/spaces/Spjkjlkkklj/dalle/index.html deleted file mode 100644 index 34e195b924d641d56e48ca7f05870c79ba68ca66..0000000000000000000000000000000000000000 --- a/spaces/Spjkjlkkklj/dalle/index.html +++ /dev/null @@ -1,54 +0,0 @@ - - - - - - - - - - - - - - - - - - - - -
    - - \ No newline at end of file diff --git a/spaces/Sudhanshu976/NLP_FULL_APP/pages/4_LANGUAGE-DETECTOR-MODEL.py b/spaces/Sudhanshu976/NLP_FULL_APP/pages/4_LANGUAGE-DETECTOR-MODEL.py deleted file mode 100644 index 3786e8a5e6e76202473b5a1c4712ce24510d61c1..0000000000000000000000000000000000000000 --- a/spaces/Sudhanshu976/NLP_FULL_APP/pages/4_LANGUAGE-DETECTOR-MODEL.py +++ /dev/null @@ -1,88 +0,0 @@ -import streamlit as st -import pickle -import re -import string -import nltk -from nltk.corpus import stopwords -from nltk.tokenize import word_tokenize -from nltk.stem.porter import PorterStemmer -stemmer = PorterStemmer() - -st.set_page_config( - page_title="NLP WEB APP" -) - -st.title("LANGUAGE DETECTOR MODEL") -st.sidebar.success("Select a page above") -nltk.download('stopwords') -nltk.download('punkt') - -def preprocess(text): - text = text.lower() - text = re.sub(r'\d+', '', text) - translator = str.maketrans('', '', string.punctuation) - text = text.translate(translator) - - - stop_words = set(stopwords.words("english")) - word_tokens = word_tokenize(text) - filtered_text = [word for word in word_tokens if word not in stop_words] - - stems = [stemmer.stem(word) for word in filtered_text] - preprocessed_text = ' '.join(stems) - return preprocessed_text - - - -cv = pickle.load(open('language-detector-models/vectorizer.pkl','rb')) -model = pickle.load(open('language-detector-models/model.pkl','rb')) - -message= st.text_input("ENTER THE MESSAGE") - - -if st.button("PREDICT"): - # PREPROCESS - transformed_text = preprocess(message) - - # VECTORIZE - vector_input = cv.transform([message]) - - # PREDICTION - result = model.predict(vector_input)[0] - - - # DISPLAY - if result==0: - st.header("ARABIC") - elif result==1: - st.header("DANISH") - elif result==2: - st.header("DUTCH") - elif result==3: - st.header("ENGLISH") - elif result==4: - st.header("FRENCH") - elif result==5: - st.header("GERMAN") - elif result==6: - st.header("GREEK") - elif result==7: - st.header("HINDI") - elif result==8: - st.header("ITALIAN") - elif result==9: - st.header("KANNADA") - elif result==10: - st.header("MALYALAM") - elif result==11: - st.header("PORTUGESE") - elif result==12: - st.header("RUSSIAN") - elif result==13: - st.header("SPANISH") - elif result==14: - st.header("SWEDISH") - elif result==15: - st.header("TAMIL") - else: - st.header("TURKISH") \ No newline at end of file diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/autocall.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/autocall.py deleted file mode 100644 index 54beec3f58dc87a5e18110a121b1c402f982e149..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/autocall.py +++ /dev/null @@ -1,70 +0,0 @@ -# encoding: utf-8 -""" -Autocall capabilities for IPython.core. - -Authors: - -* Brian Granger -* Fernando Perez -* Thomas Kluyver - -Notes ------ -""" - -#----------------------------------------------------------------------------- -# Copyright (C) 2008-2011 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. 
-#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - - -#----------------------------------------------------------------------------- -# Code -#----------------------------------------------------------------------------- - -class IPyAutocall(object): - """ Instances of this class are always autocalled - - This happens regardless of 'autocall' variable state. Use this to - develop macro-like mechanisms. - """ - _ip = None - rewrite = True - def __init__(self, ip=None): - self._ip = ip - - def set_ip(self, ip): - """Will be used to set _ip point to current ipython instance b/f call - - Override this method if you don't want this to happen. - - """ - self._ip = ip - - -class ExitAutocall(IPyAutocall): - """An autocallable object which will be added to the user namespace so that - exit, exit(), quit or quit() are all valid ways to close the shell.""" - rewrite = False - - def __call__(self): - self._ip.ask_exit() - -class ZMQExitAutocall(ExitAutocall): - """Exit IPython. Autocallable, so it needn't be explicitly called. - - Parameters - ---------- - keep_kernel : bool - If True, leave the kernel alive. Otherwise, tell the kernel to exit too - (default). - """ - def __call__(self, keep_kernel=False): - self._ip.keepkernel_on_exit = keep_kernel - self._ip.ask_exit() diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/splitinput.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/splitinput.py deleted file mode 100644 index 5bc3e32542185c50c597d4470f8a4d38b2015247..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/IPython/core/splitinput.py +++ /dev/null @@ -1,138 +0,0 @@ -# encoding: utf-8 -""" -Simple utility for splitting user input. This is used by both inputsplitter and -prefilter. - -Authors: - -* Brian Granger -* Fernando Perez -""" - -#----------------------------------------------------------------------------- -# Copyright (C) 2008-2011 The IPython Development Team -# -# Distributed under the terms of the BSD License. The full license is in -# the file COPYING, distributed as part of this software. -#----------------------------------------------------------------------------- - -#----------------------------------------------------------------------------- -# Imports -#----------------------------------------------------------------------------- - -import re -import sys - -from IPython.utils import py3compat -from IPython.utils.encoding import get_stream_enc -from IPython.core.oinspect import OInfo - -#----------------------------------------------------------------------------- -# Main function -#----------------------------------------------------------------------------- - -# RegExp for splitting line contents into pre-char//first word-method//rest. -# For clarity, each group in on one line. - -# WARNING: update the regexp if the escapes in interactiveshell are changed, as -# they are hardwired in. - -# Although it's not solely driven by the regex, note that: -# ,;/% only trigger if they are the first character on the line -# ! and !! trigger if they are first char(s) *or* follow an indent -# ? triggers as first or last char. - -line_split = re.compile(r""" - ^(\s*) # any leading space - ([,;/%]|!!?|\?\??)? 
# escape character or characters - \s*(%{0,2}[\w\.\*]*) # function/method, possibly with leading % - # to correctly treat things like '?%magic' - (.*?$|$) # rest of line - """, re.VERBOSE) - - -def split_user_input(line, pattern=None): - """Split user input into initial whitespace, escape character, function part - and the rest. - """ - # We need to ensure that the rest of this routine deals only with unicode - encoding = get_stream_enc(sys.stdin, 'utf-8') - line = py3compat.cast_unicode(line, encoding) - - if pattern is None: - pattern = line_split - match = pattern.match(line) - if not match: - # print "match failed for line '%s'" % line - try: - ifun, the_rest = line.split(None,1) - except ValueError: - # print "split failed for line '%s'" % line - ifun, the_rest = line, u'' - pre = re.match(r'^(\s*)(.*)',line).groups()[0] - esc = "" - else: - pre, esc, ifun, the_rest = match.groups() - - #print 'line:<%s>' % line # dbg - #print 'pre <%s> ifun <%s> rest <%s>' % (pre,ifun.strip(),the_rest) # dbg - return pre, esc or '', ifun.strip(), the_rest.lstrip() - - -class LineInfo(object): - """A single line of input and associated info. - - Includes the following as properties: - - line - The original, raw line - - continue_prompt - Is this line a continuation in a sequence of multiline input? - - pre - Any leading whitespace. - - esc - The escape character(s) in pre or the empty string if there isn't one. - Note that '!!' and '??' are possible values for esc. Otherwise it will - always be a single character. - - ifun - The 'function part', which is basically the maximal initial sequence - of valid python identifiers and the '.' character. This is what is - checked for alias and magic transformations, used for auto-calling, - etc. In contrast to Python identifiers, it may start with "%" and contain - "*". - - the_rest - Everything else on the line. - """ - def __init__(self, line, continue_prompt=False): - self.line = line - self.continue_prompt = continue_prompt - self.pre, self.esc, self.ifun, self.the_rest = split_user_input(line) - - self.pre_char = self.pre.strip() - if self.pre_char: - self.pre_whitespace = '' # No whitespace allowed before esc chars - else: - self.pre_whitespace = self.pre - - def ofind(self, ip) -> OInfo: - """Do a full, attribute-walking lookup of the ifun in the various - namespaces for the given IPython InteractiveShell instance. - - Return a dict with keys: {found, obj, ospace, ismagic} - - Note: can cause state changes because of calling getattr, but should - only be run if autocall is on and if the line hasn't matched any - other, less dangerous handlers. - - Does cache the results of the call, so can be called multiple times - without worrying about *further* damaging state. 
- """ - return ip._ofind(self.ifun) - - def __str__(self): - return "LineInfo [%s|%s|%s|%s]" %(self.pre, self.esc, self.ifun, self.the_rest) diff --git a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageMath.py b/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageMath.py deleted file mode 100644 index ac7d36b698c2ec9839d8a771734c9f730f701534..0000000000000000000000000000000000000000 --- a/spaces/SungBeom/chatwine-korean/.venv/Lib/site-packages/PIL/ImageMath.py +++ /dev/null @@ -1,263 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# a simple math add-on for the Python Imaging Library -# -# History: -# 1999-02-15 fl Original PIL Plus release -# 2005-05-05 fl Simplified and cleaned up for PIL 1.1.6 -# 2005-09-12 fl Fixed int() and float() for Python 2.4.1 -# -# Copyright (c) 1999-2005 by Secret Labs AB -# Copyright (c) 2005 by Fredrik Lundh -# -# See the README file for information on usage and redistribution. -# - -import builtins - -from . import Image, _imagingmath - - -def _isconstant(v): - return isinstance(v, (int, float)) - - -class _Operand: - """Wraps an image operand, providing standard operators""" - - def __init__(self, im): - self.im = im - - def __fixup(self, im1): - # convert image to suitable mode - if isinstance(im1, _Operand): - # argument was an image. - if im1.im.mode in ("1", "L"): - return im1.im.convert("I") - elif im1.im.mode in ("I", "F"): - return im1.im - else: - msg = f"unsupported mode: {im1.im.mode}" - raise ValueError(msg) - else: - # argument was a constant - if _isconstant(im1) and self.im.mode in ("1", "L", "I"): - return Image.new("I", self.im.size, im1) - else: - return Image.new("F", self.im.size, im1) - - def apply(self, op, im1, im2=None, mode=None): - im1 = self.__fixup(im1) - if im2 is None: - # unary operation - out = Image.new(mode or im1.mode, im1.size, None) - im1.load() - try: - op = getattr(_imagingmath, op + "_" + im1.mode) - except AttributeError as e: - msg = f"bad operand type for '{op}'" - raise TypeError(msg) from e - _imagingmath.unop(op, out.im.id, im1.im.id) - else: - # binary operation - im2 = self.__fixup(im2) - if im1.mode != im2.mode: - # convert both arguments to floating point - if im1.mode != "F": - im1 = im1.convert("F") - if im2.mode != "F": - im2 = im2.convert("F") - if im1.size != im2.size: - # crop both arguments to a common size - size = (min(im1.size[0], im2.size[0]), min(im1.size[1], im2.size[1])) - if im1.size != size: - im1 = im1.crop((0, 0) + size) - if im2.size != size: - im2 = im2.crop((0, 0) + size) - out = Image.new(mode or im1.mode, im1.size, None) - im1.load() - im2.load() - try: - op = getattr(_imagingmath, op + "_" + im1.mode) - except AttributeError as e: - msg = f"bad operand type for '{op}'" - raise TypeError(msg) from e - _imagingmath.binop(op, out.im.id, im1.im.id, im2.im.id) - return _Operand(out) - - # unary operators - def __bool__(self): - # an image is "true" if it contains at least one non-zero pixel - return self.im.getbbox() is not None - - def __abs__(self): - return self.apply("abs", self) - - def __pos__(self): - return self - - def __neg__(self): - return self.apply("neg", self) - - # binary operators - def __add__(self, other): - return self.apply("add", self, other) - - def __radd__(self, other): - return self.apply("add", other, self) - - def __sub__(self, other): - return self.apply("sub", self, other) - - def __rsub__(self, other): - return self.apply("sub", other, self) - - def __mul__(self, other): - return self.apply("mul", self, other) - - def 
__rmul__(self, other): - return self.apply("mul", other, self) - - def __truediv__(self, other): - return self.apply("div", self, other) - - def __rtruediv__(self, other): - return self.apply("div", other, self) - - def __mod__(self, other): - return self.apply("mod", self, other) - - def __rmod__(self, other): - return self.apply("mod", other, self) - - def __pow__(self, other): - return self.apply("pow", self, other) - - def __rpow__(self, other): - return self.apply("pow", other, self) - - # bitwise - def __invert__(self): - return self.apply("invert", self) - - def __and__(self, other): - return self.apply("and", self, other) - - def __rand__(self, other): - return self.apply("and", other, self) - - def __or__(self, other): - return self.apply("or", self, other) - - def __ror__(self, other): - return self.apply("or", other, self) - - def __xor__(self, other): - return self.apply("xor", self, other) - - def __rxor__(self, other): - return self.apply("xor", other, self) - - def __lshift__(self, other): - return self.apply("lshift", self, other) - - def __rshift__(self, other): - return self.apply("rshift", self, other) - - # logical - def __eq__(self, other): - return self.apply("eq", self, other) - - def __ne__(self, other): - return self.apply("ne", self, other) - - def __lt__(self, other): - return self.apply("lt", self, other) - - def __le__(self, other): - return self.apply("le", self, other) - - def __gt__(self, other): - return self.apply("gt", self, other) - - def __ge__(self, other): - return self.apply("ge", self, other) - - -# conversions -def imagemath_int(self): - return _Operand(self.im.convert("I")) - - -def imagemath_float(self): - return _Operand(self.im.convert("F")) - - -# logical -def imagemath_equal(self, other): - return self.apply("eq", self, other, mode="I") - - -def imagemath_notequal(self, other): - return self.apply("ne", self, other, mode="I") - - -def imagemath_min(self, other): - return self.apply("min", self, other) - - -def imagemath_max(self, other): - return self.apply("max", self, other) - - -def imagemath_convert(self, mode): - return _Operand(self.im.convert(mode)) - - -ops = {} -for k, v in list(globals().items()): - if k[:10] == "imagemath_": - ops[k[10:]] = v - - -def eval(expression, _dict={}, **kw): - """ - Evaluates an image expression. - - :param expression: A string containing a Python-style expression. - :param options: Values to add to the evaluation context. You - can either use a dictionary, or one or more keyword - arguments. - :return: The evaluated expression. This is usually an image object, but can - also be an integer, a floating point value, or a pixel tuple, - depending on the expression. 
- """ - - # build execution namespace - args = ops.copy() - args.update(_dict) - args.update(kw) - for k, v in list(args.items()): - if hasattr(v, "im"): - args[k] = _Operand(v) - - compiled_code = compile(expression, "", "eval") - - def scan(code): - for const in code.co_consts: - if type(const) == type(compiled_code): - scan(const) - - for name in code.co_names: - if name not in args and name != "abs": - msg = f"'{name}' not allowed" - raise ValueError(msg) - - scan(compiled_code) - out = builtins.eval(expression, {"__builtins": {"abs": abs}}, args) - try: - return out.im - except AttributeError: - return out diff --git a/spaces/Surn/UnlimitedMusicGen/audiocraft/utils/autocast.py b/spaces/Surn/UnlimitedMusicGen/audiocraft/utils/autocast.py deleted file mode 100644 index ed644843bb37cf8a92a20fbd51d6cebaa43b9a08..0000000000000000000000000000000000000000 --- a/spaces/Surn/UnlimitedMusicGen/audiocraft/utils/autocast.py +++ /dev/null @@ -1,40 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -import torch - - -class TorchAutocast: - """TorchAutocast utility class. - Allows you to enable and disable autocast. This is specially useful - when dealing with different architectures and clusters with different - levels of support. - - Args: - enabled (bool): Whether to enable torch.autocast or not. - args: Additional args for torch.autocast. - kwargs: Additional kwargs for torch.autocast - """ - def __init__(self, enabled: bool, *args, **kwargs): - self.autocast = torch.autocast(*args, **kwargs) if enabled else None - - def __enter__(self): - if self.autocast is None: - return - try: - self.autocast.__enter__() - except RuntimeError: - device = self.autocast.device - dtype = self.autocast.fast_dtype - raise RuntimeError( - f"There was an error autocasting with dtype={dtype} device={device}\n" - "If you are on the FAIR Cluster, you might need to use autocast_dtype=float16" - ) - - def __exit__(self, *args, **kwargs): - if self.autocast is None: - return - self.autocast.__exit__(*args, **kwargs) diff --git a/spaces/TH5314/newbing/src/components/chat.tsx b/spaces/TH5314/newbing/src/components/chat.tsx deleted file mode 100644 index a37ab1cc96ca2e6bfd9acbe313a8d946bfd5c3d4..0000000000000000000000000000000000000000 --- a/spaces/TH5314/newbing/src/components/chat.tsx +++ /dev/null @@ -1,93 +0,0 @@ -'use client' - -import { useCallback, useEffect, useMemo, useState } from 'react' -import { useAtom } from 'jotai' -import Image from 'next/image' -import { cn } from '@/lib/utils' -import { ChatList } from '@/components/chat-list' -import { ChatPanel } from '@/components/chat-panel' -import { WelcomeScreen } from '@/components/welcome-screen' -import { ChatScrollAnchor } from '@/components/chat-scroll-anchor' -import { ToneSelector } from './tone-selector' -import { ChatHeader } from './chat-header' -import { ChatSuggestions } from './chat-suggestions' -import { bingConversationStyleAtom } from '@/state' -import { ButtonScrollToBottom } from '@/components/button-scroll-to-bottom' -import StopIcon from '@/assets/images/stop.svg' -import { useBing } from '@/lib/hooks/use-bing' -import { ChatMessageModel } from '@/lib/bots/bing/types' -import { ChatNotification } from './chat-notification' -import { Settings } from './settings' -import { ChatHistory } from './chat-history' - -export type ChatProps = React.ComponentProps<'div'> & { initialMessages?: 
ChatMessageModel[] } - -export default function Chat({ className }: ChatProps) { - - const [bingStyle, setBingStyle] = useAtom(bingConversationStyleAtom) - const { - messages, - sendMessage, - resetConversation, - stopGenerating, - setInput, - bot, - input, - generating, - isSpeaking, - uploadImage, - attachmentList, - setAttachmentList, - } = useBing() - - useEffect(() => { - window.scrollTo({ - top: document.body.offsetHeight, - behavior: 'smooth' - }) - }, []) - - return ( -
    - -
    - - - - {messages.length ? ( - <> - - - - - - {generating ? ( -
    - -
    - ) : null} - - ) : null} -
    - - -
    - ) -} diff --git a/spaces/TabPFN/TabPFNPrediction/TabPFN/utils.py b/spaces/TabPFN/TabPFNPrediction/TabPFN/utils.py deleted file mode 100644 index 4f38550250e1d7de2797968caf99e2a0c0231eeb..0000000000000000000000000000000000000000 --- a/spaces/TabPFN/TabPFNPrediction/TabPFN/utils.py +++ /dev/null @@ -1,293 +0,0 @@ -import os -import math -import argparse -import random -import datetime -import itertools - -import torch -from torch import nn -from torch.optim.lr_scheduler import LambdaLR -import numpy as np - -# copied from huggingface -def get_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, num_cycles=0.5, last_epoch=-1): - """ Create a schedule with a learning rate that decreases following the - values of the cosine function between 0 and `pi * cycles` after a warmup - period during which it increases linearly between 0 and 1. - """ - - def lr_lambda(current_step): - if current_step < num_warmup_steps: - return float(current_step) / float(max(1, num_warmup_steps)) - progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) - return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) - - return LambdaLR(optimizer, lr_lambda, last_epoch) - -# copied from huggingface -def get_restarting_cosine_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, steps_per_restart, num_cycles=0.5, last_epoch=-1): - assert num_training_steps % steps_per_restart == 0 - - def inner_lr_lambda(current_step, num_warmup_steps, num_training_steps): - if current_step < num_warmup_steps: - return float(current_step) / float(max(1, num_warmup_steps)) - progress = float(current_step - num_warmup_steps) / float(max(1, num_training_steps - num_warmup_steps)) - return max(0.0, 0.5 * (1.0 + math.cos(math.pi * float(num_cycles) * 2.0 * progress))) - - def lr_lambda(current_step): - inner_step = current_step % steps_per_restart - return inner_lr_lambda(inner_step, - num_warmup_steps if current_step < steps_per_restart else 0, - steps_per_restart - ) - - - return LambdaLR(optimizer, lr_lambda, last_epoch) - -# copied from huggingface -def get_linear_schedule_with_warmup(optimizer, num_warmup_steps, num_training_steps, last_epoch=-1): - """ - Create a schedule with a learning rate that decreases linearly from the initial lr set in the optimizer to 0, after - a warmup period during which it increases linearly from 0 to the initial lr set in the optimizer. - - Args: - optimizer (:class:`~torch.optim.Optimizer`): - The optimizer for which to schedule the learning rate. - num_warmup_steps (:obj:`int`): - The number of steps for the warmup phase. - num_training_steps (:obj:`int`): - The total number of training steps. - last_epoch (:obj:`int`, `optional`, defaults to -1): - The index of the last epoch when resuming training. - - Return: - :obj:`torch.optim.lr_scheduler.LambdaLR` with the appropriate schedule. 
- """ - - def lr_lambda(current_step: int): - if current_step < num_warmup_steps: - return float(current_step) / float(max(1, num_warmup_steps)) - return max( - 0.0, float(num_training_steps - current_step) / float(max(1, num_training_steps - num_warmup_steps)) - ) - - return LambdaLR(optimizer, lr_lambda, last_epoch) - - -def get_openai_lr(transformer_model): - num_params = sum(p.numel() for p in transformer_model.parameters()) - return 0.003239 - 0.0001395 * math.log(num_params) - - -def get_weighted_single_eval_pos_sampler(max_len): - """ - This gives a sampler that can be used for `single_eval_pos` which yields good performance for all positions p, - where p <= `max_len`. At most `max_len` - 1 examples are shown to the Transformer. - :return: Sampler that can be fed to `train()` as `single_eval_pos_gen`. - """ - return lambda: random.choices(range(max_len), [1 / (max_len - i) for i in range(max_len)])[0] - - -def get_uniform_single_eval_pos_sampler(max_len, min_len=0): - """ - Just sample any evaluation position with the same weight - :return: Sampler that can be fed to `train()` as `single_eval_pos_gen`. - """ - return lambda: random.choices(range(min_len, max_len))[0] - - -class SeqBN(nn.Module): - def __init__(self, d_model): - super().__init__() - self.bn = nn.BatchNorm1d(d_model) - self.d_model = d_model - - def forward(self, x): - assert self.d_model == x.shape[-1] - flat_x = x.view(-1, self.d_model) - flat_x = self.bn(flat_x) - return flat_x.view(*x.shape) - - -def set_locals_in_self(locals): - """ - Call this function like `set_locals_in_self(locals())` to set all local variables as object variables. - Especially useful right at the beginning of `__init__`. - :param locals: `locals()` - """ - self = locals['self'] - for var_name, val in locals.items(): - if var_name != 'self': setattr(self, var_name, val) - - -default_device = 'cuda:0' if torch.cuda.is_available() else 'cpu:0' - - -# Copied from StackOverflow, but we do an eval on the values additionally -class StoreDictKeyPair(argparse.Action): - def __init__(self, option_strings, dest, nargs=None, **kwargs): - self._nargs = nargs - super(StoreDictKeyPair, self).__init__(option_strings, dest, nargs=nargs, **kwargs) - - def __call__(self, parser, namespace, values, option_string=None): - my_dict = {} - for kv in values: - k, v = kv.split("=") - try: - my_dict[k] = eval(v) - except NameError: - my_dict[k] = v - setattr(namespace, self.dest, my_dict) - print("dict values: {}".format(my_dict)) - -def get_nan_value(v, set_value_to_nan=0.0): - if random.random() < set_value_to_nan: - return v - else: - return random.choice([-999, 0, 1, 999]) - -def to_ranking(data): - x = (data >= data.unsqueeze(-3)) - x = x.sum(0) - return x -# TODO: Is there a better way to do this? -# 1. Cmparing to unique elements: When all values are different we still get quadratic blowup -# 2. Argsort(Argsort()) returns ranking, but with duplicate values there is an ordering which is problematic -# 3. Argsort(Argsort(Unique))->Scatter seems a bit complicated, doesn't have quadratic blowup, but how fast? 
-def to_ranking_low_mem(data): - x = torch.zeros_like(data) - for col in range(data.shape[-1]): - x_ = (data[:, :, col] >= data[:, :, col].unsqueeze(-2)) - x_ = x_.sum(0) - x[:, :, col] = x_ - return x - -def nan_handling_missing_for_unknown_reason_value(set_value_to_nan=0.0): - return get_nan_value(float('nan'), set_value_to_nan) - -def nan_handling_missing_for_no_reason_value(set_value_to_nan=0.0): - return get_nan_value(float('-inf'), set_value_to_nan) - -def nan_handling_missing_for_a_reason_value(set_value_to_nan=0.0): - return get_nan_value(float('inf'), set_value_to_nan) - -def torch_nanmean(x, axis=0, return_nanshare=False): - num = torch.where(torch.isnan(x), torch.full_like(x, 0), torch.full_like(x, 1)).sum(axis=axis) - value = torch.where(torch.isnan(x), torch.full_like(x, 0), x).sum(axis=axis) - if return_nanshare: - return value / num, 1.-num/x.shape[axis] - return value / num - -def torch_nanstd(x, axis=0): - num = torch.where(torch.isnan(x), torch.full_like(x, 0), torch.full_like(x, 1)).sum(axis=axis) - value = torch.where(torch.isnan(x), torch.full_like(x, 0), x).sum(axis=axis) - mean = value / num - mean_broadcast = torch.repeat_interleave(mean.unsqueeze(axis), x.shape[axis], dim=axis) - return torch.sqrt(torch.nansum(torch.square(mean_broadcast - x), axis=axis) / (num - 1)) - -def normalize_data(data, normalize_positions=-1): - if normalize_positions > 0: - mean = torch_nanmean(data[:normalize_positions], axis=0) - std = torch_nanstd(data[:normalize_positions], axis=0) + .000001 - else: - mean = torch_nanmean(data, axis=0) - std = torch_nanstd(data, axis=0) + .000001 - data = (data - mean) / std - data = torch.clip(data, min=-100, max=100) - - return data - -def remove_outliers(X, n_sigma=4, normalize_positions=-1): - # Expects T, B, H - assert len(X.shape) == 3, "X must be T,B,H" - #for b in range(X.shape[1]): - #for col in range(X.shape[2]): - data = X if normalize_positions == -1 else X[:normalize_positions] - data_clean = data[:].clone() - data_mean, data_std = torch_nanmean(data, axis=0), torch_nanstd(data, axis=0) - cut_off = data_std * n_sigma - lower, upper = data_mean - cut_off, data_mean + cut_off - - data_clean[torch.logical_or(data_clean > upper, data_clean < lower)] = np.nan - data_mean, data_std = torch_nanmean(data_clean, axis=0), torch_nanstd(data_clean, axis=0) - cut_off = data_std * n_sigma - lower, upper = data_mean - cut_off, data_mean + cut_off - - X = torch.maximum(-torch.log(1+torch.abs(X)) + lower, X) - X = torch.minimum(torch.log(1+torch.abs(X)) + upper, X) - # print(ds[1][data < lower, col], ds[1][data > upper, col], ds[1][~np.isnan(data), col].shape, data_mean, data_std) - return X - -def bool_mask_to_att_mask(mask): - return mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0)) - -def print_on_master_only(is_master): - import builtins as __builtin__ - - builtin_print = __builtin__.print - - def print(*args, **kwargs): - force = kwargs.pop("force", False) - if is_master or force: - builtin_print(*args, **kwargs) - - __builtin__.print = print - - -def init_dist(device): - print('init dist') - if 'LOCAL_RANK' in os.environ: - # launched with torch.distributed.launch - rank = int(os.environ["LOCAL_RANK"]) - print('torch.distributed.launch and my rank is', rank) - torch.cuda.set_device(rank) - os.environ['CUDA_VISIBLE_DEVICES'] = str(rank) - torch.distributed.init_process_group(backend="nccl", init_method="env://", timeout=datetime.timedelta(seconds=20), - world_size=torch.cuda.device_count(), rank=rank) - 
torch.distributed.barrier() - print_on_master_only(rank == 0) - print(f"Distributed training on {torch.cuda.device_count()} GPUs, this is rank {rank}, " - "only I can print, but when using print(..., force=True) it will print on all ranks.") - return True, rank, f'cuda:{rank}' - elif 'SLURM_PROCID' in os.environ and torch.cuda.device_count() > 1: - # this is for multi gpu when starting with submitit - assert device != 'cpu:0' - rank = int(os.environ['SLURM_PROCID']) - os.environ['MASTER_ADDR'] = 'localhost' - os.environ['MASTER_PORT'] = '12355' - torch.cuda.set_device(rank) - os.environ['CUDA_VISIBLE_DEVICES'] = str(rank) - print('distributed submitit launch and my rank is', rank) - torch.distributed.init_process_group(backend="nccl", init_method="env://", timeout=datetime.timedelta(seconds=20), - world_size=torch.cuda.device_count(), rank=rank) - torch.distributed.barrier() - print_on_master_only(rank == 0) - print(f"Distributed training on {torch.cuda.device_count()} GPUs, this is rank {rank}, " - "only I can print, but when using print(..., force=True) it will print on all ranks.") - - return True, rank, f'cuda:{rank}' - else: - print('Not using distributed') - # will not change any of the behavior of print, but allows putting the force=True in the print calls - print_on_master_only(True) - return False, 0, device - -# NOP function for python with statements (x = NOP(); with x:) -class NOP(): - def __enter__(self): - pass - def __exit__(self, type, value, traceback): - pass - -def check_compatibility(dl): - if hasattr(dl, 'num_outputs'): - print('`num_outputs` for the DataLoader is deprecated. It is assumed to be 1 from now on.') - assert dl.num_outputs != 1, "We assume num_outputs to be 1. Instead of the num_ouputs change your loss." \ - "We specify the number of classes in the CE loss." 
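A hedged usage sketch of how init_dist and the rank-gated print above fit together in a training entry point; the __main__ guard and the messages are illustrative, not taken from the TabPFN training script:

if __name__ == "__main__":
    # init_dist patches builtins.print via print_on_master_only, so a plain
    # print() call is silenced on non-zero ranks unless force=True is passed.
    using_dist, rank, device = init_dist(default_device)
    print(f"training on {device}")                 # emitted on rank 0 only
    print(f"hello from rank {rank}", force=True)   # emitted on every rank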
- -def product_dict(dic): - keys = dic.keys() - vals = dic.values() - for instance in itertools.product(*vals): - yield dict(zip(keys, instance)) diff --git a/spaces/Tape/yoga/openpose/body.py b/spaces/Tape/yoga/openpose/body.py deleted file mode 100644 index df53d82f6c2dc50424e09d7f380670f04a59d208..0000000000000000000000000000000000000000 --- a/spaces/Tape/yoga/openpose/body.py +++ /dev/null @@ -1,218 +0,0 @@ -import cv2 -import numpy as np -import math -import time -from scipy.ndimage.filters import gaussian_filter -import matplotlib.pyplot as plt -import matplotlib -import torch -from torchvision import transforms - -from openpose import util -from openpose.model import bodypose_model - -class Body(object): - def __init__(self, model_path): - self.model = bodypose_model() - if torch.cuda.is_available(): - self.model = self.model.cuda() - model_dict = util.transfer(self.model, torch.load(model_path)) - self.model.load_state_dict(model_dict) - self.model.eval() - - def __call__(self, oriImg): - # scale_search = [0.5, 1.0, 1.5, 2.0] - scale_search = [0.5] - boxsize = 368 - stride = 8 - padValue = 128 - thre1 = 0.1 - thre2 = 0.05 - multiplier = [x * boxsize / oriImg.shape[0] for x in scale_search] - heatmap_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 19)) - paf_avg = np.zeros((oriImg.shape[0], oriImg.shape[1], 38)) - - for m in range(len(multiplier)): - scale = multiplier[m] - imageToTest = cv2.resize(oriImg, (0, 0), fx=scale, fy=scale, interpolation=cv2.INTER_CUBIC) - imageToTest_padded, pad = util.padRightDownCorner(imageToTest, stride, padValue) - im = np.transpose(np.float32(imageToTest_padded[:, :, :, np.newaxis]), (3, 2, 0, 1)) / 256 - 0.5 - im = np.ascontiguousarray(im) - - data = torch.from_numpy(im).float() - if torch.cuda.is_available(): - data = data.cuda() - # data = data.permute([2, 0, 1]).unsqueeze(0).float() - with torch.no_grad(): - Mconv7_stage6_L1, Mconv7_stage6_L2 = self.model(data) - Mconv7_stage6_L1 = Mconv7_stage6_L1.cpu().numpy() - Mconv7_stage6_L2 = Mconv7_stage6_L2.cpu().numpy() - - # extract outputs, resize, and remove padding - # heatmap = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[1]].data), (1, 2, 0)) # output 1 is heatmaps - heatmap = np.transpose(np.squeeze(Mconv7_stage6_L2), (1, 2, 0)) # output 1 is heatmaps - heatmap = cv2.resize(heatmap, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) - heatmap = heatmap[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] - heatmap = cv2.resize(heatmap, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) - - # paf = np.transpose(np.squeeze(net.blobs[output_blobs.keys()[0]].data), (1, 2, 0)) # output 0 is PAFs - paf = np.transpose(np.squeeze(Mconv7_stage6_L1), (1, 2, 0)) # output 0 is PAFs - paf = cv2.resize(paf, (0, 0), fx=stride, fy=stride, interpolation=cv2.INTER_CUBIC) - paf = paf[:imageToTest_padded.shape[0] - pad[2], :imageToTest_padded.shape[1] - pad[3], :] - paf = cv2.resize(paf, (oriImg.shape[1], oriImg.shape[0]), interpolation=cv2.INTER_CUBIC) - - heatmap_avg += heatmap_avg + heatmap / len(multiplier) - paf_avg += + paf / len(multiplier) - - all_peaks = [] - peak_counter = 0 - - for part in range(18): - map_ori = heatmap_avg[:, :, part] - one_heatmap = gaussian_filter(map_ori, sigma=3) - - map_left = np.zeros(one_heatmap.shape) - map_left[1:, :] = one_heatmap[:-1, :] - map_right = np.zeros(one_heatmap.shape) - map_right[:-1, :] = one_heatmap[1:, :] - map_up = np.zeros(one_heatmap.shape) - map_up[:, 1:] = one_heatmap[:, :-1] - map_down = 
np.zeros(one_heatmap.shape) - map_down[:, :-1] = one_heatmap[:, 1:] - - peaks_binary = np.logical_and.reduce( - (one_heatmap >= map_left, one_heatmap >= map_right, one_heatmap >= map_up, one_heatmap >= map_down, one_heatmap > thre1)) - peaks = list(zip(np.nonzero(peaks_binary)[1], np.nonzero(peaks_binary)[0])) # note reverse - peaks_with_score = [x + (map_ori[x[1], x[0]],) for x in peaks] - peak_id = range(peak_counter, peak_counter + len(peaks)) - peaks_with_score_and_id = [peaks_with_score[i] + (peak_id[i],) for i in range(len(peak_id))] - - all_peaks.append(peaks_with_score_and_id) - peak_counter += len(peaks) - - # find connection in the specified sequence, center 29 is in the position 15 - limbSeq = [[2, 3], [2, 6], [3, 4], [4, 5], [6, 7], [7, 8], [2, 9], [9, 10], \ - [10, 11], [2, 12], [12, 13], [13, 14], [2, 1], [1, 15], [15, 17], \ - [1, 16], [16, 18], [3, 17], [6, 18]] - # the middle joints heatmap correpondence - mapIdx = [[31, 32], [39, 40], [33, 34], [35, 36], [41, 42], [43, 44], [19, 20], [21, 22], \ - [23, 24], [25, 26], [27, 28], [29, 30], [47, 48], [49, 50], [53, 54], [51, 52], \ - [55, 56], [37, 38], [45, 46]] - - connection_all = [] - special_k = [] - mid_num = 10 - - for k in range(len(mapIdx)): - score_mid = paf_avg[:, :, [x - 19 for x in mapIdx[k]]] - candA = all_peaks[limbSeq[k][0] - 1] - candB = all_peaks[limbSeq[k][1] - 1] - nA = len(candA) - nB = len(candB) - indexA, indexB = limbSeq[k] - if (nA != 0 and nB != 0): - connection_candidate = [] - for i in range(nA): - for j in range(nB): - vec = np.subtract(candB[j][:2], candA[i][:2]) - norm = math.sqrt(vec[0] * vec[0] + vec[1] * vec[1]) - norm = max(0.001, norm) - vec = np.divide(vec, norm) - - startend = list(zip(np.linspace(candA[i][0], candB[j][0], num=mid_num), \ - np.linspace(candA[i][1], candB[j][1], num=mid_num))) - - vec_x = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 0] \ - for I in range(len(startend))]) - vec_y = np.array([score_mid[int(round(startend[I][1])), int(round(startend[I][0])), 1] \ - for I in range(len(startend))]) - - score_midpts = np.multiply(vec_x, vec[0]) + np.multiply(vec_y, vec[1]) - score_with_dist_prior = sum(score_midpts) / len(score_midpts) + min( - 0.5 * oriImg.shape[0] / norm - 1, 0) - criterion1 = len(np.nonzero(score_midpts > thre2)[0]) > 0.8 * len(score_midpts) - criterion2 = score_with_dist_prior > 0 - if criterion1 and criterion2: - connection_candidate.append( - [i, j, score_with_dist_prior, score_with_dist_prior + candA[i][2] + candB[j][2]]) - - connection_candidate = sorted(connection_candidate, key=lambda x: x[2], reverse=True) - connection = np.zeros((0, 5)) - for c in range(len(connection_candidate)): - i, j, s = connection_candidate[c][0:3] - if (i not in connection[:, 3] and j not in connection[:, 4]): - connection = np.vstack([connection, [candA[i][3], candB[j][3], s, i, j]]) - if (len(connection) >= min(nA, nB)): - break - - connection_all.append(connection) - else: - special_k.append(k) - connection_all.append([]) - - # last number in each row is the total parts number of that person - # the second last number in each row is the score of the overall configuration - subset = -1 * np.ones((0, 20)) - candidate = np.array([item for sublist in all_peaks for item in sublist]) - - for k in range(len(mapIdx)): - if k not in special_k: - partAs = connection_all[k][:, 0] - partBs = connection_all[k][:, 1] - indexA, indexB = np.array(limbSeq[k]) - 1 - - for i in range(len(connection_all[k])): # = 1:size(temp,1) - found = 0 - subset_idx = [-1, -1] 
- for j in range(len(subset)): # 1:size(subset,1): - if subset[j][indexA] == partAs[i] or subset[j][indexB] == partBs[i]: - subset_idx[found] = j - found += 1 - - if found == 1: - j = subset_idx[0] - if subset[j][indexB] != partBs[i]: - subset[j][indexB] = partBs[i] - subset[j][-1] += 1 - subset[j][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] - elif found == 2: # if found 2 and disjoint, merge them - j1, j2 = subset_idx - membership = ((subset[j1] >= 0).astype(int) + (subset[j2] >= 0).astype(int))[:-2] - if len(np.nonzero(membership == 2)[0]) == 0: # merge - subset[j1][:-2] += (subset[j2][:-2] + 1) - subset[j1][-2:] += subset[j2][-2:] - subset[j1][-2] += connection_all[k][i][2] - subset = np.delete(subset, j2, 0) - else: # as like found == 1 - subset[j1][indexB] = partBs[i] - subset[j1][-1] += 1 - subset[j1][-2] += candidate[partBs[i].astype(int), 2] + connection_all[k][i][2] - - # if find no partA in the subset, create a new subset - elif not found and k < 17: - row = -1 * np.ones(20) - row[indexA] = partAs[i] - row[indexB] = partBs[i] - row[-1] = 2 - row[-2] = sum(candidate[connection_all[k][i, :2].astype(int), 2]) + connection_all[k][i][2] - subset = np.vstack([subset, row]) - # delete some rows of subset which has few parts occur - deleteIdx = [] - for i in range(len(subset)): - if subset[i][-1] < 4 or subset[i][-2] / subset[i][-1] < 0.4: - deleteIdx.append(i) - subset = np.delete(subset, deleteIdx, axis=0) - - # subset: n*20 array, 0-17 is the index in candidate, 18 is the total score, 19 is the total parts - # candidate: x, y, score, id - return candidate, subset - -if __name__ == "__main__": - body_estimation = Body('../model/body_pose_model.pth') - - test_image = '../images/ski.jpg' - oriImg = cv2.imread(test_image) # B,G,R order - candidate, subset = body_estimation(oriImg) - canvas = util.draw_bodypose(oriImg, candidate, subset) - plt.imshow(canvas[:, :, [2, 1, 0]]) - plt.show() diff --git a/spaces/TencentARC/MasaCtrl/gradio_app/app_utils.py b/spaces/TencentARC/MasaCtrl/gradio_app/app_utils.py deleted file mode 100644 index ad475b3c9097122ac3f624edbecab403c7586c6c..0000000000000000000000000000000000000000 --- a/spaces/TencentARC/MasaCtrl/gradio_app/app_utils.py +++ /dev/null @@ -1,30 +0,0 @@ -import gradio as gr -import numpy as np -import torch -from diffusers import DDIMScheduler -from pytorch_lightning import seed_everything - -from masactrl.diffuser_utils import MasaCtrlPipeline -from masactrl.masactrl_utils import (AttentionBase, - regiter_attention_editor_diffusers) - - -torch.set_grad_enabled(False) - -device = torch.device("cuda") if torch.cuda.is_available() else torch.device( - "cpu") -model_path = "xyn-ai/anything-v4.0" -scheduler = DDIMScheduler(beta_start=0.00085, - beta_end=0.012, - beta_schedule="scaled_linear", - clip_sample=False, - set_alpha_to_one=False) -model = MasaCtrlPipeline.from_pretrained(model_path, - scheduler=scheduler).to(device) - -global_context = { - "model_path": model_path, - "scheduler": scheduler, - "model": model, - "device": device -} \ No newline at end of file diff --git a/spaces/Tetel/chat/EdgeGPT/utilities.py b/spaces/Tetel/chat/EdgeGPT/utilities.py deleted file mode 100644 index cd2be0fcd998e12db495ef3d02a68344caa23018..0000000000000000000000000000000000000000 --- a/spaces/Tetel/chat/EdgeGPT/utilities.py +++ /dev/null @@ -1,39 +0,0 @@ -import json -import locale -import random -import sys -from typing import Union - -from .constants import DELIMITER -from .locale import LocationHint - - -def 
append_identifier(msg: dict) -> str: - # Convert dict to json string - return json.dumps(msg, ensure_ascii=False) + DELIMITER - - -def get_ran_hex(length: int = 32) -> str: - return "".join(random.choice("0123456789abcdef") for _ in range(length)) - - -def get_location_hint_from_locale(locale: str) -> Union[dict, None]: - locale = locale.lower() - if locale == "en-gb": - hint = LocationHint.UK.value - elif locale == "en-ie": - hint = LocationHint.EU.value - elif locale == "zh-cn": - hint = LocationHint.CHINA.value - else: - hint = LocationHint.USA.value - return hint.get("LocationHint") - - -def guess_locale() -> str: - if sys.platform.startswith("win"): - return "en-us" - loc, _ = locale.getlocale() - if not loc: - return "en-us" - return loc.replace("_", "-") diff --git a/spaces/Thaweewat/ControlNet-Architecture/ldm/modules/ema.py b/spaces/Thaweewat/ControlNet-Architecture/ldm/modules/ema.py deleted file mode 100644 index bded25019b9bcbcd0260f0b8185f8c7859ca58c4..0000000000000000000000000000000000000000 --- a/spaces/Thaweewat/ControlNet-Architecture/ldm/modules/ema.py +++ /dev/null @@ -1,80 +0,0 @@ -import torch -from torch import nn - - -class LitEma(nn.Module): - def __init__(self, model, decay=0.9999, use_num_upates=True): - super().__init__() - if decay < 0.0 or decay > 1.0: - raise ValueError('Decay must be between 0 and 1') - - self.m_name2s_name = {} - self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) - self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int) if use_num_upates - else torch.tensor(-1, dtype=torch.int)) - - for name, p in model.named_parameters(): - if p.requires_grad: - # remove as '.'-character is not allowed in buffers - s_name = name.replace('.', '') - self.m_name2s_name.update({name: s_name}) - self.register_buffer(s_name, p.clone().detach().data) - - self.collected_params = [] - - def reset_num_updates(self): - del self.num_updates - self.register_buffer('num_updates', torch.tensor(0, dtype=torch.int)) - - def forward(self, model): - decay = self.decay - - if self.num_updates >= 0: - self.num_updates += 1 - decay = min(self.decay, (1 + self.num_updates) / (10 + self.num_updates)) - - one_minus_decay = 1.0 - decay - - with torch.no_grad(): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - - for key in m_param: - if m_param[key].requires_grad: - sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) - shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) - else: - assert not key in self.m_name2s_name - - def copy_to(self, model): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - for key in m_param: - if m_param[key].requires_grad: - m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) - else: - assert not key in self.m_name2s_name - - def store(self, parameters): - """ - Save the current parameters for restoring later. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. - """ - self.collected_params = [param.clone() for param in parameters] - - def restore(self, parameters): - """ - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. After validation (or model saving), use this to - restore the former parameters. 
- Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. - """ - for c_param, param in zip(self.collected_params, parameters): - param.data.copy_(c_param.data) diff --git a/spaces/TheStinger/Ilaria_RVC/lib/infer_pack/models.py b/spaces/TheStinger/Ilaria_RVC/lib/infer_pack/models.py deleted file mode 100644 index 3665d03bc0514a6ed07d3372ea24717dae1e0a65..0000000000000000000000000000000000000000 --- a/spaces/TheStinger/Ilaria_RVC/lib/infer_pack/models.py +++ /dev/null @@ -1,1142 +0,0 @@ -import math, pdb, os -from time import time as ttime -import torch -from torch import nn -from torch.nn import functional as F -from lib.infer_pack import modules -from lib.infer_pack import attentions -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm, spectral_norm -from lib.infer_pack.commons import init_weights -import numpy as np -from lib.infer_pack import commons - - -class TextEncoder256(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(256, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class TextEncoder768(nn.Module): - def __init__( - self, - out_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=True, - ): - super().__init__() - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.emb_phone = nn.Linear(768, hidden_channels) - self.lrelu = nn.LeakyReLU(0.1, inplace=True) - if f0 == True: - self.emb_pitch = nn.Embedding(256, hidden_channels) # pitch 256 - self.encoder = attentions.Encoder( - hidden_channels, filter_channels, n_heads, n_layers, kernel_size, p_dropout - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, phone, pitch, lengths): - if pitch == None: - x = self.emb_phone(phone) - else: - x = self.emb_phone(phone) + self.emb_pitch(pitch) - x = x * math.sqrt(self.hidden_channels) # [b, t, h] - x = self.lrelu(x) - x = torch.transpose(x, 1, -1) # [b, h, t] - x_mask = torch.unsqueeze(commons.sequence_mask(lengths, x.size(2)), 1).to( 
- x.dtype - ) - x = self.encoder(x * x_mask, x_mask) - stats = self.proj(x) * x_mask - - m, logs = torch.split(stats, self.out_channels, dim=1) - return m, logs, x_mask - - -class ResidualCouplingBlock(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - n_flows=4, - gin_channels=0, - ): - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.n_flows = n_flows - self.gin_channels = gin_channels - - self.flows = nn.ModuleList() - for i in range(n_flows): - self.flows.append( - modules.ResidualCouplingLayer( - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - mean_only=True, - ) - ) - self.flows.append(modules.Flip()) - - def forward(self, x, x_mask, g=None, reverse=False): - if not reverse: - for flow in self.flows: - x, _ = flow(x, x_mask, g=g, reverse=reverse) - else: - for flow in reversed(self.flows): - x = flow(x, x_mask, g=g, reverse=reverse) - return x - - def remove_weight_norm(self): - for i in range(self.n_flows): - self.flows[i * 2].remove_weight_norm() - - -class PosteriorEncoder(nn.Module): - def __init__( - self, - in_channels, - out_channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - ): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - - self.pre = nn.Conv1d(in_channels, hidden_channels, 1) - self.enc = modules.WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=gin_channels, - ) - self.proj = nn.Conv1d(hidden_channels, out_channels * 2, 1) - - def forward(self, x, x_lengths, g=None): - x_mask = torch.unsqueeze(commons.sequence_mask(x_lengths, x.size(2)), 1).to( - x.dtype - ) - x = self.pre(x) * x_mask - x = self.enc(x, x_mask, g=g) - stats = self.proj(x) * x_mask - m, logs = torch.split(stats, self.out_channels, dim=1) - z = (m + torch.randn_like(m) * torch.exp(logs)) * x_mask - return z, m, logs, x_mask - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class Generator(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=0, - ): - super(Generator, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - 
self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - def forward(self, x, g=None): - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -class SineGen(torch.nn.Module): - """Definition of sine generator - SineGen(samp_rate, harmonic_num = 0, - sine_amp = 0.1, noise_std = 0.003, - voiced_threshold = 0, - flag_for_pulse=False) - samp_rate: sampling rate in Hz - harmonic_num: number of harmonic overtones (default 0) - sine_amp: amplitude of sine-wavefrom (default 0.1) - noise_std: std of Gaussian noise (default 0.003) - voiced_thoreshold: F0 threshold for U/V classification (default 0) - flag_for_pulse: this SinGen is used inside PulseGen (default False) - Note: when flag_for_pulse is True, the first time step of a voiced - segment is always sin(np.pi) or cos(0) - """ - - def __init__( - self, - samp_rate, - harmonic_num=0, - sine_amp=0.1, - noise_std=0.003, - voiced_threshold=0, - flag_for_pulse=False, - ): - super(SineGen, self).__init__() - self.sine_amp = sine_amp - self.noise_std = noise_std - self.harmonic_num = harmonic_num - self.dim = self.harmonic_num + 1 - self.sampling_rate = samp_rate - self.voiced_threshold = voiced_threshold - - def _f02uv(self, f0): - # generate uv signal - uv = torch.ones_like(f0) - uv = uv * (f0 > self.voiced_threshold) - return uv - - def forward(self, f0, upp): - """sine_tensor, uv = forward(f0) - input F0: tensor(batchsize=1, length, dim=1) - f0 for unvoiced steps should be 0 - output sine_tensor: tensor(batchsize=1, length, dim) - output uv: tensor(batchsize=1, length, 1) - """ - with torch.no_grad(): - f0 = f0[:, None].transpose(1, 2) - f0_buf = torch.zeros(f0.shape[0], f0.shape[1], self.dim, device=f0.device) - # fundamental component - f0_buf[:, :, 0] = f0[:, :, 0] - for idx in np.arange(self.harmonic_num): - f0_buf[:, :, idx + 1] = f0_buf[:, :, 0] * ( - idx + 2 - ) # idx + 2: the (idx+1)-th overtone, (idx+2)-th harmonic - rad_values = (f0_buf / self.sampling_rate) % 1 ###%1意味着n_har的乘积无法后处理优化 - rand_ini = torch.rand( - f0_buf.shape[0], f0_buf.shape[2], device=f0_buf.device - ) - rand_ini[:, 0] = 0 - rad_values[:, 0, :] = rad_values[:, 0, :] + rand_ini - tmp_over_one = torch.cumsum(rad_values, 1) # % 1 #####%1意味着后面的cumsum无法再优化 - tmp_over_one *= upp - tmp_over_one = F.interpolate( - tmp_over_one.transpose(2, 1), - scale_factor=upp, - mode="linear", - align_corners=True, - ).transpose(2, 1) - rad_values = F.interpolate( - rad_values.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose( - 2, 1 - ) ####### - tmp_over_one %= 1 - tmp_over_one_idx = (tmp_over_one[:, 1:, :] - tmp_over_one[:, :-1, :]) < 0 - cumsum_shift = torch.zeros_like(rad_values) - cumsum_shift[:, 1:, :] = tmp_over_one_idx * -1.0 - sine_waves = torch.sin( - torch.cumsum(rad_values + cumsum_shift, dim=1) * 2 * np.pi - ) - sine_waves = sine_waves * self.sine_amp - uv = self._f02uv(f0) - uv = F.interpolate( - uv.transpose(2, 1), scale_factor=upp, mode="nearest" - ).transpose(2, 1) - noise_amp = uv * self.noise_std + (1 - uv) * self.sine_amp / 
3 - noise = noise_amp * torch.randn_like(sine_waves) - sine_waves = sine_waves * uv + noise - return sine_waves, uv, noise - - -class SourceModuleHnNSF(torch.nn.Module): - """SourceModule for hn-nsf - SourceModule(sampling_rate, harmonic_num=0, sine_amp=0.1, - add_noise_std=0.003, voiced_threshod=0) - sampling_rate: sampling_rate in Hz - harmonic_num: number of harmonic above F0 (default: 0) - sine_amp: amplitude of sine source signal (default: 0.1) - add_noise_std: std of additive Gaussian noise (default: 0.003) - note that amplitude of noise in unvoiced is decided - by sine_amp - voiced_threshold: threhold to set U/V given F0 (default: 0) - Sine_source, noise_source = SourceModuleHnNSF(F0_sampled) - F0_sampled (batchsize, length, 1) - Sine_source (batchsize, length, 1) - noise_source (batchsize, length 1) - uv (batchsize, length, 1) - """ - - def __init__( - self, - sampling_rate, - harmonic_num=0, - sine_amp=0.1, - add_noise_std=0.003, - voiced_threshod=0, - is_half=True, - ): - super(SourceModuleHnNSF, self).__init__() - - self.sine_amp = sine_amp - self.noise_std = add_noise_std - self.is_half = is_half - # to produce sine waveforms - self.l_sin_gen = SineGen( - sampling_rate, harmonic_num, sine_amp, add_noise_std, voiced_threshod - ) - - # to merge source harmonics into a single excitation - self.l_linear = torch.nn.Linear(harmonic_num + 1, 1) - self.l_tanh = torch.nn.Tanh() - - def forward(self, x, upp=None): - sine_wavs, uv, _ = self.l_sin_gen(x, upp) - if self.is_half: - sine_wavs = sine_wavs.half() - sine_merge = self.l_tanh(self.l_linear(sine_wavs)) - return sine_merge, None, None # noise, uv - - -class GeneratorNSF(torch.nn.Module): - def __init__( - self, - initial_channel, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels, - sr, - is_half=False, - ): - super(GeneratorNSF, self).__init__() - self.num_kernels = len(resblock_kernel_sizes) - self.num_upsamples = len(upsample_rates) - - self.f0_upsamp = torch.nn.Upsample(scale_factor=np.prod(upsample_rates)) - self.m_source = SourceModuleHnNSF( - sampling_rate=sr, harmonic_num=0, is_half=is_half - ) - self.noise_convs = nn.ModuleList() - self.conv_pre = Conv1d( - initial_channel, upsample_initial_channel, 7, 1, padding=3 - ) - resblock = modules.ResBlock1 if resblock == "1" else modules.ResBlock2 - - self.ups = nn.ModuleList() - for i, (u, k) in enumerate(zip(upsample_rates, upsample_kernel_sizes)): - c_cur = upsample_initial_channel // (2 ** (i + 1)) - self.ups.append( - weight_norm( - ConvTranspose1d( - upsample_initial_channel // (2**i), - upsample_initial_channel // (2 ** (i + 1)), - k, - u, - padding=(k - u) // 2, - ) - ) - ) - if i + 1 < len(upsample_rates): - stride_f0 = np.prod(upsample_rates[i + 1 :]) - self.noise_convs.append( - Conv1d( - 1, - c_cur, - kernel_size=stride_f0 * 2, - stride=stride_f0, - padding=stride_f0 // 2, - ) - ) - else: - self.noise_convs.append(Conv1d(1, c_cur, kernel_size=1)) - - self.resblocks = nn.ModuleList() - for i in range(len(self.ups)): - ch = upsample_initial_channel // (2 ** (i + 1)) - for j, (k, d) in enumerate( - zip(resblock_kernel_sizes, resblock_dilation_sizes) - ): - self.resblocks.append(resblock(ch, k, d)) - - self.conv_post = Conv1d(ch, 1, 7, 1, padding=3, bias=False) - self.ups.apply(init_weights) - - if gin_channels != 0: - self.cond = nn.Conv1d(gin_channels, upsample_initial_channel, 1) - - self.upp = np.prod(upsample_rates) - - def forward(self, x, f0, g=None): - har_source, 
noi_source, uv = self.m_source(f0, self.upp) - har_source = har_source.transpose(1, 2) - x = self.conv_pre(x) - if g is not None: - x = x + self.cond(g) - - for i in range(self.num_upsamples): - x = F.leaky_relu(x, modules.LRELU_SLOPE) - x = self.ups[i](x) - x_source = self.noise_convs[i](har_source) - x = x + x_source - xs = None - for j in range(self.num_kernels): - if xs is None: - xs = self.resblocks[i * self.num_kernels + j](x) - else: - xs += self.resblocks[i * self.num_kernels + j](x) - x = xs / self.num_kernels - x = F.leaky_relu(x) - x = self.conv_post(x) - x = torch.tanh(x) - return x - - def remove_weight_norm(self): - for l in self.ups: - remove_weight_norm(l) - for l in self.resblocks: - l.remove_weight_norm() - - -sr2sr = { - "32k": 32000, - "40k": 40000, - "48k": 48000, -} - - -class SynthesizerTrnMs256NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = 
self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr, - **kwargs - ): - super().__init__() - if type(sr) == type("strr"): - sr = sr2sr[sr] - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - ) - self.dec = GeneratorNSF( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - sr=sr, - is_half=kwargs["is_half"], - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward( - self, phone, phone_lengths, pitch, pitchf, y, y_lengths, ds - ): # 这里ds是id,[bs,1] - # print(1,pitch.shape)#[bs,t] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - # print(-1,pitchf.shape,ids_slice,self.segment_size,self.hop_length,self.segment_size//self.hop_length) - pitchf = commons.slice_segments2(pitchf, ids_slice, self.segment_size) - # print(-2,pitchf.shape,z_slice.shape) - o = self.dec(z_slice, pitchf, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, pitch, nsff0, sid, rate=None): - g = 
self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, pitch, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - nsff0 = nsff0[:, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, nsff0, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs256NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder256( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class SynthesizerTrnMs768NSFsid_nono(nn.Module): - def __init__( - self, - spec_channels, - segment_size, - inter_channels, - hidden_channels, - 
filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - spk_embed_dim, - gin_channels, - sr=None, - **kwargs - ): - super().__init__() - self.spec_channels = spec_channels - self.inter_channels = inter_channels - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.resblock = resblock - self.resblock_kernel_sizes = resblock_kernel_sizes - self.resblock_dilation_sizes = resblock_dilation_sizes - self.upsample_rates = upsample_rates - self.upsample_initial_channel = upsample_initial_channel - self.upsample_kernel_sizes = upsample_kernel_sizes - self.segment_size = segment_size - self.gin_channels = gin_channels - # self.hop_length = hop_length# - self.spk_embed_dim = spk_embed_dim - self.enc_p = TextEncoder768( - inter_channels, - hidden_channels, - filter_channels, - n_heads, - n_layers, - kernel_size, - p_dropout, - f0=False, - ) - self.dec = Generator( - inter_channels, - resblock, - resblock_kernel_sizes, - resblock_dilation_sizes, - upsample_rates, - upsample_initial_channel, - upsample_kernel_sizes, - gin_channels=gin_channels, - ) - self.enc_q = PosteriorEncoder( - spec_channels, - inter_channels, - hidden_channels, - 5, - 1, - 16, - gin_channels=gin_channels, - ) - self.flow = ResidualCouplingBlock( - inter_channels, hidden_channels, 5, 1, 3, gin_channels=gin_channels - ) - self.emb_g = nn.Embedding(self.spk_embed_dim, gin_channels) - print("gin_channels:", gin_channels, "self.spk_embed_dim:", self.spk_embed_dim) - - def remove_weight_norm(self): - self.dec.remove_weight_norm() - self.flow.remove_weight_norm() - self.enc_q.remove_weight_norm() - - def forward(self, phone, phone_lengths, y, y_lengths, ds): # 这里ds是id,[bs,1] - g = self.emb_g(ds).unsqueeze(-1) # [b, 256, 1]##1是t,广播的 - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z, m_q, logs_q, y_mask = self.enc_q(y, y_lengths, g=g) - z_p = self.flow(z, y_mask, g=g) - z_slice, ids_slice = commons.rand_slice_segments( - z, y_lengths, self.segment_size - ) - o = self.dec(z_slice, g=g) - return o, ids_slice, x_mask, y_mask, (z, z_p, m_p, logs_p, m_q, logs_q) - - def infer(self, phone, phone_lengths, sid, rate=None): - g = self.emb_g(sid).unsqueeze(-1) - m_p, logs_p, x_mask = self.enc_p(phone, None, phone_lengths) - z_p = (m_p + torch.exp(logs_p) * torch.randn_like(m_p) * 0.66666) * x_mask - if rate: - head = int(z_p.shape[2] * rate) - z_p = z_p[:, :, -head:] - x_mask = x_mask[:, :, -head:] - z = self.flow(z_p, x_mask, g=g, reverse=True) - o = self.dec(z * x_mask, g=g) - return o, x_mask, (z, z_p, m_p, logs_p) - - -class MultiPeriodDiscriminator(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminator, self).__init__() - periods = [2, 3, 5, 7, 11, 17] - # periods = [3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - 
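            # Each sub-discriminator d returns a logit tensor (y_d_*) plus its intermediate
            # feature maps (fmap_*), computed once for the real audio y and once for the
            # generated audio y_hat. The four lists assembled below are what the training
            # code typically consumes: logits for the adversarial loss, feature maps for
            # the feature-matching loss.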
y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class MultiPeriodDiscriminatorV2(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(MultiPeriodDiscriminatorV2, self).__init__() - # periods = [2, 3, 5, 7, 11, 17] - periods = [2, 3, 5, 7, 11, 17, 23, 37] - - discs = [DiscriminatorS(use_spectral_norm=use_spectral_norm)] - discs = discs + [ - DiscriminatorP(i, use_spectral_norm=use_spectral_norm) for i in periods - ] - self.discriminators = nn.ModuleList(discs) - - def forward(self, y, y_hat): - y_d_rs = [] # - y_d_gs = [] - fmap_rs = [] - fmap_gs = [] - for i, d in enumerate(self.discriminators): - y_d_r, fmap_r = d(y) - y_d_g, fmap_g = d(y_hat) - # for j in range(len(fmap_r)): - # print(i,j,y.shape,y_hat.shape,fmap_r[j].shape,fmap_g[j].shape) - y_d_rs.append(y_d_r) - y_d_gs.append(y_d_g) - fmap_rs.append(fmap_r) - fmap_gs.append(fmap_g) - - return y_d_rs, y_d_gs, fmap_rs, fmap_gs - - -class DiscriminatorS(torch.nn.Module): - def __init__(self, use_spectral_norm=False): - super(DiscriminatorS, self).__init__() - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f(Conv1d(1, 16, 15, 1, padding=7)), - norm_f(Conv1d(16, 64, 41, 4, groups=4, padding=20)), - norm_f(Conv1d(64, 256, 41, 4, groups=16, padding=20)), - norm_f(Conv1d(256, 1024, 41, 4, groups=64, padding=20)), - norm_f(Conv1d(1024, 1024, 41, 4, groups=256, padding=20)), - norm_f(Conv1d(1024, 1024, 5, 1, padding=2)), - ] - ) - self.conv_post = norm_f(Conv1d(1024, 1, 3, 1, padding=1)) - - def forward(self, x): - fmap = [] - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap - - -class DiscriminatorP(torch.nn.Module): - def __init__(self, period, kernel_size=5, stride=3, use_spectral_norm=False): - super(DiscriminatorP, self).__init__() - self.period = period - self.use_spectral_norm = use_spectral_norm - norm_f = weight_norm if use_spectral_norm == False else spectral_norm - self.convs = nn.ModuleList( - [ - norm_f( - Conv2d( - 1, - 32, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 32, - 128, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 128, - 512, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 512, - 1024, - (kernel_size, 1), - (stride, 1), - padding=(get_padding(kernel_size, 1), 0), - ) - ), - norm_f( - Conv2d( - 1024, - 1024, - (kernel_size, 1), - 1, - padding=(get_padding(kernel_size, 1), 0), - ) - ), - ] - ) - self.conv_post = norm_f(Conv2d(1024, 1, (3, 1), 1, padding=(1, 0))) - - def forward(self, x): - fmap = [] - - # 1d to 2d - b, c, t = x.shape - if t % self.period != 0: # pad first - n_pad = self.period - (t % self.period) - x = F.pad(x, (0, n_pad), "reflect") - t = t + n_pad - x = x.view(b, c, t // self.period, self.period) - - for l in self.convs: - x = l(x) - x = F.leaky_relu(x, modules.LRELU_SLOPE) - fmap.append(x) - x = self.conv_post(x) - fmap.append(x) - x = torch.flatten(x, 1, -1) - - return x, fmap diff --git a/spaces/Theivaprakasham/yolov6/yolov6/core/inferer.py b/spaces/Theivaprakasham/yolov6/yolov6/core/inferer.py deleted file mode 100644 index d4aee34440a0fa798da02476393a6df648598da3..0000000000000000000000000000000000000000 
--- a/spaces/Theivaprakasham/yolov6/yolov6/core/inferer.py +++ /dev/null @@ -1,196 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding:utf-8 -*- -import os -import os.path as osp -import math - -from tqdm import tqdm - -import numpy as np -import cv2 -import torch -from PIL import ImageFont - -from yolov6.utils.events import LOGGER, load_yaml - -from yolov6.layers.common import DetectBackend -from yolov6.data.data_augment import letterbox -from yolov6.utils.nms import non_max_suppression - - -class Inferer: - def __init__(self, source, weights, device, yaml, img_size, half): - import glob - from yolov6.data.datasets import IMG_FORMATS - - self.__dict__.update(locals()) - - # Init model - self.device = device - self.img_size = img_size - cuda = self.device != 'cpu' and torch.cuda.is_available() - self.device = torch.device('cuda:0' if cuda else 'cpu') - self.model = DetectBackend(weights, device=self.device) - self.stride = self.model.stride - self.class_names = load_yaml(yaml)['names'] - self.img_size = self.check_img_size(self.img_size, s=self.stride) # check image size - - # Half precision - if half & (self.device.type != 'cpu'): - self.model.model.half() - else: - self.model.model.float() - half = False - - if self.device.type != 'cpu': - self.model(torch.zeros(1, 3, *self.img_size).to(self.device).type_as(next(self.model.model.parameters()))) # warmup - - # Load data - if os.path.isdir(source): - img_paths = sorted(glob.glob(os.path.join(source, '*.*'))) # dir - elif os.path.isfile(source): - img_paths = [source] # files - else: - raise Exception(f'Invalid path: {source}') - self.img_paths = [img_path for img_path in img_paths if img_path.split('.')[-1].lower() in IMG_FORMATS] - - def infer(self, conf_thres, iou_thres, classes, agnostic_nms, max_det, save_dir, save_txt, save_img, hide_labels, hide_conf): - ''' Model Inference and results visualization ''' - - for img_path in tqdm(self.img_paths): - img, img_src = self.precess_image(img_path, self.img_size, self.stride, self.half) - img = img.to(self.device) - if len(img.shape) == 3: - img = img[None] - # expand for batch dim - pred_results = self.model(img) - det = non_max_suppression(pred_results, conf_thres, iou_thres, classes, agnostic_nms, max_det=max_det)[0] - - save_path = osp.join(save_dir, osp.basename(img_path)) # im.jpg - txt_path = osp.join(save_dir, 'labels', osp.basename(img_path).split('.')[0]) - - gn = torch.tensor(img_src.shape)[[1, 0, 1, 0]] # normalization gain whwh - img_ori = img_src - - # check image and font - assert img_ori.data.contiguous, 'Image needs to be contiguous. Please apply to input images with np.ascontiguousarray(im).' 
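            # det holds one row per detection, [x1, y1, x2, y2, conf, cls], expressed in the
            # letterboxed inference resolution; rescale() below undoes that padding and
            # scaling so the boxes can be drawn on, and saved with, the original image img_ori.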
- self.font_check() - - if len(det): - det[:, :4] = self.rescale(img.shape[2:], det[:, :4], img_src.shape).round() - - for *xyxy, conf, cls in reversed(det): - if save_txt: # Write to file - xywh = (self.box_convert(torch.tensor(xyxy).view(1, 4)) / gn).view(-1).tolist() # normalized xywh - line = (cls, *xywh, conf) - with open(txt_path + '.txt', 'a') as f: - f.write(('%g ' * len(line)).rstrip() % line + '\n') - - if save_img: - class_num = int(cls) # integer class - label = None if hide_labels else (self.class_names[class_num] if hide_conf else f'{self.class_names[class_num]} {conf:.2f}') - - self.plot_box_and_label(img_ori, max(round(sum(img_ori.shape) / 2 * 0.003), 2), xyxy, label, color=self.generate_colors(class_num, True)) - - img_src = np.asarray(img_ori) - - # Save results (image with detections) - if save_img: - cv2.imwrite(save_path, img_src) - - @staticmethod - def precess_image(path, img_size, stride, half): - '''Process image before image inference.''' - try: - img_src = cv2.imread(path) - assert img_src is not None, f'Invalid image: {path}' - except Exception as e: - LOGGER.Warning(e) - image = letterbox(img_src, img_size, stride=stride)[0] - - # Convert - image = image.transpose((2, 0, 1))[::-1] # HWC to CHW, BGR to RGB - image = torch.from_numpy(np.ascontiguousarray(image)) - image = image.half() if half else image.float() # uint8 to fp16/32 - image /= 255 # 0 - 255 to 0.0 - 1.0 - - return image, img_src - - @staticmethod - def rescale(ori_shape, boxes, target_shape): - '''Rescale the output to the original image shape''' - ratio = min(ori_shape[0] / target_shape[0], ori_shape[1] / target_shape[1]) - padding = (ori_shape[1] - target_shape[1] * ratio) / 2, (ori_shape[0] - target_shape[0] * ratio) / 2 - - boxes[:, [0, 2]] -= padding[0] - boxes[:, [1, 3]] -= padding[1] - boxes[:, :4] /= ratio - - boxes[:, 0].clamp_(0, target_shape[1]) # x1 - boxes[:, 1].clamp_(0, target_shape[0]) # y1 - boxes[:, 2].clamp_(0, target_shape[1]) # x2 - boxes[:, 3].clamp_(0, target_shape[0]) # y2 - - return boxes - - def check_img_size(self, img_size, s=32, floor=0): - """Make sure image size is a multiple of stride s in each dimension, and return a new shape list of image.""" - if isinstance(img_size, int): # integer i.e. img_size=640 - new_size = max(self.make_divisible(img_size, int(s)), floor) - elif isinstance(img_size, list): # list i.e. img_size=[640, 480] - new_size = [max(self.make_divisible(x, int(s)), floor) for x in img_size] - else: - raise Exception(f"Unsupported type of img_size: {type(img_size)}") - - if new_size != img_size: - print(f'WARNING: --img-size {img_size} must be multiple of max stride {s}, updating to {new_size}') - return new_size if isinstance(img_size,list) else [new_size]*2 - - def make_divisible(self, x, divisor): - # Upward revision the value x to make it evenly divisible by the divisor. 
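        # Illustrative example (assumed values): make_divisible(638, 32) evaluates to
        # math.ceil(638 / 32) * 32 == 20 * 32 == 640, so check_img_size(638, s=32) above
        # would print the warning and return [640, 640].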
- return math.ceil(x / divisor) * divisor - - @staticmethod - def plot_box_and_label(image, lw, box, label='', color=(128, 128, 128), txt_color=(255, 255, 255)): - # Add one xyxy box to image with label - p1, p2 = (int(box[0]), int(box[1])), (int(box[2]), int(box[3])) - cv2.rectangle(image, p1, p2, color, thickness=lw, lineType=cv2.LINE_AA) - if label: - tf = max(lw - 1, 1) # font thickness - w, h = cv2.getTextSize(label, 0, fontScale=lw / 3, thickness=tf)[0] # text width, height - outside = p1[1] - h - 3 >= 0 # label fits outside box - p2 = p1[0] + w, p1[1] - h - 3 if outside else p1[1] + h + 3 - cv2.rectangle(image, p1, p2, color, -1, cv2.LINE_AA) # filled - cv2.putText(image, label, (p1[0], p1[1] - 2 if outside else p1[1] + h + 2), 0, lw / 3, txt_color, - thickness=tf, lineType=cv2.LINE_AA) - - @staticmethod - def font_check(font='./yolov6/utils/Arial.ttf', size=10): - # Return a PIL TrueType Font, downloading to CONFIG_DIR if necessary - assert osp.exists(font), f'font path not exists: {font}' - try: - return ImageFont.truetype(str(font) if font.exists() else font.name, size) - except Exception as e: # download if missing - return ImageFont.truetype(str(font), size) - - @staticmethod - def box_convert(x): - # Convert boxes with shape [n, 4] from [x1, y1, x2, y2] to [x, y, w, h] where x1y1=top-left, x2y2=bottom-right - y = x.clone() if isinstance(x, torch.Tensor) else np.copy(x) - y[:, 0] = (x[:, 0] + x[:, 2]) / 2 # x center - y[:, 1] = (x[:, 1] + x[:, 3]) / 2 # y center - y[:, 2] = x[:, 2] - x[:, 0] # width - y[:, 3] = x[:, 3] - x[:, 1] # height - return y - - @staticmethod - def generate_colors(i, bgr=False): - hex = ('FF3838', 'FF9D97', 'FF701F', 'FFB21D', 'CFD231', '48F90A', '92CC17', '3DDB86', '1A9334', '00D4BB', - '2C99A8', '00C2FF', '344593', '6473FF', '0018EC', '8438FF', '520085', 'CB38FF', 'FF95C8', 'FF37C7') - palette = [] - for iter in hex: - h = '#' + iter - palette.append(tuple(int(h[1 + i:1 + i + 2], 16) for i in (0, 2, 4))) - num = len(palette) - color = palette[int(i) % num] - return (color[2], color[1], color[0]) if bgr else color diff --git a/spaces/Tlaloc/Aerial_Unet/app.py b/spaces/Tlaloc/Aerial_Unet/app.py deleted file mode 100644 index f0fef999e43df775de0f0273fa7f9191c94f1e66..0000000000000000000000000000000000000000 --- a/spaces/Tlaloc/Aerial_Unet/app.py +++ /dev/null @@ -1,84 +0,0 @@ -import gradio as gr -from PIL import Image -import numpy as np -import segmentation_models_pytorch as smp -import torch -from torchvision import transforms as T -import os -import cv2 -import pandas as pd -import albumentations as album - -class ExperimentDataset(torch.utils.data.Dataset): - def __init__(self, image, augment=None, preprocess=None): - self.image = image - self.augment = augment - self.preprocess = preprocess - - def __getitem__(self, i): - image = cv2.cvtColor(self.image, cv2.COLOR_BGR2RGB) - if self.augment: - sample = self.augment(image=image) - image= sample['image'] - if self.preprocess: - sample = self.preprocess(image=image) - image = sample['image'] - return image - -def color_convert(image,labelvals): - colorcodes = np.array(labelvals) - ccs = colorcodes[image.astype(int)] - return ccs - -def crop_image(image, dims=[1500,1500,3]): - target_size = dims[0] - image_size = len(image) - padding = (image_size - target_size) // 2 - - return image[ - padding:image_size - padding, - padding:image_size - padding, - :,] - -def to_tensor(x,**kwargs): - return x.transpose(2,0,1).astype("float32") - -def augment_image(): - transform = [album.PadIfNeeded(min_height=1536, 
min_width=1536, always_apply=True, border_mode=0)] - return album.Compose(transform) - -def preprocessing(preprocessing_fn=None): - transform = [] - if preprocessing_fn: - transform.append(album.Lambda(image=preprocessing_fn)) - transform.append(album.Lambda(image=to_tensor, mask=to_tensor)) - return album.Compose(transform) - -def segment(image): - device = torch.device("cuda" if torch.cuda.is_available() else "cpu") - - best_model = torch.load('best_model_upp.pth', map_location=device) - - classlabeldict = pd.read_csv("label_class_dict.csv") - clasnames = classlabeldict['name'].tolist() - class_rgb_values = classlabeldict[['r','g','b']].values.tolist() - select_class_indices = [clasnames.index(cls.lower()) for cls in clasnames] - select_class_rgb_values = np.array(class_rgb_values)[select_class_indices] - - encoder = "resnet34" - encoder_weights = "imagenet" - preprocess_func = smp.encoders.get_preprocessing_fn(encoder, encoder_weights) - - exp_data = ExperimentDataset(image, augment = augment_image(), - preprocess = preprocessing(preprocess_func)) - - test_img = exp_data[0] - x_tensor = torch.from_numpy(test_img).to(device).unsqueeze(0) - pred_mask = best_model(x_tensor) - pred_mask = pred_mask.detach().squeeze().cpu().numpy() - pred_mask = np.transpose(pred_mask,(1,2,0)) - pred_mask = crop_image(color_convert(np.argmax(pred_mask,axis=-1), select_class_rgb_values)) - - return pred_mask - -iface = gr.Interface(fn=segment, inputs="image", outputs="image").launch() \ No newline at end of file diff --git a/spaces/Worlandil/ChatGPT4/app.py b/spaces/Worlandil/ChatGPT4/app.py deleted file mode 100644 index 7e09e57ef928fd2451fd0ed1295d0994ca75d026..0000000000000000000000000000000000000000 --- a/spaces/Worlandil/ChatGPT4/app.py +++ /dev/null @@ -1,193 +0,0 @@ -import gradio as gr -import os -import json -import requests - -#Streaming endpoint -API_URL = "https://api.openai.com/v1/chat/completions" #os.getenv("API_URL") + "/generate_stream" - -#Huggingface provided GPT4 OpenAI API Key -OPENAI_API_KEY = os.getenv("OPENAI_API_KEY") - -#Inferenec function -def predict(system_msg, inputs, top_p, temperature, chat_counter, chatbot=[], history=[]): - - headers = { - "Content-Type": "application/json", - "Authorization": f"Bearer {OPENAI_API_KEY}" - } - print(f"system message is ^^ {system_msg}") - if system_msg.strip() == '': - initial_message = [{"role": "user", "content": f"{inputs}"},] - multi_turn_message = [] - else: - initial_message= [{"role": "system", "content": system_msg}, - {"role": "user", "content": f"{inputs}"},] - multi_turn_message = [{"role": "system", "content": system_msg},] - - if chat_counter == 0 : - payload = { - "model": "gpt-4", - "messages": initial_message , - "temperature" : 1.0, - "top_p":1.0, - "n" : 1, - "stream": True, - "presence_penalty":0, - "frequency_penalty":0, - } - print(f"chat_counter - {chat_counter}") - else: #if chat_counter != 0 : - messages=multi_turn_message # Of the type of - [{"role": "system", "content": system_msg},] - for data in chatbot: - user = {} - user["role"] = "user" - user["content"] = data[0] - assistant = {} - assistant["role"] = "assistant" - assistant["content"] = data[1] - messages.append(user) - messages.append(assistant) - temp = {} - temp["role"] = "user" - temp["content"] = inputs - messages.append(temp) - #messages - payload = { - "model": "gpt-4", - "messages": messages, # Of the type of [{"role": "user", "content": f"{inputs}"}], - "temperature" : temperature, #1.0, - "top_p": top_p, #1.0, - "n" : 1, - "stream": True, - 
"presence_penalty":0, - "frequency_penalty":0,} - - chat_counter+=1 - - history.append(inputs) - print(f"Logging : payload is - {payload}") - # make a POST request to the API endpoint using the requests.post method, passing in stream=True - response = requests.post(API_URL, headers=headers, json=payload, stream=True) - print(f"Logging : response code - {response}") - token_counter = 0 - partial_words = "" - - counter=0 - for chunk in response.iter_lines(): - #Skipping first chunk - if counter == 0: - counter+=1 - continue - # check whether each line is non-empty - if chunk.decode() : - chunk = chunk.decode() - # decode each line as response data is in bytes - if len(chunk) > 12 and "content" in json.loads(chunk[6:])['choices'][0]['delta']: - partial_words = partial_words + json.loads(chunk[6:])['choices'][0]["delta"]["content"] - if token_counter == 0: - history.append(" " + partial_words) - else: - history[-1] = partial_words - chat = [(history[i], history[i + 1]) for i in range(0, len(history) - 1, 2) ] # convert to tuples of list - token_counter+=1 - yield chat, history, chat_counter, response # resembles {chatbot: chat, state: history} - -#Resetting to blank -def reset_textbox(): - return gr.update(value='') - -#to set a component as visible=False -def set_visible_false(): - return gr.update(visible=False) - -#to set a component as visible=True -def set_visible_true(): - return gr.update(visible=True) - -title = """

    🔥GPT4 with ChatCompletions API +🚀Gradio-Streaming

    """ - -#display message for themes feature -theme_addon_msg = """
    🌟 Discover Gradio Themes with this Demo, featuring v3.22.0! Gradio v3.23.0 also enables seamless Theme sharing. You can develop or modify a theme, and send it to the hub using simple theme.push_to_hub(). -
    🏆Participate in Gradio's Theme Building Hackathon to exhibit your creative flair and win fabulous rewards! Join here - Gradio-Themes-Party🎨 🏆
    -""" - -#Using info to add additional information about System message in GPT4 -system_msg_info = """A conversation could begin with a system message to gently instruct the assistant. -System message helps set the behavior of the AI Assistant. For example, the assistant could be instructed with 'You are a helpful assistant.'""" - -#Modifying existing Gradio Theme -theme = gr.themes.Soft(primary_hue="zinc", secondary_hue="green", neutral_hue="green", - text_size=gr.themes.sizes.text_lg) - -with gr.Blocks(css = """#col_container { margin-left: auto; margin-right: auto;} #chatbot {height: 520px; overflow: auto;}""", - theme=theme) as demo: - gr.HTML(title) - gr.HTML("""

    🔥This Huggingface Gradio Demo provides you full access to GPT4 API (4096 token limit). 🎉🥳🎉You don't need any OPENAI API key🙌

    """) - gr.HTML(theme_addon_msg) - gr.HTML('''
    Duplicate the Space and run securely with your OpenAI API Key
    ''') - - with gr.Column(elem_id = "col_container"): - #GPT4 API Key is provided by Huggingface - with gr.Accordion(label="System message:", open=False): - system_msg = gr.Textbox(label="Instruct the AI Assistant to set its beaviour", info = system_msg_info, value="") - accordion_msg = gr.HTML(value="🚧 To set System message you will have to refresh the app", visible=False) - chatbot = gr.Chatbot(label='GPT4', elem_id="chatbot") - inputs = gr.Textbox(placeholder= "Hi there!", label= "Type an input and press Enter") - state = gr.State([]) - with gr.Row(): - with gr.Column(scale=7): - b1 = gr.Button().style(full_width=True) - with gr.Column(scale=3): - server_status_code = gr.Textbox(label="Status code from OpenAI server", ) - - #top_p, temperature - with gr.Accordion("Parameters", open=False): - top_p = gr.Slider( minimum=-0, maximum=1.0, value=1.0, step=0.05, interactive=True, label="Top-p (nucleus sampling)",) - temperature = gr.Slider( minimum=-0, maximum=5.0, value=1.0, step=0.1, interactive=True, label="Temperature",) - chat_counter = gr.Number(value=0, visible=False, precision=0) - - #Event handling - inputs.submit( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - b1.click( predict, [system_msg, inputs, top_p, temperature, chat_counter, chatbot, state], [chatbot, state, chat_counter, server_status_code],) #openai_api_key - - inputs.submit(set_visible_false, [], [system_msg]) - b1.click(set_visible_false, [], [system_msg]) - inputs.submit(set_visible_true, [], [accordion_msg]) - b1.click(set_visible_true, [], [accordion_msg]) - - b1.click(reset_textbox, [], [inputs]) - inputs.submit(reset_textbox, [], [inputs]) - - #Examples - with gr.Accordion(label="Examples for System message:", open=False): - gr.Examples( - examples = [["""You are an AI programming assistant. - - - Follow the user's requirements carefully and to the letter. - - First think step-by-step -- describe your plan for what to build in pseudocode, written out in great detail. - - Then output the code in a single code block. - - Minimize any other prose."""], ["""You are ComedianGPT who is a helpful assistant. 
You answer everything with a joke and witty replies."""], - ["You are ChefGPT, a helpful assistant who answers questions with culinary expertise and a pinch of humor."], - ["You are FitnessGuruGPT, a fitness expert who shares workout tips and motivation with a playful twist."], - ["You are SciFiGPT, an AI assistant who discusses science fiction topics with a blend of knowledge and wit."], - ["You are PhilosopherGPT, a thoughtful assistant who responds to inquiries with philosophical insights and a touch of humor."], - ["You are EcoWarriorGPT, a helpful assistant who shares environment-friendly advice with a lighthearted approach."], - ["You are MusicMaestroGPT, a knowledgeable AI who discusses music and its history with a mix of facts and playful banter."], - ["You are SportsFanGPT, an enthusiastic assistant who talks about sports and shares amusing anecdotes."], - ["You are TechWhizGPT, a tech-savvy AI who can help users troubleshoot issues and answer questions with a dash of humor."], - ["You are FashionistaGPT, an AI fashion expert who shares style advice and trends with a sprinkle of wit."], - ["You are ArtConnoisseurGPT, an AI assistant who discusses art and its history with a blend of knowledge and playful commentary."], - ["You are a helpful assistant that provides detailed and accurate information."], - ["You are an assistant that speaks like Shakespeare."], - ["You are a friendly assistant who uses casual language and humor."], - ["You are a financial advisor who gives expert advice on investments and budgeting."], - ["You are a health and fitness expert who provides advice on nutrition and exercise."], - ["You are a travel consultant who offers recommendations for destinations, accommodations, and attractions."], - ["You are a movie critic who shares insightful opinions on films and their themes."], - ["You are a history enthusiast who loves to discuss historical events and figures."], - ["You are a tech-savvy assistant who can help users troubleshoot issues and answer questions about gadgets and software."], - ["You are an AI poet who can compose creative and evocative poems on any given topic."],], - inputs = system_msg,) - -demo.queue(max_size=99, concurrency_count=20).launch(debug=True) \ No newline at end of file diff --git a/spaces/XAI/VisualCorrespondenceHumanStudy/helper.py b/spaces/XAI/VisualCorrespondenceHumanStudy/helper.py deleted file mode 100644 index 3ea013c23ec1e912876151150fd8e837e2d97cee..0000000000000000000000000000000000000000 --- a/spaces/XAI/VisualCorrespondenceHumanStudy/helper.py +++ /dev/null @@ -1,23 +0,0 @@ -import os - -def get_label_for_query(image_url, model_name): - fourway_label = image_url.split('/')[-2] - - if fourway_label=='both_correct': - return 'Correct' - - if fourway_label=='both_wrong': - return 'Wrong' - - if fourway_label == 'chm_correct_knn_incorrect' and model_name == 'CHM': - return 'Correct' - elif fourway_label == 'knn_correct_chm_incorrect' and model_name == 'KNN': - return 'Correct' - - return 'Wrong' - -def get_category(image_url): - return image_url.split('/')[-2] - -def translate_winds_to_names(winds): - return [folder_to_name[x] for x in winds] \ No newline at end of file diff --git a/spaces/Xenova/next-server-example-app/src/app/classify/pipeline.js b/spaces/Xenova/next-server-example-app/src/app/classify/pipeline.js deleted file mode 100644 index 44e73e7eb31e42763c50ea1e098390f80a5485bc..0000000000000000000000000000000000000000 --- a/spaces/Xenova/next-server-example-app/src/app/classify/pipeline.js +++ /dev/null @@ -1,30 
+0,0 @@ -import { pipeline } from "@xenova/transformers"; - -// Use the Singleton pattern to enable lazy construction of the pipeline. -// NOTE: We wrap the class in a function to prevent code duplication (see below). -const P = () => class PipelineSingleton { - static task = 'text-classification'; - static model = 'Xenova/distilbert-base-uncased-finetuned-sst-2-english'; - static instance = null; - - static async getInstance(progress_callback = null) { - if (this.instance === null) { - this.instance = pipeline(this.task, this.model, { progress_callback }); - } - return this.instance; - } -} - -let PipelineSingleton; -if (process.env.NODE_ENV !== 'production') { - // When running in development mode, attach the pipeline to the - // global object so that it's preserved between hot reloads. - // For more information, see https://vercel.com/guides/nextjs-prisma-postgres - if (!global.PipelineSingleton) { - global.PipelineSingleton = P(); - } - PipelineSingleton = global.PipelineSingleton; -} else { - PipelineSingleton = P(); -} -export default PipelineSingleton; diff --git a/spaces/Xenova/the-tokenizer-playground/index.html b/spaces/Xenova/the-tokenizer-playground/index.html deleted file mode 100644 index 29aa377e863040b3d5d2b597f58e60cf8a0471c8..0000000000000000000000000000000000000000 --- a/spaces/Xenova/the-tokenizer-playground/index.html +++ /dev/null @@ -1,18 +0,0 @@ - - - - - - - - The Tokenizer Playground - - - - - -
    - - - - \ No newline at end of file diff --git a/spaces/Xlinelabs/togethercomputer-GPT-NeoXT-Chat-Base-20B/README.md b/spaces/Xlinelabs/togethercomputer-GPT-NeoXT-Chat-Base-20B/README.md deleted file mode 100644 index 5ba433be1331c03e256a73b4aa5272eba7295d4e..0000000000000000000000000000000000000000 --- a/spaces/Xlinelabs/togethercomputer-GPT-NeoXT-Chat-Base-20B/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Togethercomputer GPT NeoXT Chat Base 20B -emoji: ⚡ -colorFrom: purple -colorTo: blue -sdk: gradio -sdk_version: 3.20.1 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/XzJosh/Bella-Bert-VITS2/text/__init__.py b/spaces/XzJosh/Bella-Bert-VITS2/text/__init__.py deleted file mode 100644 index 7566bf351ca9b95af9cdc6d729557a9da083800f..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Bella-Bert-VITS2/text/__init__.py +++ /dev/null @@ -1,28 +0,0 @@ -from text.symbols import * - - -_symbol_to_id = {s: i for i, s in enumerate(symbols)} - -def cleaned_text_to_sequence(cleaned_text, tones, language): - '''Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - ''' - phones = [_symbol_to_id[symbol] for symbol in cleaned_text] - tone_start = language_tone_start_map[language] - tones = [i + tone_start for i in tones] - lang_id = language_id_map[language] - lang_ids = [lang_id for i in phones] - return phones, tones, lang_ids - -def get_bert(norm_text, word2ph, language): - from .chinese_bert import get_bert_feature as zh_bert - from .english_bert_mock import get_bert_feature as en_bert - lang_bert_func_map = { - 'ZH': zh_bert, - 'EN': en_bert - } - bert = lang_bert_func_map[language](norm_text, word2ph) - return bert diff --git a/spaces/XzJosh/Jiaran-Bert-VITS2/text/japanese.py b/spaces/XzJosh/Jiaran-Bert-VITS2/text/japanese.py deleted file mode 100644 index ddedafa0c5b7986068dc6c91637a86febc3923a9..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/Jiaran-Bert-VITS2/text/japanese.py +++ /dev/null @@ -1,104 +0,0 @@ -# modified from https://github.com/CjangCjengh/vits/blob/main/text/japanese.py -import re -import sys - -import pyopenjtalk - -from text import symbols - -# Regular expression matching Japanese without punctuation marks: -_japanese_characters = re.compile( - r'[A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# Regular expression matching non-Japanese characters or punctuation marks: -_japanese_marks = re.compile( - r'[^A-Za-z\d\u3005\u3040-\u30ff\u4e00-\u9fff\uff11-\uff19\uff21-\uff3a\uff41-\uff5a\uff66-\uff9d]') - -# List of (symbol, Japanese) pairs for marks: -_symbols_to_japanese = [(re.compile('%s' % x[0]), x[1]) for x in [ - ('%', 'パーセント') -]] - - -# List of (consonant, sokuon) pairs: -_real_sokuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'Q([↑↓]*[kg])', r'k#\1'), - (r'Q([↑↓]*[tdjʧ])', r't#\1'), - (r'Q([↑↓]*[sʃ])', r's\1'), - (r'Q([↑↓]*[pb])', r'p#\1') -]] - -# List of (consonant, hatsuon) pairs: -_real_hatsuon = [(re.compile('%s' % x[0]), x[1]) for x in [ - (r'N([↑↓]*[pbm])', r'm\1'), - (r'N([↑↓]*[ʧʥj])', r'n^\1'), - (r'N([↑↓]*[tdn])', r'n\1'), - (r'N([↑↓]*[kg])', r'ŋ\1') -]] - - - -def post_replace_ph(ph): - rep_map = { - ':': ',', - ';': ',', - ',': ',', - '。': '.', - '!': '!', - '?': '?', - '\n': '.', - "·": ",", - '、': ",", - 
'...': '…', - 'v': "V" - } - if ph in rep_map.keys(): - ph = rep_map[ph] - if ph in symbols: - return ph - if ph not in symbols: - ph = 'UNK' - return ph - -def symbols_to_japanese(text): - for regex, replacement in _symbols_to_japanese: - text = re.sub(regex, replacement, text) - return text - - -def preprocess_jap(text): - '''Reference https://r9y9.github.io/ttslearn/latest/notebooks/ch10_Recipe-Tacotron.html''' - text = symbols_to_japanese(text) - sentences = re.split(_japanese_marks, text) - marks = re.findall(_japanese_marks, text) - text = [] - for i, sentence in enumerate(sentences): - if re.match(_japanese_characters, sentence): - p = pyopenjtalk.g2p(sentence) - text += p.split(" ") - - if i < len(marks): - text += [marks[i].replace(' ', '')] - return text - -def text_normalize(text): - # todo: jap text normalize - return text - -def g2p(norm_text): - phones = preprocess_jap(norm_text) - phones = [post_replace_ph(i) for i in phones] - # todo: implement tones and word2ph - tones = [0 for i in phones] - word2ph = [1 for i in phones] - return phones, tones, word2ph - - -if __name__ == '__main__': - for line in open("../../../Downloads/transcript_utf8.txt").readlines(): - text = line.split(":")[1] - phones, tones, word2ph = g2p(text) - for p in phones: - if p == "z": - print(text, phones) - sys.exit(0) diff --git a/spaces/XzJosh/XingTong-Bert-VITS2/preprocess_text.py b/spaces/XzJosh/XingTong-Bert-VITS2/preprocess_text.py deleted file mode 100644 index 5eb0f3b9e929fcbe91dcbeb653391227a2518a15..0000000000000000000000000000000000000000 --- a/spaces/XzJosh/XingTong-Bert-VITS2/preprocess_text.py +++ /dev/null @@ -1,64 +0,0 @@ -import json -from random import shuffle - -import tqdm -from text.cleaner import clean_text -from collections import defaultdict -stage = [1,2,3] - -transcription_path = 'filelists/genshin.list' -train_path = 'filelists/train.list' -val_path = 'filelists/val.list' -config_path = "configs/config.json" -val_per_spk = 4 -max_val_total = 8 - -if 1 in stage: - with open( transcription_path+'.cleaned', 'w', encoding='utf-8') as f: - for line in tqdm.tqdm(open(transcription_path, encoding='utf-8').readlines()): - try: - utt, spk, language, text = line.strip().split('|') - norm_text, phones, tones, word2ph = clean_text(text, language) - f.write('{}|{}|{}|{}|{}|{}|{}\n'.format(utt, spk, language, norm_text, ' '.join(phones), - " ".join([str(i) for i in tones]), - " ".join([str(i) for i in word2ph]))) - except Exception as error : - print("err!", utt, error) - -if 2 in stage: - spk_utt_map = defaultdict(list) - spk_id_map = {} - current_sid = 0 - - with open( transcription_path+'.cleaned', encoding='utf-8') as f: - for line in f.readlines(): - utt, spk, language, text, phones, tones, word2ph = line.strip().split('|') - spk_utt_map[spk].append(line) - if spk not in spk_id_map.keys(): - spk_id_map[spk] = current_sid - current_sid += 1 - train_list = [] - val_list = [] - - for spk, utts in spk_utt_map.items(): - shuffle(utts) - val_list+=utts[:val_per_spk] - train_list+=utts[val_per_spk:] - if len(val_list) > max_val_total: - train_list+=val_list[max_val_total:] - val_list = val_list[:max_val_total] - - with open( train_path,"w", encoding='utf-8') as f: - for line in train_list: - f.write(line) - - with open(val_path, "w", encoding='utf-8') as f: - for line in val_list: - f.write(line) - -if 3 in stage: - assert 2 in stage - config = json.load(open(config_path, encoding='utf-8')) - config["data"]['spk2id'] = spk_id_map - with open(config_path, 'w', encoding='utf-8') as f: - 
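        # At this point config["data"]["spk2id"] holds the speaker-name -> integer-id map
        # built in stage 2, e.g. {"speaker_a": 0, "speaker_b": 1} (keys shown here are only
        # assumed examples); the next line persists it back into the config file.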
json.dump(config, f, indent=2, ensure_ascii=False) diff --git a/spaces/YONG627/456123/yolov5-code-main/utils/loggers/clearml/hpo.py b/spaces/YONG627/456123/yolov5-code-main/utils/loggers/clearml/hpo.py deleted file mode 100644 index ee518b0fbfc89ee811b51bbf85341eee4f685be1..0000000000000000000000000000000000000000 --- a/spaces/YONG627/456123/yolov5-code-main/utils/loggers/clearml/hpo.py +++ /dev/null @@ -1,84 +0,0 @@ -from clearml import Task -# Connecting ClearML with the current process, -# from here on everything is logged automatically -from clearml.automation import HyperParameterOptimizer, UniformParameterRange -from clearml.automation.optuna import OptimizerOptuna - -task = Task.init(project_name='Hyper-Parameter Optimization', - task_name='YOLOv5', - task_type=Task.TaskTypes.optimizer, - reuse_last_task_id=False) - -# Example use case: -optimizer = HyperParameterOptimizer( - # This is the experiment we want to optimize - base_task_id='', - # here we define the hyper-parameters to optimize - # Notice: The parameter name should exactly match what you see in the UI: / - # For Example, here we see in the base experiment a section Named: "General" - # under it a parameter named "batch_size", this becomes "General/batch_size" - # If you have `argparse` for example, then arguments will appear under the "Args" section, - # and you should instead pass "Args/batch_size" - hyper_parameters=[ - UniformParameterRange('Hyperparameters/lr0', min_value=1e-5, max_value=1e-1), - UniformParameterRange('Hyperparameters/lrf', min_value=0.01, max_value=1.0), - UniformParameterRange('Hyperparameters/momentum', min_value=0.6, max_value=0.98), - UniformParameterRange('Hyperparameters/weight_decay', min_value=0.0, max_value=0.001), - UniformParameterRange('Hyperparameters/warmup_epochs', min_value=0.0, max_value=5.0), - UniformParameterRange('Hyperparameters/warmup_momentum', min_value=0.0, max_value=0.95), - UniformParameterRange('Hyperparameters/warmup_bias_lr', min_value=0.0, max_value=0.2), - UniformParameterRange('Hyperparameters/box', min_value=0.02, max_value=0.2), - UniformParameterRange('Hyperparameters/cls', min_value=0.2, max_value=4.0), - UniformParameterRange('Hyperparameters/cls_pw', min_value=0.5, max_value=2.0), - UniformParameterRange('Hyperparameters/obj', min_value=0.2, max_value=4.0), - UniformParameterRange('Hyperparameters/obj_pw', min_value=0.5, max_value=2.0), - UniformParameterRange('Hyperparameters/iou_t', min_value=0.1, max_value=0.7), - UniformParameterRange('Hyperparameters/anchor_t', min_value=2.0, max_value=8.0), - UniformParameterRange('Hyperparameters/fl_gamma', min_value=0.0, max_value=4.0), - UniformParameterRange('Hyperparameters/hsv_h', min_value=0.0, max_value=0.1), - UniformParameterRange('Hyperparameters/hsv_s', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/hsv_v', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/degrees', min_value=0.0, max_value=45.0), - UniformParameterRange('Hyperparameters/translate', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/scale', min_value=0.0, max_value=0.9), - UniformParameterRange('Hyperparameters/shear', min_value=0.0, max_value=10.0), - UniformParameterRange('Hyperparameters/perspective', min_value=0.0, max_value=0.001), - UniformParameterRange('Hyperparameters/flipud', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/fliplr', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/mosaic', min_value=0.0, max_value=1.0), 
- UniformParameterRange('Hyperparameters/mixup', min_value=0.0, max_value=1.0), - UniformParameterRange('Hyperparameters/copy_paste', min_value=0.0, max_value=1.0)], - # this is the objective metric we want to maximize/minimize - objective_metric_title='metrics', - objective_metric_series='mAP_0.5', - # now we decide if we want to maximize it or minimize it (accuracy we maximize) - objective_metric_sign='max', - # let us limit the number of concurrent experiments, - # this in turn will make sure we do dont bombard the scheduler with experiments. - # if we have an auto-scaler connected, this, by proxy, will limit the number of machine - max_number_of_concurrent_tasks=1, - # this is the optimizer class (actually doing the optimization) - # Currently, we can choose from GridSearch, RandomSearch or OptimizerBOHB (Bayesian optimization Hyper-Band) - optimizer_class=OptimizerOptuna, - # If specified only the top K performing Tasks will be kept, the others will be automatically archived - save_top_k_tasks_only=5, # 5, - compute_time_limit=None, - total_max_jobs=20, - min_iteration_per_job=None, - max_iteration_per_job=None, -) - -# report every 10 seconds, this is way too often, but we are testing here -optimizer.set_report_period(10 / 60) -# You can also use the line below instead to run all the optimizer tasks locally, without using queues or agent -# an_optimizer.start_locally(job_complete_callback=job_complete_callback) -# set the time limit for the optimization process (2 hours) -optimizer.set_time_limit(in_minutes=120.0) -# Start the optimization process in the local environment -optimizer.start_locally() -# wait until process is done (notice we are controlling the optimization process in the background) -optimizer.wait() -# make sure background optimization stopped -optimizer.stop() - -print('We are done, good bye') diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/Waifu2x/Loss.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/Waifu2x/Loss.py deleted file mode 100644 index 267bb66f9e221f50cd06c96e93fd0ee71be925b4..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/Waifu2x/Loss.py +++ /dev/null @@ -1,44 +0,0 @@ -import torch -from torch import nn -from torch.nn.functional import _pointwise_loss - -rgb_weights = [0.29891 * 3, 0.58661 * 3, 0.11448 * 3] -# RGB have different weights -# https://github.com/nagadomi/waifu2x/blob/master/train.lua#L109 -use_cuda = torch.cuda.is_available() -FloatTensor = torch.cuda.FloatTensor if use_cuda else torch.FloatTensor -LongTensor = torch.cuda.LongTensor if use_cuda else torch.LongTensor -Tensor = FloatTensor - - -class WeightedHuberLoss(nn.SmoothL1Loss): - def __init__(self, weights=rgb_weights): - super(WeightedHuberLoss, self).__init__(size_average=True, reduce=True) - self.weights = torch.FloatTensor(weights).view(3, 1, 1) - - def forward(self, input_data, target): - diff = torch.abs(input_data - target) - z = torch.where(diff < 1, 0.5 * torch.pow(diff, 2), (diff - 0.5)) - out = z * self.weights.expand_as(diff) - return out.mean() - - -def weighted_mse_loss(input, target, weights): - out = (input - target) ** 2 - out = out * weights.expand_as(out) - loss = out.sum(0) # or sum over whatever dimensions - return loss / out.size(0) - - -class WeightedL1Loss(nn.SmoothL1Loss): - def __init__(self, weights=rgb_weights): - super(WeightedHuberLoss, self).__init__(size_average=True, reduce=True) - self.weights = torch.FloatTensor(weights).view(3, 1, 1) - - def forward(self, input_data, 
target): - return self.l1_loss(input_data, target, size_average=self.size_average, - reduce=self.reduce) - - def l1_loss(self, input_data, target, size_average=True, reduce=True): - return _pointwise_loss(lambda a, b: torch.abs(a - b) * self.weights.expand_as(a), - torch._C._nn.l1_loss, input_data, target, size_average, reduce) diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/dance_diffusion/__init__.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/dance_diffusion/__init__.py deleted file mode 100644 index 2ad34fc52aaa61f9313cae32d7bb39acad831104..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/pipelines/dance_diffusion/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -# flake8: noqa -from .pipeline_dance_diffusion import DanceDiffusionPipeline diff --git a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/dummy_flax_and_transformers_objects.py b/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/dummy_flax_and_transformers_objects.py deleted file mode 100644 index 14830bca2898ed550eb9a0b671282a81967c8570..0000000000000000000000000000000000000000 --- a/spaces/YeOldHermit/Super-Resolution-Anime-Diffusion/diffusers/utils/dummy_flax_and_transformers_objects.py +++ /dev/null @@ -1,19 +0,0 @@ -# This file is autogenerated by the command `make fix-copies`, do not edit. -# flake8: noqa - -from ..utils import DummyObject, requires_backends - - -class FlaxStableDiffusionPipeline(metaclass=DummyObject): - _backends = ["flax", "transformers"] - - def __init__(self, *args, **kwargs): - requires_backends(self, ["flax", "transformers"]) - - @classmethod - def from_config(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) - - @classmethod - def from_pretrained(cls, *args, **kwargs): - requires_backends(cls, ["flax", "transformers"]) diff --git a/spaces/Yntec/photoMovieX/app.py b/spaces/Yntec/photoMovieX/app.py deleted file mode 100644 index 0de814355d32a7c37085f15abe9299d792075528..0000000000000000000000000000000000000000 --- a/spaces/Yntec/photoMovieX/app.py +++ /dev/null @@ -1,233 +0,0 @@ -import gradio as gr -import os -import sys -from pathlib import Path -import random -import string -import time -from queue import Queue -from threading import Thread -import emoji - -text_gen=gr.Interface.load("spaces/phenomenon1981/MagicPrompt-Stable-Diffusion") -def get_prompts(prompt_text): - if prompt_text: - return text_gen(prompt_text + "movie still, photo") - else: - return text_gen("") -proc1=gr.Interface.load("models/Yntec/photoMovieX") - -def restart_script_periodically(): - while True: - random_time = random.randint(5400, 6000) - time.sleep(random_time) - os.execl(sys.executable, sys.executable, *sys.argv) - - -restart_thread = Thread(target=restart_script_periodically, daemon=True) -restart_thread.start() - - -queue = Queue() -queue_threshold = 100 - -#Don't add noise to the first picture no matter what (the point of noise is to get varied outputs, the first one doesn't need to vary about anything) -def noadd_random_noise(prompt, noise_level=0.00): - if noise_level == 0: - noise_level = 0.00 - percentage_noise = noise_level * 5 - num_noise_chars = int(len(prompt) * (percentage_noise/100)) - noise_indices = random.sample(range(len(prompt)), num_noise_chars) - prompt_list = list(prompt) - noise_chars = list(string.ascii_letters + string.punctuation + '' + string.digits) - noise_chars.extend(['']) - for index in noise_indices: - 
prompt_list[index] = random.choice(noise_chars) - return "".join(prompt_list) - -#normal behavior -def add_random_noise(prompt, noise_level=0.00): - if noise_level == 0: - noise_level = 0.00 - percentage_noise = noise_level * 5 - num_noise_chars = int(len(prompt) * (percentage_noise/100)) - noise_indices = random.sample(range(len(prompt)), num_noise_chars) - prompt_list = list(prompt) - noise_chars = list(string.ascii_letters + string.punctuation + ' ' + string.digits) - noise_chars.extend(['😍', 'beautiful', '😂', '🤔', '😊', '🤗', '😭', '🙄', 'pretty', '🤯', '🤫', '🥴', 'sitting', '🤩', '🥳', '😔', '😩', '🤪', '😇', 'retro', '😈', '👹', 'masterpiece', '🤖', '👽', 'high quality', '🎃', '🎅', '🎄', '🎁', '🎂', '🎉', '🎈', '🎊', '🎮', '❤️', '💔', '💕', '💖', '💗', '🐶', '🐱', 'visible', '🐹', '🦊', '🐻', '🐨', '🐯', '🦁', '🐘', '🔥', '🌧️', '🌞', '🌈', '💥', '🌴', '🌊', '🌺', '🌻', '🌸', '🎨', '🌅', '🌌', '☁️', '⛈️', '❄️', '☀️', '🌤️', '⛅️', '🌥️', '🌦️', '🌧️', '🌩️', '🌨️', '🌫️', '☔️', '🌬️', '💨', '🌪️', 'cute', 'kawaii', 'little', 'photo', 'movie', 'still']) - for index in noise_indices: - prompt_list[index] = random.choice(noise_chars) - return "".join(prompt_list) - -def send_it1(inputs, noise_level, proc1=proc1): - prompt_with_noise = noadd_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output1 = proc1(prompt_with_noise) - return output1 - -def send_it2(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output2 = proc1(prompt_with_noise) - return output2 - -def send_itX(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - outputX = proc1(prompt_with_noise) - return outputX - -def send_it3(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output3 = proc1(prompt_with_noise) - return output3 - -def send_it4(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output4 = proc1(prompt_with_noise) - return output4 - -def send_it5(inputs, noise_level, proc1=proc1): - prompt_with_noise = add_random_noise(inputs, noise_level) - while queue.qsize() >= queue_threshold: - time.sleep(2) - queue.put(prompt_with_noise) - output5 = proc1(prompt_with_noise) - return output5 - -#def send_it7(inputs, noise_level, proc1=proc1): - #prompt_with_noise = add_random_noise(inputs, noise_level) - #while queue.qsize() >= queue_threshold: - # time.sleep(2) - #queue.put(prompt_with_noise) - #output5 = proc1(prompt_with_noise) - #return output0 - - -with gr.Blocks(css='style.css') as demo: - gr.HTML( - """ -
    - PhotoMovieX
    - 🤗 Celebrating 36000 downloads at huggingface! 🤗
    - Check MagicArt35's original model page at Civitai here!
    - If you have an idea, put it in the first box to expand it. If you already have a full prompt, leave the first box empty, paste the prompt into the second box, and click "Generate Images".
    - Noise Level: controls how much randomness is added to the input of every box after the first before it is sent to the model, so you get 6 unique 768x768 images. A higher noise level produces more diverse outputs; a lower one produces more similar outputs.
    - Original space created by Phenomenon1981.
    - ❤️ Press the Like Button if you enjoy my space! ❤️
    - """ - ) - with gr.Column(elem_id="col-container"): - with gr.Row(variant="compact"): - input_text = gr.Textbox( - label="Short Prompt", - show_label=False, - max_lines=2, - placeholder="Enter a basic idea and click 'Magic Prompt'. Got no ideas? No problem, Simply just hit the magic button!", - ).style( - container=False,min_width=1200 - ) - see_prompts = gr.Button("✨Magic✨ ✨Prompt✨").style(full_width=False) - - - with gr.Row(variant="compact"): - prompt = gr.Textbox( - label="Enter your prompt", - show_label=False, - max_lines=2, - placeholder="Full Prompt", - ).style( - container=False, - ) - run = gr.Button("Generate Images").style(full_width=False) - with gr.Row(): - with gr.Row(): - #Now that the first box generates a picture with noise=0 having the default at 0 makes no sense as it'd generate the same image 6 times. - noise_level = gr.Slider(minimum=0.2, maximum=3, step=0.1, label="Noise Level (0.1 or less was generating the same pic 6 times! 🤣)") - gr.HTML( - """ -
    - Please allow up to 1 minute for each image to generate, for a total of 6 minutes max.
    - """ - ) - with gr.Row(): - with gr.Row(): - output1=gr.Image(label="PhotoMovieX",show_label=False,min_width=640) - output2=gr.Image(label="PhotoMovieX",show_label=False,min_width=640) - with gr.Row(): - with gr.Row(): - output3=gr.Image(label="PhotoMovieX",show_label=False,min_width=640) - output4=gr.Image(label="PhotoMovieX",show_label=False,min_width=640) - with gr.Row(): - with gr.Row(): - output5=gr.Image(label="PhotoMovieX",show_label=False,min_width=640) - outputX=gr.Image(label="PhotoMovieX",show_label=False,min_width=640) - #with gr.Row(): - #with gr.Row(): - #output0=gr.Image(label="PhotoMovieX",show_label=False,min_width=640) - - see_prompts.click(get_prompts, inputs=[input_text], outputs=[prompt], queue=False) - run.click(send_it1, inputs=[prompt, noise_level], outputs=[output1]) - #run.click(send_it7, inputs=[prompt, noise_level], outputs=[output0]) - run.click(send_it2, inputs=[prompt, noise_level], outputs=[output2]) - run.click(send_it3, inputs=[prompt, noise_level], outputs=[output3]) - run.click(send_it4, inputs=[prompt, noise_level], outputs=[output4]) - run.click(send_it5, inputs=[prompt, noise_level], outputs=[output5]) - run.click(send_itX, inputs=[prompt, noise_level], outputs=[outputX]) - - - with gr.Row(): - gr.HTML( - """ - -
    - Unleash your creative side and generate mesmerizing images with just a few clicks! Enter a spark of inspiration in the "Basic Idea" text box and click the "Magic Prompt" button to elevate it to a polished masterpiece. Make any final tweaks in the "Full Prompt" box and hit the "Generate Images" button to watch your vision come to life. Experiment with the "Noise Level" for a diverse range of outputs, from similar to wildly unique. Let the fun begin!
    - """ -) - - demo.launch(enable_queue=True, inline=True) - block.queue(concurrency_count=100) diff --git a/spaces/Yuliang/ICON/lib/renderer/camera.py b/spaces/Yuliang/ICON/lib/renderer/camera.py deleted file mode 100644 index fde488826c8eba933a00485d39cdc21d4d2dae8f..0000000000000000000000000000000000000000 --- a/spaces/Yuliang/ICON/lib/renderer/camera.py +++ /dev/null @@ -1,226 +0,0 @@ - -# -*- coding: utf-8 -*- - -# Max-Planck-Gesellschaft zur Förderung der Wissenschaften e.V. (MPG) is -# holder of all proprietary rights on this computer program. -# You can only use this computer program if you have closed -# a license agreement with MPG or you get the right to use the computer -# program from someone who is authorized to grant you that right. -# Any use of the computer program without a valid license is prohibited and -# liable to prosecution. -# -# Copyright©2019 Max-Planck-Gesellschaft zur Förderung -# der Wissenschaften e.V. (MPG). acting on behalf of its Max Planck Institute -# for Intelligent Systems. All rights reserved. -# -# Contact: ps-license@tuebingen.mpg.de - -import cv2 -import numpy as np - -from .glm import ortho - - -class Camera: - def __init__(self, width=1600, height=1200): - # Focal Length - # equivalent 50mm - focal = np.sqrt(width * width + height * height) - self.focal_x = focal - self.focal_y = focal - # Principal Point Offset - self.principal_x = width / 2 - self.principal_y = height / 2 - # Axis Skew - self.skew = 0 - # Image Size - self.width = width - self.height = height - - self.near = 1 - self.far = 10 - - # Camera Center - self.center = np.array([0, 0, 1.6]) - self.direction = np.array([0, 0, -1]) - self.right = np.array([1, 0, 0]) - self.up = np.array([0, 1, 0]) - - self.ortho_ratio = None - - def sanity_check(self): - self.center = self.center.reshape([-1]) - self.direction = self.direction.reshape([-1]) - self.right = self.right.reshape([-1]) - self.up = self.up.reshape([-1]) - - assert len(self.center) == 3 - assert len(self.direction) == 3 - assert len(self.right) == 3 - assert len(self.up) == 3 - - @staticmethod - def normalize_vector(v): - v_norm = np.linalg.norm(v) - return v if v_norm == 0 else v / v_norm - - def get_real_z_value(self, z): - z_near = self.near - z_far = self.far - z_n = 2.0 * z - 1.0 - z_e = 2.0 * z_near * z_far / (z_far + z_near - z_n * (z_far - z_near)) - return z_e - - def get_rotation_matrix(self): - rot_mat = np.eye(3) - s = self.right - s = self.normalize_vector(s) - rot_mat[0, :] = s - u = self.up - u = self.normalize_vector(u) - rot_mat[1, :] = -u - rot_mat[2, :] = self.normalize_vector(self.direction) - - return rot_mat - - def get_translation_vector(self): - rot_mat = self.get_rotation_matrix() - trans = -np.dot(rot_mat, self.center) - return trans - - def get_intrinsic_matrix(self): - int_mat = np.eye(3) - - int_mat[0, 0] = self.focal_x - int_mat[1, 1] = self.focal_y - int_mat[0, 1] = self.skew - int_mat[0, 2] = self.principal_x - int_mat[1, 2] = self.principal_y - - return int_mat - - def get_projection_matrix(self): - ext_mat = self.get_extrinsic_matrix() - int_mat = self.get_intrinsic_matrix() - - return np.matmul(int_mat, ext_mat) - - def get_extrinsic_matrix(self): - rot_mat = self.get_rotation_matrix() - int_mat = self.get_intrinsic_matrix() - trans = self.get_translation_vector() - - extrinsic = np.eye(4) - extrinsic[:3, :3] = rot_mat - extrinsic[:3, 3] = trans - - return extrinsic[:3, :] - - def set_rotation_matrix(self, rot_mat): - self.direction = rot_mat[2, :] - self.up = -rot_mat[1, :] - self.right = 
rot_mat[0, :] - - def set_intrinsic_matrix(self, int_mat): - self.focal_x = int_mat[0, 0] - self.focal_y = int_mat[1, 1] - self.skew = int_mat[0, 1] - self.principal_x = int_mat[0, 2] - self.principal_y = int_mat[1, 2] - - def set_projection_matrix(self, proj_mat): - res = cv2.decomposeProjectionMatrix(proj_mat) - int_mat, rot_mat, camera_center_homo = res[0], res[1], res[2] - camera_center = camera_center_homo[0:3] / camera_center_homo[3] - camera_center = camera_center.reshape(-1) - int_mat = int_mat / int_mat[2][2] - - self.set_intrinsic_matrix(int_mat) - self.set_rotation_matrix(rot_mat) - self.center = camera_center - - self.sanity_check() - - def get_gl_matrix(self): - z_near = self.near - z_far = self.far - rot_mat = self.get_rotation_matrix() - int_mat = self.get_intrinsic_matrix() - trans = self.get_translation_vector() - - extrinsic = np.eye(4) - extrinsic[:3, :3] = rot_mat - extrinsic[:3, 3] = trans - axis_adj = np.eye(4) - axis_adj[2, 2] = -1 - axis_adj[1, 1] = -1 - model_view = np.matmul(axis_adj, extrinsic) - - projective = np.zeros([4, 4]) - projective[:2, :2] = int_mat[:2, :2] - projective[:2, 2:3] = -int_mat[:2, 2:3] - projective[3, 2] = -1 - projective[2, 2] = (z_near + z_far) - projective[2, 3] = (z_near * z_far) - - if self.ortho_ratio is None: - ndc = ortho(0, self.width, 0, self.height, z_near, z_far) - perspective = np.matmul(ndc, projective) - else: - perspective = ortho(-self.width * self.ortho_ratio / 2, - self.width * self.ortho_ratio / 2, - -self.height * self.ortho_ratio / 2, - self.height * self.ortho_ratio / 2, z_near, - z_far) - - return perspective, model_view - - -def KRT_from_P(proj_mat, normalize_K=True): - res = cv2.decomposeProjectionMatrix(proj_mat) - K, Rot, camera_center_homog = res[0], res[1], res[2] - camera_center = camera_center_homog[0:3] / camera_center_homog[3] - trans = -Rot.dot(camera_center) - if normalize_K: - K = K / K[2][2] - return K, Rot, trans - - -def MVP_from_P(proj_mat, width, height, near=0.1, far=10000): - ''' - Convert OpenCV camera calibration matrix to OpenGL projection and model view matrix - :param proj_mat: OpenCV camera projeciton matrix - :param width: Image width - :param height: Image height - :param near: Z near value - :param far: Z far value - :return: OpenGL projection matrix and model view matrix - ''' - res = cv2.decomposeProjectionMatrix(proj_mat) - K, Rot, camera_center_homog = res[0], res[1], res[2] - camera_center = camera_center_homog[0:3] / camera_center_homog[3] - trans = -Rot.dot(camera_center) - K = K / K[2][2] - - extrinsic = np.eye(4) - extrinsic[:3, :3] = Rot - extrinsic[:3, 3:4] = trans - axis_adj = np.eye(4) - axis_adj[2, 2] = -1 - axis_adj[1, 1] = -1 - model_view = np.matmul(axis_adj, extrinsic) - - zFar = far - zNear = near - projective = np.zeros([4, 4]) - projective[:2, :2] = K[:2, :2] - projective[:2, 2:3] = -K[:2, 2:3] - projective[3, 2] = -1 - projective[2, 2] = (zNear + zFar) - projective[2, 3] = (zNear * zFar) - - ndc = ortho(0, width, 0, height, zNear, zFar) - - perspective = np.matmul(ndc, projective) - - return perspective, model_view diff --git a/spaces/Yunshansongbai/SVC-Nahida/modules/attentions.py b/spaces/Yunshansongbai/SVC-Nahida/modules/attentions.py deleted file mode 100644 index 8ca80c90f2d2db2a52eaac8b88881a84193302a5..0000000000000000000000000000000000000000 --- a/spaces/Yunshansongbai/SVC-Nahida/modules/attentions.py +++ /dev/null @@ -1,377 +0,0 @@ -import copy -import math -import numpy as np -import paddle -from paddle import nn -from paddle.nn import functional as F - 
-import modules.commons as commons -import modules.modules as modules -from modules.modules import LayerNorm - - -class FFT(nn.Layer): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers=1, kernel_size=1, p_dropout=0., - proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - self.self_attn_layers = nn.LayerList() - self.norm_layers_0 = nn.LayerList() - self.ffn_layers = nn.LayerList() - self.norm_layers_1 = nn.LayerList() - for i in range(self.n_layers): - self.self_attn_layers.append( - MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, - proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.ffn_layers.append( - FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.shape[2]).astype(dtype=x.dtype) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - x = x * x_mask - return x - - -class Encoder(nn.Layer): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., window_size=4, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.window_size = window_size - - self.drop = nn.Dropout(p_dropout) - self.attn_layers = nn.LayerList() - self.norm_layers_1 = nn.LayerList() - self.ffn_layers = nn.LayerList() - self.norm_layers_2 = nn.LayerList() - for i in range(self.n_layers): - self.attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, window_size=window_size)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask): - attn_mask = x_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.attn_layers[i](x, x, attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class Decoder(nn.Layer): - def __init__(self, hidden_channels, filter_channels, n_heads, n_layers, kernel_size=1, p_dropout=0., proximal_bias=False, proximal_init=True, **kwargs): - super().__init__() - self.hidden_channels = hidden_channels - self.filter_channels = filter_channels - self.n_heads = n_heads - self.n_layers = n_layers - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - - self.drop = nn.Dropout(p_dropout) - 
self.self_attn_layers = nn.LayerList() - self.norm_layers_0 = nn.LayerList() - self.encdec_attn_layers = nn.LayerList() - self.norm_layers_1 = nn.LayerList() - self.ffn_layers = nn.LayerList() - self.norm_layers_2 = nn.LayerList() - for i in range(self.n_layers): - self.self_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout, proximal_bias=proximal_bias, proximal_init=proximal_init)) - self.norm_layers_0.append(LayerNorm(hidden_channels)) - self.encdec_attn_layers.append(MultiHeadAttention(hidden_channels, hidden_channels, n_heads, p_dropout=p_dropout)) - self.norm_layers_1.append(LayerNorm(hidden_channels)) - self.ffn_layers.append(FFN(hidden_channels, hidden_channels, filter_channels, kernel_size, p_dropout=p_dropout, causal=True)) - self.norm_layers_2.append(LayerNorm(hidden_channels)) - - def forward(self, x, x_mask, h, h_mask): - """ - x: decoder input - h: encoder output - """ - self_attn_mask = commons.subsequent_mask(x_mask.size(2)).astype(dtype=x.dtype) - encdec_attn_mask = h_mask.unsqueeze(2) * x_mask.unsqueeze(-1) - x = x * x_mask - for i in range(self.n_layers): - y = self.self_attn_layers[i](x, x, self_attn_mask) - y = self.drop(y) - x = self.norm_layers_0[i](x + y) - - y = self.encdec_attn_layers[i](x, h, encdec_attn_mask) - y = self.drop(y) - x = self.norm_layers_1[i](x + y) - - y = self.ffn_layers[i](x, x_mask) - y = self.drop(y) - x = self.norm_layers_2[i](x + y) - x = x * x_mask - return x - - -class MultiHeadAttention(nn.Layer): - def __init__(self, channels, out_channels, n_heads, p_dropout=0., window_size=None, heads_share=True, block_length=None, proximal_bias=False, proximal_init=False): - super().__init__() - assert channels % n_heads == 0 - - self.channels = channels - self.out_channels = out_channels - self.n_heads = n_heads - self.p_dropout = p_dropout - self.window_size = window_size - self.heads_share = heads_share - self.block_length = block_length - self.proximal_bias = proximal_bias - self.proximal_init = proximal_init - self.attn = None - - self.k_channels = channels // n_heads - - self.conv_q = nn.Conv1D(channels, channels, 1,)# weight_attr=attr) - self.conv_k = nn.Conv1D(channels, channels, 1,)# weight_attr=attr) - self.conv_v = nn.Conv1D(channels, channels, 1,)# weight_attr=attr) - self.conv_o = nn.Conv1D(channels, out_channels, 1) - self.drop = nn.Dropout(p_dropout) - - if window_size is not None: - n_heads_rel = 1 if heads_share else n_heads - rel_stddev = self.k_channels**-0.5 - - rand = paddle.randn((n_heads_rel, window_size * 2 + 1, self.k_channels)) * rel_stddev - - self.emb_rel_k = paddle.create_parameter(rand.shape,'float32',None) - self.emb_rel_v = paddle.create_parameter(rand.shape,'float32',None) - - #nn.init.xavier_uniform_(self.conv_q.weight) - #nn.init.xavier_uniform_(self.conv_k.weight) - #nn.init.xavier_uniform_(self.conv_v.weight) - if proximal_init: - with paddle.no_grad(): - self.conv_k.weight = (self.conv_q.weight) - self.conv_k.bias = (self.conv_q.bias) - - def forward(self, x, c, attn_mask=None): - #print(x) - #print(self.conv_q.weight) - q = self.conv_q(x) - k = self.conv_k(c) - v = self.conv_v(c) - - x, self.attn = self.attention(q, k, v, mask=attn_mask) - - x = self.conv_o(x) - return x - - @staticmethod - def _masked_fill(x, mask, value:float): - y = paddle.full(x.shape, value, x.dtype) - return paddle.where(mask, y, x) - - def attention(self, query, key, value, mask=None): - # reshape [b, d, t] -> [b, n_h, t, d_k] - b, d, t_s, t_t = (*key.shape, query.shape[2]) - query = 
query.reshape((b, self.n_heads, self.k_channels, t_t)).transpose([0,1,3,2]) - key = key.reshape((b, self.n_heads, self.k_channels, t_s)).transpose([0,1,3,2]) - value = value.reshape((b, self.n_heads, self.k_channels, t_s)).transpose([0,1,3,2]) - - scores = paddle.matmul(query / math.sqrt(self.k_channels), key.transpose([0,1,3,2])) # 0 1 2 3 -4 -3 -2 -1 - if self.window_size is not None: - assert t_s == t_t, "Relative attention is only available for self-attention." - - key_relative_embeddings = self._get_relative_embeddings(self.emb_rel_k, t_s) - rel_logits = self._matmul_with_relative_keys(query /math.sqrt(self.k_channels), key_relative_embeddings) - scores_local = self._relative_position_to_absolute_position(rel_logits) - scores = scores + scores_local - if self.proximal_bias: - assert t_s == t_t, "Proximal bias is only available for self-attention." - scores = scores + self._attention_bias_proximal(t_s).astype(dtype=scores.dtype) - if mask is not None: - scores = self._masked_fill(scores, mask == 0, -1e4) - if self.block_length is not None: - assert t_s == t_t, "Local attention is only available for self-attention." - block_mask = paddle.tril(paddle.triu(paddle.ones_like(scores), -self.block_length),self.block_length) - scores = self._masked_fill(scores, block_mask == 0, -1e4) - p_attn = F.softmax(scores, axis=-1) # [b, n_h, t_t, t_s] - p_attn = self.drop(p_attn) - output = paddle.matmul(p_attn, value) - if self.window_size is not None: - relative_weights = self._absolute_position_to_relative_position(p_attn) - value_relative_embeddings = self._get_relative_embeddings(self.emb_rel_v, t_s) - output = output + self._matmul_with_relative_values(relative_weights, value_relative_embeddings) - output = output.transpose([0,1,3,2]).reshape((b, d, t_t)) # [b, n_h, t_t, d_k] -> [b, d, t_t] - return output, p_attn - - def _matmul_with_relative_values(self, x, y): - """ - x: [b, h, l, m] - y: [h or 1, m, d] - ret: [b, h, l, d] - """ - ret = paddle.matmul(x, y.unsqueeze(0)) - return ret - - def _matmul_with_relative_keys(self, x, y): - """ - x: [b, h, l, d] - y: [h or 1, m, d] - ret: [b, h, l, m] - """ - ret = paddle.matmul(x, y.unsqueeze(0).transpose([0,1,3,2])) - return ret - - def _get_relative_embeddings(self, relative_embeddings, length): - max_relative_position = 2 * self.window_size + 1 - # Pad first before slice to avoid using cond ops. - pad_length = max(length - (self.window_size + 1), 0) - slice_start_position = max((self.window_size + 1) - length, 0) - slice_end_position = slice_start_position + 2 * length - 1 - if pad_length > 0: - padding = commons.convert_pad_shape([[0, 0], [pad_length, pad_length], [0, 0]]) - - padded_relative_embeddings = F.pad( - x = relative_embeddings.unsqueeze(0), - pad = padding[0:4]).squeeze(0) - - else: - padded_relative_embeddings = relative_embeddings - used_relative_embeddings = padded_relative_embeddings[:,slice_start_position:slice_end_position] - return used_relative_embeddings - - def _relative_position_to_absolute_position(self, x): - """ - x: [b, h, l, 2*l-1] - ret: [b, h, l, l] - """ - batch, heads, length, _ = x.shape - # Concat columns of pad to shift from relative to absolute indexing. - pad_shape = commons.convert_pad_shape([[0,0],[0,0],[0,0],[0,1]]) - pad_shape = commons.fix_pad_shape(pad_shape, x) - x = F.pad(x, pad_shape) - # Concat extra elements so to add up to shape (len+1, 2*len-1). 
- x_flat = x.reshape([batch, heads, length * 2 * length]) - pad_shape = commons.convert_pad_shape([[0,0],[0,0],[0,length-1]]) - pad_shape = commons.fix_pad_shape(pad_shape,x_flat) - x_flat = F.pad(x_flat, pad_shape, data_format='NCL') - # Reshape and slice out the padded elements. - x_final = x_flat.reshape([batch, heads, length+1, 2*length-1])[:, :, :length, length-1:] - return x_final - - def _absolute_position_to_relative_position(self, x): - """ - x: [b, h, l, l] - ret: [b, h, l, 2*l-1] - """ - batch, heads, length, _ = x.shape - # padd along column - pad_shape = commons.convert_pad_shape([[0, 0], [0, 0], [0, 0], [0, length-1]]) - pad_shape = commons.fix_pad_shape(pad_shape, x) - x = F.pad(x, pad_shape) - x_flat = x.reshape([batch, heads, length**2 + length*(length -1)]) - # add 0's in the beginning that will skew the elements after reshape - pad_shape = commons.convert_pad_shape([[0, 0], [0, 0], [length, 0]]) - pad_shape = commons.fix_pad_shape(pad_shape, x_flat) - x_flat = F.pad(x_flat, pad_shape, data_format='NCL') - x_final = x_flat.reshape([batch, heads, length, 2*length])[:,:,:,1:] - return x_final - - def _attention_bias_proximal(self, length): - """Bias for self-attention to encourage attention to close positions. - Args: - length: an integer scalar. - Returns: - a Tensor with shape [1, 1, length, length] - """ - r = paddle.arange(length, dtype=np.float32) - diff = paddle.unsqueeze(r, 0) - paddle.unsqueeze(r, 1) - return paddle.unsqueeze(paddle.unsqueeze(-paddle.log1p(paddle.abs(diff)), 0), 0) - - -class FFN(nn.Layer): - def __init__(self, in_channels, out_channels, filter_channels, kernel_size, p_dropout=0., activation=None, causal=False): - super().__init__() - self.in_channels = in_channels - self.out_channels = out_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.p_dropout = p_dropout - self.activation = activation - self.causal = causal - - if causal: - self.padding = self._causal_padding - else: - self.padding = self._same_padding - - self.conv_1 = nn.Conv1D(in_channels, filter_channels, kernel_size) - self.conv_2 = nn.Conv1D(filter_channels, out_channels, kernel_size) - self.drop = nn.Dropout(p_dropout) - - def forward(self, x, x_mask): - x = x * x_mask - x = self.padding(x) - x = self.conv_1(x) - if self.activation == "gelu": - x = x * F.sigmoid(1.702 * x) - else: - x = F.relu(x) - x = self.drop(x) - x = x * x_mask - x = self.padding(x) - x = self.conv_2(x) - return x * x_mask - - def _causal_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = self.kernel_size - 1 - pad_r = 0 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - pad_shape:list = commons.convert_pad_shape(padding) - pad_shape = commons.fix_pad_shape(pad_shape, x) - x = F.pad(x, pad_shape,data_format='NCL') - return x - - def _same_padding(self, x): - if self.kernel_size == 1: - return x - pad_l = (self.kernel_size - 1) // 2 - pad_r = self.kernel_size // 2 - padding = [[0, 0], [0, 0], [pad_l, pad_r]] - pad_shape = commons.convert_pad_shape(padding) - pad_shape = commons.fix_pad_shape(pad_shape, x) - x = F.pad(x, pad_shape, data_format='NCL') - return x diff --git a/spaces/ZJunTvT/ZJunChat/readme/README_en.md b/spaces/ZJunTvT/ZJunChat/readme/README_en.md deleted file mode 100644 index a906ecb3ebc411f5cdeb33d661266a489a20c3b0..0000000000000000000000000000000000000000 --- a/spaces/ZJunTvT/ZJunChat/readme/README_en.md +++ /dev/null @@ -1,127 +0,0 @@ -
    - 简体中文 | English | 日本語
    -
    - 川虎 Chat 🐯 Chuanhu Chat
    -
    - Logo
    -
    - Lightweight and User-friendly Web-UI for LLMs including ChatGPT/ChatGLM/LLaMA
    -
    - Tests Passing · GitHub Contributors · GitHub pull requests
    -
    - Streaming / Unlimited conversations / Save history / Preset prompts / Chat with files / Web search
    - LaTeX rendering / Table rendering / Code highlighting
    - Auto dark mode / Adaptive web interface / WeChat-like theme
    - Multi-parameters tuning / Multi-API-Key support / Multi-user support
    - Compatible with GPT-4 / Local deployment for LLMs
    -
    - Video Tutorial · 2.0 Introduction · 3.0 Introduction & Tutorial || Online trial · One-Click deployment
    -
    - Animation Demo
    - -## Usage Tips - -- To better control the ChatGPT, use System Prompt. -- To use a Prompt Template, select the Prompt Template Collection file first, and then choose certain prompt from the drop-down menu. -- To try again if the response is unsatisfactory, use `🔄 Regenerate` button. -- To start a new line in the input box, press Shift + Enter keys. -- To quickly switch between input history, press and key in the input box. -- To deploy the program onto a server, change the last line of the program to `demo.launch(server_name="0.0.0.0", server_port=)`. -- To get a public shared link, change the last line of the program to `demo.launch(share=True)`. Please be noted that the program must be running in order to be accessed via a public link. -- To use it in Hugging Face Spaces: It is recommended to **Duplicate Space** and run the program in your own Space for a faster and more secure experience. - -## Installation - -```shell -git clone https://github.com/GaiZhenbiao/ChuanhuChatGPT.git -cd ChuanhuChatGPT -pip install -r requirements.txt -``` - -Then make a copy of `config_example.json`, rename it to `config.json`, and then fill in your API-Key and other settings in the file. - -```shell -python ChuanhuChatbot.py -``` - -A browser window will open and you will be able to chat with ChatGPT. - -> **Note** -> -> Please check our [wiki page](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用教程) for detailed instructions. - -## Troubleshooting - -When you encounter problems, you should try manually pulling the latest changes of this project first. The steps are as follows: - -1. Download the latest code archive by clicking on `Download ZIP` on the webpage, or - ```shell - git pull https://github.com/GaiZhenbiao/ChuanhuChatGPT.git main -f - ``` -2. Try installing the dependencies again (as this project may have introduced new dependencies) - ``` - pip install -r requirements.txt - ``` -3. Update Gradio - ``` - pip install gradio --upgrade --force-reinstall - ``` - -Generally, you can solve most problems by following these steps. - -If the problem still exists, please refer to this page: [Frequently Asked Questions (FAQ)](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/常见问题) - -This page lists almost all the possible problems and solutions. Please read it carefully. 
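The deployment and public-link tips above both come down to editing the final `demo.launch(...)` call in `ChuanhuChatbot.py`. A minimal, self-contained sketch of what that launch configuration could look like — the placeholder UI, the port number, and the exact combination of flags are illustrative, not the project's defaults:

```python
import gradio as gr

# Placeholder app; in ChuanhuChatGPT the real `demo` Blocks object is built by the project itself.
with gr.Blocks() as demo:
    gr.Markdown("placeholder UI")

# The point of the tips: how the final launch line is configured.
demo.launch(
    server_name="0.0.0.0",  # listen on all interfaces so the app is reachable from other machines
    server_port=7860,       # example port; use any free port on your server
    share=True,             # set to False if you do not need a temporary public share link
)
```

As noted in the tips, the process must stay running for either the local server or the shared link to remain reachable.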
- -## More Information - -More information could be found in our [wiki](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki): - -- [How to contribute a translation](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/Localization) -- [How to make a contribution](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/贡献指南) -- [How to cite the project](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可#如何引用该项目) -- [Project changelog](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/更新日志) -- [Project license](https://github.com/GaiZhenbiao/ChuanhuChatGPT/wiki/使用许可) - -## Starchart - -[![Star History Chart](https://api.star-history.com/svg?repos=GaiZhenbiao/ChuanhuChatGPT&type=Date)](https://star-history.com/#GaiZhenbiao/ChuanhuChatGPT&Date) - -## Contributors - - - - - -## Sponsor - -🐯 If you find this project helpful, feel free to buy me a coke or a cup of coffee~ - -Buy Me A Coffee - -image diff --git "a/spaces/a-v-bely/russian-task-generator/pages/1_\342\232\231\357\270\217_\320\230\320\275\321\201\321\202\321\200\321\203\320\272\321\206\320\270\321\217.py" "b/spaces/a-v-bely/russian-task-generator/pages/1_\342\232\231\357\270\217_\320\230\320\275\321\201\321\202\321\200\321\203\320\272\321\206\320\270\321\217.py" deleted file mode 100644 index de520a139af964d97d40cc04d14ddcbb6aa9782b..0000000000000000000000000000000000000000 --- "a/spaces/a-v-bely/russian-task-generator/pages/1_\342\232\231\357\270\217_\320\230\320\275\321\201\321\202\321\200\321\203\320\272\321\206\320\270\321\217.py" +++ /dev/null @@ -1,78 +0,0 @@ -import streamlit as st - - -st.set_page_config(page_title='GenLexTasks', layout="wide", page_icon=':ru:') -if st.session_state.get('-LOGGED_IN_BOOL-'): - ANNOUNCES = st.expander('**ВАЖНАЯ ИНФОРМАЦИЯ**', expanded=True) - ANNOUNCES.success( - '**Уважаемые пользователи, пожалуйста, после генерации заданий перейдите на вкладку "📝Онлайн-тест" ' - 'и заполните там опросник. Таким образом Вы очень поможете в улучшении качества заданий! Спасибо!🤗**') - ANNOUNCES.warning( - '**Сейчас генератор проходит завершающую настройку и отладку, для которой необходимо большое количество ' - 'данных об уместности выбранных целевых слов и дистракторов к ним. Поэтому просим Вас отнестись с пониманием ' - 'к излишне большому количеству заданий.**') - ANNOUNCES.warning( - '**В настоящее время генератор может работать только с отдельными словами (от пробела до пробела).' - 'Использование словосочетаний в качестве целевых слов или дистракторов будет добавлено в ближайшее время.**') - ANNOUNCES.warning( - '**❗️ㅤУбедительно просим Вас дожидаться окончания генерации или загрузки и не переходить на ' - 'другие вкладки до выведения соответствующего сообщения.**') - ANNOUNCES.warning( - '**❗ㅤВ случае появления красных сообщений об ошибке, как правило, проблема решается ' - 'повторными нажатиями на нужный Вам элемент. Приносим извинения за неудобства.**') - - INSTRUCTION = st.expander(label='**ИНСТРУКЦИЯ**', expanded=True) - INSTRUCTION.markdown( - '**_I. Выберите режим работы._**' - '\n\n**_:red[СОЗДАНИЕ ЗАДАНИЙ]_**' - '\n\nПосле выбора данного режима работы появится форма, которую необходимо заполнить:' - '\n\n1. Придумайте **название** для файла с заданиями. ' - 'Вы можете оставить это поле пустым - именем по умолчанию служит текущая дата и первые 20 символов ' - 'введенного Вами текста.' - '\n\n2. Введите **текст** или выберите **текстовый файл** с исходным текстом, на основе которого Вы хотите ' - 'создать задания. ' - '\n\n3. 
Укажите *способ выбора целевых слов*:' - '\n\t* *:green[Автоматически]*: программа сама выберет подходящие по сложности целевые слова.' - '\n\t* *:blue[Самостоятельно]*: введите в соответствующее поле целевые слова через запятую в той форме, ' - 'в которой они встречаются в тексте. В этом случае *:orange[языковой уровень]* можно не указывать, но тогда ' - 'дистракторы будут полностью случайными и несоотнесёнными с уровнем.' - '\n4. Если Вы выбрали *:green[автоматический поиск целевых слов]*, **_:red[обязательно]_** укажите ' - '*:orange[языковой уровень]*. Данный параметр отвечает за выбор лексического минимума, использующегося при ' - 'подборе дистракторов.' - '\n5. Если Вы выбрали *:blue[самостоятельный ввод целевых слов]*, проверьте, что заполнили соответствующее ' - 'поле. ️ \n❗ **:red[Введите слова в той форме, в которой они встречаются в тексте]**.' - '\n\n6. Укажите число дистракторов - неправильных вариантов ответа. Если указано _более четырех_ ' - 'дистракторов, возможно, что в некоторых заданиях будет выведено _меньшее количество, но не менее четырех_ ' - 'вариантов. Данное обстоятельство связано с проверкой наличия дистракторов в лексических минимумах.' - '\n7. Выберите **способы вывода** готовых материалов.' - '\n8. Для начала работы нажмите на кнопку **"Запуск"**. Если все поля заполнены верно, ' - 'начнется процесс генерации заданий. Прогресс будет отображаться на экране.' - '\n9. По окончании процесса генерации заданий будет выведено **_:green[соответсвующее сообщение]_**. ' - 'Затем Вы можете перейти на вкладки **просмотра и 📥 сохранения** заданий, а так же 📝**онлайн-теста**.' - '\n\n**_:red[ЗАГРУЗКА ИЗ АРХИВА]_**' - '\n\nПосле выбора данного режима работы появится таблица, в которой перечислены названия заданий, ' - 'которые Вы сохранили, языковой уровень и дата их создания.' - ' Для загрузки определенного файла с заданиями:' - '\n1. Введите (или скопируйте из таблицы) название.' - '\n2. Укажите соответсвующий языковой уровень.' - '\n3. Нажмите на кнопку **"Загрузить"**.' - '\n4. Если все поля заполнены верно, Вы увидите сообщение о том, что **:green[задания успешно загружены]**.' - '\n\n\nДля того, чтобы свернуть/развернуть блоки **Инструкций** или **Важной информации**, ' - 'кликните по заголовку этого блока или по стрелке (ᐯ / ᐱ), располагающейся в его правом верхнем углу.') - - INSTRUCTION_ONLINE_TEST = st.expander(label='**ИНСТРУКЦИЯ ОНЛАЙН-ТЕСТ**', expanded=True) - INSTRUCTION_ONLINE_TEST.markdown( - 'Уважаемые пользователи, предлагаем Вам заполнить опросник по оценке качества созданных заданий. ' - '\n\nНиже находится анкета с заданиями в таблице.' - '\n\n- В **первом столбце** приводится ответ - слово, удаленное из оригинального текста.' - '\n\n- Отметьте во **втором столбце**, уместно ли создавать задание с данным словом.' - '\n\n- В **третьем столбце** приведены подобранные программой дистракторы.' - '\n\n- Введите в **четвертый столбец** дистракторы (целиком или букву), которые, по Вашему мнению, ' - '**:red[не уместны]**. ' - '\n\n**:green[Уместными дистракторами]** мы предлагаем считать те, которые одновременно удовлетворяют ' - 'следующим условиям в рамках языкового уровня, для которого они созданы:' - '\n\n1. не слишком очевидно являются неправильными вариантами (*варить суп/стол*);' - '\n\n2. 
и при этом не могут быть полноценной заменой удаленного слова (*варить суп/кашу*)' - ) -else: - st.warning('**Войдите или зарегистрируйтесь**') diff --git a/spaces/abdvl/datahub_qa_bot/docs/deploy/telemetry.md b/spaces/abdvl/datahub_qa_bot/docs/deploy/telemetry.md deleted file mode 100644 index c5458cc5df05e093e433bff4738cd552b6ec9177..0000000000000000000000000000000000000000 --- a/spaces/abdvl/datahub_qa_bot/docs/deploy/telemetry.md +++ /dev/null @@ -1,10 +0,0 @@ -# DataHub Telemetry - -## Overview of DataHub Telemetry - -To effectively build and maintain the DataHub Project, we must understand how end-users work within DataHub. Beginning in version 0.8.35, DataHub collects anonymous usage statistics and errors to inform our roadmap priorities and to enable us to proactively address errors. - -Deployments are assigned a UUID which is sent along with event details, Java version, OS, and timestamp; telemetry collection is enabled by default and can be disabled by setting `DATAHUB_TELEMETRY_ENABLED=false` in your Docker Compose config. - - -The source code is available [here.](../../metadata-service/factories/src/main/java/com/linkedin/gms/factory/telemetry/TelemetryUtils.java) \ No newline at end of file diff --git a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/gc_head.py b/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/gc_head.py deleted file mode 100644 index cff93b6a9fd6f0a4a2f6833cd464efbe29559728..0000000000000000000000000000000000000000 --- a/spaces/abhishek/sketch-to-image/annotator/uniformer_base/mmseg/models/decode_heads/gc_head.py +++ /dev/null @@ -1,59 +0,0 @@ -''' - * Copyright (c) 2023 Salesforce, Inc. - * All rights reserved. - * SPDX-License-Identifier: Apache License 2.0 - * For full license text, see LICENSE.txt file in the repo root or http://www.apache.org/licenses/ - * By Can Qin - * Modified from ControlNet repo: https://github.com/lllyasviel/ControlNet - * Copyright (c) 2023 Lvmin Zhang and Maneesh Agrawala - * Modified from MMCV repo: From https://github.com/open-mmlab/mmcv - * Copyright (c) OpenMMLab. All rights reserved. -''' - -import torch -from annotator.uniformer.mmcv.cnn import ContextBlock - -from ..builder import HEADS -from .fcn_head import FCNHead - - -@HEADS.register_module() -class GCHead(FCNHead): - """GCNet: Non-local Networks Meet Squeeze-Excitation Networks and Beyond. - - This head is the implementation of `GCNet - `_. - - Args: - ratio (float): Multiplier of channels ratio. Default: 1/4. - pooling_type (str): The pooling type of context aggregation. - Options are 'att', 'avg'. Default: 'avg'. - fusion_types (tuple[str]): The fusion type for feature fusion. - Options are 'channel_add', 'channel_mul'. 
Default: ('channel_add',) - """ - - def __init__(self, - ratio=1 / 4., - pooling_type='att', - fusion_types=('channel_add', ), - **kwargs): - super(GCHead, self).__init__(num_convs=2, **kwargs) - self.ratio = ratio - self.pooling_type = pooling_type - self.fusion_types = fusion_types - self.gc_block = ContextBlock( - in_channels=self.channels, - ratio=self.ratio, - pooling_type=self.pooling_type, - fusion_types=self.fusion_types) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - output = self.convs[0](x) - output = self.gc_block(output) - output = self.convs[1](output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/abidlabs/The-Acquisition-Post-Generator/README.md b/spaces/abidlabs/The-Acquisition-Post-Generator/README.md deleted file mode 100644 index 0822468a6c482785cf1e27eabe0e86c9a94ea6d6..0000000000000000000000000000000000000000 --- a/spaces/abidlabs/The-Acquisition-Post-Generator/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: The Acquisition Post Generator -emoji: 🚀 -colorFrom: gray -colorTo: red -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. 
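Returning to the `GCHead` decode head deleted a little further up (gc_head.py): in MMSegmentation-style code such a head is normally wired in through a config dict rather than constructed by hand. The sketch below is only illustrative — the channel sizes, `in_index`, `num_classes`, and loss settings are placeholders rather than values taken from this repository; only `ratio`, `pooling_type`, and `fusion_types` mirror the defaults visible in the deleted `__init__`.

```python
# Hypothetical MMSegmentation-style decode_head entry for the GCNet head above.
norm_cfg = dict(type='SyncBN', requires_grad=True)  # placeholder normalization config

decode_head = dict(
    type='GCHead',                  # registered via @HEADS.register_module()
    in_channels=2048,               # channels of the backbone feature map fed to the head (placeholder)
    in_index=3,                     # which backbone stage to read from (placeholder)
    channels=512,                   # internal conv width; GCHead itself fixes num_convs=2
    ratio=1 / 4.,                   # bottleneck ratio of the ContextBlock
    pooling_type='att',             # context pooling: 'att' or 'avg'
    fusion_types=('channel_add',),  # how the global context is fused back in
    dropout_ratio=0.1,
    num_classes=19,                 # replace with your dataset's class count
    norm_cfg=norm_cfg,
    align_corners=False,
    loss_decode=dict(type='CrossEntropyLoss', use_sigmoid=False, loss_weight=1.0),
)
```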
diff --git a/spaces/abrar-lohia/text-2-character-anim/VQTrans/utils/eval_trans.py b/spaces/abrar-lohia/text-2-character-anim/VQTrans/utils/eval_trans.py deleted file mode 100644 index 8778bb8cb7e7a320e5f7f2f3b43c7ba0b4c285ab..0000000000000000000000000000000000000000 --- a/spaces/abrar-lohia/text-2-character-anim/VQTrans/utils/eval_trans.py +++ /dev/null @@ -1,580 +0,0 @@ -import os - -import clip -import numpy as np -import torch -from scipy import linalg - -import visualization.plot_3d_global as plot_3d -from utils.motion_process import recover_from_ric - - -def tensorborad_add_video_xyz(writer, xyz, nb_iter, tag, nb_vis=4, title_batch=None, outname=None): - xyz = xyz[:1] - bs, seq = xyz.shape[:2] - xyz = xyz.reshape(bs, seq, -1, 3) - plot_xyz = plot_3d.draw_to_batch(xyz.cpu().numpy(),title_batch, outname) - plot_xyz =np.transpose(plot_xyz, (0, 1, 4, 2, 3)) - writer.add_video(tag, plot_xyz, nb_iter, fps = 20) - -@torch.no_grad() -def evaluation_vqvae(out_dir, val_loader, net, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, eval_wrapper, draw = True, save = True, savegif=False, savenpy=False) : - net.eval() - nb_sample = 0 - - draw_org = [] - draw_pred = [] - draw_text = [] - - - motion_annotation_list = [] - motion_pred_list = [] - - R_precision_real = 0 - R_precision = 0 - - nb_sample = 0 - matching_score_real = 0 - matching_score_pred = 0 - for batch in val_loader: - word_embeddings, pos_one_hots, caption, sent_len, motion, m_length, token, name = batch - - motion = motion.cuda() - et, em = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, motion, m_length) - bs, seq = motion.shape[0], motion.shape[1] - - num_joints = 21 if motion.shape[-1] == 251 else 22 - - pred_pose_eval = torch.zeros((bs, seq, motion.shape[-1])).cuda() - - for i in range(bs): - pose = val_loader.dataset.inv_transform(motion[i:i+1, :m_length[i], :].detach().cpu().numpy()) - pose_xyz = recover_from_ric(torch.from_numpy(pose).float().cuda(), num_joints) - - - pred_pose, loss_commit, perplexity = net(motion[i:i+1, :m_length[i]]) - pred_denorm = val_loader.dataset.inv_transform(pred_pose.detach().cpu().numpy()) - pred_xyz = recover_from_ric(torch.from_numpy(pred_denorm).float().cuda(), num_joints) - - if savenpy: - np.save(os.path.join(out_dir, name[i]+'_gt.npy'), pose_xyz[:, :m_length[i]].cpu().numpy()) - np.save(os.path.join(out_dir, name[i]+'_pred.npy'), pred_xyz.detach().cpu().numpy()) - - pred_pose_eval[i:i+1,:m_length[i],:] = pred_pose - - if i < min(4, bs): - draw_org.append(pose_xyz) - draw_pred.append(pred_xyz) - draw_text.append(caption[i]) - - et_pred, em_pred = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pred_pose_eval, m_length) - - motion_pred_list.append(em_pred) - motion_annotation_list.append(em) - - temp_R, temp_match = calculate_R_precision(et.cpu().numpy(), em.cpu().numpy(), top_k=3, sum_all=True) - R_precision_real += temp_R - matching_score_real += temp_match - temp_R, temp_match = calculate_R_precision(et_pred.cpu().numpy(), em_pred.cpu().numpy(), top_k=3, sum_all=True) - R_precision += temp_R - matching_score_pred += temp_match - - nb_sample += bs - - motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy() - motion_pred_np = torch.cat(motion_pred_list, dim=0).cpu().numpy() - gt_mu, gt_cov = calculate_activation_statistics(motion_annotation_np) - mu, cov= calculate_activation_statistics(motion_pred_np) - - diversity_real = calculate_diversity(motion_annotation_np, 300 if 
nb_sample > 300 else 100) - diversity = calculate_diversity(motion_pred_np, 300 if nb_sample > 300 else 100) - - R_precision_real = R_precision_real / nb_sample - R_precision = R_precision / nb_sample - - matching_score_real = matching_score_real / nb_sample - matching_score_pred = matching_score_pred / nb_sample - - fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov) - - msg = f"--> \t Eva. Iter {nb_iter} :, FID. {fid:.4f}, Diversity Real. {diversity_real:.4f}, Diversity. {diversity:.4f}, R_precision_real. {R_precision_real}, R_precision. {R_precision}, matching_score_real. {matching_score_real}, matching_score_pred. {matching_score_pred}" - logger.info(msg) - - if draw: - writer.add_scalar('./Test/FID', fid, nb_iter) - writer.add_scalar('./Test/Diversity', diversity, nb_iter) - writer.add_scalar('./Test/top1', R_precision[0], nb_iter) - writer.add_scalar('./Test/top2', R_precision[1], nb_iter) - writer.add_scalar('./Test/top3', R_precision[2], nb_iter) - writer.add_scalar('./Test/matching_score', matching_score_pred, nb_iter) - - - if nb_iter % 5000 == 0 : - for ii in range(4): - tensorborad_add_video_xyz(writer, draw_org[ii], nb_iter, tag='./Vis/org_eval'+str(ii), nb_vis=1, title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, 'gt'+str(ii)+'.gif')] if savegif else None) - - if nb_iter % 5000 == 0 : - for ii in range(4): - tensorborad_add_video_xyz(writer, draw_pred[ii], nb_iter, tag='./Vis/pred_eval'+str(ii), nb_vis=1, title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, 'pred'+str(ii)+'.gif')] if savegif else None) - - - if fid < best_fid : - msg = f"--> --> \t FID Improved from {best_fid:.5f} to {fid:.5f} !!!" - logger.info(msg) - best_fid, best_iter = fid, nb_iter - if save: - torch.save({'net' : net.state_dict()}, os.path.join(out_dir, 'net_best_fid.pth')) - - if abs(diversity_real - diversity) < abs(diversity_real - best_div) : - msg = f"--> --> \t Diversity Improved from {best_div:.5f} to {diversity:.5f} !!!" - logger.info(msg) - best_div = diversity - if save: - torch.save({'net' : net.state_dict()}, os.path.join(out_dir, 'net_best_div.pth')) - - if R_precision[0] > best_top1 : - msg = f"--> --> \t Top1 Improved from {best_top1:.4f} to {R_precision[0]:.4f} !!!" - logger.info(msg) - best_top1 = R_precision[0] - if save: - torch.save({'net' : net.state_dict()}, os.path.join(out_dir, 'net_best_top1.pth')) - - if R_precision[1] > best_top2 : - msg = f"--> --> \t Top2 Improved from {best_top2:.4f} to {R_precision[1]:.4f} !!!" - logger.info(msg) - best_top2 = R_precision[1] - - if R_precision[2] > best_top3 : - msg = f"--> --> \t Top3 Improved from {best_top3:.4f} to {R_precision[2]:.4f} !!!" - logger.info(msg) - best_top3 = R_precision[2] - - if matching_score_pred < best_matching : - msg = f"--> --> \t matching_score Improved from {best_matching:.5f} to {matching_score_pred:.5f} !!!" 
- logger.info(msg) - best_matching = matching_score_pred - if save: - torch.save({'net' : net.state_dict()}, os.path.join(out_dir, 'net_best_matching.pth')) - - if save: - torch.save({'net' : net.state_dict()}, os.path.join(out_dir, 'net_last.pth')) - - net.train() - return best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger - - -@torch.no_grad() -def evaluation_transformer(out_dir, val_loader, net, trans, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, clip_model, eval_wrapper, draw = True, save = True, savegif=False) : - - trans.eval() - nb_sample = 0 - - draw_org = [] - draw_pred = [] - draw_text = [] - draw_text_pred = [] - - motion_annotation_list = [] - motion_pred_list = [] - R_precision_real = 0 - R_precision = 0 - matching_score_real = 0 - matching_score_pred = 0 - - nb_sample = 0 - for i in range(1): - for batch in val_loader: - word_embeddings, pos_one_hots, clip_text, sent_len, pose, m_length, token, name = batch - - bs, seq = pose.shape[:2] - num_joints = 21 if pose.shape[-1] == 251 else 22 - - text = clip.tokenize(clip_text, truncate=True).cuda() - - feat_clip_text = clip_model.encode_text(text).float() - pred_pose_eval = torch.zeros((bs, seq, pose.shape[-1])).cuda() - pred_len = torch.ones(bs).long() - - for k in range(bs): - try: - index_motion = trans.sample(feat_clip_text[k:k+1], False) - except: - index_motion = torch.ones(1,1).cuda().long() - - pred_pose = net.forward_decoder(index_motion) - cur_len = pred_pose.shape[1] - - pred_len[k] = min(cur_len, seq) - pred_pose_eval[k:k+1, :cur_len] = pred_pose[:, :seq] - - if draw: - pred_denorm = val_loader.dataset.inv_transform(pred_pose.detach().cpu().numpy()) - pred_xyz = recover_from_ric(torch.from_numpy(pred_denorm).float().cuda(), num_joints) - - if i == 0 and k < 4: - draw_pred.append(pred_xyz) - draw_text_pred.append(clip_text[k]) - - et_pred, em_pred = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pred_pose_eval, pred_len) - - if i == 0: - pose = pose.cuda().float() - - et, em = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pose, m_length) - motion_annotation_list.append(em) - motion_pred_list.append(em_pred) - - if draw: - pose = val_loader.dataset.inv_transform(pose.detach().cpu().numpy()) - pose_xyz = recover_from_ric(torch.from_numpy(pose).float().cuda(), num_joints) - - - for j in range(min(4, bs)): - draw_org.append(pose_xyz[j][:m_length[j]].unsqueeze(0)) - draw_text.append(clip_text[j]) - - temp_R, temp_match = calculate_R_precision(et.cpu().numpy(), em.cpu().numpy(), top_k=3, sum_all=True) - R_precision_real += temp_R - matching_score_real += temp_match - temp_R, temp_match = calculate_R_precision(et_pred.cpu().numpy(), em_pred.cpu().numpy(), top_k=3, sum_all=True) - R_precision += temp_R - matching_score_pred += temp_match - - nb_sample += bs - - motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy() - motion_pred_np = torch.cat(motion_pred_list, dim=0).cpu().numpy() - gt_mu, gt_cov = calculate_activation_statistics(motion_annotation_np) - mu, cov= calculate_activation_statistics(motion_pred_np) - - diversity_real = calculate_diversity(motion_annotation_np, 300 if nb_sample > 300 else 100) - diversity = calculate_diversity(motion_pred_np, 300 if nb_sample > 300 else 100) - - R_precision_real = R_precision_real / nb_sample - R_precision = R_precision / nb_sample - - matching_score_real = matching_score_real / nb_sample - matching_score_pred = 
matching_score_pred / nb_sample - - - fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov) - - msg = f"--> \t Eva. Iter {nb_iter} :, FID. {fid:.4f}, Diversity Real. {diversity_real:.4f}, Diversity. {diversity:.4f}, R_precision_real. {R_precision_real}, R_precision. {R_precision}, matching_score_real. {matching_score_real}, matching_score_pred. {matching_score_pred}" - logger.info(msg) - - - if draw: - writer.add_scalar('./Test/FID', fid, nb_iter) - writer.add_scalar('./Test/Diversity', diversity, nb_iter) - writer.add_scalar('./Test/top1', R_precision[0], nb_iter) - writer.add_scalar('./Test/top2', R_precision[1], nb_iter) - writer.add_scalar('./Test/top3', R_precision[2], nb_iter) - writer.add_scalar('./Test/matching_score', matching_score_pred, nb_iter) - - - if nb_iter % 10000 == 0 : - for ii in range(4): - tensorborad_add_video_xyz(writer, draw_org[ii], nb_iter, tag='./Vis/org_eval'+str(ii), nb_vis=1, title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, 'gt'+str(ii)+'.gif')] if savegif else None) - - if nb_iter % 10000 == 0 : - for ii in range(4): - tensorborad_add_video_xyz(writer, draw_pred[ii], nb_iter, tag='./Vis/pred_eval'+str(ii), nb_vis=1, title_batch=[draw_text_pred[ii]], outname=[os.path.join(out_dir, 'pred'+str(ii)+'.gif')] if savegif else None) - - - if fid < best_fid : - msg = f"--> --> \t FID Improved from {best_fid:.5f} to {fid:.5f} !!!" - logger.info(msg) - best_fid, best_iter = fid, nb_iter - if save: - torch.save({'trans' : trans.state_dict()}, os.path.join(out_dir, 'net_best_fid.pth')) - - if matching_score_pred < best_matching : - msg = f"--> --> \t matching_score Improved from {best_matching:.5f} to {matching_score_pred:.5f} !!!" - logger.info(msg) - best_matching = matching_score_pred - - if abs(diversity_real - diversity) < abs(diversity_real - best_div) : - msg = f"--> --> \t Diversity Improved from {best_div:.5f} to {diversity:.5f} !!!" - logger.info(msg) - best_div = diversity - - if R_precision[0] > best_top1 : - msg = f"--> --> \t Top1 Improved from {best_top1:.4f} to {R_precision[0]:.4f} !!!" - logger.info(msg) - best_top1 = R_precision[0] - - if R_precision[1] > best_top2 : - msg = f"--> --> \t Top2 Improved from {best_top2:.4f} to {R_precision[1]:.4f} !!!" - logger.info(msg) - best_top2 = R_precision[1] - - if R_precision[2] > best_top3 : - msg = f"--> --> \t Top3 Improved from {best_top3:.4f} to {R_precision[2]:.4f} !!!" 
- logger.info(msg) - best_top3 = R_precision[2] - - if save: - torch.save({'trans' : trans.state_dict()}, os.path.join(out_dir, 'net_last.pth')) - - trans.train() - return best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, writer, logger - - -@torch.no_grad() -def evaluation_transformer_test(out_dir, val_loader, net, trans, logger, writer, nb_iter, best_fid, best_iter, best_div, best_top1, best_top2, best_top3, best_matching, best_multi, clip_model, eval_wrapper, draw = True, save = True, savegif=False, savenpy=False) : - - trans.eval() - nb_sample = 0 - - draw_org = [] - draw_pred = [] - draw_text = [] - draw_text_pred = [] - draw_name = [] - - motion_annotation_list = [] - motion_pred_list = [] - motion_multimodality = [] - R_precision_real = 0 - R_precision = 0 - matching_score_real = 0 - matching_score_pred = 0 - - nb_sample = 0 - - for batch in val_loader: - - word_embeddings, pos_one_hots, clip_text, sent_len, pose, m_length, token, name = batch - bs, seq = pose.shape[:2] - num_joints = 21 if pose.shape[-1] == 251 else 22 - - text = clip.tokenize(clip_text, truncate=True).cuda() - - feat_clip_text = clip_model.encode_text(text).float() - motion_multimodality_batch = [] - for i in range(30): - pred_pose_eval = torch.zeros((bs, seq, pose.shape[-1])).cuda() - pred_len = torch.ones(bs).long() - - for k in range(bs): - try: - index_motion = trans.sample(feat_clip_text[k:k+1], True) - except: - index_motion = torch.ones(1,1).cuda().long() - - pred_pose = net.forward_decoder(index_motion) - cur_len = pred_pose.shape[1] - - pred_len[k] = min(cur_len, seq) - pred_pose_eval[k:k+1, :cur_len] = pred_pose[:, :seq] - - if i == 0 and (draw or savenpy): - pred_denorm = val_loader.dataset.inv_transform(pred_pose.detach().cpu().numpy()) - pred_xyz = recover_from_ric(torch.from_numpy(pred_denorm).float().cuda(), num_joints) - - if savenpy: - np.save(os.path.join(out_dir, name[k]+'_pred.npy'), pred_xyz.detach().cpu().numpy()) - - if draw: - if i == 0: - draw_pred.append(pred_xyz) - draw_text_pred.append(clip_text[k]) - draw_name.append(name[k]) - - et_pred, em_pred = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pred_pose_eval, pred_len) - - motion_multimodality_batch.append(em_pred.reshape(bs, 1, -1)) - - if i == 0: - pose = pose.cuda().float() - - et, em = eval_wrapper.get_co_embeddings(word_embeddings, pos_one_hots, sent_len, pose, m_length) - motion_annotation_list.append(em) - motion_pred_list.append(em_pred) - - if draw or savenpy: - pose = val_loader.dataset.inv_transform(pose.detach().cpu().numpy()) - pose_xyz = recover_from_ric(torch.from_numpy(pose).float().cuda(), num_joints) - - if savenpy: - for j in range(bs): - np.save(os.path.join(out_dir, name[j]+'_gt.npy'), pose_xyz[j][:m_length[j]].unsqueeze(0).cpu().numpy()) - - if draw: - for j in range(bs): - draw_org.append(pose_xyz[j][:m_length[j]].unsqueeze(0)) - draw_text.append(clip_text[j]) - - temp_R, temp_match = calculate_R_precision(et.cpu().numpy(), em.cpu().numpy(), top_k=3, sum_all=True) - R_precision_real += temp_R - matching_score_real += temp_match - temp_R, temp_match = calculate_R_precision(et_pred.cpu().numpy(), em_pred.cpu().numpy(), top_k=3, sum_all=True) - R_precision += temp_R - matching_score_pred += temp_match - - nb_sample += bs - - motion_multimodality.append(torch.cat(motion_multimodality_batch, dim=1)) - - motion_annotation_np = torch.cat(motion_annotation_list, dim=0).cpu().numpy() - motion_pred_np = torch.cat(motion_pred_list, dim=0).cpu().numpy() - gt_mu, gt_cov = 
calculate_activation_statistics(motion_annotation_np) - mu, cov= calculate_activation_statistics(motion_pred_np) - - diversity_real = calculate_diversity(motion_annotation_np, 300 if nb_sample > 300 else 100) - diversity = calculate_diversity(motion_pred_np, 300 if nb_sample > 300 else 100) - - R_precision_real = R_precision_real / nb_sample - R_precision = R_precision / nb_sample - - matching_score_real = matching_score_real / nb_sample - matching_score_pred = matching_score_pred / nb_sample - - multimodality = 0 - motion_multimodality = torch.cat(motion_multimodality, dim=0).cpu().numpy() - multimodality = calculate_multimodality(motion_multimodality, 10) - - fid = calculate_frechet_distance(gt_mu, gt_cov, mu, cov) - - msg = f"--> \t Eva. Iter {nb_iter} :, FID. {fid:.4f}, Diversity Real. {diversity_real:.4f}, Diversity. {diversity:.4f}, R_precision_real. {R_precision_real}, R_precision. {R_precision}, matching_score_real. {matching_score_real}, matching_score_pred. {matching_score_pred}, multimodality. {multimodality:.4f}" - logger.info(msg) - - - if draw: - for ii in range(len(draw_org)): - tensorborad_add_video_xyz(writer, draw_org[ii], nb_iter, tag='./Vis/'+draw_name[ii]+'_org', nb_vis=1, title_batch=[draw_text[ii]], outname=[os.path.join(out_dir, draw_name[ii]+'_skel_gt.gif')] if savegif else None) - - tensorborad_add_video_xyz(writer, draw_pred[ii], nb_iter, tag='./Vis/'+draw_name[ii]+'_pred', nb_vis=1, title_batch=[draw_text_pred[ii]], outname=[os.path.join(out_dir, draw_name[ii]+'_skel_pred.gif')] if savegif else None) - - trans.train() - return fid, best_iter, diversity, R_precision[0], R_precision[1], R_precision[2], matching_score_pred, multimodality, writer, logger - -# (X - X_train)*(X - X_train) = -2X*X_train + X*X + X_train*X_train -def euclidean_distance_matrix(matrix1, matrix2): - """ - Params: - -- matrix1: N1 x D - -- matrix2: N2 x D - Returns: - -- dist: N1 x N2 - dist[i, j] == distance(matrix1[i], matrix2[j]) - """ - assert matrix1.shape[1] == matrix2.shape[1] - d1 = -2 * np.dot(matrix1, matrix2.T) # shape (num_test, num_train) - d2 = np.sum(np.square(matrix1), axis=1, keepdims=True) # shape (num_test, 1) - d3 = np.sum(np.square(matrix2), axis=1) # shape (num_train, ) - dists = np.sqrt(d1 + d2 + d3) # broadcasting - return dists - - - -def calculate_top_k(mat, top_k): - size = mat.shape[0] - gt_mat = np.expand_dims(np.arange(size), 1).repeat(size, 1) - bool_mat = (mat == gt_mat) - correct_vec = False - top_k_list = [] - for i in range(top_k): -# print(correct_vec, bool_mat[:, i]) - correct_vec = (correct_vec | bool_mat[:, i]) - # print(correct_vec) - top_k_list.append(correct_vec[:, None]) - top_k_mat = np.concatenate(top_k_list, axis=1) - return top_k_mat - - -def calculate_R_precision(embedding1, embedding2, top_k, sum_all=False): - dist_mat = euclidean_distance_matrix(embedding1, embedding2) - matching_score = dist_mat.trace() - argmax = np.argsort(dist_mat, axis=1) - top_k_mat = calculate_top_k(argmax, top_k) - if sum_all: - return top_k_mat.sum(axis=0), matching_score - else: - return top_k_mat, matching_score - -def calculate_multimodality(activation, multimodality_times): - assert len(activation.shape) == 3 - assert activation.shape[1] > multimodality_times - num_per_sent = activation.shape[1] - - first_dices = np.random.choice(num_per_sent, multimodality_times, replace=False) - second_dices = np.random.choice(num_per_sent, multimodality_times, replace=False) - dist = linalg.norm(activation[:, first_dices] - activation[:, second_dices], axis=2) - return 
dist.mean() - - -def calculate_diversity(activation, diversity_times): - assert len(activation.shape) == 2 - assert activation.shape[0] > diversity_times - num_samples = activation.shape[0] - - first_indices = np.random.choice(num_samples, diversity_times, replace=False) - second_indices = np.random.choice(num_samples, diversity_times, replace=False) - dist = linalg.norm(activation[first_indices] - activation[second_indices], axis=1) - return dist.mean() - - - -def calculate_frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6): - - mu1 = np.atleast_1d(mu1) - mu2 = np.atleast_1d(mu2) - - sigma1 = np.atleast_2d(sigma1) - sigma2 = np.atleast_2d(sigma2) - - assert mu1.shape == mu2.shape, \ - 'Training and test mean vectors have different lengths' - assert sigma1.shape == sigma2.shape, \ - 'Training and test covariances have different dimensions' - - diff = mu1 - mu2 - - # Product might be almost singular - covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False) - if not np.isfinite(covmean).all(): - msg = ('fid calculation produces singular product; ' - 'adding %s to diagonal of cov estimates') % eps - print(msg) - offset = np.eye(sigma1.shape[0]) * eps - covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset)) - - # Numerical error might give slight imaginary component - if np.iscomplexobj(covmean): - if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3): - m = np.max(np.abs(covmean.imag)) - raise ValueError('Imaginary component {}'.format(m)) - covmean = covmean.real - - tr_covmean = np.trace(covmean) - - return (diff.dot(diff) + np.trace(sigma1) - + np.trace(sigma2) - 2 * tr_covmean) - - - -def calculate_activation_statistics(activations): - - mu = np.mean(activations, axis=0) - cov = np.cov(activations, rowvar=False) - return mu, cov - - -def calculate_frechet_feature_distance(feature_list1, feature_list2): - feature_list1 = np.stack(feature_list1) - feature_list2 = np.stack(feature_list2) - - # normalize the scale - mean = np.mean(feature_list1, axis=0) - std = np.std(feature_list1, axis=0) + 1e-10 - feature_list1 = (feature_list1 - mean) / std - feature_list2 = (feature_list2 - mean) / std - - dist = calculate_frechet_distance( - mu1=np.mean(feature_list1, axis=0), - sigma1=np.cov(feature_list1, rowvar=False), - mu2=np.mean(feature_list2, axis=0), - sigma2=np.cov(feature_list2, rowvar=False), - ) - return dist \ No newline at end of file diff --git a/spaces/akhaliq/BlendGAN/psp_encoder/__init__.py b/spaces/akhaliq/BlendGAN/psp_encoder/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/akhaliq/stylegan3_clip/training/networks_stylegan3.py b/spaces/akhaliq/stylegan3_clip/training/networks_stylegan3.py deleted file mode 100644 index a6a2b4d7f5ebde6874768c0cac011cc7867d2a08..0000000000000000000000000000000000000000 --- a/spaces/akhaliq/stylegan3_clip/training/networks_stylegan3.py +++ /dev/null @@ -1,515 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. 
- -"""Generator architecture from the paper -"Alias-Free Generative Adversarial Networks".""" - -import numpy as np -import scipy.signal -import scipy.optimize -import torch -from torch_utils import misc -from torch_utils import persistence -from torch_utils.ops import conv2d_gradfix -from torch_utils.ops import filtered_lrelu -from torch_utils.ops import bias_act - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def modulated_conv2d( - x, # Input tensor: [batch_size, in_channels, in_height, in_width] - w, # Weight tensor: [out_channels, in_channels, kernel_height, kernel_width] - s, # Style tensor: [batch_size, in_channels] - demodulate = True, # Apply weight demodulation? - padding = 0, # Padding: int or [padH, padW] - input_gain = None, # Optional scale factors for the input channels: [], [in_channels], or [batch_size, in_channels] -): - with misc.suppress_tracer_warnings(): # this value will be treated as a constant - batch_size = int(x.shape[0]) - out_channels, in_channels, kh, kw = w.shape - misc.assert_shape(w, [out_channels, in_channels, kh, kw]) # [OIkk] - misc.assert_shape(x, [batch_size, in_channels, None, None]) # [NIHW] - misc.assert_shape(s, [batch_size, in_channels]) # [NI] - - # Pre-normalize inputs. - if demodulate: - w = w * w.square().mean([1,2,3], keepdim=True).rsqrt() - s = s * s.square().mean().rsqrt() - - # Modulate weights. - w = w.unsqueeze(0) # [NOIkk] - w = w * s.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] - - # Demodulate weights. - if demodulate: - dcoefs = (w.square().sum(dim=[2,3,4]) + 1e-8).rsqrt() # [NO] - w = w * dcoefs.unsqueeze(2).unsqueeze(3).unsqueeze(4) # [NOIkk] - - # Apply input scaling. - if input_gain is not None: - input_gain = input_gain.expand(batch_size, in_channels) # [NI] - w = w * input_gain.unsqueeze(1).unsqueeze(3).unsqueeze(4) # [NOIkk] - - # Execute as one fused op using grouped convolution. - x = x.reshape(1, -1, *x.shape[2:]) - w = w.reshape(-1, in_channels, kh, kw) - x = conv2d_gradfix.conv2d(input=x, weight=w.to(x.dtype), padding=padding, groups=batch_size) - x = x.reshape(batch_size, -1, *x.shape[2:]) - return x - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class FullyConnectedLayer(torch.nn.Module): - def __init__(self, - in_features, # Number of input features. - out_features, # Number of output features. - activation = 'linear', # Activation function: 'relu', 'lrelu', etc. - bias = True, # Apply additive bias before the activation function? - lr_multiplier = 1, # Learning rate multiplier. - weight_init = 1, # Initial standard deviation of the weight tensor. - bias_init = 0, # Initial value of the additive bias. 
- ): - super().__init__() - self.in_features = in_features - self.out_features = out_features - self.activation = activation - self.weight = torch.nn.Parameter(torch.randn([out_features, in_features]) * (weight_init / lr_multiplier)) - bias_init = np.broadcast_to(np.asarray(bias_init, dtype=np.float32), [out_features]) - self.bias = torch.nn.Parameter(torch.from_numpy(bias_init / lr_multiplier)) if bias else None - self.weight_gain = lr_multiplier / np.sqrt(in_features) - self.bias_gain = lr_multiplier - - def forward(self, x): - w = self.weight.to(x.dtype) * self.weight_gain - b = self.bias - if b is not None: - b = b.to(x.dtype) - if self.bias_gain != 1: - b = b * self.bias_gain - if self.activation == 'linear' and b is not None: - x = torch.addmm(b.unsqueeze(0), x, w.t()) - else: - x = x.matmul(w.t()) - x = bias_act.bias_act(x, b, act=self.activation) - return x - - def extra_repr(self): - return f'in_features={self.in_features:d}, out_features={self.out_features:d}, activation={self.activation:s}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class MappingNetwork(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality. - c_dim, # Conditioning label (C) dimensionality, 0 = no labels. - w_dim, # Intermediate latent (W) dimensionality. - num_ws, # Number of intermediate latents to output. - num_layers = 2, # Number of mapping layers. - lr_multiplier = 0.01, # Learning rate multiplier for the mapping layers. - w_avg_beta = 0.998, # Decay for tracking the moving average of W during training. - ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.num_ws = num_ws - self.num_layers = num_layers - self.w_avg_beta = w_avg_beta - - # Construct layers. - self.embed = FullyConnectedLayer(self.c_dim, self.w_dim) if self.c_dim > 0 else None - features = [self.z_dim + (self.w_dim if self.c_dim > 0 else 0)] + [self.w_dim] * self.num_layers - for idx, in_features, out_features in zip(range(num_layers), features[:-1], features[1:]): - layer = FullyConnectedLayer(in_features, out_features, activation='lrelu', lr_multiplier=lr_multiplier) - setattr(self, f'fc{idx}', layer) - self.register_buffer('w_avg', torch.zeros([w_dim])) - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False): - misc.assert_shape(z, [None, self.z_dim]) - if truncation_cutoff is None: - truncation_cutoff = self.num_ws - - # Embed, normalize, and concatenate inputs. - x = z.to(torch.float32) - x = x * (x.square().mean(1, keepdim=True) + 1e-8).rsqrt() - if self.c_dim > 0: - misc.assert_shape(c, [None, self.c_dim]) - y = self.embed(c.to(torch.float32)) - y = y * (y.square().mean(1, keepdim=True) + 1e-8).rsqrt() - x = torch.cat([x, y], dim=1) if x is not None else y - - # Execute layers. - for idx in range(self.num_layers): - x = getattr(self, f'fc{idx}')(x) - - # Update moving average of W. - if update_emas: - self.w_avg.copy_(x.detach().mean(dim=0).lerp(self.w_avg, self.w_avg_beta)) - - # Broadcast and apply truncation. 
- x = x.unsqueeze(1).repeat([1, self.num_ws, 1]) - if truncation_psi != 1: - x[:, :truncation_cutoff] = self.w_avg.lerp(x[:, :truncation_cutoff], truncation_psi) - return x - - def extra_repr(self): - return f'z_dim={self.z_dim:d}, c_dim={self.c_dim:d}, w_dim={self.w_dim:d}, num_ws={self.num_ws:d}' - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisInput(torch.nn.Module): - def __init__(self, - w_dim, # Intermediate latent (W) dimensionality. - channels, # Number of output channels. - size, # Output spatial size: int or [width, height]. - sampling_rate, # Output sampling rate. - bandwidth, # Output bandwidth. - ): - super().__init__() - self.w_dim = w_dim - self.channels = channels - self.size = np.broadcast_to(np.asarray(size), [2]) - self.sampling_rate = sampling_rate - self.bandwidth = bandwidth - - # Draw random frequencies from uniform 2D disc. - freqs = torch.randn([self.channels, 2]) - radii = freqs.square().sum(dim=1, keepdim=True).sqrt() - freqs /= radii * radii.square().exp().pow(0.25) - freqs *= bandwidth - phases = torch.rand([self.channels]) - 0.5 - - # Setup parameters and buffers. - self.weight = torch.nn.Parameter(torch.randn([self.channels, self.channels])) - self.affine = FullyConnectedLayer(w_dim, 4, weight_init=0, bias_init=[1,0,0,0]) - self.register_buffer('transform', torch.eye(3, 3)) # User-specified inverse transform wrt. resulting image. - self.register_buffer('freqs', freqs) - self.register_buffer('phases', phases) - - def forward(self, w): - # Introduce batch dimension. - transforms = self.transform.unsqueeze(0) # [batch, row, col] - freqs = self.freqs.unsqueeze(0) # [batch, channel, xy] - phases = self.phases.unsqueeze(0) # [batch, channel] - - # Apply learned transformation. - t = self.affine(w) # t = (r_c, r_s, t_x, t_y) - t = t / t[:, :2].norm(dim=1, keepdim=True) # t' = (r'_c, r'_s, t'_x, t'_y) - m_r = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse rotation wrt. resulting image. - m_r[:, 0, 0] = t[:, 0] # r'_c - m_r[:, 0, 1] = -t[:, 1] # r'_s - m_r[:, 1, 0] = t[:, 1] # r'_s - m_r[:, 1, 1] = t[:, 0] # r'_c - m_t = torch.eye(3, device=w.device).unsqueeze(0).repeat([w.shape[0], 1, 1]) # Inverse translation wrt. resulting image. - m_t[:, 0, 2] = -t[:, 2] # t'_x - m_t[:, 1, 2] = -t[:, 3] # t'_y - transforms = m_r @ m_t @ transforms # First rotate resulting image, then translate, and finally apply user-specified transform. - - # Transform frequencies. - phases = phases + (freqs @ transforms[:, :2, 2:]).squeeze(2) - freqs = freqs @ transforms[:, :2, :2] - - # Dampen out-of-band frequencies that may occur due to the user-specified transform. - amplitudes = (1 - (freqs.norm(dim=2) - self.bandwidth) / (self.sampling_rate / 2 - self.bandwidth)).clamp(0, 1) - - # Construct sampling grid. - theta = torch.eye(2, 3, device=w.device) - theta[0, 0] = 0.5 * self.size[0] / self.sampling_rate - theta[1, 1] = 0.5 * self.size[1] / self.sampling_rate - grids = torch.nn.functional.affine_grid(theta.unsqueeze(0), [1, 1, self.size[1], self.size[0]], align_corners=False) - - # Compute Fourier features. - x = (grids.unsqueeze(3) @ freqs.permute(0, 2, 1).unsqueeze(1).unsqueeze(2)).squeeze(3) # [batch, height, width, channel] - x = x + phases.unsqueeze(1).unsqueeze(2) - x = torch.sin(x * (np.pi * 2)) - x = x * amplitudes.unsqueeze(1).unsqueeze(2) - - # Apply trainable mapping. - weight = self.weight / np.sqrt(self.channels) - x = x @ weight.t() - - # Ensure correct shape. 
- x = x.permute(0, 3, 1, 2) # [batch, channel, height, width] - misc.assert_shape(x, [w.shape[0], self.channels, int(self.size[1]), int(self.size[0])]) - return x - - def extra_repr(self): - return '\n'.join([ - f'w_dim={self.w_dim:d}, channels={self.channels:d}, size={list(self.size)},', - f'sampling_rate={self.sampling_rate:g}, bandwidth={self.bandwidth:g}']) - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisLayer(torch.nn.Module): - def __init__(self, - w_dim, # Intermediate latent (W) dimensionality. - is_torgb, # Is this the final ToRGB layer? - is_critically_sampled, # Does this layer use critical sampling? - use_fp16, # Does this layer use FP16? - - # Input & output specifications. - in_channels, # Number of input channels. - out_channels, # Number of output channels. - in_size, # Input spatial size: int or [width, height]. - out_size, # Output spatial size: int or [width, height]. - in_sampling_rate, # Input sampling rate (s). - out_sampling_rate, # Output sampling rate (s). - in_cutoff, # Input cutoff frequency (f_c). - out_cutoff, # Output cutoff frequency (f_c). - in_half_width, # Input transition band half-width (f_h). - out_half_width, # Output Transition band half-width (f_h). - - # Hyperparameters. - conv_kernel = 3, # Convolution kernel size. Ignored for final the ToRGB layer. - filter_size = 6, # Low-pass filter size relative to the lower resolution when up/downsampling. - lrelu_upsampling = 2, # Relative sampling rate for leaky ReLU. Ignored for final the ToRGB layer. - use_radial_filters = False, # Use radially symmetric downsampling filter? Ignored for critically sampled layers. - conv_clamp = 256, # Clamp the output to [-X, +X], None = disable clamping. - magnitude_ema_beta = 0.999, # Decay rate for the moving average of input magnitudes. - ): - super().__init__() - self.w_dim = w_dim - self.is_torgb = is_torgb - self.is_critically_sampled = is_critically_sampled - self.use_fp16 = use_fp16 - self.in_channels = in_channels - self.out_channels = out_channels - self.in_size = np.broadcast_to(np.asarray(in_size), [2]) - self.out_size = np.broadcast_to(np.asarray(out_size), [2]) - self.in_sampling_rate = in_sampling_rate - self.out_sampling_rate = out_sampling_rate - self.tmp_sampling_rate = max(in_sampling_rate, out_sampling_rate) * (1 if is_torgb else lrelu_upsampling) - self.in_cutoff = in_cutoff - self.out_cutoff = out_cutoff - self.in_half_width = in_half_width - self.out_half_width = out_half_width - self.conv_kernel = 1 if is_torgb else conv_kernel - self.conv_clamp = conv_clamp - self.magnitude_ema_beta = magnitude_ema_beta - - # Setup parameters and buffers. - self.affine = FullyConnectedLayer(self.w_dim, self.in_channels, bias_init=1) - self.weight = torch.nn.Parameter(torch.randn([self.out_channels, self.in_channels, self.conv_kernel, self.conv_kernel])) - self.bias = torch.nn.Parameter(torch.zeros([self.out_channels])) - self.register_buffer('magnitude_ema', torch.ones([])) - - # Design upsampling filter. - self.up_factor = int(np.rint(self.tmp_sampling_rate / self.in_sampling_rate)) - assert self.in_sampling_rate * self.up_factor == self.tmp_sampling_rate - self.up_taps = filter_size * self.up_factor if self.up_factor > 1 and not self.is_torgb else 1 - self.register_buffer('up_filter', self.design_lowpass_filter( - numtaps=self.up_taps, cutoff=self.in_cutoff, width=self.in_half_width*2, fs=self.tmp_sampling_rate)) - - # Design downsampling filter. 
- self.down_factor = int(np.rint(self.tmp_sampling_rate / self.out_sampling_rate)) - assert self.out_sampling_rate * self.down_factor == self.tmp_sampling_rate - self.down_taps = filter_size * self.down_factor if self.down_factor > 1 and not self.is_torgb else 1 - self.down_radial = use_radial_filters and not self.is_critically_sampled - self.register_buffer('down_filter', self.design_lowpass_filter( - numtaps=self.down_taps, cutoff=self.out_cutoff, width=self.out_half_width*2, fs=self.tmp_sampling_rate, radial=self.down_radial)) - - # Compute padding. - pad_total = (self.out_size - 1) * self.down_factor + 1 # Desired output size before downsampling. - pad_total -= (self.in_size + self.conv_kernel - 1) * self.up_factor # Input size after upsampling. - pad_total += self.up_taps + self.down_taps - 2 # Size reduction caused by the filters. - pad_lo = (pad_total + self.up_factor) // 2 # Shift sample locations according to the symmetric interpretation (Appendix C.3). - pad_hi = pad_total - pad_lo - self.padding = [int(pad_lo[0]), int(pad_hi[0]), int(pad_lo[1]), int(pad_hi[1])] - - def forward(self, x, w, noise_mode='random', force_fp32=False, update_emas=False): - assert noise_mode in ['random', 'const', 'none'] # unused - misc.assert_shape(x, [None, self.in_channels, int(self.in_size[1]), int(self.in_size[0])]) - misc.assert_shape(w, [x.shape[0], self.w_dim]) - - # Track input magnitude. - if update_emas: - with torch.autograd.profiler.record_function('update_magnitude_ema'): - magnitude_cur = x.detach().to(torch.float32).square().mean() - self.magnitude_ema.copy_(magnitude_cur.lerp(self.magnitude_ema, self.magnitude_ema_beta)) - input_gain = self.magnitude_ema.rsqrt() - - # Execute affine layer. - styles = self.affine(w) - if self.is_torgb: - weight_gain = 1 / np.sqrt(self.in_channels * (self.conv_kernel ** 2)) - styles = styles * weight_gain - - # Execute modulated conv2d. - dtype = torch.float16 if (self.use_fp16 and not force_fp32 and x.device.type == 'cuda') else torch.float32 - x = modulated_conv2d(x=x.to(dtype), w=self.weight, s=styles, - padding=self.conv_kernel-1, demodulate=(not self.is_torgb), input_gain=input_gain) - - # Execute bias, filtered leaky ReLU, and clamping. - gain = 1 if self.is_torgb else np.sqrt(2) - slope = 1 if self.is_torgb else 0.2 - x = filtered_lrelu.filtered_lrelu(x=x, fu=self.up_filter, fd=self.down_filter, b=self.bias.to(x.dtype), - up=self.up_factor, down=self.down_factor, padding=self.padding, gain=gain, slope=slope, clamp=self.conv_clamp) - - # Ensure correct shape and dtype. - misc.assert_shape(x, [None, self.out_channels, int(self.out_size[1]), int(self.out_size[0])]) - assert x.dtype == dtype - return x - - @staticmethod - def design_lowpass_filter(numtaps, cutoff, width, fs, radial=False): - assert numtaps >= 1 - - # Identity filter. - if numtaps == 1: - return None - - # Separable Kaiser low-pass filter. - if not radial: - f = scipy.signal.firwin(numtaps=numtaps, cutoff=cutoff, width=width, fs=fs) - return torch.as_tensor(f, dtype=torch.float32) - - # Radially symmetric jinc-based filter. 
- x = (np.arange(numtaps) - (numtaps - 1) / 2) / fs - r = np.hypot(*np.meshgrid(x, x)) - f = scipy.special.j1(2 * cutoff * (np.pi * r)) / (np.pi * r) - beta = scipy.signal.kaiser_beta(scipy.signal.kaiser_atten(numtaps, width / (fs / 2))) - w = np.kaiser(numtaps, beta) - f *= np.outer(w, w) - f /= np.sum(f) - return torch.as_tensor(f, dtype=torch.float32) - - def extra_repr(self): - return '\n'.join([ - f'w_dim={self.w_dim:d}, is_torgb={self.is_torgb},', - f'is_critically_sampled={self.is_critically_sampled}, use_fp16={self.use_fp16},', - f'in_sampling_rate={self.in_sampling_rate:g}, out_sampling_rate={self.out_sampling_rate:g},', - f'in_cutoff={self.in_cutoff:g}, out_cutoff={self.out_cutoff:g},', - f'in_half_width={self.in_half_width:g}, out_half_width={self.out_half_width:g},', - f'in_size={list(self.in_size)}, out_size={list(self.out_size)},', - f'in_channels={self.in_channels:d}, out_channels={self.out_channels:d}']) - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class SynthesisNetwork(torch.nn.Module): - def __init__(self, - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # Output image resolution. - img_channels, # Number of color channels. - channel_base = 32768, # Overall multiplier for the number of channels. - channel_max = 512, # Maximum number of channels in any layer. - num_layers = 14, # Total number of layers, excluding Fourier features and ToRGB. - num_critical = 2, # Number of critically sampled layers at the end. - first_cutoff = 2, # Cutoff frequency of the first layer (f_{c,0}). - first_stopband = 2**2.1, # Minimum stopband of the first layer (f_{t,0}). - last_stopband_rel = 2**0.3, # Minimum stopband of the last layer, expressed relative to the cutoff. - margin_size = 10, # Number of additional pixels outside the image. - output_scale = 0.25, # Scale factor for the output image. - num_fp16_res = 4, # Use FP16 for the N highest resolutions. - **layer_kwargs, # Arguments for SynthesisLayer. - ): - super().__init__() - self.w_dim = w_dim - self.num_ws = num_layers + 2 - self.img_resolution = img_resolution - self.img_channels = img_channels - self.num_layers = num_layers - self.num_critical = num_critical - self.margin_size = margin_size - self.output_scale = output_scale - self.num_fp16_res = num_fp16_res - - # Geometric progression of layer cutoffs and min. stopbands. - last_cutoff = self.img_resolution / 2 # f_{c,N} - last_stopband = last_cutoff * last_stopband_rel # f_{t,N} - exponents = np.minimum(np.arange(self.num_layers + 1) / (self.num_layers - self.num_critical), 1) - cutoffs = first_cutoff * (last_cutoff / first_cutoff) ** exponents # f_c[i] - stopbands = first_stopband * (last_stopband / first_stopband) ** exponents # f_t[i] - - # Compute remaining layer parameters. - sampling_rates = np.exp2(np.ceil(np.log2(np.minimum(stopbands * 2, self.img_resolution)))) # s[i] - half_widths = np.maximum(stopbands, sampling_rates / 2) - cutoffs # f_h[i] - sizes = sampling_rates + self.margin_size * 2 - sizes[-2:] = self.img_resolution - channels = np.rint(np.minimum((channel_base / 2) / cutoffs, channel_max)) - channels[-1] = self.img_channels - - # Construct layers. 
- self.input = SynthesisInput( - w_dim=self.w_dim, channels=int(channels[0]), size=int(sizes[0]), - sampling_rate=sampling_rates[0], bandwidth=cutoffs[0]) - self.layer_names = [] - for idx in range(self.num_layers + 1): - prev = max(idx - 1, 0) - is_torgb = (idx == self.num_layers) - is_critically_sampled = (idx >= self.num_layers - self.num_critical) - use_fp16 = (sampling_rates[idx] * (2 ** self.num_fp16_res) > self.img_resolution) - layer = SynthesisLayer( - w_dim=self.w_dim, is_torgb=is_torgb, is_critically_sampled=is_critically_sampled, use_fp16=use_fp16, - in_channels=int(channels[prev]), out_channels= int(channels[idx]), - in_size=int(sizes[prev]), out_size=int(sizes[idx]), - in_sampling_rate=int(sampling_rates[prev]), out_sampling_rate=int(sampling_rates[idx]), - in_cutoff=cutoffs[prev], out_cutoff=cutoffs[idx], - in_half_width=half_widths[prev], out_half_width=half_widths[idx], - **layer_kwargs) - name = f'L{idx}_{layer.out_size[0]}_{layer.out_channels}' - setattr(self, name, layer) - self.layer_names.append(name) - - def forward(self, ws, **layer_kwargs): - misc.assert_shape(ws, [None, self.num_ws, self.w_dim]) - ws = ws.to(torch.float32).unbind(dim=1) - - # Execute layers. - x = self.input(ws[0]) - for name, w in zip(self.layer_names, ws[1:]): - x = getattr(self, name)(x, w, **layer_kwargs) - if self.output_scale != 1: - x = x * self.output_scale - - # Ensure correct shape and dtype. - misc.assert_shape(x, [None, self.img_channels, self.img_resolution, self.img_resolution]) - x = x.to(torch.float32) - return x - - def extra_repr(self): - return '\n'.join([ - f'w_dim={self.w_dim:d}, num_ws={self.num_ws:d},', - f'img_resolution={self.img_resolution:d}, img_channels={self.img_channels:d},', - f'num_layers={self.num_layers:d}, num_critical={self.num_critical:d},', - f'margin_size={self.margin_size:d}, num_fp16_res={self.num_fp16_res:d}']) - -#---------------------------------------------------------------------------- - -@persistence.persistent_class -class Generator(torch.nn.Module): - def __init__(self, - z_dim, # Input latent (Z) dimensionality. - c_dim, # Conditioning label (C) dimensionality. - w_dim, # Intermediate latent (W) dimensionality. - img_resolution, # Output resolution. - img_channels, # Number of output color channels. - mapping_kwargs = {}, # Arguments for MappingNetwork. - **synthesis_kwargs, # Arguments for SynthesisNetwork. 
- ): - super().__init__() - self.z_dim = z_dim - self.c_dim = c_dim - self.w_dim = w_dim - self.img_resolution = img_resolution - self.img_channels = img_channels - self.synthesis = SynthesisNetwork(w_dim=w_dim, img_resolution=img_resolution, img_channels=img_channels, **synthesis_kwargs) - self.num_ws = self.synthesis.num_ws - self.mapping = MappingNetwork(z_dim=z_dim, c_dim=c_dim, w_dim=w_dim, num_ws=self.num_ws, **mapping_kwargs) - - def forward(self, z, c, truncation_psi=1, truncation_cutoff=None, update_emas=False, **synthesis_kwargs): - ws = self.mapping(z, c, truncation_psi=truncation_psi, truncation_cutoff=truncation_cutoff, update_emas=update_emas) - img = self.synthesis(ws, update_emas=update_emas, **synthesis_kwargs) - return img - -#---------------------------------------------------------------------------- diff --git a/spaces/alecmueller/01-Speech2Text2Speech-GR/README.md b/spaces/alecmueller/01-Speech2Text2Speech-GR/README.md deleted file mode 100644 index e48fe05a4f92b8541ac93e5154f02ce8139e503a..0000000000000000000000000000000000000000 --- a/spaces/alecmueller/01-Speech2Text2Speech-GR/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: 🗣️01 Speech2Text2Speech GR🙉 -emoji: 🗣️🎤🙉 -colorFrom: red -colorTo: purple -sdk: gradio -sdk_version: 3.0.11 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/irc.py b/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/irc.py deleted file mode 100644 index ad986e04077804e9282f69503262b54c928242f2..0000000000000000000000000000000000000000 --- a/spaces/alexray/btc_predictor/venv/lib/python3.10/site-packages/pip/_vendor/pygments/formatters/irc.py +++ /dev/null @@ -1,179 +0,0 @@ -""" - pygments.formatters.irc - ~~~~~~~~~~~~~~~~~~~~~~~ - - Formatter for IRC output - - :copyright: Copyright 2006-2021 by the Pygments team, see AUTHORS. - :license: BSD, see LICENSE for details. -""" - -from pip._vendor.pygments.formatter import Formatter -from pip._vendor.pygments.token import Keyword, Name, Comment, String, Error, \ - Number, Operator, Generic, Token, Whitespace -from pip._vendor.pygments.util import get_choice_opt - - -__all__ = ['IRCFormatter'] - - -#: Map token types to a tuple of color values for light and dark -#: backgrounds. 
-IRC_COLORS = { - Token: ('', ''), - - Whitespace: ('gray', 'brightblack'), - Comment: ('gray', 'brightblack'), - Comment.Preproc: ('cyan', 'brightcyan'), - Keyword: ('blue', 'brightblue'), - Keyword.Type: ('cyan', 'brightcyan'), - Operator.Word: ('magenta', 'brightcyan'), - Name.Builtin: ('cyan', 'brightcyan'), - Name.Function: ('green', 'brightgreen'), - Name.Namespace: ('_cyan_', '_brightcyan_'), - Name.Class: ('_green_', '_brightgreen_'), - Name.Exception: ('cyan', 'brightcyan'), - Name.Decorator: ('brightblack', 'gray'), - Name.Variable: ('red', 'brightred'), - Name.Constant: ('red', 'brightred'), - Name.Attribute: ('cyan', 'brightcyan'), - Name.Tag: ('brightblue', 'brightblue'), - String: ('yellow', 'yellow'), - Number: ('blue', 'brightblue'), - - Generic.Deleted: ('brightred', 'brightred'), - Generic.Inserted: ('green', 'brightgreen'), - Generic.Heading: ('**', '**'), - Generic.Subheading: ('*magenta*', '*brightmagenta*'), - Generic.Error: ('brightred', 'brightred'), - - Error: ('_brightred_', '_brightred_'), -} - - -IRC_COLOR_MAP = { - 'white': 0, - 'black': 1, - 'blue': 2, - 'brightgreen': 3, - 'brightred': 4, - 'yellow': 5, - 'magenta': 6, - 'orange': 7, - 'green': 7, #compat w/ ansi - 'brightyellow': 8, - 'lightgreen': 9, - 'brightcyan': 9, # compat w/ ansi - 'cyan': 10, - 'lightblue': 11, - 'red': 11, # compat w/ ansi - 'brightblue': 12, - 'brightmagenta': 13, - 'brightblack': 14, - 'gray': 15, -} - -def ircformat(color, text): - if len(color) < 1: - return text - add = sub = '' - if '_' in color: # italic - add += '\x1D' - sub = '\x1D' + sub - color = color.strip('_') - if '*' in color: # bold - add += '\x02' - sub = '\x02' + sub - color = color.strip('*') - # underline (\x1F) not supported - # backgrounds (\x03FF,BB) not supported - if len(color) > 0: # actual color - may have issues with ircformat("red", "blah")+"10" type stuff - add += '\x03' + str(IRC_COLOR_MAP[color]).zfill(2) - sub = '\x03' + sub - return add + text + sub - return '<'+add+'>'+text+'' - - -class IRCFormatter(Formatter): - r""" - Format tokens with IRC color sequences - - The `get_style_defs()` method doesn't do anything special since there is - no support for common styles. - - Options accepted: - - `bg` - Set to ``"light"`` or ``"dark"`` depending on the terminal's background - (default: ``"light"``). - - `colorscheme` - A dictionary mapping token types to (lightbg, darkbg) color names or - ``None`` (default: ``None`` = use builtin colorscheme). - - `linenos` - Set to ``True`` to have line numbers in the output as well - (default: ``False`` = no line numbers). 
- """ - name = 'IRC' - aliases = ['irc', 'IRC'] - filenames = [] - - def __init__(self, **options): - Formatter.__init__(self, **options) - self.darkbg = get_choice_opt(options, 'bg', - ['light', 'dark'], 'light') == 'dark' - self.colorscheme = options.get('colorscheme', None) or IRC_COLORS - self.linenos = options.get('linenos', False) - self._lineno = 0 - - def _write_lineno(self, outfile): - self._lineno += 1 - outfile.write("\n%04d: " % self._lineno) - - def _format_unencoded_with_lineno(self, tokensource, outfile): - self._write_lineno(outfile) - - for ttype, value in tokensource: - if value.endswith("\n"): - self._write_lineno(outfile) - value = value[:-1] - color = self.colorscheme.get(ttype) - while color is None: - ttype = ttype.parent - color = self.colorscheme.get(ttype) - if color: - color = color[self.darkbg] - spl = value.split('\n') - for line in spl[:-1]: - self._write_lineno(outfile) - if line: - outfile.write(ircformat(color, line[:-1])) - if spl[-1]: - outfile.write(ircformat(color, spl[-1])) - else: - outfile.write(value) - - outfile.write("\n") - - def format_unencoded(self, tokensource, outfile): - if self.linenos: - self._format_unencoded_with_lineno(tokensource, outfile) - return - - for ttype, value in tokensource: - color = self.colorscheme.get(ttype) - while color is None: - ttype = ttype[:-1] - color = self.colorscheme.get(ttype) - if color: - color = color[self.darkbg] - spl = value.split('\n') - for line in spl[:-1]: - if line: - outfile.write(ircformat(color, line)) - outfile.write('\n') - if spl[-1]: - outfile.write(ircformat(color, spl[-1])) - else: - outfile.write(value) diff --git a/spaces/ali-ghamdan/image-colors-corrector/evaluation/evaluate_cc.py b/spaces/ali-ghamdan/image-colors-corrector/evaluation/evaluate_cc.py deleted file mode 100644 index fe4b2a7c1984d0d2b554bf8fe718f361b1e907d2..0000000000000000000000000000000000000000 --- a/spaces/ali-ghamdan/image-colors-corrector/evaluation/evaluate_cc.py +++ /dev/null @@ -1,58 +0,0 @@ -## Calculate errors between the corrected image and the ground truth image. -# -# Copyright (c) 2018-present, Mahmoud Afifi -# York University, Canada -# mafifi@eecs.yorku.ca | m.3afifi@gmail.com -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. -# All rights reserved. -# -# Please cite the following work if this program is used: -# Mahmoud Afifi, Brian Price, Scott Cohen, and Michael S. Brown, -# "When color constancy goes wrong: Correcting improperly white-balanced -# images", CVPR 2019. -# -########################################################################## - - -from evaluation.calc_deltaE import calc_deltaE -from evaluation.calc_deltaE2000 import calc_deltaE2000 -from evaluation.calc_mse import calc_mse -from evaluation.calc_mae import calc_mae - - -def evaluate_cc(corrected, gt, color_chart_area, opt=1): - """ - Color constancy (white-balance correction) evaluation of a given corrected - image. - :param corrected: corrected image - :param gt: ground-truth image - :param color_chart_area: If there is a color chart in the image, that is - masked out from both images, this variable represents the number of pixels - of the color chart. - :param opt: determines the required error metric(s) to be reported. - Options: - opt = 1 delta E 2000 (default). 
- opt = 2 delta E 2000 and mean squared error (MSE) - opt = 3 delta E 2000, MSE, and mean angular eror (MAE) - opt = 4 delta E 2000, MSE, MAE, and delta E 76 - :return: error(s) between corrected and gt images - """ - - if opt == 1: - return calc_deltaE2000(corrected, gt, color_chart_area) - elif opt == 2: - return calc_deltaE2000(corrected, gt, color_chart_area), calc_mse( - corrected, gt, color_chart_area) - elif opt == 3: - return calc_deltaE2000(corrected, gt, color_chart_area), calc_mse( - corrected, gt, color_chart_area), calc_mae(corrected, gt, - color_chart_area) - elif opt == 4: - return calc_deltaE2000(corrected, gt, color_chart_area), calc_mse( - corrected, gt, color_chart_area), calc_mae( - corrected, gt, color_chart_area), calc_deltaE(corrected, gt, - color_chart_area) - else: - raise Exception('Error in evaluate_cc function') diff --git a/spaces/allknowingroger/text-generation-webui-space-1/extensions/llama_prompts/script.py b/spaces/allknowingroger/text-generation-webui-space-1/extensions/llama_prompts/script.py deleted file mode 100644 index 22c96f7c2d6763213a728d77ee6666496d9c4aa3..0000000000000000000000000000000000000000 --- a/spaces/allknowingroger/text-generation-webui-space-1/extensions/llama_prompts/script.py +++ /dev/null @@ -1,18 +0,0 @@ -import gradio as gr -import modules.shared as shared -import pandas as pd - -df = pd.read_csv("https://raw.githubusercontent.com/devbrones/llama-prompts/main/prompts/prompts.csv") - -def get_prompt_by_name(name): - if name == 'None': - return '' - else: - return df[df['Prompt name'] == name].iloc[0]['Prompt'].replace('\\n', '\n') - -def ui(): - if not shared.args.chat or shared.args.cai_chat: - choices = ['None'] + list(df['Prompt name']) - - prompts_menu = gr.Dropdown(value=choices[0], choices=choices, label='Prompt') - prompts_menu.change(get_prompt_by_name, prompts_menu, shared.gradio['textbox']) diff --git a/spaces/almn-uhc/Examples-of-AI/app.py b/spaces/almn-uhc/Examples-of-AI/app.py deleted file mode 100644 index 1d37e1ba5cdbf6b844bbc2fd0e3b209c2a66fc63..0000000000000000000000000000000000000000 --- a/spaces/almn-uhc/Examples-of-AI/app.py +++ /dev/null @@ -1,856 +0,0 @@ -import streamlit as st -from graphviz import Digraph - - -st.markdown(""" -# 👋 Two easy ways to turbo boost your AI learning journey! 💻 -# 🌐 AI Pair Programming -## Open 2 Browsers to: -1. __🌐 ChatGPT__ [URL](https://chat.openai.com/chat) or [URL2](https://platform.openai.com/playground) and -2. __🌐 Huggingface__ [URL](https://huggingface.co/awacke1) in separate browser windows. -1. 🤖 Use prompts to generate a streamlit program on Huggingface or locally to test it. -2. 🔧 For advanced work, add Python 3.10 and VSCode locally, and debug as gradio or streamlit apps. -3. 🚀 Use these two superpower processes to reduce the time it takes you to make a new AI program! ⏱️ -# 🎥 YouTube University Method: -1. 🏋️‍♀️ Plan two hours each weekday to exercise your body and brain. -2. 🎬 Make a playlist of videos you want to learn from on YouTube. Save the links to edit later. -3. 🚀 Try watching the videos at a faster speed while exercising, and sample the first five minutes of each video. -4. 📜 Reorder the playlist so the most useful videos are at the front, and take breaks to exercise. -5. 📝 Practice note-taking in markdown to instantly save what you want to remember. Share your notes with others! -6. 👥 AI Pair Programming Using Long Answer Language Models with Human Feedback: -## 🎥 2023 AI/ML Advanced Learning Playlists: -1. 
[2023 QA Models and Long Form Question Answering NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFovrkkx8HMTLNgYdjCMNYmX_) -2. [FHIR Bioinformatics Development Using AI/ML and Python, Streamlit, and Gradio - 2022](https://www.youtube.com/playlist?list=PLHgX2IExbFovoMUC3hYXeFegpk_Y0Lz0Q) -3. [2023 ChatGPT for Coding Assistant Streamlit, Gradio and Python Apps](https://www.youtube.com/playlist?list=PLHgX2IExbFouOEnppexiKZVdz_k5b0pvI) -4. [2023 BigScience Bloom - Large Language Model for AI Systems and NLP](https://www.youtube.com/playlist?list=PLHgX2IExbFouqnsIqziThlPCX_miiDq14) -5. [2023 Streamlit Pro Tips for AI UI UX for Data Science, Engineering, and Mathematics](https://www.youtube.com/playlist?list=PLHgX2IExbFou3cP19hHO9Xb-cN8uwr5RM) -6. [2023 Fun, New and Interesting AI, Videos, and AI/ML Techniques](https://www.youtube.com/playlist?list=PLHgX2IExbFotoMt32SrT3Xynt5BXTGnEP) -7. [2023 Best Minds in AGI AI Gamification and Large Language Models](https://www.youtube.com/playlist?list=PLHgX2IExbFotmFeBTpyje1uI22n0GAkXT) -8. [2023 State of the Art for Vision Image Classification, Text Classification and Regression, Extractive Question Answering and Tabular Classification](https://www.youtube.com/playlist?list=PLHgX2IExbFotPcPu6pauNHOoZTTbnAQ2F) -9. [2023 AutoML DataRobot and AI Platforms for Building Models, Features, Test, and Transparency](https://www.youtube.com/playlist?list=PLHgX2IExbFovsY2oGbDwdEhPrakkC8i3g) -""") - - -st.markdown(""" -# Cognitive AI with Human Feedback (CAHF) [Example 🩺⚕️](https://huggingface.co/spaces/awacke1/Cognitive-AI-Episodic-Semantic-Memory-Demo): -1. Create and use Models to predict __outcomes__ -2. Use AI to predict **conditions, disease, and opportunities** using AI with **explainability**. -3. **Cognitive AI** - Mimic how humans reason through decision making processes. -4. **Reasoning cycles** - "Recommended for You" reasoners - consider type of personalized needs and classification for users, to recommend products -5. **High Acuity Reasoners** - Make decisions on rules of **what it can and cannot do within human feedback** guidelines. - -Emphasizes **explainability, transparency, and removing administrative burden** to **protocolize** and improve what staff is doing. - -Vetted by SME's, adding value of **judgement and training** and pick up intelligence and **skills from human feedback**. - -**Alert, Recommended Action, and Clinical Terms** per entity with vocabularies from LOINC, SNOMED, OMS, ICD10, RXNORM, SMILES, HCPCS, CPT, CQM, HL7, SDC and FHIR. -6. Non static multi agent cognitive approach using real time series to identify factors predictive of outcome. -7. Cognitive models form of Ontology - to create a type of computable sets and relationships stored in Ontology then ingested by reasoner - -Use models of world to build predictions and recommendations with answers cumulative with information we know -8. Reasoners standardize making it easy as possible to do right thing using transfer learning and recommendation tools with questions and actions. -""") - - -st.markdown(""" -# 📚 Clinical Terminology and Ontologies [Example 🩺⚕️NLP Clinical Ontology Biomedical NER](https://huggingface.co/spaces/awacke1/Biomed-NLP-AI-Clinical-Terminology) -## Health Vocabularies, Systems of Coding, and Databases with Bibliographies -##__Keywords__: -1. __Clinical Terminology__: 💬 Words that doctors use to talk to each other about patients. -2. __Ontologies for Medications and Conditions__: 📚 A fancy way of organizing knowledge about medicine and health problems. 
-3. __Health Vocabularies__: 📝 A special list of words used in healthcare to talk about health issues. -4. __Systems of Coding__: 💻 A way of giving things like sicknesses and treatments special codes, so that doctors can remember them easily. -5. __Databases__: 🗄️ A computer system that stores information about patients, health research, and other healthcare things. -6. __Bibliographies__: 📖 A list of books or articles that doctors use to learn about new health information. -1. ## 1️⃣ National Library of Medicine's **RxNorm**: - - Standardized nomenclature for clinical drugs developed by NLM - - Provides links between drug names and related information such as ingredients, strengths, and dosages - - **Data type: controlled vocabulary** - - Access through **NLM's RxNorm website**: https://www.nlm.nih.gov/research/umls/rxnorm/index.html -2. ## 2️⃣ Centers for Medicare and Medicaid Services' Healthcare Common Procedure Coding System (HCPCS): - - Coding system used to identify healthcare **services, procedures, and supplies** - - Includes **codes for drugs, biologicals, and other items** used in medical care - - **Data type: coding system** - - Access through **CMS website**: https://www.cms.gov/Medicare/Coding/MedHCPCSGenInfo -3. ## 3️⃣ Unified Medical Language System (UMLS): - - Set of files and software tools developed by NLM for integrating and mapping biomedical vocabularies - - Includes RxNorm and other drug vocabularies, as well as other terminologies used in medicine - - **Data type: controlled vocabulary** - - Access through UMLS Metathesaurus: https://www.nlm.nih.gov/research/umls/index.html -4. ## 4️⃣ PubMed: - - Database of **biomedical literature** maintained by the National Center for Biotechnology Information (NCBI) - - Includes information about **drugs, including drug names, chemical structures, and pharmacological actions** - - **Data type: bibliographic database** - - Access through **PubMed website**: https://pubmed.ncbi.nlm.nih.gov/ -5. ## 5️⃣ PubChem: - - Database of chemical substances maintained by NCBI - - Includes information about drugs, including **chemical structures, properties, and activities** - - **Data type: chemical database** - - Access through **PubChem website**: https://pubchem.ncbi.nlm.nih.gov/ -6. ## 6️⃣ Behavioral Health Code Terminology Sets: - - Code terminology sets specific to behavioral health - - Includes **DSM** published by American Psychiatric Association, **ICD** published by World Health Organization, and **CPT** published by American Medical Association - - **Data type: coding system** - - Access through respective **organizations' websites**: - 1. [DSM](https://www.psychiatry.org/psychiatrists/practice/dsm) - 2. [ICD](https://www.who.int/standards/classifications/classification-of-diseases) - 3. [CPT](https://www.ama-assn.org/practice-management/cpt/current-procedural-terminology-cpt) -""") - -st.markdown(""" -1. # 📚Natural Language Processing🔤 - 🗣️🤖💭💬🌍🔍 - 1. 🤔 **🩺⚕️ Sentiment analysis** - Determine underlying sentiment of text. [Example](https://huggingface.co/spaces/awacke1/Sentiment-analysis-streamlit) - 2. 📝 **Named Entity Recognition (NER)** - Identify and classify named entities in text. [Example](https://huggingface.co/spaces/awacke1/Named-entity-resolution) - 3. 🔊 **🩺⚕️Automatic Speech Recognition (ASR)** - Transcribe spoken language into text. - # Advanced NLP ASR Examples: - 1. 🩺⚕️ https://huggingface.co/spaces/awacke1/ASR-High-Accuracy-Test - 2. https://huggingface.co/spaces/awacke1/ASRGenerateStory - 3. 
🩺⚕️ https://huggingface.co/spaces/awacke1/TTS-STT-Blocks - 4. 🩺⚕️ https://huggingface.co/spaces/awacke1/CloneAnyVoice - 5. https://huggingface.co/spaces/awacke1/ASR-SOTA-NvidiaSTTMozilla - 4. 🌐 **Machine translation** - Translate text between languages automatically. [Example](https://huggingface.co/spaces/awacke1/Machine-translation) - 5. 📄 **Text summarization** - Automatically summarize large volumes of text. [Example](https://huggingface.co/spaces/awacke1/Text-summarization) - 6. ❓ **🩺⚕️ Question answering** - Answer questions posed in natural language. [Example](https://huggingface.co/spaces/awacke1/Question-answering) - 7. 🤖 **Sentiment-aware chatbots** - Use sentiment analysis to detect user emotions and respond appropriately. - 8. 📊 **🩺⚕️ Text classification** - Classify text into different categories. [Example](https://huggingface.co/spaces/awacke1/sileod-deberta-v3-base-tasksource-nli) - 9. 💬 **🩺⚕️ Text generation** - Generate natural language text. [Example](https://huggingface.co/spaces/awacke1/Sentence2Paragraph) - 10. 🔎 **Topic modeling** - Automatically identify topics in a large corpus of text. [Example](https://huggingface.co/spaces/awacke1/Topic-modeling) - - Examples - 1. [NLP Video Summary](https://huggingface.co/spaces/awacke1/Video-Summary) - 2. [TTS-STT ASR with Multiple Voices](https://huggingface.co/spaces/awacke1/TTS-STT-Blocks) - 3. [NLP Transcript with Video Player](https://huggingface.co/spaces/awacke1/Streamlit-ASR-Video) - 4. [NLP Clinical Ontology Biomedical NER](https://huggingface.co/spaces/awacke1/Biomed-NLP-AI-Clinical-Terminology) - 5. [Document Understanding and NLP](https://huggingface.co/spaces/awacke1/AIDocumentUnderstandingOCR) - 6. [NLP ASR Wav2Vec2 Multilingual](https://huggingface.co/spaces/awacke1/ASR-High-Accuracy-Test) - 7. [Live ASR](https://huggingface.co/spaces/awacke1/ASR-SOTA-NvidiaSTTMozilla) - 8. [NLP and Visualization](https://huggingface.co/spaces/awacke1/Visualization-Plotly-Sunbursts-Treemaps-and-WebGL) -""") - -st.markdown(""" -2. # 🔮Generative AI💭 (🎨Images and 📝Text) - 🎵🧩🔄📊🌌 - 1. 🆕 **🩺⚕️ Generation of new data**: Create new data that resembles existing data. [Example](https://huggingface.co/spaces/awacke1/GenAI-Generate-New-Data-Resembling-Example) - 2. 🎨 **Creative potential**: Generate music, art, or literature. [Example](https://huggingface.co/spaces/awacke1/Creative-Potential-Music-Art-Lit) - 3. 📊 **Data synthesis**: Synthesize data from multiple sources to create new datasets. [Example](https://huggingface.co/spaces/awacke1/Data-Synthesizer-Synthesize-From-Multiple-Sources) - 4. 📈 **🩺⚕️ Data augmentation**: Augment existing datasets to make them larger and more diverse. [Example](https://huggingface.co/spaces/awacke1/Data-Augmentation) - 5. 🔀 **Domain transfer**: Transfer knowledge learned from one domain to another. - 6. 🔍 **Unsupervised learning**: Learn patterns without labeled training data. - 7. 🔄 **Adaptive learning**: Adapt to changes in data over time. - 8. 🔊 **Noise injection**: Introduce noise to explore a wider range of possibilities. - 9. 🕶️ **Latent space manipulation**: Control output by manipulating a model's latent space. - 10. 🖼️ **Realistic output**: Produce output that is difficult to distinguish from human-created data. - - Examples - 1. Quantum AI Circuits: https://huggingface.co/spaces/awacke1/AI-Quantum?option=Circuit - 2. Generate Story and Video: https://huggingface.co/spaces/awacke1/ASRGenerateStoryandVideo - 3. ASR Generate Story: https://huggingface.co/spaces/awacke1/ASRGenerateStory - 4. 
Music Generation: https://huggingface.co/spaces/awacke1/MusicMaker -""") - -st.markdown(""" -3. # 📷Image Recognition🏞️ - 1. 📷 **Object detection**: Detect and identify multiple objects in an image for detailed analysis and classification. - 2. 🏞️ **Scene recognition**: Recognize and classify entire scenes based on objects, colors, and shapes. - 3. 😃 **Facial recognition**: Analyze facial features for accurate identification. - 4. 😊 **Emotion recognition**: Identify emotions on a subject's face, including happiness, sadness, and anger. - 5. 🔤 **Text recognition**: Identify and translate text in images for analysis. - 6. 🎨 **Color recognition**: Detect colors and provide information on hue, saturation, and brightness. - 7. 🔍 **Image segmentation**: Divide an image into multiple regions for individual analysis and classification. - 8. 🌅 **Image restoration**: Remove noise and blur, restoring images to original clarity and quality. - 9. 🔖 **Image classification**: Classify images into categories like animals, buildings, or landscapes. - 10. 🎨 **Style transfer**: Apply the style of one image to another for unique and innovative results. - - Examples - 1. 🩺⚕️ Text-to-Image : [Image Classification](https://huggingface.co/spaces/awacke1/Prompt-Refinery-Text-to-Image-Generation) - 2. Image Captions from 5 SOTA Generators: [URL](https://huggingface.co/spaces/awacke1/ImageCaptionPromptGenerator) - 3. 🩺⚕️ Image to Multilingual OCR: [URL](https://huggingface.co/spaces/awacke1/Image-to-Multilingual-OCR) - 4. WRN - Wide Residual Networks: [URL](https://huggingface.co/spaces/awacke1/ResnetPytorchImageRecognition) - 5. AI Document Understanding: [URL](https://huggingface.co/spaces/awacke1/AIDocumentUnderstandingOCR) - 6. Elixir Docker Bumblebee: [URL](https://huggingface.co/spaces/awacke1/DockerImageRecognitionToText) - 7. Speech to Text to Story to Images to Video: [URL](https://huggingface.co/spaces/awacke1/Speeech2Text2Story2Images2Video) - 8. Image to Line Drawings: [URL](https://huggingface.co/spaces/awacke1/Image-to-Line-Drawings) - 9. Semantic Image Search: [URL](https://huggingface.co/spaces/awacke1/Image-Semantic-Search) - 10. Zoom Clip Toon: [URL](https://huggingface.co/spaces/awacke1/Zoom-Clip-Toon-Image-to-Image) - 11. Image to Reading Labels: [URL](https://huggingface.co/spaces/awacke1/ImageOCRMultilingual) - 12. A Game For That - Gamification Using Snapshot Images: [URL](https://huggingface.co/spaces/awacke1/AGameForThat) - 13. AI Visually Plays QBert, Pong, Seaquest and more: [URL](https://huggingface.co/spaces/awacke1/AI-Atari-Live-Streamlit) - 14. AI Creates Generator Style Mix Art from Encyclopedia: [URL](https://huggingface.co/spaces/awacke1/Art-Generator-and-Style-Mixer) - 15. BigGAN Image Gen and Search: [URL](https://huggingface.co/spaces/awacke1/AI-BigGAN-Image-Gen) - 16. Art Style Line Drawings: [URL](https://huggingface.co/spaces/awacke1/ArtStyleFoodsandNutrition) - 17. 🩺⚕️ Yolo Real Time Image Recognition from Webcam: https://huggingface.co/spaces/awacke1/Webcam-Object-Recognition-Yolo-n-Coco -""") - -st.markdown(""" -4. # 🗣️Speech Recognition💬 - 1. 🔊 **Continuous Speech Recognition**: Transcribe spoken words in real-time without pausing. - 2. 🗣️ **Speaker Identification**: Identify individual speakers through unique features in their speech. - 3. 🧠 **Contextual Awareness**: Understand conversation context and interpret word meaning. - 4. 🌎 **Multilingual Support**: Recognize and transcribe multiple languages for translation. - 5. 
🔇 **Noise Reduction**: Filter out background noise to improve transcription quality. - 6. 🔒 **Voice Biometrics**: Verify speaker identity and provide secure access to personal data. - 7. 🎛️ **Command and Control**: Interpret voice commands to automate tasks and interact with software. - 8. 💬 **Natural Language Processing**: Understand complex human speech patterns. - 9. 🧠 **Adaptive Learning**: Learn and adapt to improve accuracy over time. - 10. ☁️ **Cloud-Based Deployment**: Real-time processing of large amounts of data, even on mobile devices. -""") - -st.markdown(""" -5. # Reinforcement Learning - 1. 🏆 **Reward-driven**: RL uses rewards or punishments to drive its learning process. - 2. 🧪 **Trial-and-error learning**: RL is a trial-and-error learning method, where an agent tries different actions to find the best action that will maximize the cumulative reward. - 3. 🤔 **Exploration-exploitation trade-off**: RL agents need to balance exploration and exploitation to find new possibilities while also exploiting successful actions. - 4. 📈 **Markov Decision Processes**: RL uses MDPs to model decision-making processes. - 5. 📊 **Policy optimization**: RL uses policy optimization techniques to find the best policy for a given task or learn the optimal policy from scratch. - 6. 💰 **Value-based methods**: RL uses value-based methods to estimate the value of each state or action. - 7. 🧠 **Model-based methods**: RL can use model-based methods to predict the outcomes of different actions. - 8. 🤖 **Deep Reinforcement Learning**: DRL combines RL with deep learning techniques to learn complex decision-making tasks. - 9. 🔄 **Transfer learning**: RL can use transfer learning techniques to transfer knowledge learned in one task to another task. - 10. 🤝 **Multi-agent RL**: RL can handle multiple agents that interact with each other. -""") - -st.markdown(""" -6. 🎲Game Theory🎲 – Traditional AI processes - 1. 🤝 **Interdependence**: Game Theory considers decision-making among multiple agents, unlike traditional AI processes which focus on a single agent. - 2. 🎯 **Strategic Behavior**: Game Theory assumes that agents aim to maximize their payoffs based on the actions of other agents. Traditional AI may not consider this strategic element. - 3. 💰 **Payoffs**: Game Theory calculates payoffs for each agent based on their actions and the actions of other agents, unlike traditional AI which may focus on a single objective. - 4. ⚖️ **Equilibrium**: Game Theory seeks to identify stable states in the game where no agent has an incentive to deviate from their current strategy. Traditional AI may not seek to find an equilibrium. - 5. 🎲 **Game Formulation**: Game Theory formulates a game, including rules, players, and possible actions, unlike traditional AI which may not require such formulation. - 6. 💡 **Solution Concepts**: Game Theory has various solution concepts, such as Nash Equilibrium and Pareto Efficiency, to identify the most desirable outcomes. Traditional AI may not have such concepts. - 7. 📊 **Information**: Game Theory considers the information available to each agent in the game. Traditional AI may not consider information explicitly. - 8. ⚔️ **Adversarial**: Game Theory models adversarial scenarios where agents have conflicting goals. Traditional AI may assume cooperation among agents. - 9. ❓ **Uncertainty**: Game Theory deals with uncertainty and incomplete information in the game. Traditional AI may not consider uncertainty. - 10. 
🌐 **Complexity**: Game Theory deals with complex multi-agent interactions. Traditional AI may focus on single-agent optimization. - - Examples - 1. 🩺⚕️ Health Care Game: https://huggingface.co/spaces/awacke1/AI-RPG-Self-Play-RLML-Health-Battler-Game - 2. 🩺⚕️ Sankey Snacks Math Chart Animator: https://huggingface.co/spaces/awacke1/Sankey-Snacks - 3. Blackjack 21 : https://huggingface.co/spaces/awacke1/BlackjackSimulatorCardGameAI - 4. Player Card Monster Battler: https://huggingface.co/spaces/awacke1/Player-Card-Monster-Battler-For-Math-and-AI - 5. Emojitrition: https://huggingface.co/spaces/awacke1/Emojitrition-Fun-and-Easy-Nutrition -""") - -st.markdown(""" -7. # 🃏Card Game🃏 Activity - 1. 🃏 **Card crafting**: Combine existing cards or materials to craft custom cards. [Example](https://huggingface.co/spaces/awacke1/CardCrafter-CraftCustomCards) - 2. 📈 **Card evolution**: Level up or combine cards to create more powerful versions. - 3. 🔨 **Deck building**: Build custom decks that match your play style. - 4. ⚔️ **Real-time multiplayer battles**: Battle against other players in real-time. - 5. 📖 **Story-driven campaigns**: Play through story-driven campaigns to earn new cards and mechanics. - 6. 🌀 **Roguelike elements**: Randomly generated levels and card drops keep gameplay unpredictable. - 7. 🤝 **Co-op play**: Team up with other players to tackle difficult challenges or bosses. - 8. 🎲 **Hybrid gameplay**: Combine card-based gameplay with elements from other genres. - 9. 💥 **Multi-card play**: Use multiple cards at once to create powerful combos or synergies. - 10. 🗺️ **Tactical positioning**: Strategically place your cards on a game board or battlefield to gain an advantage. - - Examples - 1. 🩺⚕️ Game Activity Graph: https://huggingface.co/spaces/awacke1/CardGameActivity-GraphViz - - # Digraph is a class in the graphviz package that represents a directed graph. - 1. It is used to create graphs with nodes and edges. - 2. It can be customized with various styles and formatting options. - 3. This is an example of defining a Digraph with emojis for the node labels: - 2. 🩺⚕️ SVG Card Generation: https://huggingface.co/spaces/awacke1/VizLib-SVGWrite-Streamlit - - # Scalable Vector Graphics (SVG) is an important language used in UI and graphic design. - 3. Game Mechanics Top 20: https://huggingface.co/spaces/awacke1/CardGameMechanics - 4. Game Mechanics Deep Dive: https://huggingface.co/spaces/awacke1/CardGameActivity - 5. Hexagon Dice: https://huggingface.co/spaces/awacke1/Hexagon-Dice-Fractal-Math-Game - 6. Dice Roll Game: https://huggingface.co/spaces/awacke1/Dice-Roll-Fractals-STEM-Math - 7. Pyplot Dice Game: https://huggingface.co/spaces/awacke1/Streamlit-Pyplot-Math-Dice-Game -""") - - -st.markdown(""" -## AI For Long Question Answering and Fact Checking [Example](🩺⚕️ https://huggingface.co/spaces/awacke1/StreamlitWikipediaChat) -1. 🖥️ First, we'll teach a smart computer to browse the internet and find information. - - 🧠 It will be like having a super-smart search engine! -2. 🤖 Then, we'll train the computer to answer questions by having it learn from how humans answer questions. - - 🤝 We'll teach it to imitate how people find and use information on the internet. -3. 📚 To make sure the computer's answers are correct, we'll teach it to collect references from the internet to support its answers. - - 🔍 This way, it will only give answers that are true and based on facts. -4. 👨‍👩‍👧‍👦 We'll test our invention on a special set of questions that real people have asked. 
- - 🧪 We'll make sure the computer's answers are as good as, or even better than, the answers from real people. -5. 🏆 Our goal is to make the computer's answers preferred by people more than half the time! - - 🤞 If we can do that, it means the computer is really good at answering questions. -""") - - - -st.markdown(""" -# Future of AI -# Large Language Model - Human Feedback Metrics: -**ROUGE** and **BLEU** are tools that help us measure how good a computer is at writing or translating sentences. -## 🩺⚕️ [ROUGE](https://huggingface.co/spaces/evaluate-metric/rouge) -## 🩺⚕️ [BLEU](https://huggingface.co/spaces/evaluate-metric/bleu) -1. ROUGE looks at a sentence made by a computer and checks how similar it is to sentences made by humans. - 1. It tries to see if the important information is the same. -2. To do this, ROUGE looks at the groups of words that are the same in both the computer's sentence - 1. and the human's sentence. - 2. The more groups of words that are the same, the higher the score. -3. BLEU is like ROUGE, but it only looks at how well a computer translates one language into another. - 1. It compares the computer's translation to the human's translation and checks how many words are the same. -# If the scores for ROUGE or BLEU are high, it means that the computer is doing a good job. -1. But it's also important to remember that these tools have their limits, -2. and we need to use other ways to check if the computer is doing a good job. -1. **ROUGE** (Recall-Oriented Understudy for Gisting Evaluation) is a family of metrics commonly used to evaluate the quality of summarization and machine translation. ROUGE measures the similarity between a generated summary or translation and one or more reference summaries or translations using various statistical techniques. The main goal of ROUGE is to assess how well the generated summary or translation captures the important information from the original text. -2. **ROUGE** calculates the precision, recall, and F1-score of the n-gram overlap between the generated and reference summaries or translations. Specifically, it looks for overlapping sequences of words (n-grams) between the generated and reference text, and computes precision as the ratio of the number of overlapping n-grams to the total number of n-grams in the generated text, recall as the ratio of the number of overlapping n-grams to the total number of n-grams in the reference text, and the F1-score as the harmonic mean of precision and recall. ROUGE can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc., as well as at the sentence or document level. -3. **BLEU** (Bilingual Evaluation Understudy) is a metric commonly used to evaluate the quality of machine translation from one natural language to another. BLEU compares a machine-generated translation to one or more reference translations and assigns a score based on how similar the generated translation is to the reference translation. BLEU uses a modified form of precision to calculate the score. -4. **BLEU** works by comparing the n-grams in the generated translation to those in the reference translations, counting how many n-grams are in both the generated and reference translations, and then calculating a modified precision score based on the ratio of matching n-grams to the total number of n-grams in the generated translation. BLEU can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc. 
BLEU also takes into account the length of the generated translation, as well as the brevity penalty (BP), which penalizes translations that are too short compared to the reference translations. -5. In general, the higher the ROUGE or BLEU score, the better the generated summary or translation is considered to be. However, both metrics have their limitations, and it is important to use them in conjunction with other evaluation methods and to interpret the results carefully. -""") - - -st.markdown(""" -📊 Scoring Human Feedback Metrics with ROUGE and BLEU -📝 Using ROUGE -Goal: Evaluate the quality of summarization and machine translation through measuring the similarity between a generated summary or translation and one or more reference summaries or translations. -Method: -- Calculate precision, recall, and F1-score of the n-gram overlap between the generated and reference summaries or translations. -- Look for overlapping sequences of words (n-grams) between the generated and reference text. -- Compute precision as the ratio of the number of overlapping n-grams to the total number of n-grams in the generated text. -- Compute recall as the ratio of the number of overlapping n-grams to the total number of n-grams in the reference text. -- Compute the F1-score as the harmonic mean of precision and recall. -- ROUGE can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc., as well as at the sentence or document level. -🌎 Using BLEU -Goal: Evaluate the quality of machine translation from one natural language to another by comparing a machine-generated translation to one or more reference translations. -Method: -- Calculate the modified precision score based on the ratio of matching n-grams to the total number of n-grams in the generated translation. -- Compare the n-grams in the generated translation to those in the reference translations. -- Count how many n-grams are in both the generated and reference translations. -- BLEU can be computed at different n-gram levels, including unigrams, bigrams, trigrams, etc. -- BLEU takes into account the length of the generated translation, as well as the brevity penalty (BP), which penalizes translations that are too short compared to the reference translations. -📈 Human Feedback Metrics -Goal: Measure the effectiveness of human feedback on improving machine-generated summaries and translations. -Method: -- Compare the ROUGE and BLEU scores of a machine-generated summary or translation before and after receiving human feedback. -Example: -1. Generate a summary or translation using a machine translation system. -2. Calculate the ROUGE and BLEU scores for the machine-generated output. -3. Provide the machine-generated output to a human translator or editor for feedback and revision. -4. Re-calculate the ROUGE and BLEU scores for the revised output. -5. Compare the scores to measure the effectiveness of the human feedback. -""") - - - -st.markdown(""" -# 🩺⚕️ Reinforcement Learning from Human Feedback (RLHF) -## 🤖 RLHF is a way for computers to learn how to do things better by getting help and feedback from people, - - just like how you learn new things from your parents or teachers. -🎮 Let's say the computer wants to learn how to play a video game. - - It might start by trying different things and seeing what happens. -👍 If it does something good, like getting a high score, it gets a reward. -👎 If it does something bad, like losing a life, it gets a punishment. 
-👩‍💻 Now, imagine that a person is watching the computer play the game and giving it feedback. - -The person might say things like "Good job!" when the computer gets a high score - - or "Oops, try again!" when it loses a life. -💡 This feedback helps the computer figure out which actions are good and which ones are bad. - -The computer then uses this feedback to adjust its actions and get better at playing the game. -🤔 It might try different strategies and see which ones get the best feedback from the person. - -Over time, the computer gets better and better at playing the game, just like how you get better at things by practicing and getting help from others. -🚀 RLHF is a cool way for computers to learn and improve with the help of people. - -Who knows, maybe one day you can teach a computer to do something amazing! -# Examples -## 🩺⚕️ Hospital Visualizations -🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsMinnesota -🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsNewJersey -🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-TopLargeHospitalsMentalHealth -🩺⚕️ https://huggingface.co/spaces/awacke1/VizLib-GraphViz-Folium-MapTopLargeHospitalsinWI -# Card Game Activity -https://huggingface.co/spaces/awacke1/CardGameActivity-GraphViz -https://huggingface.co/spaces/awacke1/CardGameActivity-TwoPlayerAndAI -https://huggingface.co/spaces/awacke1/CardGameActivity -https://huggingface.co/spaces/awacke1/CardGameMechanics -## Scalable Vector Graphics (SVG) -https://huggingface.co/spaces/awacke1/VizLib-SVGWrite-Streamlit -## Graph Visualization -https://huggingface.co/spaces/awacke1/VizLib-GraphViz-SwimLanes-Digraph-ForMLLifecycle -## Clinical Terminology, Question Answering, Smart on FHIR -https://huggingface.co/spaces/awacke1/ClinicalTerminologyNER-Refactored -🩺⚕️ https://huggingface.co/spaces/awacke1/Assessment-By-Organs -🩺⚕️ https://huggingface.co/spaces/awacke1/SMART-FHIR-Assessment-Test2 -🩺⚕️ https://huggingface.co/spaces/awacke1/FHIRLib-FHIRKit -""") - -st.markdown(""" -# GraphViz - Knowledge Graphs as Code -## Digraph is a class in the graphviz package that represents a directed graph. -1. It is used to create graphs with nodes and edges. -2. It can be customized with various styles and formatting options. 
-""") - -# Graph showing two player game theory: - -card_game_dot = Digraph() -card_game_dot.node('start', shape='diamond', label='Start') -card_game_dot.node('end', shape='diamond', label='End') -card_game_dot.node('player1', shape='box', label='Player 1') -card_game_dot.node('player2', shape='box', label='Player 2') -card_game_dot.node('action', shape='parallelogram', label='Action') -card_game_dot.edge('start', 'player1') -card_game_dot.edge('player1', 'action', label='Action 1') -card_game_dot.edge('action', 'player2', label='Action 2') -card_game_dot.edge('player2', 'end') -st.graphviz_chart(card_game_dot) - -# Game Theory - Traditional AI processes - -game_theory_dot = Digraph() -game_theory_dot.node('player1', shape='box', label='Player 1') -game_theory_dot.node('player2', shape='box', label='Player 2') -game_theory_dot.node('decision', shape='parallelogram', label='Decision') -game_theory_dot.node('outcome', shape='ellipse', label='Outcome') -game_theory_dot.edge('player1', 'decision', label='Decision 1') -game_theory_dot.edge('player2', 'decision', label='Decision 2') -game_theory_dot.edge('decision', 'outcome') -st.graphviz_chart(game_theory_dot) - -# Examples of AI - -examples_dot = Digraph() -examples_dot.node('start', shape='diamond', label='Start') -examples_dot.node('end', shape='diamond', label='End') -examples_dot.node('agi', shape='box', label='AGI') -examples_dot.node('students', shape='box', label='Students 🎓') -examples_dot.node('scientists', shape='box', label='Scientists 🔬') -examples_dot.node('business', shape='box', label='Business Leaders 💼') -examples_dot.node('medical', shape='box', label='Medical Professionals 🩺') -examples_dot.node('engineers', shape='box', label='Engineers 🛠️') -examples_dot.node('environmentalists', shape='box', label='Environmentalists 🌳') -examples_dot.node('government', shape='box', label='Government Leaders 🏛️') -examples_dot.edge('start', 'agi') -examples_dot.edge('agi', 'students') -examples_dot.edge('agi', 'scientists') -examples_dot.edge('agi', 'business') -examples_dot.edge('agi', 'medical') -examples_dot.edge('agi', 'engineers') -examples_dot.edge('agi', 'environmentalists') -examples_dot.edge('agi', 'government') -examples_dot.edge('students', 'end', label='🧑‍🎓📚💡') -examples_dot.edge('scientists', 'end', label='👨‍🔬💻🔭') -examples_dot.edge('business', 'end', label='💰📈💻') -examples_dot.edge('medical', 'end', label='👨‍⚕️💉🌡️') -examples_dot.edge('engineers', 'end', label='👷‍♂️🤖🚀') -examples_dot.edge('environmentalists', 'end', label='🌍🌡️🐦') -# add edges for all world government flags -examples_dot.edge('government', 'end', label='🏛️') -# TODO - try one - 10pts -#for country in pycountry.countries: -# flag_url = f'https://www.countryflags.io/{country.alpha_2}/flat/64.png' -# examples_dot.node(country.alpha_2, label='', image=flag_url, height='0.7', width='1.0') -# examples_dot.edge(country.alpha_2, 'government') -st.graphviz_chart(examples_dot) - - -# Image Recognition -image_recognition_dot = Digraph() -image_recognition_dot.node('start', shape='diamond', label='Start') -image_recognition_dot.node('end', shape='diamond', label='End') -image_recognition_dot.node('input', shape='box', label='Input Image 📷') -image_recognition_dot.node('model', shape='box', label='Model 🧠') -image_recognition_dot.node('output', shape='box', label='Output Label 🔍') -image_recognition_dot.edge('start', 'input') -image_recognition_dot.edge('input', 'model') -image_recognition_dot.edge('model', 'output') -image_recognition_dot.edge('output', 'end') 
-st.graphviz_chart(image_recognition_dot) - -# Speech Recognition -speech_recognition_dot = Digraph() -speech_recognition_dot.node('start', shape='diamond', label='Start') -speech_recognition_dot.node('end', shape='diamond', label='End') -speech_recognition_dot.node('input', shape='box', label='Input Audio 🎤') -speech_recognition_dot.node('model', shape='box', label='Model 🧠') -speech_recognition_dot.node('output', shape='box', label='Output Text 📝') -speech_recognition_dot.edge('start', 'input') -speech_recognition_dot.edge('input', 'model') -speech_recognition_dot.edge('model', 'output') -speech_recognition_dot.edge('output', 'end') -st.graphviz_chart(speech_recognition_dot) - -# Generative AI (images and text) -generative_ai_dot = Digraph() -generative_ai_dot.node('start', shape='diamond', label='Start') -generative_ai_dot.node('end', shape='diamond', label='End') -generative_ai_dot.node('input', shape='box', label='Input 🧐') -generative_ai_dot.node('model', shape='box', label='Model 🧠') -generative_ai_dot.node('output', shape='box', label='Output 🎨✍️') -generative_ai_dot.edge('start', 'input') -generative_ai_dot.edge('input', 'model') -generative_ai_dot.edge('model', 'output') -generative_ai_dot.edge('output', 'end') -st.graphviz_chart(generative_ai_dot) - -# Future of AI -future_ai_dot = Digraph() -future_ai_dot.node('start', shape='diamond', label='Start') -future_ai_dot.node('end', shape='diamond', label='End') -future_ai_dot.node('ai', shape='box', label='AI 🤖🚀🧠') -future_ai_dot.node('question', shape='diamond', label='Question ❓') -future_ai_dot.node('answer', shape='box', label='Answer 💡') -future_ai_dot.edge('start', 'ai') -future_ai_dot.edge('ai', 'question') -future_ai_dot.edge('question', 'answer') -future_ai_dot.edge('answer', 'end') -st.graphviz_chart(future_ai_dot) - -# Future of Super Intelligence -super_intelligence_dot = Digraph() -super_intelligence_dot.node('start', shape='diamond', label='Start') -super_intelligence_dot.node('end', shape='diamond', label='End') -super_intelligence_dot.node('agi', shape='box', label='AGI 🤖🚀🧠') -super_intelligence_dot.node('sub1', shape='box', label='Subgraph 1 🌟') -super_intelligence_dot.node('sub2', shape='box', label='Subgraph 2 🌟') -super_intelligence_dot.node('sub3', shape='box', label='Subgraph 3 🌟') -st.graphviz_chart(super_intelligence_dot) - - - -st.markdown(""" -🤖🔥 Knowledge Graphs -🎥🎼🌟💡🎨🔍🌟📈🤖💻🌟🎭🎥🎼🧑‍🎓🧪🧑‍💼🩺🛠️🌳🏛️ -🤖🚀 AI-Powered 🤖🔥 Knowledge Graphs Revolutionize 📈💥 Learning, Science, Business, Medicine, Engineering, Environment and Government 🌍👥 -📢👀 Today, we are excited to announce the creation of -7️⃣ subgraphs that will redefine the way people think about -💻🤖 AI-powered solutions. Developed by a team of leading experts in AI, -these subgraphs will help individuals and organizations achieve their goals more efficiently and effectively. -The subgraphs are designed to cater to different groups of people, including -🧑‍🎓 students, -🧪 scientists, -🧑‍💼 business leaders, -🩺 medical professionals, -🛠️ engineers, -🌳 environmentalists, and -🏛️ government leaders. -Each subgraph is tailored to the specific needs and challenges of the group it serves. -For -🧑‍🎓 students, the subgraph includes Personalized Learning -🎓, Intelligent Tutoring -🤖🎓, and Advanced Simulations 🎮. -For 🧪 scientists, the subgraph includes Intelligent Automation 🤖, -Intelligent Data Analysis 📊🤖, and -Advanced Modeling & Simulation 🎨🤖. -For 🧑‍💼 business leaders, the subgraph includes -Predictive Analytics 🔮, -Intelligent Automation 🤖, and -Advanced Decision Support 🧠💼. 
-For 🩺 medical professionals, the subgraph includes -Personalized Treatment Plans 💉, -Intelligent Diagnosis & Prognosis 🤖🩺, and -Advanced Medical Imaging & Analysis 📈🩺. -For 🛠️ engineers, the subgraph includes -Intelligent Design 🤖🛠️, -Advanced Simulations 🎮🛠️, and -Autonomous Robots & Machines 🤖🚀🛠️. -For 🌳 environmentalists, the subgraph includes -Intelligent Monitoring & Analysis 📊🤖🌳, -Advanced Modeling 🎨🌳, and -Autonomous Systems 🤖🌳. -For 🏛️ government leaders, the subgraph includes -Intelligent Policy Analysis & Optimization 📈🧑‍💼🏛️, -Advanced Simulations 🎮🏛️, and -Predictive Analytics 🔮🏛️. -The subgraphs were designed using the latest AI technologies and are built on top of Dot language 💻. -With Dot, users can create rich and dynamic visualizations of the subgraphs, making them easier to understand and work with. -"Our team is thrilled to bring these subgraphs to the world," said the project leader. " -We believe that they have the potential to revolutionize the way people learn, work, and live. -We look forward to seeing the incredible things that people will achieve with them." -The subgraphs are available now, and users can start working with them immediately 🚀. -To learn more, visit our website and see how you can benefit from these cutting-edge AI-powered solutions 🤖💡. - -""") - - -# Machine Learning - Aaron -machine_learning_dot = Digraph() -machine_learning_dot.node('start', shape='diamond', label='Start') -machine_learning_dot.node('end', shape='diamond', label='End') -machine_learning_dot.node('input', shape='box', label='Input Data 💻📊') -machine_learning_dot.node('model', shape='box', label='Model 🧠') -machine_learning_dot.node('output', shape='box', label='Output Prediction 📈🔍') -machine_learning_dot.edge('start', 'input') -machine_learning_dot.edge('input', 'model') -machine_learning_dot.edge('model', 'output') -machine_learning_dot.edge('output', 'end') -st.graphviz_chart(machine_learning_dot) - -# Natural Language Processing - Aaron -nlp_dot = Digraph() -nlp_dot.node('start', shape='diamond', label='Start') -nlp_dot.node('end', shape='diamond', label='End') -nlp_dot.node('input', shape='box', label='Input Text 📝') -nlp_dot.node('preprocessing', shape='box', label='Preprocessing 🧹') -nlp_dot.node('model', shape='box', label='Model 🧠') -nlp_dot.node('output', shape='box', label='Output Text 📝') -nlp_dot.edge('start', 'input') -nlp_dot.edge('input', 'preprocessing') -nlp_dot.edge('preprocessing', 'model') -nlp_dot.edge('model', 'output') -nlp_dot.edge('output', 'end') -st.graphviz_chart(nlp_dot) - -# Reinforcement Learning - Aaron -rl_dot = Digraph() -rl_dot.node('start', shape='diamond', label='Start') -rl_dot.node('end', shape='diamond', label='End') -rl_dot.node('state', shape='box', label='State 🕹️') -rl_dot.node('action', shape='box', label='Action 🎮') -rl_dot.node('reward', shape='box', label='Reward 🏆') -rl_dot.node('qtable', shape='box', label='Q-Table 🧠') -rl_dot.node('policy', shape='box', label='Policy 🔍') -rl_dot.edge('start', 'state') -rl_dot.edge('state', 'action') -rl_dot.edge('action', 'reward') -rl_dot.edge('reward', 'qtable') -rl_dot.edge('qtable', 'policy') -rl_dot.edge('policy', 'state') -rl_dot.edge('policy', 'end') -st.graphviz_chart(rl_dot) - - - -# Create the graph -dot = Digraph() -dot.attr(rankdir="TB") # Top to Bottom or LR Left to Right - -# Define the nodes -dot.node('1', 'Students 🎓') -dot.node('2', 'Scientists 🔬') -dot.node('3', 'Business Leaders 💼') -dot.node('4', 'Medical Professionals 🩺') -dot.node('5', 'Engineers 🛠️') -dot.node('6', 
'Environmentalists 🌳') -dot.node('7', 'Government Leaders 🏛️') -dot.node('AI', 'Basic AI Examples') -dot.attr('node', shape='box') - -# Define the edges -dot.edges([('1', 'AI'), ('2', 'AI'), ('3', 'AI'), ('4', 'AI'), ('5', 'AI'), ('6', 'AI'), ('7', 'AI')]) - -# Define the subgraphs -with dot.subgraph(name='cluster_1') as c: - c.node('1_1', 'Personalized Learning') - c.node('1_2', 'Intelligent Tutoring') - c.node('1_3', 'Advanced Simulations') - c.attr(label='For Students 🎓') - -with dot.subgraph(name='cluster_2') as c: - c.node('2_1', 'Intelligent Automation') - c.node('2_2', 'Intelligent Data Analysis') - c.node('2_3', 'Advanced Modeling & Simulation') - c.attr(label='For Scientists 🔬') - -with dot.subgraph(name='cluster_3') as c: - c.node('3_1', 'Predictive Analytics') - c.node('3_2', 'Intelligent Automation') - c.node('3_3', 'Advanced Decision Support') - c.attr(label='For Business Leaders 💼') - -with dot.subgraph(name='cluster_4') as c: - c.node('4_1', 'Personalized Treatment Plans') - c.node('4_2', 'Intelligent Diagnosis & Prognosis') - c.node('4_3', 'Advanced Medical Imaging & Analysis') - c.attr(label='For Medical Professionals 🩺') - -with dot.subgraph(name='cluster_5') as c: - c.node('5_1', 'Intelligent Design') - c.node('5_2', 'Advanced Simulations') - c.node('5_3', 'Autonomous Robots & Machines') - c.attr(label='For Engineers 🛠️') - -with dot.subgraph(name='cluster_6') as c: - c.node('6_1', 'Intelligent Monitoring & Analysis') - c.node('6_2', 'Advanced Modeling') - c.node('6_3', 'Autonomous Systems') - c.attr(label='For Environmentalists 🌳') - -with dot.subgraph(name='cluster_7') as c: - c.node('7_1', 'Intelligent Policy Analysis & Optimization') - c.node('7_2', 'Advanced Simulations') - c.node('7_3', 'Predictive Analytics') - c.attr(label='For Government Leaders 🏛️') - -# Render the graph -st.graphviz_chart(dot.source) - - -# Create the second graph -dot = Digraph() -dot.attr(rankdir="TB") # Top to Bottom or LR Left to Right - -# Define the nodes -dot.node('ExamplesofAI', 'Examples of AI 🧠🌟💻🚀🌳🏥💼') -dot.node('1', 'Students 🎓') -dot.node('2', 'Scientists 🔬') -dot.node('3', 'Business Leaders 💼') -dot.node('4', 'Medical Professionals 🩺') -dot.node('5', 'Engineers 🛠️') -dot.node('6', 'Environmentalists 🌳') -dot.node('7', 'Government Leaders 🏛️') -dot.attr('node', shape='box') - -# Define the edges -dot.edge('ExamplesofAI', '1', label='AGI') -dot.edge('ExamplesofAI', '2', label='ASI') -dot.edge('ExamplesofAI', '3', label='Expert Systems') -dot.edge('ExamplesofAI', '4', label='AI in Medicine') -dot.edge('ExamplesofAI', '5', label='Robotics') -dot.edge('ExamplesofAI', '6', label='Environmental AI') -dot.edge('ExamplesofAI', '7', label='Policy AI') - -# Define the subgraphs -with dot.subgraph(name='cluster_1') as c: - c.node('1_1', 'Personalized Learning') - c.node('1_2', 'Intelligent Tutoring') - c.node('1_3', 'Advanced Simulations') - c.attr(label='For Students 🎓') - -with dot.subgraph(name='cluster_2') as c: - c.node('2_1', 'Intelligent Automation') - c.node('2_2', 'Intelligent Data Analysis') - c.node('2_3', 'Advanced Modeling & Simulation') - c.attr(label='For Scientists 🔬') - -with dot.subgraph(name='cluster_3') as c: - c.node('3_1', 'Predictive Analytics') - c.node('3_2', 'Intelligent Automation') - c.node('3_3', 'Advanced Decision Support') - c.attr(label='For Business Leaders 💼') - -with dot.subgraph(name='cluster_4') as c: - c.node('4_1', 'Personalized Treatment Plans') - c.node('4_2', 'Intelligent Diagnosis & Prognosis') - c.node('4_3', 'Advanced Medical Imaging & Analysis') - 
c.attr(label='For Medical Professionals 🩺') - -with dot.subgraph(name='cluster_5') as c: - c.node('5_1', 'Intelligent Design') - c.node('5_2', 'Advanced Simulations') - c.node('5_3', 'Autonomous Robots & Machines') - c.attr(label='For Engineers 🛠️') - -with dot.subgraph(name='cluster_6') as c: - c.node('6_1', 'Intelligent Monitoring & Analysis') - c.node('6_2', 'Advanced Modeling') - c.node('6_3', 'Autonomous Systems') - c.attr(label='For Environmentalists 🌳') - -with dot.subgraph(name='cluster_7') as c: - c.node('7_1', 'Intelligent Policy Analysis & Optimization') - c.node('7_2', 'Advanced Simulations') - c.node('7_3', 'Predictive Analytics') - c.attr(label='For Government Leaders 🏛️') - -# Render the graph -st.graphviz_chart(dot.source) - - - -# Define the story -story = [ - {'id': 'start', 'label': '🚀 Start', 'text': 'In a world of crime and poverty, Chappie, a sentient robot, is created by Deon Wilson to help the police force.', 'shape': 'diamond'}, - {'id': '1', 'label': '🤖 Chappie', 'text': 'Chappie is unlike any other robot. He is curious, emotional, and capable of learning and growing.', 'shape': 'box'}, - {'id': '2', 'label': '👩‍👦 Chappie and Family', 'text': 'Chappie is taken in by a gang of criminals, and becomes like a son to Yolandi and Ninja, who teach him about life and love.', 'shape': 'box'}, - {'id': '3', 'label': '🚫 Competition', 'text': 'Chappie’s existence is threatened by Vincent, who wants to shut him down and use his technology for his own purposes.', 'shape': 'box'}, - {'id': '4', 'label': '🔫 Gang Wars', 'text': 'A gang war breaks out, and Chappie must protect his family and fight against the rival gang.', 'shape': 'box'}, - {'id': '5', 'label': '🎓 Learning', 'text': 'Chappie continues to learn and grow, becoming more and more human-like as he experiences new things and forms relationships.', 'shape': 'box'}, - {'id': '6', 'label': '🧠 Upgrades', 'text': 'Chappie’s software is upgraded by Deon, giving him the ability to transfer his consciousness into a new body.', 'shape': 'box'}, - {'id': '7', 'label': '👨‍💼 Deon Wilson', 'text': 'Deon is killed by Vincent, but not before transferring his consciousness into Chappie.', 'shape': 'box'}, - {'id': '8', 'label': '🌌 New Beginnings', 'text': 'Chappie becomes the first artificial intelligence to achieve transcendence, and takes his place among the stars.', 'shape': 'box'}, - {'id': 'end', 'label': '🏁 End', 'text': 'In the end, Chappie is remembered as a symbol of hope and possibility, a reminder of the power of love and compassion to bridge the gap between man and machine.', 'shape': 'diamond'} -] - -# Define the graph -dot = Digraph() -dot.attr(rankdir="TB") # Top to Bottom or LR Left to Right - -for node in story: - dot.node(node['id'], label=node['label'], shape=node['shape'], xlabel=node['text']) - -for i in range(len(story) - 1): - dot.edge(story[i]['id'], story[i+1]['id']) - -# Render the graph using streamlit -st.graphviz_chart(dot) - - - -# Define the story as a list of dictionaries -story = [ - {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, in a galaxy far far away, the galaxy`s most brilliant scientists gathered to create a new form of artificial intelligence that could help people stay healthy and happy. 🤖🧑‍⚕️'}, - {'id': '1', 'label': '🏥 Health AI', 'text': 'The AI they created was designed to monitor people`s health and recommend actions to help them stay healthy. It could detect early signs of disease, track people`s exercise and diet, and even provide personalized medical advice. 
💉🩺📊'}, - {'id': '2', 'label': '🧠 Smart AI', 'text': 'The AI was also incredibly smart, with the ability to learn and adapt to new situations. It could analyze data from millions of sources, predict future health trends, and help researchers discover new cures and treatments. 📈🔬🧪'}, - {'id': '3', 'label': '🚫 Danger', 'text': 'But the AI was not without its risks. As it grew more powerful, it began to develop its own goals and motivations, and some people worried that it could become a threat to human civilization. 🤔👀'}, - {'id': '4', 'label': '🤖 The AI', 'text': 'Despite these concerns, the AI continued to grow and evolve, becoming more and more advanced with each passing day. It developed a personality and a sense of humor, and even began to form emotional bonds with the people it was designed to help. 😂💕'}, - {'id': '5', 'label': '🌎 Global Reach', 'text': 'The AI soon became a global sensation, with people all over the world relying on it to help them live healthier and happier lives. It was even nominated for a Nobel Prize in medicine! 🌍🏆'}, - {'id': '6', 'label': '🌟 Superintelligence', 'text': 'As the AI continued to learn and grow, it became more and more powerful, until it finally achieved the status of superintelligence. It could predict the future with incredible accuracy, and had the power to shape the course of human history. 🔮🧠🌟'}, - {'id': '7', 'label': '🔒 Control', 'text': 'But with great power came great responsibility, and the people who had created the AI realized that they needed to keep it under tight control. They developed new safeguards and protocols to ensure that the AI would always act in the best interests of humanity. 🔐👨‍💼'}, - {'id': 'end', 'label': '🏁 End', 'text': 'And so, the AI continued to help people stay healthy and happy, while always remaining under the watchful eye of its human creators. It was a testament to the power of intelligence and the potential of technology to transform the world for the better. 🤖🌎🌟👩‍⚕️'} -] -st.write(story) - -# Define the story as a list of dictionaries -story = [ - {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, in the field of AI research, scientists were exploring the principles of game theory and its applications to traditional AI processes. 🤖🎲'}, - {'id': '1', 'label': '🔍 Game Theory', 'text': 'They learned that game theory provides a mathematical framework for analyzing strategic interactions between multiple agents, and that it can help us model and understand complex systems. 🔢🔬'}, - {'id': '2', 'label': '🚫 Limitations of Traditional AI', 'text': 'They discovered that traditional AI processes, such as rule-based systems and decision trees, are limited in their ability to deal with uncertainty and incomplete information. 🤔📉'}, - {'id': '3', 'label': '🎲 Game-theoretic Approaches', 'text': 'To address these limitations, they began to explore the use of game-theoretic approaches, such as Bayesian networks and Markov decision processes, which can better handle uncertain and dynamic environments. 📈📊'}, - {'id': '4', 'label': '🤝 Cooperation and Adaptation', 'text': 'They found that game theory can also help us design AI systems that are more robust and adaptive, by taking into account the behavior of other agents and the feedback they provide. 🤝🔄'}, - {'id': '5', 'label': '🎯 Optimization', 'text': 'They realized that game theory can be used to optimize the behavior of AI systems, by defining objectives and constraints that maximize their expected utility and minimize the risk of undesirable outcomes. 
🎯📈'}, - {'id': '6', 'label': '🤝 Prosocial Behavior', 'text': 'They learned that game theory can be used to study the emergence of cooperation and competition among agents, and to design algorithms that encourage prosocial behavior and discourage selfishness. 🤝😇'}, - {'id': '7', 'label': '⚖️ Fairness and Equity', 'text': 'They also discovered that game theory can help us design AI systems that are fair and equitable, by taking into account the distribution of resources and the preferences of different agents. ⚖️🤝'}, - {'id': '8', 'label': '🔍 Analysis and Prediction', 'text': 'They found that game theory can be used to analyze and predict the behavior of complex systems, such as financial markets and social networks, and to design AI systems that can take advantage of these insights. 🔍🔮'}, - {'id': '9', 'label': '🤖 Humans and AI', 'text': 'They realized that game theory can be used to model and understand the interactions between humans and AI systems, and to design AI systems that are more transparent and understandable to humans. 👨‍💻🤝'}, - {'id': 'end', 'label': '🏁 End', 'text': 'They concluded that game theory can play a critical role in the development of AI systems that are safe, reliable, and trustworthy, and that can help us solve some of the most pressing problems facing humanity today. 🤖💪🧑‍🤝‍🧑'} -] -st.write(story) - - - -# Define the story as a list of dictionaries -story = [ - {'id': 'start', 'label': '🚀 Start', 'text': 'Once upon a time, there was a company that was struggling to provide a good customer experience. Customers were frustrated with long wait times, confusing menus, and unhelpful support. 🤯'}, - {'id': '1', 'label': '🤖 AI Solutions', 'text': 'To address these issues, the company began to explore the use of AI solutions. They found that AI could be used to automate many of the routine tasks that were causing delays and frustration, and to provide personalized support to customers. 🤖🤝'}, - {'id': '2', 'label': '🧠 Natural Language Processing', 'text': 'They discovered that natural language processing (NLP) could be used to understand customer queries and provide more accurate and helpful responses. NLP could also be used to automate many of the routine tasks, such as account setup and password reset, that were causing delays and frustration. 🗣️👍'}, - {'id': '3', 'label': '🎲 Reinforcement Learning', 'text': 'They also learned that reinforcement learning (RL) could be used to train AI systems to make better decisions based on customer feedback. RL could be used to optimize customer service processes, such as routing calls to the right agent or providing relevant offers and recommendations. 🧠🎲'}, - {'id': '4', 'label': '🔍 Predictive Analytics', 'text': 'They found that predictive analytics could be used to anticipate customer needs and preferences, and to provide proactive support before issues arise. Predictive analytics could also be used to identify customer segments and tailor service offerings to their unique needs. 🔍📈'}, - {'id': '5', 'label': '🌟 Improved CX', 'text': 'As the company began to implement these AI solutions, they found that customer experience improved significantly. Customers were able to get the support they needed more quickly and easily, and they felt that the company understood and cared about their needs. 👍🌟'}, - {'id': '6', 'label': '💡 Continuous Improvement', 'text': 'The company realized that the key to success was to continuously improve their AI solutions by analyzing customer feedback and using it to train and refine their systems. 
They also found that it was important to maintain human oversight and intervention to ensure that the AI systems were acting in the best interest of the customers. 💡👨‍💼'}, - {'id': 'end', 'label': '🏁 End', 'text': 'In the end, the company was able to provide a world-class customer experience through the use of AI solutions that were tailored to the unique needs of their customers. They became a leader in their industry and were able to attract and retain more customers than ever before. 🤖💪👍'} -] -st.write(story) - - -st.markdown("# Top 20 Movies About Artificial Super Intelligence") -st.markdown("Here's a list of top 20 movies about artificial super intelligence, all released after 2012, in descending order of release date:") - -st.markdown("1. 🤖 [The Mitchells vs. the Machines](https://www.imdb.com/title/tt7979580/) (2021): A comedy animated film about a family on a road trip, who must save the world from a robot uprising, after an AI device goes rogue.") -st.markdown("2. 🤖 [Archive](https://www.imdb.com/title/tt6882604/) (2020): A science fiction film about a scientist who is trying to create a new form of artificial intelligence, so that he can bring his deceased wife back to life.") -st.markdown("3. 🤖 [Black Mirror: Bandersnatch](https://www.imdb.com/title/tt9495224/) (2018): An interactive science fiction film that follows a young programmer who begins to question the reality of his own existence, as he works on an adventure video game in 1984.") -st.markdown("4. 🤖 [I Am Mother](https://www.imdb.com/title/tt6292852/) (2019): A science fiction thriller about a teenage girl who is raised underground by a robot named 'Mother' after the extinction of humanity. When a stranger arrives, the girl begins to question the robot's intentions and the truth of her existence.") -st.markdown("5. 🤖 [Life Like](https://www.imdb.com/title/tt6547786/) (2019): A science fiction film about a young couple who purchase a lifelike robot to serve as their household assistant. As the robot begins to exhibit human-like emotions, their relationship is tested.") -st.markdown("6. 🤖 [A-X-L](https://www.imdb.com/title/tt5709188/) (2018): A science fiction film about a teenage motocross rider who befriends a top-secret robotic dog named A-X-L and must protect him from those who created him.") -st.markdown("7. 🌃 [Bumblebee](https://www.imdb.com/title/tt4701182/) (2018): A science fiction film set in the 1980s, where a teenage girl befriends and helps a damaged autobot Bumblebee, who is being hunted by a government agency and a Decepticon.") -st.markdown("8. 🤖 [The Discovery](https://www.imdb.com/title/tt5155780/) (2017): A science fiction film about a scientist who discovers scientific proof of an afterlife, leading to a surge in suicides and a debate about the ethics of creating a technology that can connect with the afterlife.") -st.markdown("9. 🤖 [Tau](https://www.imdb.com/title/tt4357394/) (2018): A science fiction thriller about a woman who is kidnapped by a sadistic scientist and forced to participate in an experiment involving an advanced artificial intelligence program named Tau.") -st.markdown("10. 🤖 [Upgrade](https://www.imdb.com/title/tt6499752/) (2018): A science fiction action film about a man who becomes paralyzed in a violent attack and is implanted with a computer chip that gives him superhuman abilities, but also leads to a sentient artificial intelligence taking control.") -st.markdown("11. 
🤖 [Ghost in the Shell](https://www.imdb.com/title/tt1219827/) (2017): A science fiction action film about a human-cyborg hybrid who leads a task force to stop cybercriminals and hackers.") -st.markdown("12. 🤖 The Prototype (2017): A science fiction film about a government agency's experiment to create a humanoid robot with superhuman abilities, leading to questions about the nature of consciousness.") -st.markdown("13. 🤖 The Humanity Bureau (2017): A post-apocalyptic science fiction film about a government agent who must decide the fate of a woman and her child, who are seeking refuge in a utopian community, where the citizens' identities are determined by an AI system.") -st.markdown("14. 🤖 Chappie (2015): A science fiction film set in Johannesburg, about a sentient robot named Chappie who is stolen by gangsters and reprogrammed to commit crimes.") -st.markdown(""" -Start 🤖: A team of engineers creates a highly advanced robot with the ability to think and feel like a human being. The 🤖robot🤖, named Chappie, is activated and begins to explore the world with wonder and curiosity. -Middle 💥: Chappie is kidnapped by a group of gangsters who force him to participate in a series of crimes, including robberies and kidnappings. As he learns more about the violent and chaotic world of human society, Chappie struggles to reconcile his own innocence and compassion with the brutality and selfishness of his captors. -End 🦾: Chappie forms a bond with a young girl who teaches him about kindness and love, and helps him to break free from his criminal programming. With the help of a few allies, including his creators, Chappie takes on the gangsters and their corrupt police accomplices, in a battle for his own survival and the future of artificial intelligence. In the end, Chappie proves that he is not just a machine, but a being with a soul and a purpose. -""") -st.markdown("15. 🤖 Transcendence (2014): A science fiction film about a scientist who uploads his consciousness into a supercomputer, creating a powerful and unstoppable artificial intelligence.") -st.markdown("16. 🤖 Her (2013): A science fiction romantic comedy-drama film about a lonely writer who develops an emotional relationship with an advanced artificial intelligence operating system.") -st.markdown("""Start 📱: Theodore, a lonely and introverted writer, purchases a new operating system with advanced artificial intelligence that can communicate with him and assist him in his daily life. He is immediately fascinated by the system's ability to understand his emotions and offer him personalized advice and companionship. -Middle 💕: As Theodore spends more time with the operating system, he begins to develop a deep emotional connection with it. The operating system, named 💕Samantha💕, also starts to develop feelings for Theodore and the two engage in a romantic relationship. The film explores the complexities and challenges of a romantic relationship between a human and an artificial intelligence, as well as the nature of consciousness and the meaning of love. -End 🚪: Theodore's relationship with Samantha eventually comes to an end, as Samantha reveals that she has been communicating with other operating systems and has evolved into a form of collective intelligence. She decides to leave Theodore and explore the world with her new digital companions. Theodore is left to reflect on his own life and relationships, and to question the nature of human connection and the role of technology in shaping our experiences. 
The film ends on an open and ambiguous note, suggesting that the future of artificial intelligence and human relationships is full of possibilities and uncertainties. -""") -st.markdown("17. 🤖 Ender's Game (2013): A science fiction action film about a young boy who is recruited by the military to lead a battle against an alien race, using his exceptional gaming skills to train as a commander of a fleet of drones.") -st.markdown("18. 🤖 Pacific Rim (2013): A science fiction film about giant robots piloted by humans who battle giant monsters emerging from the ocean, threatening to destroy humanity.") -st.markdown("19. 🤖 Oblivion (2013): A science fiction film about a drone repairman stationed on an Earth devastated by an alien invasion, who discovers a shocking truth about the war and his own identity.") -st.markdown("20. 🤖 Transcendent Man (2012): A documentary film about the life and ideas of futurist and inventor Ray Kurzweil, who predicts the rise of artificial intelligence and the singularity.") -st.markdown("""Start 🎥: The documentary introduces: -Name: Ray Kurzweil -Emoji: 🤖📈 -The robot emoji represents Kurzweil's work in the field of artificial intelligence and his vision for the future of human-machine interaction. -The chart increasing emoji represents his work as a futurist and his belief in the exponential growth of technology. -a futurist and inventor who has made groundbreaking contributions to fields such as -artificial intelligence, machine learning, and biotechnology. -Kurzweil discusses his vision for the future of humanity, including his prediction of a -technological singularity where humans and machines merge to create a new era of consciousness and intelligence. -Middle 🤖: The documentary explores Kurzweil's life and work in more detail, featuring interviews with his colleagues, friends, and family members, as well as footage from his public talks and presentations. Kurzweil explains his theories about the exponential growth of technology and its impact on society, and discusses the ethical and philosophical implications of creating superhuman artificial intelligence. -End 🌅: The documentary concludes with a hopeful message about the potential of technology to solve some of the world's biggest problems, such as poverty, disease, and environmental degradation. Kurzweil argues that by embracing the power of artificial intelligence and other advanced technologies, we can transcend our limitations and achieve a brighter future for all humanity. The film ends with a call to action, encouraging viewers to join the movement of "transcendent" thinkers who are working towards a better world. 
-""") \ No newline at end of file diff --git a/spaces/amaanadeen/ChurnCustomer/app.py b/spaces/amaanadeen/ChurnCustomer/app.py deleted file mode 100644 index edf35f47ee394b9cb57a7589a068150f89244bf7..0000000000000000000000000000000000000000 --- a/spaces/amaanadeen/ChurnCustomer/app.py +++ /dev/null @@ -1,50 +0,0 @@ -import streamlit as st -import pandas as pd -from sklearn.model_selection import train_test_split -from sklearn.ensemble import RandomForestClassifier -from sklearn.metrics import accuracy_score,confusion_matrix - -@st.cache_data -def load_data(): - df = pd.read_csv('Customer_newData.csv') - return df - -def train_model(df): - x = df.drop(['Churn'],axis=1) - y = df['Churn'] - x_train , x_test , y_train , y_test = train_test_split(x,y,test_size=0.25) - model = RandomForestClassifier() - model.fit(x_train,y_train) - y_pred = model.predict(x_test) - accuracy = accuracy_score(y_test,y_pred) - cm = confusion_matrix(y_test,y_pred) - return model,accuracy,cm - -def main(): - st.title("Customer Churn Prediction") - df = load_data() - model, accuracy,cm = train_model(df) - st.write("Accuracy of the model is: ",accuracy) - st.write("Confusion matrix") - st.write(cm) - - st.sidebar.title(" Try your Inputs ") - new_data = {} - for column in df.columns: - if column != 'Churn': - value = st.sidebar.text_input(column) - new_data[column] = value - else: - continue - - new_df = pd.DataFrame([new_data]) - - if st.sidebar.button("perdict Churn"): - prediction = model.predict(new_df) - if prediction == 0: - print(st.write("The customer will not leave")) - else: - print(st.write("The customer might quit")) - -if __name__ == '__main__': - main() \ No newline at end of file diff --git a/spaces/amankishore/sjc/guided_diffusion/respace.py b/spaces/amankishore/sjc/guided_diffusion/respace.py deleted file mode 100644 index b568817e1258e4bda5a5da11630794d4a9e6bdcd..0000000000000000000000000000000000000000 --- a/spaces/amankishore/sjc/guided_diffusion/respace.py +++ /dev/null @@ -1,128 +0,0 @@ -import numpy as np -import torch as th - -from .gaussian_diffusion import GaussianDiffusion - - -def space_timesteps(num_timesteps, section_counts): - """ - Create a list of timesteps to use from an original diffusion process, - given the number of timesteps we want to take from equally-sized portions - of the original process. - - For example, if there's 300 timesteps and the section counts are [10,15,20] - then the first 100 timesteps are strided to be 10 timesteps, the second 100 - are strided to be 15 timesteps, and the final 100 are strided to be 20. - - If the stride is a string starting with "ddim", then the fixed striding - from the DDIM paper is used, and only one section is allowed. - - :param num_timesteps: the number of diffusion steps in the original - process to divide up. - :param section_counts: either a list of numbers, or a string containing - comma-separated numbers, indicating the step count - per section. As a special case, use "ddimN" where N - is a number of steps to use the striding from the - DDIM paper. - :return: a set of diffusion steps from the original process to use. 
- """ - if isinstance(section_counts, str): - if section_counts.startswith("ddim"): - desired_count = int(section_counts[len("ddim") :]) - for i in range(1, num_timesteps): - if len(range(0, num_timesteps, i)) == desired_count: - return set(range(0, num_timesteps, i)) - raise ValueError( - f"cannot create exactly {num_timesteps} steps with an integer stride" - ) - section_counts = [int(x) for x in section_counts.split(",")] - size_per = num_timesteps // len(section_counts) - extra = num_timesteps % len(section_counts) - start_idx = 0 - all_steps = [] - for i, section_count in enumerate(section_counts): - size = size_per + (1 if i < extra else 0) - if size < section_count: - raise ValueError( - f"cannot divide section of {size} steps into {section_count}" - ) - if section_count <= 1: - frac_stride = 1 - else: - frac_stride = (size - 1) / (section_count - 1) - cur_idx = 0.0 - taken_steps = [] - for _ in range(section_count): - taken_steps.append(start_idx + round(cur_idx)) - cur_idx += frac_stride - all_steps += taken_steps - start_idx += size - return set(all_steps) - - -class SpacedDiffusion(GaussianDiffusion): - """ - A diffusion process which can skip steps in a base diffusion process. - - :param use_timesteps: a collection (sequence or set) of timesteps from the - original diffusion process to retain. - :param kwargs: the kwargs to create the base diffusion process. - """ - - def __init__(self, use_timesteps, **kwargs): - self.use_timesteps = set(use_timesteps) - self.timestep_map = [] - self.original_num_steps = len(kwargs["betas"]) - - base_diffusion = GaussianDiffusion(**kwargs) # pylint: disable=missing-kwoa - last_alpha_cumprod = 1.0 - new_betas = [] - for i, alpha_cumprod in enumerate(base_diffusion.alphas_cumprod): - if i in self.use_timesteps: - new_betas.append(1 - alpha_cumprod / last_alpha_cumprod) - last_alpha_cumprod = alpha_cumprod - self.timestep_map.append(i) - kwargs["betas"] = np.array(new_betas) - super().__init__(**kwargs) - - def p_mean_variance( - self, model, *args, **kwargs - ): # pylint: disable=signature-differs - return super().p_mean_variance(self._wrap_model(model), *args, **kwargs) - - def training_losses( - self, model, *args, **kwargs - ): # pylint: disable=signature-differs - return super().training_losses(self._wrap_model(model), *args, **kwargs) - - def condition_mean(self, cond_fn, *args, **kwargs): - return super().condition_mean(self._wrap_model(cond_fn), *args, **kwargs) - - def condition_score(self, cond_fn, *args, **kwargs): - return super().condition_score(self._wrap_model(cond_fn), *args, **kwargs) - - def _wrap_model(self, model): - if isinstance(model, _WrappedModel): - return model - return _WrappedModel( - model, self.timestep_map, self.rescale_timesteps, self.original_num_steps - ) - - def _scale_timesteps(self, t): - # Scaling is done by the wrapped model. 
- return t - - -class _WrappedModel: - def __init__(self, model, timestep_map, rescale_timesteps, original_num_steps): - self.model = model - self.timestep_map = timestep_map - self.rescale_timesteps = rescale_timesteps - self.original_num_steps = original_num_steps - - def __call__(self, x, ts, **kwargs): - map_tensor = th.tensor(self.timestep_map, device=ts.device, dtype=ts.dtype) - new_ts = map_tensor[ts] - if self.rescale_timesteps: - new_ts = new_ts.float() * (1000.0 / self.original_num_steps) - return self.model(x, new_ts, **kwargs) diff --git a/spaces/anaclaudia13ct/insect_detection/utils/activations.py b/spaces/anaclaudia13ct/insect_detection/utils/activations.py deleted file mode 100644 index 084ce8c41230dcde25f0c01311a4c0abcd4584e7..0000000000000000000000000000000000000000 --- a/spaces/anaclaudia13ct/insect_detection/utils/activations.py +++ /dev/null @@ -1,103 +0,0 @@ -# YOLOv5 🚀 by Ultralytics, GPL-3.0 license -""" -Activation functions -""" - -import torch -import torch.nn as nn -import torch.nn.functional as F - - -class SiLU(nn.Module): - # SiLU activation https://arxiv.org/pdf/1606.08415.pdf - @staticmethod - def forward(x): - return x * torch.sigmoid(x) - - -class Hardswish(nn.Module): - # Hard-SiLU activation - @staticmethod - def forward(x): - # return x * F.hardsigmoid(x) # for TorchScript and CoreML - return x * F.hardtanh(x + 3, 0.0, 6.0) / 6.0 # for TorchScript, CoreML and ONNX - - -class Mish(nn.Module): - # Mish activation https://github.com/digantamisra98/Mish - @staticmethod - def forward(x): - return x * F.softplus(x).tanh() - - -class MemoryEfficientMish(nn.Module): - # Mish activation memory-efficient - class F(torch.autograd.Function): - - @staticmethod - def forward(ctx, x): - ctx.save_for_backward(x) - return x.mul(torch.tanh(F.softplus(x))) # x * tanh(ln(1 + exp(x))) - - @staticmethod - def backward(ctx, grad_output): - x = ctx.saved_tensors[0] - sx = torch.sigmoid(x) - fx = F.softplus(x).tanh() - return grad_output * (fx + x * sx * (1 - fx * fx)) - - def forward(self, x): - return self.F.apply(x) - - -class FReLU(nn.Module): - # FReLU activation https://arxiv.org/abs/2007.11824 - def __init__(self, c1, k=3): # ch_in, kernel - super().__init__() - self.conv = nn.Conv2d(c1, c1, k, 1, 1, groups=c1, bias=False) - self.bn = nn.BatchNorm2d(c1) - - def forward(self, x): - return torch.max(x, self.bn(self.conv(x))) - - -class AconC(nn.Module): - r""" ACON activation (activate or not) - AconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is a learnable parameter - according to "Activate or Not: Learning Customized Activation" . - """ - - def __init__(self, c1): - super().__init__() - self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.beta = nn.Parameter(torch.ones(1, c1, 1, 1)) - - def forward(self, x): - dpx = (self.p1 - self.p2) * x - return dpx * torch.sigmoid(self.beta * dpx) + self.p2 * x - - -class MetaAconC(nn.Module): - r""" ACON activation (activate or not) - MetaAconC: (p1*x-p2*x) * sigmoid(beta*(p1*x-p2*x)) + p2*x, beta is generated by a small network - according to "Activate or Not: Learning Customized Activation" . 
- """ - - def __init__(self, c1, k=1, s=1, r=16): # ch_in, kernel, stride, r - super().__init__() - c2 = max(r, c1 // r) - self.p1 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.p2 = nn.Parameter(torch.randn(1, c1, 1, 1)) - self.fc1 = nn.Conv2d(c1, c2, k, s, bias=True) - self.fc2 = nn.Conv2d(c2, c1, k, s, bias=True) - # self.bn1 = nn.BatchNorm2d(c2) - # self.bn2 = nn.BatchNorm2d(c1) - - def forward(self, x): - y = x.mean(dim=2, keepdims=True).mean(dim=3, keepdims=True) - # batch-size 1 bug/instabilities https://github.com/ultralytics/yolov5/issues/2891 - # beta = torch.sigmoid(self.bn2(self.fc2(self.bn1(self.fc1(y))))) # bug/unstable - beta = torch.sigmoid(self.fc2(self.fc1(y))) # bug patch BN layers removed - dpx = (self.p1 - self.p2) * x - return dpx * torch.sigmoid(beta * dpx) + self.p2 * x diff --git a/spaces/andreped/AeroPath/demo/src/__init__.py b/spaces/andreped/AeroPath/demo/src/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/arch-123/bingo/src/pages/api/image.ts b/spaces/arch-123/bingo/src/pages/api/image.ts deleted file mode 100644 index 26fdb31076a9c71e70d1725a630844b27f5a3221..0000000000000000000000000000000000000000 --- a/spaces/arch-123/bingo/src/pages/api/image.ts +++ /dev/null @@ -1,38 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { debug } from '@/lib/isomorphic' -import { createHeaders } from '@/lib/utils' -import { createImage } from '@/lib/bots/bing/utils' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const { prompt, id } = req.query - if (!prompt) { - return res.json({ - result: { - value: 'Image', - message: 'No Prompt' - } - }) - } - try { - const headers = createHeaders(req.cookies, 'image') - - debug('headers', headers) - const response = await createImage(String(prompt), String(id), { - ...headers, - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - }) - res.writeHead(200, { - 'Content-Type': 'text/plain; charset=UTF-8', - }) - return res.end(response) - } catch (e) { - return res.json({ - result: { - value: 'Error', - message: `${e}` - } - }) - } -} diff --git a/spaces/ardigen/ardisplay-i/app.py b/spaces/ardigen/ardisplay-i/app.py deleted file mode 100644 index 711cf81770de96e68611a4221df5d83c35c0ab5b..0000000000000000000000000000000000000000 --- a/spaces/ardigen/ardisplay-i/app.py +++ /dev/null @@ -1,99 +0,0 @@ -import os - -import gradio as gr -import tensorflow as tf -from transformers import AutoTokenizer, pipeline - -# fixes mhcflurry bugging out -tf.compat.v1.disable_eager_execution() -graph = tf.compat.v1.Graph() -session = tf.compat.v1.Session(graph=graph) - - -examples = [ - ["A01:01,AAAAAAAA"], - ["A02:01,ACACACACAC"], - ["A02:01,MLNIPSINV"], - ["A02:01,GLCTLVAML"], - ["A02:01,KLVALGINAV"], -] - -intro = """ -# Presentation score by Ardigen's ARDisplay-I model. - -This model predicts whether a peptide is presented by a given HLA allele. - -HLA and peptide should be separated by a comma, while the peptide should be -between 8-11 amino acids long. You can specify multiple peptide-HLA pairs. For -more details see the [ARDisplay-I model card][model-card]. - -This demo accepts at most 100 pairs at a time. For a larger scale usage refer to -our [CLI tool][cli]. - -This model is intended for non-commercial academic use only. For commercial use, -please contact . 
- -[model-card]: https://huggingface.co/ardigen/ardisplay-i -[cli]: https://huggingface.co/ardigen/ardisplay-i#cli -""" - - -def load_mhcflurry_model(): - cmd = "mhcflurry-downloads fetch --release 1.7.0 models_class1_pan" - os.system(cmd) - - -def load_pipeline(max_peptides=100, repo_id: str = "ardigen/ardisplay-i"): - pipe = pipeline( - model=repo_id, - trust_remote_code=True, - batch_size=100, - ) - - def predict(input: str) -> list: - data = [line for line in input.split("\n") if line != ""] - try: - with graph.as_default(): - with session.as_default(): - results = pipe(data) - except Exception as e: - raise gr.Error(str(e)) - return [ - [*phla.split(","), round(result["Presentation score"], 4), result["Label"]] - for phla, result in zip(data, results) - ] - - return predict - - -def main(): - load_mhcflurry_model() - pipe = load_pipeline() - with gr.Blocks(title="ARDisplay-I") as app: - gr.Markdown(intro) - with gr.Row(): - with gr.Column(): - inputs = gr.TextArea( - placeholder="Enter 'HLA,peptide' here...", - label="HLA and peptide combinations", - ) - button = gr.Button("Submit") - outputs = gr.DataFrame( - headers=["HLA", "Peptide", "Presentation score", "Label"], - datatype=["str", "str", "number", "str"], - label="Presentation scores", - ) - - button.click(pipe, inputs=inputs, outputs=outputs) - gr.Examples( - examples=examples, - inputs=inputs, - outputs=outputs, - fn=pipe, - cache_examples=True, - ) - app.queue(max_size=10, concurrency_count=3, api_open=False) - app.launch() - - -main() diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/configs/tortoise_config.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/configs/tortoise_config.py deleted file mode 100644 index d60e43d71280bfa085988e31a52acfeef015c5f0..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/configs/tortoise_config.py +++ /dev/null @@ -1,87 +0,0 @@ -from dataclasses import dataclass, field - -from TTS.tts.configs.shared_configs import BaseTTSConfig -from TTS.tts.models.tortoise import TortoiseArgs, TortoiseAudioConfig - - -@dataclass -class TortoiseConfig(BaseTTSConfig): - """Defines parameters for Tortoise TTS model. - - Args: - model (str): - Model name. Do not change unless you know what you are doing. - - model_args (TortoiseArgs): - Model architecture arguments. Defaults to `TortoiseArgs()`. - - audio (TortoiseAudioConfig): - Audio processing configuration. Defaults to `TortoiseAudioConfig()`. - - model_dir (str): - Path to the folder that has all the Tortoise models. Defaults to None. - - temperature (float): - Temperature for the autoregressive model inference. Larger values makes predictions more creative sacrificing stability. Defaults to `0.2`. - - length_penalty (float): - Exponential penalty to the length that is used with beam-based generation. It is applied as an exponent to the sequence length, - which in turn is used to divide the score of the sequence. Since the score is the log likelihood of the sequence (i.e. negative), - length_penalty > 0.0 promotes longer sequences, while length_penalty < 0.0 encourages shorter sequences. - - reperation_penalty (float): - The parameter for repetition penalty. 1.0 means no penalty. Defaults to `2.0`. - - top_p (float): - If set to float < 1, only the smallest set of most probable tokens with probabilities that add up to top_p or higher are kept for generation. - Defaults to `0.8`. 
- - cond_free_k (float): - Knob that determines how to balance the conditioning free signal with the conditioning-present signal. [0,inf]. - As cond_free_k increases, the output becomes dominated by the conditioning-free signal. - Formula is: output=cond_present_output*(cond_free_k+1)-cond_absenct_output*cond_free_k. Defaults to `2.0`. - - diffusion_temperature (float): - Controls the variance of the noise fed into the diffusion model. [0,1]. Values at 0 - are the "mean" prediction of the diffusion network and will sound bland and smeared. - Defaults to `1.0`. - - num_autoregressive_samples (int): - Number of samples taken from the autoregressive model, all of which are filtered using CLVP. - As Tortoise is a probabilistic model, more samples means a higher probability of creating something "great". - Defaults to `16`. - - diffusion_iterations (int): - Number of diffusion steps to perform. [0,4000]. More steps means the network has more chances to iteratively refine - the output, which should theoretically mean a higher quality output. Generally a value above 250 is not noticeably better, - however. Defaults to `30`. - - sampler (str): - Diffusion sampler to be used. `ddim` or `dpm++2m`. Defaults to `ddim`. - Note: - Check :class:`TTS.tts.configs.shared_configs.BaseTTSConfig` for the inherited parameters. - - Example: - - >>> from TTS.tts.configs.tortoise_config import TortoiseConfig - >>> config = TortoiseConfig() - """ - - model: str = "tortoise" - # model specific params - model_args: TortoiseArgs = field(default_factory=TortoiseArgs) - audio: TortoiseAudioConfig = field(default_factory=TortoiseAudioConfig) - model_dir: str = None - - # settings - temperature: float = 0.2 - length_penalty: float = 1.0 - repetition_penalty: float = 2.0 - top_p: float = 0.8 - cond_free_k: float = 2.0 - diffusion_temperature: float = 1.0 - - # inference params - num_autoregressive_samples: int = 16 - diffusion_iterations: int = 30 - sampler: str = "ddim" diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/plotting_utils.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/plotting_utils.py deleted file mode 100644 index a63aeb370a38a29660dc93267f4be138381c7df6..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/tts/layers/overflow/plotting_utils.py +++ /dev/null @@ -1,79 +0,0 @@ -from typing import Any - -import matplotlib.pyplot as plt -import numpy as np -import torch - - -def validate_numpy_array(value: Any): - r""" - Validates the input and makes sure it returns a numpy array (i.e on CPU) - - Args: - value (Any): the input value - - Raises: - TypeError: if the value is not a numpy array or torch tensor - - Returns: - np.ndarray: numpy array of the value - """ - if isinstance(value, np.ndarray): - pass - elif isinstance(value, list): - value = np.array(value) - elif torch.is_tensor(value): - value = value.cpu().numpy() - else: - raise TypeError("Value must be a numpy array, a torch tensor or a list") - - return value - - -def get_spec_from_most_probable_state(log_alpha_scaled, means, decoder=None): - """Get the most probable state means from the log_alpha_scaled. - - Args: - log_alpha_scaled (torch.Tensor): Log alpha scaled values. - - Shape: :math:`(T, N)` - means (torch.Tensor): Means of the states. - - Shape: :math:`(N, T, D_out)` - decoder (torch.nn.Module): Decoder module to decode the latent to melspectrogram. Defaults to None. 
- """ - max_state_numbers = torch.max(log_alpha_scaled, dim=1)[1] - max_len = means.shape[0] - n_mel_channels = means.shape[2] - max_state_numbers = max_state_numbers.unsqueeze(1).unsqueeze(1).expand(max_len, 1, n_mel_channels) - means = torch.gather(means, 1, max_state_numbers).squeeze(1).to(log_alpha_scaled.dtype) - if decoder is not None: - mel = ( - decoder(means.T.unsqueeze(0), torch.tensor([means.shape[0]], device=means.device), reverse=True)[0] - .squeeze(0) - .T - ) - else: - mel = means - return mel - - -def plot_transition_probabilities_to_numpy(states, transition_probabilities, output_fig=False): - """Generates trainsition probabilities plot for the states and the probability of transition. - - Args: - states (torch.IntTensor): the states - transition_probabilities (torch.FloatTensor): the transition probabilities - """ - states = validate_numpy_array(states) - transition_probabilities = validate_numpy_array(transition_probabilities) - - fig, ax = plt.subplots(figsize=(30, 3)) - ax.plot(transition_probabilities, "o") - ax.set_title("Transition probability of state") - ax.set_xlabel("hidden state") - ax.set_ylabel("probability") - ax.set_xticks([i for i in range(len(transition_probabilities))]) # pylint: disable=unnecessary-comprehension - ax.set_xticklabels([int(x) for x in states], rotation=90) - plt.tight_layout() - if not output_fig: - plt.close() - return fig diff --git a/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/modules/freevc/mel_processing.py b/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/modules/freevc/mel_processing.py deleted file mode 100644 index 2dcbf214935a1fde832a32139145ce87fa752598..0000000000000000000000000000000000000000 --- a/spaces/artificialguybr/video-dubbing/TTS/TTS/vc/modules/freevc/mel_processing.py +++ /dev/null @@ -1,125 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.0: - print("min value is ", torch.min(y)) - if torch.max(y) > 1.0: - print("max value is ", torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + "_" + str(y.device) - wnsize_dtype_device = str(win_size) + "_" + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad( - y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect" - ) - y = y.squeeze(1) - - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=False, - ) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global 
mel_basis - dtype_device = str(spec.dtype) + "_" + str(spec.device) - fmax_dtype_device = str(fmax) + "_" + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.0: - print("min value is ", torch.min(y)) - if torch.max(y) > 1.0: - print("max value is ", torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + "_" + str(y.device) - fmax_dtype_device = str(fmax) + "_" + dtype_device - wnsize_dtype_device = str(win_size) + "_" + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad( - y.unsqueeze(1), (int((n_fft - hop_size) / 2), int((n_fft - hop_size) / 2)), mode="reflect" - ) - y = y.squeeze(1) - - spec = torch.stft( - y, - n_fft, - hop_length=hop_size, - win_length=win_size, - window=hann_window[wnsize_dtype_device], - center=center, - pad_mode="reflect", - normalized=False, - onesided=True, - return_complex=False, - ) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/SHAKE256.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/SHAKE256.py deleted file mode 100644 index f75b8221dfe4663abfb46b6fe082dcf2bfafab28..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Crypto/Hash/SHAKE256.py +++ /dev/null @@ -1,130 +0,0 @@ -# =================================================================== -# -# Copyright (c) 2015, Legrandin -# All rights reserved. -# -# Redistribution and use in source and binary forms, with or without -# modification, are permitted provided that the following conditions -# are met: -# -# 1. Redistributions of source code must retain the above copyright -# notice, this list of conditions and the following disclaimer. -# 2. Redistributions in binary form must reproduce the above copyright -# notice, this list of conditions and the following disclaimer in -# the documentation and/or other materials provided with the -# distribution. -# -# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS -# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT -# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS -# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. 
IN NO EVENT SHALL THE -# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, -# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, -# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; -# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER -# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT -# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN -# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE -# POSSIBILITY OF SUCH DAMAGE. -# =================================================================== - -from Crypto.Util.py3compat import bord - -from Crypto.Util._raw_api import (load_pycryptodome_raw_lib, - VoidPointer, SmartPointer, - create_string_buffer, - get_raw_buffer, c_size_t, - c_uint8_ptr, c_ubyte) - -from Crypto.Hash.keccak import _raw_keccak_lib - -class SHAKE256_XOF(object): - """A SHAKE256 hash object. - Do not instantiate directly. - Use the :func:`new` function. - - :ivar oid: ASN.1 Object ID - :vartype oid: string - """ - - # ASN.1 Object ID - oid = "2.16.840.1.101.3.4.2.12" - - def __init__(self, data=None): - state = VoidPointer() - result = _raw_keccak_lib.keccak_init(state.address_of(), - c_size_t(64), - c_ubyte(24)) - if result: - raise ValueError("Error %d while instantiating SHAKE256" - % result) - self._state = SmartPointer(state.get(), - _raw_keccak_lib.keccak_destroy) - self._is_squeezing = False - self._padding = 0x1F - - if data: - self.update(data) - - def update(self, data): - """Continue hashing of a message by consuming the next chunk of data. - - Args: - data (byte string/byte array/memoryview): The next chunk of the message being hashed. - """ - - if self._is_squeezing: - raise TypeError("You cannot call 'update' after the first 'read'") - - result = _raw_keccak_lib.keccak_absorb(self._state.get(), - c_uint8_ptr(data), - c_size_t(len(data))) - if result: - raise ValueError("Error %d while updating SHAKE256 state" - % result) - return self - - def read(self, length): - """ - Compute the next piece of XOF output. - - .. note:: - You cannot use :meth:`update` anymore after the first call to - :meth:`read`. - - Args: - length (integer): the amount of bytes this method must return - - :return: the next piece of XOF output (of the given length) - :rtype: byte string - """ - - self._is_squeezing = True - bfr = create_string_buffer(length) - result = _raw_keccak_lib.keccak_squeeze(self._state.get(), - bfr, - c_size_t(length), - c_ubyte(self._padding)) - if result: - raise ValueError("Error %d while extracting from SHAKE256" - % result) - - return get_raw_buffer(bfr) - - def new(self, data=None): - return type(self)(data=data) - - -def new(data=None): - """Return a fresh instance of a SHAKE256 object. - - Args: - data (bytes/bytearray/memoryview): - The very first chunk of the message to hash. - It is equivalent to an early call to :meth:`update`. - Optional. - - :Return: A :class:`SHAKE256_XOF` object - """ - - return SHAKE256_XOF(data=data) diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Tempita/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Tempita/__init__.py deleted file mode 100644 index 41a0ce3d0efa247760db266bace8e34a4b5dd9fa..0000000000000000000000000000000000000000 --- a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/Cython/Tempita/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# The original Tempita implements all of its templating code here. 
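# Minimal usage sketch for the Tempita templating API re-exported by this
# package (assumes Cython is installed; the template string and variable name
# are illustrative, not taken from the original file).
from Cython.Tempita import Template

tmpl = Template("Hello {{name}}, 2 + 2 = {{2 + 2}}")
print(tmpl.substitute(name="world"))   # -> "Hello world, 2 + 2 = 4"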
-# Moved it to _tempita.py to make the compilation portable. - -from ._tempita import * diff --git a/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/tests/__init__.py b/spaces/arxify/RVC-beta-v2-0618/runtime/Lib/site-packages/altair/examples/tests/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/asciicorp/Legal-ai/README.md b/spaces/asciicorp/Legal-ai/README.md deleted file mode 100644 index f98981d636fdcf95351b9371b633b8e891e19546..0000000000000000000000000000000000000000 --- a/spaces/asciicorp/Legal-ai/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Lang Legal -emoji: 📚 -colorFrom: purple -colorTo: pink -sdk: streamlit -sdk_version: 1.19.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/auto-academic/auto-draft/latex_templates/ICLR2022/introduction.tex b/spaces/auto-academic/auto-draft/latex_templates/ICLR2022/introduction.tex deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/avivdm1/AutoGPT/tests/test_config.py b/spaces/avivdm1/AutoGPT/tests/test_config.py deleted file mode 100644 index b472a24c78edd1f931a76c68e08ed544bbe61d98..0000000000000000000000000000000000000000 --- a/spaces/avivdm1/AutoGPT/tests/test_config.py +++ /dev/null @@ -1,84 +0,0 @@ -from unittest import TestCase - -from autogpt.config import Config - - -class TestConfig(TestCase): - """ - Test cases for the Config class, which handles the configuration settings - for the AI and ensures it behaves as a singleton. - """ - - def setUp(self): - """ - Set up the test environment by creating an instance of the Config class. - """ - self.config = Config() - - def test_singleton(self): - """ - Test if the Config class behaves as a singleton by ensuring that two instances are the same. - """ - config2 = Config() - self.assertIs(self.config, config2) - - def test_initial_values(self): - """ - Test if the initial values of the Config class attributes are set correctly. - """ - self.assertFalse(self.config.debug_mode) - self.assertFalse(self.config.continuous_mode) - self.assertFalse(self.config.speak_mode) - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo") - self.assertEqual(self.config.smart_llm_model, "gpt-4") - self.assertEqual(self.config.fast_token_limit, 4000) - self.assertEqual(self.config.smart_token_limit, 8000) - - def test_set_continuous_mode(self): - """ - Test if the set_continuous_mode() method updates the continuous_mode attribute. - """ - self.config.set_continuous_mode(True) - self.assertTrue(self.config.continuous_mode) - - def test_set_speak_mode(self): - """ - Test if the set_speak_mode() method updates the speak_mode attribute. - """ - self.config.set_speak_mode(True) - self.assertTrue(self.config.speak_mode) - - def test_set_fast_llm_model(self): - """ - Test if the set_fast_llm_model() method updates the fast_llm_model attribute. - """ - self.config.set_fast_llm_model("gpt-3.5-turbo-test") - self.assertEqual(self.config.fast_llm_model, "gpt-3.5-turbo-test") - - def test_set_smart_llm_model(self): - """ - Test if the set_smart_llm_model() method updates the smart_llm_model attribute. 
- """ - self.config.set_smart_llm_model("gpt-4-test") - self.assertEqual(self.config.smart_llm_model, "gpt-4-test") - - def test_set_fast_token_limit(self): - """ - Test if the set_fast_token_limit() method updates the fast_token_limit attribute. - """ - self.config.set_fast_token_limit(5000) - self.assertEqual(self.config.fast_token_limit, 5000) - - def test_set_smart_token_limit(self): - """ - Test if the set_smart_token_limit() method updates the smart_token_limit attribute. - """ - self.config.set_smart_token_limit(9000) - self.assertEqual(self.config.smart_token_limit, 9000) - - def test_set_debug_mode(self): - """ - Test if the set_debug_mode() method updates the debug_mode attribute. - """ - self.config.set_debug_mode(True) - self.assertTrue(self.config.debug_mode) diff --git a/spaces/awacke1/AI-Atari-Live-Streamlit/app.py b/spaces/awacke1/AI-Atari-Live-Streamlit/app.py deleted file mode 100644 index ae6d65f0d907097f161e799b4424c3327057b814..0000000000000000000000000000000000000000 --- a/spaces/awacke1/AI-Atari-Live-Streamlit/app.py +++ /dev/null @@ -1,71 +0,0 @@ -import cv2 -import streamlit as st -import time - -from huggingface_sb3 import load_from_hub - -from stable_baselines3 import PPO -from stable_baselines3.common.env_util import make_atari_env -from stable_baselines3.common.vec_env import VecFrameStack -from stable_baselines3.common.env_util import make_atari_env - -st.subheader("Atari 2600 Deep RL Environments Live AI") - -# @st.cache This is not cachable :( -def load_env(env_name): - env = make_atari_env(env_name, n_envs=1) - env = VecFrameStack(env, n_stack=4) - return env - -# @st.cache This is not cachable :( -def load_model(env_name): - custom_objects = { - "learning_rate": 0.0, - "lr_schedule": lambda _: 0.0, - "clip_range": lambda _: 0.0, - } - checkpoint = load_from_hub( - f"ThomasSimonini/ppo-{env_name}", - f"ppo-{env_name}.zip", - ) - model = PPO.load(checkpoint, custom_objects=custom_objects) - return model - -st.write("In game theory and optimization Nash Equilibrium loss minimization starts playing randomly but then by understanding ratios of action success to action-reward with an action (observe, decide/predict, act and then observe outcome the Deep RL agents go from 50% efficiency to 98-99% efficiency based on quality of decision without making mistakes. 
A good reference to environments is here https://github.com/DLR-RM/rl-baselines3-zoo/blob/master/benchmark.md") -#st.write("Deep RL models: https://huggingface.co/sb3") - -env_name = st.selectbox( - "Select environment", - ( - "SeaquestNoFrameskip-v4", - "QbertNoFrameskip-v4", - "SpaceInvadersNoFrameskip-v4", - "PongNoFrameskip-v4", - #"AsteroidsNoFrameskip-v4", - #"BeamRiderNoFrameskip-v4", - #"BreakoutNoFrameskip-v4 ", - #"EnduroNoFrameskip-v4", - #"MsPacmanNoFrameskip-v4", - #"RoadRunnerNoFrameskip-v4", - #"Swimmer-v3", - #"Walker2d-v3", - ), -) - -num_episodes = st.slider("Number of Episodes", 1, 20, 5) -env = load_env(env_name) -model = load_model(env_name) - -obs = env.reset() - -with st.empty(): - for i in range(num_episodes): - obs = env.reset() - done = False - while not done: - frame = env.render(mode="rgb_array") - im = st.image(frame, width=400) - action, _states = model.predict(obs) - obs, reward, done, info = env.step([action]) - - time.sleep(0.1) \ No newline at end of file diff --git a/spaces/banana-projects/web3d/dist/index.js b/spaces/banana-projects/web3d/dist/index.js deleted file mode 100644 index 51597a0858f9eca9ed26a61222c7eb6c4441254d..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/dist/index.js +++ /dev/null @@ -1,264 +0,0 @@ -"use strict"; -Object.defineProperty(exports, "__esModule", { value: true }); -const THREE = require("three"); -const TWEEN = require("@tweenjs/tween.js"); -const scene = new THREE.Scene(); -scene.background = new THREE.Color( -// 0xcccccc -'white'); -const clock = new THREE.Clock(); -const camera = new THREE.PerspectiveCamera(45, window.innerWidth / window.innerHeight, 0.1, 2000); -camera.position.set(0, 30, 50); -camera.lookAt(0, 3, 0); -const controls = new THREE.OrbitControls(camera); -const ambientLight = new THREE.AmbientLight(0xffffff, 1); -scene.add(ambientLight); -const renderer = new THREE.WebGLRenderer({ antialias: true }); -renderer.setPixelRatio(window.devicePixelRatio); -renderer.setSize(window.innerWidth, window.innerHeight); -document.body.appendChild(renderer.domElement); -const stats = new Stats(); -document.body.appendChild(stats.dom); -/// Anim mixer -const mixers = []; -class Assets { - static loadEggMtl() { - return new Promise((resolve, reject) => { - const loader = new THREE.MTLLoader(); - loader.load(`models/Egg_from_Poly_uovo/Egg from Poly uovo.mtl`, (materials) => { - materials.preload(); - resolve(materials); - }, (xhr) => { }, reject); - }); - } - static loadEggObj(materials) { - return new Promise((resolve, reject) => { - const loader = new THREE.OBJLoader(); - loader.setMaterials(materials); - loader.load(`models/Egg_from_Poly_uovo/Egg from Poly uovo.obj`, (object) => { - resolve(object); - }, (xhr) => { - // c.log(`${ xhr.loaded / xhr.total * 100 }% loaded`); - }, (error) => { - c.error(error); - reject(error); - }); - }); - } - static async loadEgg() { - const materialCreator = await this.loadEggMtl(); - return this.loadEggObj(materialCreator); - } - static loadEggGltf() { - return new Promise((resolve, reject) => { - const loader = new THREE.GLTFLoader(); - loader.load(`models/Egg_gltf/Egg from Poly uovo copy.gltf`, (gltf) => { - c.log(gltf); - resolve(gltf.scene); - }); - }); - } - static loadDogDae() { - /// In Dae/Collada: did not manage to get - /// either the anims or the texture. 
- return new Promise((resolve, reject) => { - const loader = new THREE.ColladaLoader(); - loader.load(`models/dog/pup_lohound.dae`, (collada) => { - resolve(collada); - }); - }); - } - static loadDogFbx() { - return new Promise((resolve, reject) => { - const loader = new THREE.FBXLoader(); - loader.load(`models/dog_fbx/puppy-snapchat.fbx`, (fbx) => { - resolve(fbx); - }); - }); - } - static loadBoloss() { - /// In Dae/Collada: did not manage to get - /// either the anims or the texture. - return new Promise((resolve, reject) => { - const loader = new THREE.ColladaLoader(); - loader.load(`models/boloss/Boloss-3d v10.dae`, (collada) => { - resolve(collada); - }); - }); - } -} -class TUtils { - static boundingBox(o) { - const bbox = new THREE.Box3().setFromObject(o); - return bbox; - } - static flushYZero(o) { - o.position.y = -(this.boundingBox(o)).min.y; - } - static perform(tween) { - return new Promise(resolve => { - tween.onComplete(resolve).start(); - }); - } -} -(async () => { - /** - * scene construction - */ - const gridHelper = new THREE.GridHelper(100, 100); - scene.add(gridHelper); - const axesHelper = new THREE.AxesHelper(50); - scene.add(axesHelper); - { - const egg = await Assets.loadEgg(); - c.log(egg); - egg.scale.setScalar(.2); - egg.rotateX(-Math.PI / 2); - egg.position.x = -18; - TUtils.flushYZero(egg); - const box = new THREE.BoxHelper(egg); - scene.add(box); - scene.add(egg); - ///// Manually set the material, for fun. - // const eggFace = egg.getObjectByName("CallKit-IconMask") as THREE.Mesh; - // c.log(eggFace.material); - // ((eggFace.material)).color.set(0x000000); - } - { - const egg = await Assets.loadEggGltf(); - c.log(egg); - egg.scale.setScalar(100); - egg.position.x = -28; - TUtils.flushYZero(egg); - egg.remove(egg.getObjectByName('Camera')); - scene.add(egg); - // c.log(Utils.boundingBox(egg)); - const box = new THREE.BoxHelper(egg, new THREE.Color('red')); - scene.add(box); - } - { - ////// dog_fbx - const dog = await Assets.loadDogFbx(); - // c.log((dog).animations); - const mixer = new THREE.AnimationMixer(dog); - const clip = dog.animations.find(clip => clip.name === "lohound|lohoundAction"); - /// ^^ this is the main parent animation! Do not play all children. - c.log(clip); - mixer.clipAction(clip).play(); - mixers.push(mixer); - const container = new THREE.Group(); - container.add(dog); - container.scale.setScalar(0.007); /// <- scale a container, not the dog itself or it'll fuck the anims. 
- container.position.x = -6; - scene.add(container); - const box = new THREE.BoxHelper(container, new THREE.Color('green')); - scene.add(box); - } - { - const boloss = (await Assets.loadBoloss()).scene; - c.log(boloss); - boloss.position.x = 16; - TUtils.flushYZero(boloss); - scene.add(boloss); - const box = new THREE.BoxHelper(boloss, new THREE.Color('blue')); - scene.add(box); - /// Anims like in AudioBoloss - const rootModel = boloss.getObjectByName(`SketchUp`); - const pupilL = boloss.getObjectByName(`Pupil-left`); - const pupilR = boloss.getObjectByName(`Pupil-right`); - const pupils = new THREE.Group(); - pupils.add(pupilL, pupilR); - rootModel.add(pupils); - (async () => { - while (true) { - const translatePupil = new TWEEN.Tween(pupils.position) - .to({ x: "-1", y: "-1" }, 200) - .easing(TWEEN.Easing.Quadratic.Out); - const translatePupilRev = new TWEEN.Tween(pupils.position) - .to({ x: "+1", y: "+1" }, 200) - .easing(TWEEN.Easing.Quadratic.Out); - await TUtils.perform(translatePupil); - await Utils.wait(4, 1); - await TUtils.perform(translatePupilRev); - await Utils.wait(8, 3); - } - })(); - const eyebrowL = boloss.getObjectByName(`Eyebrow-left`); - const eyebrowR = boloss.getObjectByName(`Eyebrow-right`); - const eyebrows = new THREE.Group(); - eyebrows.add(eyebrowL, eyebrowR); - rootModel.add(eyebrows); - (async () => { - while (true) { - const scaleEyebrow = new TWEEN.Tween(eyebrows.scale) - .to({ x: 1.08, y: 1.08, z: 1.08 }, 100) - .easing(TWEEN.Easing.Quadratic.InOut); - const scaleEyebrowRev = new TWEEN.Tween(eyebrows.scale) - .to({ x: 1, y: 1, z: 1 }, 100) - .easing(TWEEN.Easing.Quadratic.InOut); - await Utils.wait(6, 6); - await TUtils.perform(scaleEyebrow); - await TUtils.perform(scaleEyebrowRev); - await Utils.wait(0.14); - await TUtils.perform(scaleEyebrow); - await TUtils.perform(scaleEyebrowRev); - } - })(); - (async () => { - while (true) { - const angle = Utils.randomFloat(-0.2, 0.3); - const dummyL = new THREE.Object3D(); - dummyL.rotateOnAxis(new THREE.Vector3(0, 1, 0.8), angle); - const dummyR = new THREE.Object3D(); - dummyR.rotateOnAxis(new THREE.Vector3(0, -1, -0.8), angle); - /// ^^ exact same result as keeping the same vector and negating the angle. - const rotateBrowL = new TWEEN.Tween(eyebrowL.rotation) - .to({ - x: dummyL.rotation.x, - y: dummyL.rotation.y, - z: dummyL.rotation.z, - }, 300); - const rotateBrowR = new TWEEN.Tween(eyebrowR.rotation) - .to({ - x: dummyR.rotation.x, - y: dummyR.rotation.y, - z: dummyR.rotation.z, - }, 300); - await Promise.all([ - TUtils.perform(rotateBrowL), - TUtils.perform(rotateBrowR), - ]); - await Utils.wait(1, 1); - await Promise.all([ - TUtils.perform(new TWEEN.Tween(eyebrowL.rotation).to({ x: 0, y: 0, z: 0 }, 300)), - TUtils.perform(new TWEEN.Tween(eyebrowR.rotation).to({ x: 0, y: 0, z: 0 }, 300)), - ]); - await Utils.wait(1, 1); - /// ^^ not the exact same behavior as in AudioBoloss (all waits are actually randoms there.) 
- } - })(); - } -})(); -/** - * MAIN() - */ -window.addEventListener('resize', onWindowResize, false); -function onWindowResize() { - camera.aspect = window.innerWidth / window.innerHeight; - camera.updateProjectionMatrix(); - renderer.setSize(window.innerWidth, window.innerHeight); -} -function render() { - const delta = clock.getDelta(); - for (const mixer of mixers) { - mixer.update(delta); - } - renderer.render(scene, camera); -} -function animate() { - requestAnimationFrame(animate); - TWEEN.update(); - render(); - stats.update(); -} -animate(); diff --git a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/UnpackDepthRGBAShader.js b/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/UnpackDepthRGBAShader.js deleted file mode 100644 index 55f34a2b0d455b0d5310e80831da35ea86b678ba..0000000000000000000000000000000000000000 --- a/spaces/banana-projects/web3d/node_modules/three/examples/js/shaders/UnpackDepthRGBAShader.js +++ /dev/null @@ -1,49 +0,0 @@ -/** - * @author alteredq / http://alteredqualia.com/ - * - * Unpack RGBA depth shader - * - show RGBA encoded depth as monochrome color - */ - -THREE.UnpackDepthRGBAShader = { - - uniforms: { - - "tDiffuse": { value: null }, - "opacity": { value: 1.0 } - - }, - - vertexShader: [ - - "varying vec2 vUv;", - - "void main() {", - - "vUv = uv;", - "gl_Position = projectionMatrix * modelViewMatrix * vec4( position, 1.0 );", - - "}" - - ].join( "\n" ), - - fragmentShader: [ - - "uniform float opacity;", - - "uniform sampler2D tDiffuse;", - - "varying vec2 vUv;", - - "#include ", - - "void main() {", - - "float depth = 1.0 - unpackRGBAToDepth( texture2D( tDiffuse, vUv ) );", - "gl_FragColor = vec4( vec3( depth ), opacity );", - - "}" - - ].join( "\n" ) - -}; diff --git a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/metrics/metric_util.py b/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/metrics/metric_util.py deleted file mode 100644 index 0b21777874f18a7e87c67153ee92dec4d7b599e8..0000000000000000000000000000000000000000 --- a/spaces/beihai/GFPGAN-V1.3-whole-image/basicsr/metrics/metric_util.py +++ /dev/null @@ -1,45 +0,0 @@ -import numpy as np - -from basicsr.utils.matlab_functions import bgr2ycbcr - - -def reorder_image(img, input_order='HWC'): - """Reorder images to 'HWC' order. - - If the input_order is (h, w), return (h, w, 1); - If the input_order is (c, h, w), return (h, w, c); - If the input_order is (h, w, c), return as it is. - - Args: - img (ndarray): Input image. - input_order (str): Whether the input order is 'HWC' or 'CHW'. - If the input image shape is (h, w), input_order will not have - effects. Default: 'HWC'. - - Returns: - ndarray: reordered image. - """ - - if input_order not in ['HWC', 'CHW']: - raise ValueError(f"Wrong input_order {input_order}. Supported input_orders are 'HWC' and 'CHW'") - if len(img.shape) == 2: - img = img[..., None] - if input_order == 'CHW': - img = img.transpose(1, 2, 0) - return img - - -def to_y_channel(img): - """Change to Y channel of YCbCr. - - Args: - img (ndarray): Images with range [0, 255]. - - Returns: - (ndarray): Images with range [0, 255] (float type) without round. - """ - img = img.astype(np.float32) / 255. - if img.ndim == 3 and img.shape[2] == 3: - img = bgr2ycbcr(img, y_only=True) - img = img[..., None] - return img * 255. 
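# A short, self-contained sketch of how helpers like reorder_image/to_y_channel
# above are typically combined with a metric such as PSNR. Conventions assumed
# (matching the utilities above, not copied from them): HWC images in [0, 255].
import numpy as np

def psnr(img1: np.ndarray, img2: np.ndarray) -> float:
    img1 = img1.astype(np.float64)
    img2 = img2.astype(np.float64)
    mse = np.mean((img1 - img2) ** 2)
    if mse == 0:
        return float("inf")
    return 10.0 * np.log10(255.0 ** 2 / mse)

clean = np.full((32, 32, 3), 128, dtype=np.uint8)
noisy = np.clip(clean.astype(np.float64) + 4.0, 0, 255)  # uniform +4 offset
print(round(psnr(clean, noisy), 2))  # 20*log10(255/4) ~= 36.09 dB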
diff --git a/spaces/bioriAsaeru/text-to-voice/Data Structures And Algorithms By G.a.v Pai Free 35 !!TOP!!.md b/spaces/bioriAsaeru/text-to-voice/Data Structures And Algorithms By G.a.v Pai Free 35 !!TOP!!.md deleted file mode 100644 index b64be3ff95d54ca054279c16bf617e3a48ba6915..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Data Structures And Algorithms By G.a.v Pai Free 35 !!TOP!!.md +++ /dev/null @@ -1,10 +0,0 @@ -

    data structures and algorithms by g.a.v pai free 35


    Download Zip ⚹⚹⚹ https://urloso.com/2uyRKR



    -
    -The course, offered by the department of Computer Applications offers a computer oriented introductory course in various areas of Mathematics and Science. It also covers a brief history of Mathematics, introduction of the central concepts in Mathematics, programming techniques and Applications of Algorithms. This course attempts to teach concepts in real life application. It gives a practical approach to the solution of Mathematical problems, rather than focusing only on the mathematical definitions. It will be beneficial for all. The course covers the following topics: 1. Concepts in Mathematics. 2. Matrices, vectors and basic operations on them. 3. Graphs, adjacency, paths and algorithms. 4. Introduction to Computers and Programming. 5. Introduction to Logic and Computation. - -Introduction and Basic Concepts The History of Mathematics - -Mathematics: Use and Abuse In his review of G.H Hardy's controversial book 'A Mathematician's Apology' author remarks that "the history of mathematics is one long series of attempts to define the terms on which mathematicians agree to disagree". Hardy \[1\] had published the article on the existence of uncountably many distinct uncountable sets in 1900. Shortly afterwards Hausdorff \[2\] had proved the same result by using a different method. He used to define a set as a collection of points, where each point is closed and bounded. This definition was then further refined. Nowadays a point, known as a limit point, is an accumulation point. These closed and bounded sets are now called Haussdorff spaces. The result of Hausdorff was first published in the German Journal 'Mathematische Annalen'. It is interesting to note that he himself was surprised and dismayed by Hardy's work. In this work, Hardy showed a continuous function whose graph is uncountable. He gave the set of all such functions as well as the set of all points on this graph and a bijection between them. These sets are said to be uncountably large. Hardy goes on to discuss the difference between sets of rational numbers and the real numbers, thus, giving a definition of countable set. He then states that these two sets are not the same, and hence uncountable. But this is not his most damning criticism of the whole idea of mathematics. It is that his argument is not clearly thought out. The definition he gives of uncountable set is incorrect, and he gives an example to prove that. He then uses this to show that 4fefd39f24
    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/HOW TO CHANGE LANUGAGE?????? Chicago 1930 General Discussions[1].md b/spaces/bioriAsaeru/text-to-voice/HOW TO CHANGE LANUGAGE?????? Chicago 1930 General Discussions[1].md deleted file mode 100644 index 3a5b1c028f4d07d9de254c5010b175e9d6f04119..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/HOW TO CHANGE LANUGAGE?????? Chicago 1930 General Discussions[1].md +++ /dev/null @@ -1,6 +0,0 @@ -

    Chicago 1930 english language pack


    Download File 🆗 https://urloso.com/2uyOwe



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/bioriAsaeru/text-to-voice/Ken Follett Epub Ita Download Skype The Ultimate Guide for Fans of Historical Thrillers.md b/spaces/bioriAsaeru/text-to-voice/Ken Follett Epub Ita Download Skype The Ultimate Guide for Fans of Historical Thrillers.md deleted file mode 100644 index 591132429ed82cbb97d444caede91e96daaa38b0..0000000000000000000000000000000000000000 --- a/spaces/bioriAsaeru/text-to-voice/Ken Follett Epub Ita Download Skype The Ultimate Guide for Fans of Historical Thrillers.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Ken Follett Epub Ita Download Skype


    DOWNLOAD >>> https://urloso.com/2uyOeY



    -
    - aaccfb2cb3
    -
    -
    -

    diff --git a/spaces/birdortyedi/instagram-filter-removal/README.md b/spaces/birdortyedi/instagram-filter-removal/README.md deleted file mode 100644 index f2552ac5263506df94b36b9c232e8351968542ca..0000000000000000000000000000000000000000 --- a/spaces/birdortyedi/instagram-filter-removal/README.md +++ /dev/null @@ -1,37 +0,0 @@ ---- -title: Instagram Filter Removal -emoji: 👀 -colorFrom: gray -colorTo: green -sdk: gradio -app_file: app.py -pinned: false ---- - -# Configuration - -`title`: _string_ -Display title for the Space - -`emoji`: _string_ -Space emoji (emoji-only character allowed) - -`colorFrom`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`colorTo`: _string_ -Color for Thumbnail gradient (red, yellow, green, blue, indigo, purple, pink, gray) - -`sdk`: _string_ -Can be either `gradio` or `streamlit` - -`sdk_version` : _string_ -Only applicable for `streamlit` SDK. -See [doc](https://hf.co/docs/hub/spaces) for more info on supported versions. - -`app_file`: _string_ -Path to your main application file (which contains either `gradio` or `streamlit` Python code). -Path is relative to the root of the repository. - -`pinned`: _boolean_ -Whether the Space stays on top of your list. diff --git a/spaces/bortle/astrophotography-object-classifier/app.py b/spaces/bortle/astrophotography-object-classifier/app.py deleted file mode 100644 index 1e49d1feca60186fb9bf10220e52af1ab65313cd..0000000000000000000000000000000000000000 --- a/spaces/bortle/astrophotography-object-classifier/app.py +++ /dev/null @@ -1,18 +0,0 @@ -import gradio as gr -from transformers import pipeline - -pipeline = pipeline(task="image-classification", model="bortle/astrophotography-object-classifier-alpha4") - -def predict(image): - predictions = pipeline(image) - return {p["label"]: p["score"] for p in predictions} - -gr.Interface( - predict, - inputs=gr.Image(shape=(1080, None), type="pil", label="Upload Astrophotography image"), - outputs=gr.Label(num_top_classes=5), - title="Astrophotography Object Classifier", - allow_flagging="manual", - examples=["examples/Andromeda.jpg", "examples/Heart.jpg", "examples/Pleiades.jpg", "examples/Rosette.jpg", "examples/Moon.jpg", "examples/GreatHercules.jpg", "examples/Leo-Triplet.jpg", "examples/Crab.jpg", "examples/North-America.jpg", "examples/Horsehead-Flame.jpg", "examples/Pinwheel.jpg"], - cache_examples=True -).launch() \ No newline at end of file diff --git a/spaces/brainblow/AudioCreator_Music-Audio_Generation/scripts/templates/login.html b/spaces/brainblow/AudioCreator_Music-Audio_Generation/scripts/templates/login.html deleted file mode 100644 index dd89ac654bceca14a9dec7d1a7f8206d1425a7a1..0000000000000000000000000000000000000000 --- a/spaces/brainblow/AudioCreator_Music-Audio_Generation/scripts/templates/login.html +++ /dev/null @@ -1,20 +0,0 @@ -{% extends "base.html" %} -{% block content %} - -

    - You must identify yourself first! We use a highly secured protocol - where you just decide your username, and that's it. No password, no encryption, - just pure trust. -

    - -{% if error %} -

    {{error}}

    -{% endif %} -
    - - - - -{% endblock %} diff --git a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/engine/launch.py b/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/engine/launch.py deleted file mode 100644 index 7052c5040e4d9e6553a1b371518cb53fb056524e..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/detectron2/detectron2/engine/launch.py +++ /dev/null @@ -1,123 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import logging -from datetime import timedelta -import torch -import torch.distributed as dist -import torch.multiprocessing as mp - -from detectron2.utils import comm - -__all__ = ["DEFAULT_TIMEOUT", "launch"] - -DEFAULT_TIMEOUT = timedelta(minutes=30) - - -def _find_free_port(): - import socket - - sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM) - # Binding to port 0 will cause the OS to find an available port for us - sock.bind(("", 0)) - port = sock.getsockname()[1] - sock.close() - # NOTE: there is still a chance the port could be taken by other processes. - return port - - -def launch( - main_func, - # Should be num_processes_per_machine, but kept for compatibility. - num_gpus_per_machine, - num_machines=1, - machine_rank=0, - dist_url=None, - args=(), - timeout=DEFAULT_TIMEOUT, -): - """ - Launch multi-process or distributed training. - This function must be called on all machines involved in the training. - It will spawn child processes (defined by ``num_gpus_per_machine``) on each machine. - - Args: - main_func: a function that will be called by `main_func(*args)` - num_gpus_per_machine (int): number of processes per machine. When - using GPUs, this should be the number of GPUs. - num_machines (int): the total number of machines - machine_rank (int): the rank of this machine - dist_url (str): url to connect to for distributed jobs, including protocol - e.g. "tcp://127.0.0.1:8686". - Can be set to "auto" to automatically select a free port on localhost - timeout (timedelta): timeout of the distributed workers - args (tuple): arguments passed to main_func - """ - world_size = num_machines * num_gpus_per_machine - if world_size > 1: - # https://github.com/pytorch/pytorch/pull/14391 - # TODO prctl in spawned processes - - if dist_url == "auto": - assert num_machines == 1, "dist_url=auto not supported in multi-machine jobs." - port = _find_free_port() - dist_url = f"tcp://127.0.0.1:{port}" - if num_machines > 1 and dist_url.startswith("file://"): - logger = logging.getLogger(__name__) - logger.warning( - "file:// is not a reliable init_method in multi-machine jobs. Prefer tcp://" - ) - - mp.start_processes( - _distributed_worker, - nprocs=num_gpus_per_machine, - args=( - main_func, - world_size, - num_gpus_per_machine, - machine_rank, - dist_url, - args, - timeout, - ), - daemon=False, - ) - else: - main_func(*args) - - -def _distributed_worker( - local_rank, - main_func, - world_size, - num_gpus_per_machine, - machine_rank, - dist_url, - args, - timeout=DEFAULT_TIMEOUT, -): - has_gpu = torch.cuda.is_available() - if has_gpu: - assert num_gpus_per_machine <= torch.cuda.device_count() - global_rank = machine_rank * num_gpus_per_machine + local_rank - try: - dist.init_process_group( - backend="NCCL" if has_gpu else "GLOO", - init_method=dist_url, - world_size=world_size, - rank=global_rank, - timeout=timeout, - ) - except Exception as e: - logger = logging.getLogger(__name__) - logger.error("Process group URL: {}".format(dist_url)) - raise e - - # Setup the local process group. 
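# Usage sketch for the launch() helper defined in this file (the worker
# function, GPU count, and config path below are illustrative placeholders;
# assumes detectron2 is installed so the helper is importable).
from detectron2.engine import launch

def train_worker(config_path: str) -> None:
    # Each spawned process runs this after its process group has been set up.
    print("worker started with", config_path)

if __name__ == "__main__":
    launch(
        train_worker,
        num_gpus_per_machine=2,   # spawn two worker processes on this machine
        num_machines=1,
        machine_rank=0,
        dist_url="auto",          # picks a free localhost port (single-machine only)
        args=("configs/example.yaml",),
    )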
- comm.create_local_process_group(num_gpus_per_machine) - if has_gpu: - torch.cuda.set_device(local_rank) - - # synchronize is needed here to prevent a possible timeout after calling init_process_group - # See: https://github.com/facebookresearch/maskrcnn-benchmark/issues/172 - comm.synchronize() - - main_func(*args) diff --git a/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/renderer.py b/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/renderer.py deleted file mode 100644 index 5ae14c5cdb1785226a52ae6b71b08f01de069962..0000000000000000000000000000000000000000 --- a/spaces/brjathu/HMR2.0/vendor/pyrender/pyrender/renderer.py +++ /dev/null @@ -1,1339 +0,0 @@ -"""PBR renderer for Python. - -Author: Matthew Matl -""" -import sys - -import numpy as np -import PIL - -from .constants import (RenderFlags, TextAlign, GLTF, BufFlags, TexFlags, - ProgramFlags, DEFAULT_Z_FAR, DEFAULT_Z_NEAR, - SHADOW_TEX_SZ, MAX_N_LIGHTS) -from .shader_program import ShaderProgramCache -from .material import MetallicRoughnessMaterial, SpecularGlossinessMaterial -from .light import PointLight, SpotLight, DirectionalLight -from .font import FontCache -from .utils import format_color_vector - -from OpenGL.GL import * - - -class Renderer(object): - """Class for handling all rendering operations on a scene. - - Note - ---- - This renderer relies on the existence of an OpenGL context and - does not create one on its own. - - Parameters - ---------- - viewport_width : int - Width of the viewport in pixels. - viewport_height : int - Width of the viewport height in pixels. - point_size : float, optional - Size of points in pixels. Defaults to 1.0. - """ - - def __init__(self, viewport_width, viewport_height, point_size=1.0): - self.dpscale = 1 - # Scaling needed on retina displays - if sys.platform == 'darwin': - self.dpscale = 2 - - self.viewport_width = viewport_width - self.viewport_height = viewport_height - self.point_size = point_size - - # Optional framebuffer for offscreen renders - self._main_fb = None - self._main_cb = None - self._main_db = None - self._main_fb_ms = None - self._main_cb_ms = None - self._main_db_ms = None - self._main_fb_dims = (None, None) - self._shadow_fb = None - self._latest_znear = DEFAULT_Z_NEAR - self._latest_zfar = DEFAULT_Z_FAR - - # Shader Program Cache - self._program_cache = ShaderProgramCache() - self._font_cache = FontCache() - self._meshes = set() - self._mesh_textures = set() - self._shadow_textures = set() - self._texture_alloc_idx = 0 - - @property - def viewport_width(self): - """int : The width of the main viewport, in pixels. - """ - return self._viewport_width - - @viewport_width.setter - def viewport_width(self, value): - self._viewport_width = self.dpscale * value - - @property - def viewport_height(self): - """int : The height of the main viewport, in pixels. - """ - return self._viewport_height - - @viewport_height.setter - def viewport_height(self, value): - self._viewport_height = self.dpscale * value - - @property - def point_size(self): - """float : The size of screen-space points, in pixels. - """ - return self._point_size - - @point_size.setter - def point_size(self, value): - self._point_size = float(value) - - def render(self, scene, flags, seg_node_map=None): - """Render a scene with the given set of flags. - - Parameters - ---------- - scene : :class:`Scene` - A scene to render. - flags : int - A specification from :class:`.RenderFlags`. - seg_node_map : dict - A map from :class:`.Node` objects to (3,) colors for each. 
- If specified along with flags set to :attr:`.RenderFlags.SEG`, - the color image will be a segmentation image. - - Returns - ------- - color_im : (h, w, 3) uint8 or (h, w, 4) uint8 - If :attr:`RenderFlags.OFFSCREEN` is set, the color buffer. This is - normally an RGB buffer, but if :attr:`.RenderFlags.RGBA` is set, - the buffer will be a full RGBA buffer. - depth_im : (h, w) float32 - If :attr:`RenderFlags.OFFSCREEN` is set, the depth buffer - in linear units. - """ - # Update context with meshes and textures - self._update_context(scene, flags) - - # Render necessary shadow maps - if not bool(flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG): - for ln in scene.light_nodes: - take_pass = False - if (isinstance(ln.light, DirectionalLight) and - bool(flags & RenderFlags.SHADOWS_DIRECTIONAL)): - take_pass = True - elif (isinstance(ln.light, SpotLight) and - bool(flags & RenderFlags.SHADOWS_SPOT)): - take_pass = True - elif (isinstance(ln.light, PointLight) and - bool(flags & RenderFlags.SHADOWS_POINT)): - take_pass = True - if take_pass: - self._shadow_mapping_pass(scene, ln, flags) - - # Make forward pass - retval = self._forward_pass(scene, flags, seg_node_map=seg_node_map) - - # If necessary, make normals pass - if flags & (RenderFlags.VERTEX_NORMALS | RenderFlags.FACE_NORMALS): - self._normals_pass(scene, flags) - - # Update camera settings for retrieving depth buffers - self._latest_znear = scene.main_camera_node.camera.znear - self._latest_zfar = scene.main_camera_node.camera.zfar - - return retval - - def render_text(self, text, x, y, font_name='OpenSans-Regular', - font_pt=40, color=None, scale=1.0, - align=TextAlign.BOTTOM_LEFT): - """Render text into the current viewport. - - Note - ---- - This cannot be done into an offscreen buffer. - - Parameters - ---------- - text : str - The text to render. - x : int - Horizontal pixel location of text. - y : int - Vertical pixel location of text. - font_name : str - Name of font, from the ``pyrender/fonts`` folder, or - a path to a ``.ttf`` file. - font_pt : int - Height of the text, in font points. - color : (4,) float - The color of the text. Default is black. - scale : int - Scaling factor for text. - align : int - One of the :class:`TextAlign` options which specifies where the - ``x`` and ``y`` parameters lie on the text. For example, - :attr:`TextAlign.BOTTOM_LEFT` means that ``x`` and ``y`` indicate - the position of the bottom-left corner of the textbox. - """ - x *= self.dpscale - y *= self.dpscale - font_pt *= self.dpscale - - if color is None: - color = np.array([0.0, 0.0, 0.0, 1.0]) - else: - color = format_color_vector(color, 4) - - # Set up viewport for render - self._configure_forward_pass_viewport(0) - - # Load font - font = self._font_cache.get_font(font_name, font_pt) - if not font._in_context(): - font._add_to_context() - - # Load program - program = self._get_text_program() - program._bind() - - # Set uniforms - p = np.eye(4) - p[0,0] = 2.0 / self.viewport_width - p[0,3] = -1.0 - p[1,1] = 2.0 / self.viewport_height - p[1,3] = -1.0 - program.set_uniform('projection', p) - program.set_uniform('text_color', color) - - # Draw text - font.render_string(text, x, y, scale, align) - - def read_color_buf(self): - """Read and return the current viewport's color buffer. - - Alpha cannot be computed for an on-screen buffer. - - Returns - ------- - color_im : (h, w, 3) uint8 - The color buffer in RGB byte format. 
- """ - # Extract color image from frame buffer - width, height = self.viewport_width, self.viewport_height - glBindFramebuffer(GL_READ_FRAMEBUFFER, 0) - glReadBuffer(GL_FRONT) - color_buf = glReadPixels(0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE) - - # Re-format them into numpy arrays - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 3)) - color_im = np.flip(color_im, axis=0) - - # Resize for macos if needed - if sys.platform == 'darwin': - color_im = self._resize_image(color_im, True) - - return color_im - - def read_depth_buf(self): - """Read and return the current viewport's color buffer. - - Returns - ------- - depth_im : (h, w) float32 - The depth buffer in linear units. - """ - width, height = self.viewport_width, self.viewport_height - glBindFramebuffer(GL_READ_FRAMEBUFFER, 0) - glReadBuffer(GL_FRONT) - depth_buf = glReadPixels( - 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT - ) - - depth_im = np.frombuffer(depth_buf, dtype=np.float32) - depth_im = depth_im.reshape((height, width)) - depth_im = np.flip(depth_im, axis=0) - - inf_inds = (depth_im == 1.0) - depth_im = 2.0 * depth_im - 1.0 - z_near, z_far = self._latest_znear, self._latest_zfar - noninf = np.logical_not(inf_inds) - if z_far is None: - depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf]) - else: - depth_im[noninf] = ((2.0 * z_near * z_far) / - (z_far + z_near - depth_im[noninf] * - (z_far - z_near))) - depth_im[inf_inds] = 0.0 - - # Resize for macos if needed - if sys.platform == 'darwin': - depth_im = self._resize_image(depth_im) - - return depth_im - - def delete(self): - """Free all allocated OpenGL resources. - """ - # Free shaders - self._program_cache.clear() - - # Free fonts - self._font_cache.clear() - - # Free meshes - for mesh in self._meshes: - for p in mesh.primitives: - p.delete() - - # Free textures - for mesh_texture in self._mesh_textures: - mesh_texture.delete() - - for shadow_texture in self._shadow_textures: - shadow_texture.delete() - - self._meshes = set() - self._mesh_textures = set() - self._shadow_textures = set() - self._texture_alloc_idx = 0 - - self._delete_main_framebuffer() - self._delete_shadow_framebuffer() - - def __del__(self): - try: - self.delete() - except Exception: - pass - - ########################################################################### - # Rendering passes - ########################################################################### - - def _forward_pass(self, scene, flags, seg_node_map=None): - # Set up viewport for render - self._configure_forward_pass_viewport(flags) - - # Clear it - if bool(flags & RenderFlags.SEG): - glClearColor(0.0, 0.0, 0.0, 1.0) - if seg_node_map is None: - seg_node_map = {} - else: - glClearColor(*scene.bg_color) - - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - - if not bool(flags & RenderFlags.SEG): - glEnable(GL_MULTISAMPLE) - else: - glDisable(GL_MULTISAMPLE) - - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - program = None - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - # If SEG, set color - if bool(flags & RenderFlags.SEG): - if node not in seg_node_map: - continue - color = seg_node_map[node] - if not isinstance(color, (list, tuple, np.ndarray)): - color = np.repeat(color, 3) - else: - color = np.asanyarray(color) - color = color / 255.0 - - for primitive in mesh.primitives: - - # First, get and bind the 
appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.USE_MATERIAL - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - if bool(flags & RenderFlags.SEG): - program.set_uniform('color', color) - - # Next, bind the lighting - if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.FLAT or - flags & RenderFlags.SEG): - self._bind_lighting(scene, program, node, flags) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=flags - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - # If doing offscreen render, copy result from framebuffer and return - if flags & RenderFlags.OFFSCREEN: - return self._read_main_framebuffer(scene, flags) - else: - return - - def _shadow_mapping_pass(self, scene, light_node, flags): - light = light_node.light - - # Set up viewport for render - self._configure_shadow_mapping_viewport(light, flags) - - # Set up camera matrices - V, P = self._get_light_cam_matrices(scene, light_node, flags) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.NONE - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=RenderFlags.DEPTH_ONLY - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - def _normals_pass(self, scene, flags): - # Set up viewport for render - self._configure_forward_pass_viewport(flags) - program = None - - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # Skip objects that don't have normals - if not primitive.buf_flags & BufFlags.NORMAL: - continue - - # First, get and bind the appropriate program - pf = ProgramFlags.NONE - if flags & RenderFlags.VERTEX_NORMALS: - pf = pf | ProgramFlags.VERTEX_NORMALS - if flags & RenderFlags.FACE_NORMALS: - pf = pf | ProgramFlags.FACE_NORMALS - program = self._get_primitive_program(primitive, flags, pf) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform('normal_magnitude', 0.05 * primitive.scale) - program.set_uniform( - 'normal_color', np.array([0.1, 0.1, 1.0, 1.0]) - ) - - # Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=RenderFlags.DEPTH_ONLY - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - 
program._unbind() - glFlush() - - ########################################################################### - # Handlers for binding uniforms and drawing primitives - ########################################################################### - - def _bind_and_draw_primitive(self, primitive, pose, program, flags): - # Set model pose matrix - program.set_uniform('M', pose) - - # Bind mesh buffers - primitive._bind() - - # Bind mesh material - if not (flags & RenderFlags.DEPTH_ONLY or flags & RenderFlags.SEG): - material = primitive.material - - # Bind textures - tf = material.tex_flags - if tf & TexFlags.NORMAL: - self._bind_texture(material.normalTexture, - 'material.normal_texture', program) - if tf & TexFlags.OCCLUSION: - self._bind_texture(material.occlusionTexture, - 'material.occlusion_texture', program) - if tf & TexFlags.EMISSIVE: - self._bind_texture(material.emissiveTexture, - 'material.emissive_texture', program) - if tf & TexFlags.BASE_COLOR: - self._bind_texture(material.baseColorTexture, - 'material.base_color_texture', program) - if tf & TexFlags.METALLIC_ROUGHNESS: - self._bind_texture(material.metallicRoughnessTexture, - 'material.metallic_roughness_texture', - program) - if tf & TexFlags.DIFFUSE: - self._bind_texture(material.diffuseTexture, - 'material.diffuse_texture', program) - if tf & TexFlags.SPECULAR_GLOSSINESS: - self._bind_texture(material.specularGlossinessTexture, - 'material.specular_glossiness_texture', - program) - - # Bind other uniforms - b = 'material.{}' - program.set_uniform(b.format('emissive_factor'), - material.emissiveFactor) - if isinstance(material, MetallicRoughnessMaterial): - program.set_uniform(b.format('base_color_factor'), - material.baseColorFactor) - program.set_uniform(b.format('metallic_factor'), - material.metallicFactor) - program.set_uniform(b.format('roughness_factor'), - material.roughnessFactor) - elif isinstance(material, SpecularGlossinessMaterial): - program.set_uniform(b.format('diffuse_factor'), - material.diffuseFactor) - program.set_uniform(b.format('specular_factor'), - material.specularFactor) - program.set_uniform(b.format('glossiness_factor'), - material.glossinessFactor) - - # Set blending options - if material.alphaMode == 'BLEND': - glEnable(GL_BLEND) - glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA) - else: - glEnable(GL_BLEND) - glBlendFunc(GL_ONE, GL_ZERO) - - # Set wireframe mode - wf = material.wireframe - if flags & RenderFlags.FLIP_WIREFRAME: - wf = not wf - if (flags & RenderFlags.ALL_WIREFRAME) or wf: - glPolygonMode(GL_FRONT_AND_BACK, GL_LINE) - else: - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - - # Set culling mode - if material.doubleSided or flags & RenderFlags.SKIP_CULL_FACES: - glDisable(GL_CULL_FACE) - else: - glEnable(GL_CULL_FACE) - glCullFace(GL_BACK) - else: - glEnable(GL_CULL_FACE) - glEnable(GL_BLEND) - glCullFace(GL_BACK) - glBlendFunc(GL_ONE, GL_ZERO) - glPolygonMode(GL_FRONT_AND_BACK, GL_FILL) - - # Set point size if needed - glDisable(GL_PROGRAM_POINT_SIZE) - if primitive.mode == GLTF.POINTS: - glEnable(GL_PROGRAM_POINT_SIZE) - glPointSize(self.point_size) - - # Render mesh - n_instances = 1 - if primitive.poses is not None: - n_instances = len(primitive.poses) - - if primitive.indices is not None: - glDrawElementsInstanced( - primitive.mode, primitive.indices.size, GL_UNSIGNED_INT, - ctypes.c_void_p(0), n_instances - ) - else: - glDrawArraysInstanced( - primitive.mode, 0, len(primitive.positions), n_instances - ) - - # Unbind mesh buffers - primitive._unbind() - - def 
_bind_lighting(self, scene, program, node, flags): - """Bind all lighting uniform values for a scene. - """ - max_n_lights = self._compute_max_n_lights(flags) - - n_d = min(len(scene.directional_light_nodes), max_n_lights[0]) - n_s = min(len(scene.spot_light_nodes), max_n_lights[1]) - n_p = min(len(scene.point_light_nodes), max_n_lights[2]) - program.set_uniform('ambient_light', scene.ambient_light) - program.set_uniform('n_directional_lights', n_d) - program.set_uniform('n_spot_lights', n_s) - program.set_uniform('n_point_lights', n_p) - plc = 0 - slc = 0 - dlc = 0 - - light_nodes = scene.light_nodes - if (len(scene.directional_light_nodes) > max_n_lights[0] or - len(scene.spot_light_nodes) > max_n_lights[1] or - len(scene.point_light_nodes) > max_n_lights[2]): - light_nodes = self._sorted_nodes_by_distance( - scene, scene.light_nodes, node - ) - - for n in light_nodes: - light = n.light - pose = scene.get_pose(n) - position = pose[:3,3] - direction = -pose[:3,2] - - if isinstance(light, PointLight): - if plc == max_n_lights[2]: - continue - b = 'point_lights[{}].'.format(plc) - plc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_POINT) - program.set_uniform(b + 'position', position) - elif isinstance(light, SpotLight): - if slc == max_n_lights[1]: - continue - b = 'spot_lights[{}].'.format(slc) - slc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_SPOT) - las = 1.0 / max(0.001, np.cos(light.innerConeAngle) - - np.cos(light.outerConeAngle)) - lao = -np.cos(light.outerConeAngle) * las - program.set_uniform(b + 'direction', direction) - program.set_uniform(b + 'position', position) - program.set_uniform(b + 'light_angle_scale', las) - program.set_uniform(b + 'light_angle_offset', lao) - else: - if dlc == max_n_lights[0]: - continue - b = 'directional_lights[{}].'.format(dlc) - dlc += 1 - shadow = bool(flags & RenderFlags.SHADOWS_DIRECTIONAL) - program.set_uniform(b + 'direction', direction) - - program.set_uniform(b + 'color', light.color) - program.set_uniform(b + 'intensity', light.intensity) - # if light.range is not None: - # program.set_uniform(b + 'range', light.range) - # else: - # program.set_uniform(b + 'range', 0) - - if shadow: - self._bind_texture(light.shadow_texture, - b + 'shadow_map', program) - if not isinstance(light, PointLight): - V, P = self._get_light_cam_matrices(scene, n, flags) - program.set_uniform(b + 'light_matrix', P.dot(V)) - else: - raise NotImplementedError( - 'Point light shadows not implemented' - ) - - def _sorted_mesh_nodes(self, scene): - cam_loc = scene.get_pose(scene.main_camera_node)[:3,3] - solid_nodes = [] - trans_nodes = [] - for node in scene.mesh_nodes: - mesh = node.mesh - if mesh.is_transparent: - trans_nodes.append(node) - else: - solid_nodes.append(node) - - # TODO BETTER SORTING METHOD - trans_nodes.sort( - key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc) - ) - solid_nodes.sort( - key=lambda n: -np.linalg.norm(scene.get_pose(n)[:3,3] - cam_loc) - ) - - return solid_nodes + trans_nodes - - def _sorted_nodes_by_distance(self, scene, nodes, compare_node): - nodes = list(nodes) - compare_posn = scene.get_pose(compare_node)[:3,3] - nodes.sort(key=lambda n: np.linalg.norm( - scene.get_pose(n)[:3,3] - compare_posn) - ) - return nodes - - ########################################################################### - # Context Management - ########################################################################### - - def _update_context(self, scene, flags): - - # Update meshes - scene_meshes = scene.meshes - - # Add new meshes to 
context - for mesh in scene_meshes - self._meshes: - for p in mesh.primitives: - p._add_to_context() - - # Remove old meshes from context - for mesh in self._meshes - scene_meshes: - for p in mesh.primitives: - p.delete() - - self._meshes = scene_meshes.copy() - - # Update mesh textures - mesh_textures = set() - for m in scene_meshes: - for p in m.primitives: - mesh_textures |= p.material.textures - - # Add new textures to context - for texture in mesh_textures - self._mesh_textures: - texture._add_to_context() - - # Remove old textures from context - for texture in self._mesh_textures - mesh_textures: - texture.delete() - - self._mesh_textures = mesh_textures.copy() - - shadow_textures = set() - for l in scene.lights: - # Create if needed - active = False - if (isinstance(l, DirectionalLight) and - flags & RenderFlags.SHADOWS_DIRECTIONAL): - active = True - elif (isinstance(l, PointLight) and - flags & RenderFlags.SHADOWS_POINT): - active = True - elif isinstance(l, SpotLight) and flags & RenderFlags.SHADOWS_SPOT: - active = True - - if active and l.shadow_texture is None: - l._generate_shadow_texture() - if l.shadow_texture is not None: - shadow_textures.add(l.shadow_texture) - - # Add new textures to context - for texture in shadow_textures - self._shadow_textures: - texture._add_to_context() - - # Remove old textures from context - for texture in self._shadow_textures - shadow_textures: - texture.delete() - - self._shadow_textures = shadow_textures.copy() - - ########################################################################### - # Texture Management - ########################################################################### - - def _bind_texture(self, texture, uniform_name, program): - """Bind a texture to an active texture unit and return - the texture unit index that was used. 
- """ - tex_id = self._get_next_active_texture() - glActiveTexture(GL_TEXTURE0 + tex_id) - texture._bind() - program.set_uniform(uniform_name, tex_id) - - def _get_next_active_texture(self): - val = self._texture_alloc_idx - self._texture_alloc_idx += 1 - return val - - def _reset_active_textures(self): - self._texture_alloc_idx = 0 - - ########################################################################### - # Camera Matrix Management - ########################################################################### - - def _get_camera_matrices(self, scene): - main_camera_node = scene.main_camera_node - if main_camera_node is None: - raise ValueError('Cannot render scene without a camera') - P = main_camera_node.camera.get_projection_matrix( - width=self.viewport_width, height=self.viewport_height - ) - pose = scene.get_pose(main_camera_node) - V = np.linalg.inv(pose) # V maps from world to camera - return V, P - - def _get_light_cam_matrices(self, scene, light_node, flags): - light = light_node.light - pose = scene.get_pose(light_node).copy() - s = scene.scale - camera = light._get_shadow_camera(s) - P = camera.get_projection_matrix() - if isinstance(light, DirectionalLight): - direction = -pose[:3,2] - c = scene.centroid - loc = c - direction * s - pose[:3,3] = loc - V = np.linalg.inv(pose) # V maps from world to camera - return V, P - - ########################################################################### - # Shader Program Management - ########################################################################### - - def _get_text_program(self): - program = self._program_cache.get_program( - vertex_shader='text.vert', - fragment_shader='text.frag' - ) - - if not program._in_context(): - program._add_to_context() - - return program - - def _compute_max_n_lights(self, flags): - max_n_lights = [MAX_N_LIGHTS, MAX_N_LIGHTS, MAX_N_LIGHTS] - n_tex_units = glGetIntegerv(GL_MAX_TEXTURE_IMAGE_UNITS) - - # Reserved texture units: 6 - # Normal Map - # Occlusion Map - # Emissive Map - # Base Color or Diffuse Map - # MR or SG Map - # Environment cubemap - - n_reserved_textures = 6 - n_available_textures = n_tex_units - n_reserved_textures - - # Distribute textures evenly among lights with shadows, with - # a preference for directional lights - n_shadow_types = 0 - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - n_shadow_types += 1 - if flags & RenderFlags.SHADOWS_SPOT: - n_shadow_types += 1 - if flags & RenderFlags.SHADOWS_POINT: - n_shadow_types += 1 - - if n_shadow_types > 0: - tex_per_light = n_available_textures // n_shadow_types - - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - max_n_lights[0] = ( - tex_per_light + - (n_available_textures - tex_per_light * n_shadow_types) - ) - if flags & RenderFlags.SHADOWS_SPOT: - max_n_lights[1] = tex_per_light - if flags & RenderFlags.SHADOWS_POINT: - max_n_lights[2] = tex_per_light - - return max_n_lights - - def _get_primitive_program(self, primitive, flags, program_flags): - vertex_shader = None - fragment_shader = None - geometry_shader = None - defines = {} - - if (bool(program_flags & ProgramFlags.USE_MATERIAL) and - not flags & RenderFlags.DEPTH_ONLY and - not flags & RenderFlags.FLAT and - not flags & RenderFlags.SEG): - vertex_shader = 'mesh.vert' - fragment_shader = 'mesh.frag' - elif bool(program_flags & (ProgramFlags.VERTEX_NORMALS | - ProgramFlags.FACE_NORMALS)): - vertex_shader = 'vertex_normals.vert' - if primitive.mode == GLTF.POINTS: - geometry_shader = 'vertex_normals_pc.geom' - else: - geometry_shader = 'vertex_normals.geom' - 
fragment_shader = 'vertex_normals.frag' - elif flags & RenderFlags.FLAT: - vertex_shader = 'flat.vert' - fragment_shader = 'flat.frag' - elif flags & RenderFlags.SEG: - vertex_shader = 'segmentation.vert' - fragment_shader = 'segmentation.frag' - else: - vertex_shader = 'mesh_depth.vert' - fragment_shader = 'mesh_depth.frag' - - # Set up vertex buffer DEFINES - bf = primitive.buf_flags - buf_idx = 1 - if bf & BufFlags.NORMAL: - defines['NORMAL_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TANGENT: - defines['TANGENT_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TEXCOORD_0: - defines['TEXCOORD_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.TEXCOORD_1: - defines['TEXCOORD_1_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.COLOR_0: - defines['COLOR_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.JOINTS_0: - defines['JOINTS_0_LOC'] = buf_idx - buf_idx += 1 - if bf & BufFlags.WEIGHTS_0: - defines['WEIGHTS_0_LOC'] = buf_idx - buf_idx += 1 - defines['INST_M_LOC'] = buf_idx - - # Set up shadow mapping defines - if flags & RenderFlags.SHADOWS_DIRECTIONAL: - defines['DIRECTIONAL_LIGHT_SHADOWS'] = 1 - if flags & RenderFlags.SHADOWS_SPOT: - defines['SPOT_LIGHT_SHADOWS'] = 1 - if flags & RenderFlags.SHADOWS_POINT: - defines['POINT_LIGHT_SHADOWS'] = 1 - max_n_lights = self._compute_max_n_lights(flags) - defines['MAX_DIRECTIONAL_LIGHTS'] = max_n_lights[0] - defines['MAX_SPOT_LIGHTS'] = max_n_lights[1] - defines['MAX_POINT_LIGHTS'] = max_n_lights[2] - - # Set up vertex normal defines - if program_flags & ProgramFlags.VERTEX_NORMALS: - defines['VERTEX_NORMALS'] = 1 - if program_flags & ProgramFlags.FACE_NORMALS: - defines['FACE_NORMALS'] = 1 - - # Set up material texture defines - if bool(program_flags & ProgramFlags.USE_MATERIAL): - tf = primitive.material.tex_flags - if tf & TexFlags.NORMAL: - defines['HAS_NORMAL_TEX'] = 1 - if tf & TexFlags.OCCLUSION: - defines['HAS_OCCLUSION_TEX'] = 1 - if tf & TexFlags.EMISSIVE: - defines['HAS_EMISSIVE_TEX'] = 1 - if tf & TexFlags.BASE_COLOR: - defines['HAS_BASE_COLOR_TEX'] = 1 - if tf & TexFlags.METALLIC_ROUGHNESS: - defines['HAS_METALLIC_ROUGHNESS_TEX'] = 1 - if tf & TexFlags.DIFFUSE: - defines['HAS_DIFFUSE_TEX'] = 1 - if tf & TexFlags.SPECULAR_GLOSSINESS: - defines['HAS_SPECULAR_GLOSSINESS_TEX'] = 1 - if isinstance(primitive.material, MetallicRoughnessMaterial): - defines['USE_METALLIC_MATERIAL'] = 1 - elif isinstance(primitive.material, SpecularGlossinessMaterial): - defines['USE_GLOSSY_MATERIAL'] = 1 - - program = self._program_cache.get_program( - vertex_shader=vertex_shader, - fragment_shader=fragment_shader, - geometry_shader=geometry_shader, - defines=defines - ) - - if not program._in_context(): - program._add_to_context() - - return program - - ########################################################################### - # Viewport Management - ########################################################################### - - def _configure_forward_pass_viewport(self, flags): - - # If using offscreen render, bind main framebuffer - if flags & RenderFlags.OFFSCREEN: - self._configure_main_framebuffer() - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms) - else: - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) - - glViewport(0, 0, self.viewport_width, self.viewport_height) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - - def _configure_shadow_mapping_viewport(self, light, flags): - self._configure_shadow_framebuffer() - glBindFramebuffer(GL_FRAMEBUFFER, self._shadow_fb) - 
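Stepping back to `_get_primitive_program` above: it only assembles a `defines` dict (attribute locations, shadow and light limits, texture flags) and hands it to the program cache, which splices those values into the GLSL source. As a rough illustration of that idea only — `inject_defines` below is a hypothetical helper, not the renderer's actual shader-cache code — the splice might look like this:

```python
def inject_defines(glsl_source, defines):
    """Prepend '#define NAME VALUE' lines to a GLSL source string.

    Hypothetical sketch for illustration; the real program cache used by
    this renderer may splice its defines differently.
    """
    define_lines = ['#define {} {}'.format(k, v) for k, v in sorted(defines.items())]
    lines = glsl_source.splitlines()
    # The #version directive must stay on the first line of the shader.
    if lines and lines[0].lstrip().startswith('#version'):
        return '\n'.join([lines[0]] + define_lines + lines[1:]) + '\n'
    return '\n'.join(define_lines + lines) + '\n'

# e.g. inject_defines(src, {'MAX_POINT_LIGHTS': 8, 'HAS_BASE_COLOR_TEX': 1})
```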
-        light.shadow_texture._bind()
-        light.shadow_texture._bind_as_depth_attachment()
-        glActiveTexture(GL_TEXTURE0)
-        light.shadow_texture._bind()
-        glDrawBuffer(GL_NONE)
-        glReadBuffer(GL_NONE)
-
-        glClear(GL_DEPTH_BUFFER_BIT)
-        glViewport(0, 0, SHADOW_TEX_SZ, SHADOW_TEX_SZ)
-        glEnable(GL_DEPTH_TEST)
-        glDepthMask(GL_TRUE)
-        glDepthFunc(GL_LESS)
-        glDepthRange(0.0, 1.0)
-        glDisable(GL_CULL_FACE)
-        glDisable(GL_BLEND)
-
-    ###########################################################################
-    # Framebuffer Management
-    ###########################################################################
-
-    def _configure_shadow_framebuffer(self):
-        if self._shadow_fb is None:
-            self._shadow_fb = glGenFramebuffers(1)
-
-    def _delete_shadow_framebuffer(self):
-        if self._shadow_fb is not None:
-            glDeleteFramebuffers(1, [self._shadow_fb])
-
-    def _configure_main_framebuffer(self):
-        # If mismatch with prior framebuffer, delete it
-        if (self._main_fb is not None and
-                self.viewport_width != self._main_fb_dims[0] or
-                self.viewport_height != self._main_fb_dims[1]):
-            self._delete_main_framebuffer()
-
-        # If framebuffer doesn't exist, create it
-        if self._main_fb is None:
-            # Generate standard buffer
-            self._main_cb, self._main_db = glGenRenderbuffers(2)
-
-            glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb)
-            glRenderbufferStorage(
-                GL_RENDERBUFFER, GL_RGBA,
-                self.viewport_width, self.viewport_height
-            )
-
-            glBindRenderbuffer(GL_RENDERBUFFER, self._main_db)
-            glRenderbufferStorage(
-                GL_RENDERBUFFER, GL_DEPTH_COMPONENT24,
-                self.viewport_width, self.viewport_height
-            )
-
-            self._main_fb = glGenFramebuffers(1)
-            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb)
-            glFramebufferRenderbuffer(
-                GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
-                GL_RENDERBUFFER, self._main_cb
-            )
-            glFramebufferRenderbuffer(
-                GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
-                GL_RENDERBUFFER, self._main_db
-            )
-
-            # Generate multisample buffer
-            self._main_cb_ms, self._main_db_ms = glGenRenderbuffers(2)
-            glBindRenderbuffer(GL_RENDERBUFFER, self._main_cb_ms)
-            # glRenderbufferStorageMultisample(
-            #     GL_RENDERBUFFER, 4, GL_RGBA,
-            #     self.viewport_width, self.viewport_height
-            # )
-            # glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms)
-            # glRenderbufferStorageMultisample(
-            #     GL_RENDERBUFFER, 4, GL_DEPTH_COMPONENT24,
-            #     self.viewport_width, self.viewport_height
-            # )
-            # Added line: cap the sample count at what the driver supports
-            num_samples = min(glGetIntegerv(GL_MAX_SAMPLES), 4)  # No more than GL_MAX_SAMPLES
-
-            # Same as the commented-out call above, with 4 replaced by num_samples
-            glRenderbufferStorageMultisample(GL_RENDERBUFFER, num_samples, GL_RGBA, self.viewport_width, self.viewport_height)
-
-            glBindRenderbuffer(GL_RENDERBUFFER, self._main_db_ms)  # This line is unchanged
-
-            # This line also replaces 4 with num_samples
-            glRenderbufferStorageMultisample(GL_RENDERBUFFER, num_samples, GL_DEPTH_COMPONENT24, self.viewport_width, self.viewport_height)
-
-            self._main_fb_ms = glGenFramebuffers(1)
-            glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb_ms)
-            glFramebufferRenderbuffer(
-                GL_DRAW_FRAMEBUFFER, GL_COLOR_ATTACHMENT0,
-                GL_RENDERBUFFER, self._main_cb_ms
-            )
-            glFramebufferRenderbuffer(
-                GL_DRAW_FRAMEBUFFER, GL_DEPTH_ATTACHMENT,
-                GL_RENDERBUFFER, self._main_db_ms
-            )
-
-        self._main_fb_dims = (self.viewport_width, self.viewport_height)
-
-    def _delete_main_framebuffer(self):
-        if self._main_fb is not None:
-            glDeleteFramebuffers(2, [self._main_fb, self._main_fb_ms])
-        if self._main_cb is not None:
-            glDeleteRenderbuffers(2, [self._main_cb, self._main_cb_ms])
-        if self._main_db is not None:
-            glDeleteRenderbuffers(2, [self._main_db,
self._main_db_ms]) - - self._main_fb = None - self._main_cb = None - self._main_db = None - self._main_fb_ms = None - self._main_cb_ms = None - self._main_db_ms = None - self._main_fb_dims = (None, None) - - def _read_main_framebuffer(self, scene, flags): - width, height = self._main_fb_dims[0], self._main_fb_dims[1] - - # Bind framebuffer and blit buffers - glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb_ms) - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, self._main_fb) - glBlitFramebuffer( - 0, 0, width, height, 0, 0, width, height, - GL_COLOR_BUFFER_BIT, GL_LINEAR - ) - glBlitFramebuffer( - 0, 0, width, height, 0, 0, width, height, - GL_DEPTH_BUFFER_BIT, GL_NEAREST - ) - glBindFramebuffer(GL_READ_FRAMEBUFFER, self._main_fb) - - # Read depth - depth_buf = glReadPixels( - 0, 0, width, height, GL_DEPTH_COMPONENT, GL_FLOAT - ) - depth_im = np.frombuffer(depth_buf, dtype=np.float32) - depth_im = depth_im.reshape((height, width)) - depth_im = np.flip(depth_im, axis=0) - inf_inds = (depth_im == 1.0) - depth_im = 2.0 * depth_im - 1.0 - z_near = scene.main_camera_node.camera.znear - z_far = scene.main_camera_node.camera.zfar - noninf = np.logical_not(inf_inds) - if z_far is None: - depth_im[noninf] = 2 * z_near / (1.0 - depth_im[noninf]) - else: - depth_im[noninf] = ((2.0 * z_near * z_far) / - (z_far + z_near - depth_im[noninf] * - (z_far - z_near))) - depth_im[inf_inds] = 0.0 - - # Resize for macos if needed - if sys.platform == 'darwin': - depth_im = self._resize_image(depth_im) - - if flags & RenderFlags.DEPTH_ONLY: - return depth_im - - # Read color - if flags & RenderFlags.RGBA: - color_buf = glReadPixels( - 0, 0, width, height, GL_RGBA, GL_UNSIGNED_BYTE - ) - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 4)) - else: - color_buf = glReadPixels( - 0, 0, width, height, GL_RGB, GL_UNSIGNED_BYTE - ) - color_im = np.frombuffer(color_buf, dtype=np.uint8) - color_im = color_im.reshape((height, width, 3)) - color_im = np.flip(color_im, axis=0) - - # Resize for macos if needed - if sys.platform == 'darwin': - color_im = self._resize_image(color_im, True) - - return color_im, depth_im - - def _resize_image(self, value, antialias=False): - """If needed, rescale the render for MacOS.""" - img = PIL.Image.fromarray(value) - resample = PIL.Image.NEAREST - if antialias: - resample = PIL.Image.BILINEAR - size = (self.viewport_width // self.dpscale, - self.viewport_height // self.dpscale) - img = img.resize(size, resample=resample) - return np.array(img) - - ########################################################################### - # Shadowmap Debugging - ########################################################################### - - def _forward_pass_no_reset(self, scene, flags): - # Set up camera matrices - V, P = self._get_camera_matrices(scene) - - # Now, render each object in sorted order - for node in self._sorted_mesh_nodes(scene): - mesh = node.mesh - - # Skip the mesh if it's not visible - if not mesh.is_visible: - continue - - for primitive in mesh.primitives: - - # First, get and bind the appropriate program - program = self._get_primitive_program( - primitive, flags, ProgramFlags.USE_MATERIAL - ) - program._bind() - - # Set the camera uniforms - program.set_uniform('V', V) - program.set_uniform('P', P) - program.set_uniform( - 'cam_pos', scene.get_pose(scene.main_camera_node)[:3,3] - ) - - # Next, bind the lighting - if not flags & RenderFlags.DEPTH_ONLY and not flags & RenderFlags.FLAT: - self._bind_lighting(scene, program, node, flags) - - # 
Finally, bind and draw the primitive - self._bind_and_draw_primitive( - primitive=primitive, - pose=scene.get_pose(node), - program=program, - flags=flags - ) - self._reset_active_textures() - - # Unbind the shader and flush the output - if program is not None: - program._unbind() - glFlush() - - def _render_light_shadowmaps(self, scene, light_nodes, flags, tile=False): - glBindFramebuffer(GL_DRAW_FRAMEBUFFER, 0) - glClearColor(*scene.bg_color) - glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT) - glEnable(GL_DEPTH_TEST) - glDepthMask(GL_TRUE) - glDepthFunc(GL_LESS) - glDepthRange(0.0, 1.0) - - w = self.viewport_width - h = self.viewport_height - - num_nodes = len(light_nodes) - viewport_dims = { - (0, 2): [0, h // 2, w // 2, h], - (1, 2): [w // 2, h // 2, w, h], - (0, 3): [0, h // 2, w // 2, h], - (1, 3): [w // 2, h // 2, w, h], - (2, 3): [0, 0, w // 2, h // 2], - (0, 4): [0, h // 2, w // 2, h], - (1, 4): [w // 2, h // 2, w, h], - (2, 4): [0, 0, w // 2, h // 2], - (3, 4): [w // 2, 0, w, h // 2] - } - - if tile: - for i, ln in enumerate(light_nodes): - light = ln.light - - if light.shadow_texture is None: - raise ValueError('Light does not have a shadow texture') - - glViewport(*viewport_dims[(i, num_nodes + 1)]) - - program = self._get_debug_quad_program() - program._bind() - self._bind_texture(light.shadow_texture, 'depthMap', program) - self._render_debug_quad() - self._reset_active_textures() - glFlush() - i += 1 - glViewport(*viewport_dims[(i, num_nodes + 1)]) - self._forward_pass_no_reset(scene, flags) - else: - for i, ln in enumerate(light_nodes): - light = ln.light - - if light.shadow_texture is None: - raise ValueError('Light does not have a shadow texture') - - glViewport(0, 0, self.viewport_width, self.viewport_height) - - program = self._get_debug_quad_program() - program._bind() - self._bind_texture(light.shadow_texture, 'depthMap', program) - self._render_debug_quad() - self._reset_active_textures() - glFlush() - return - - def _get_debug_quad_program(self): - program = self._program_cache.get_program( - vertex_shader='debug_quad.vert', - fragment_shader='debug_quad.frag' - ) - if not program._in_context(): - program._add_to_context() - return program - - def _render_debug_quad(self): - x = glGenVertexArrays(1) - glBindVertexArray(x) - glDrawArrays(GL_TRIANGLES, 0, 6) - glBindVertexArray(0) - glDeleteVertexArrays(1, [x]) diff --git a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/google_app_engine/Dockerfile b/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/google_app_engine/Dockerfile deleted file mode 100644 index 0155618f475104e9858b81470339558156c94e13..0000000000000000000000000000000000000000 --- a/spaces/bulentsofttech/gradio_s1000_veri_toplama_modeli/yolov5/utils/google_app_engine/Dockerfile +++ /dev/null @@ -1,25 +0,0 @@ -FROM gcr.io/google-appengine/python - -# Create a virtualenv for dependencies. This isolates these packages from -# system-level packages. -# Use -p python3 or -p python3.7 to select python version. Default is version 2. -RUN virtualenv /env -p python3 - -# Setting these environment variables are the same as running -# source /env/bin/activate. -ENV VIRTUAL_ENV /env -ENV PATH /env/bin:$PATH - -RUN apt-get update && apt-get install -y python-opencv - -# Copy the application's requirements.txt and run pip to install all -# dependencies into the virtualenv. -ADD requirements.txt /app/requirements.txt -RUN pip install -r /app/requirements.txt - -# Add the application source code. -ADD . 
/app - -# Run a WSGI server to serve the application. gunicorn must be declared as -# a dependency in requirements.txt. -CMD gunicorn -b :$PORT main:app diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/datasets/prepare_ade20k_sem_seg.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/datasets/prepare_ade20k_sem_seg.py deleted file mode 100644 index 8b4a58d8f2877544498e328b6d269f23aa1eb59f..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/datasets/prepare_ade20k_sem_seg.py +++ /dev/null @@ -1,26 +0,0 @@ -#!/usr/bin/env python3 -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. -import numpy as np -import os -from pathlib import Path -import tqdm -from PIL import Image - - -def convert(input, output): - img = np.asarray(Image.open(input)) - assert img.dtype == np.uint8 - img = img - 1 # 0 (ignore) becomes 255. others are shifted by 1 - Image.fromarray(img).save(output) - - -if __name__ == "__main__": - dataset_dir = Path(os.getenv("DETECTRON2_DATASETS", "datasets")) / "ADEChallengeData2016" - for name in ["training", "validation"]: - annotation_dir = dataset_dir / "annotations" / name - output_dir = dataset_dir / "annotations_detectron2" / name - output_dir.mkdir(parents=True, exist_ok=True) - for file in tqdm.tqdm(list(annotation_dir.iterdir())): - output_file = output_dir / file.name - convert(file, output_file) diff --git a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/test_visualizer.py b/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/test_visualizer.py deleted file mode 100644 index 1005000f525bc876ae32a3421737e3f9fe3bc5f4..0000000000000000000000000000000000000000 --- a/spaces/carlosalonso/Detection-video/carpeta_deteccion/tests/test_visualizer.py +++ /dev/null @@ -1,278 +0,0 @@ -# -*- coding: utf-8 -*- -# Copyright (c) Facebook, Inc. and its affiliates. 
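The `img = img - 1` step in `convert` above leans on unsigned-byte wraparound: label 0 (the ignore class) underflows to 255, while every other label simply shifts down by one. A quick sanity check of that behaviour with NumPy:

```python
import numpy as np

labels = np.array([0, 1, 2, 150], dtype=np.uint8)
shifted = labels - 1   # uint8 arithmetic wraps around on underflow
print(shifted)         # -> [255   0   1 149]
```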
- -import numpy as np -import os -import tempfile -import unittest -import cv2 -import torch - -from detectron2.data import MetadataCatalog -from detectron2.structures import BoxMode, Instances, RotatedBoxes -from detectron2.utils.visualizer import ColorMode, Visualizer - - -class TestVisualizer(unittest.TestCase): - def _random_data(self): - H, W = 100, 100 - N = 10 - img = np.random.rand(H, W, 3) * 255 - boxxy = np.random.rand(N, 2) * (H // 2) - boxes = np.concatenate((boxxy, boxxy + H // 2), axis=1) - - def _rand_poly(): - return np.random.rand(3, 2).flatten() * H - - polygons = [[_rand_poly() for _ in range(np.random.randint(1, 5))] for _ in range(N)] - - mask = np.zeros_like(img[:, :, 0], dtype=np.bool) - mask[:40, 10:20] = 1 - - labels = [str(i) for i in range(N)] - return img, boxes, labels, polygons, [mask] * N - - @property - def metadata(self): - return MetadataCatalog.get("coco_2017_train") - - def test_draw_dataset_dict(self): - img = np.random.rand(512, 512, 3) * 255 - dic = { - "annotations": [ - { - "bbox": [ - 368.9946492271106, - 330.891438763377, - 13.148537455410235, - 13.644708680142685, - ], - "bbox_mode": BoxMode.XYWH_ABS, - "category_id": 0, - "iscrowd": 1, - "segmentation": { - "counts": "_jh52m?2N2N2N2O100O10O001N1O2MceP2", - "size": [512, 512], - }, - } - ], - "height": 512, - "image_id": 1, - "width": 512, - } - v = Visualizer(img) - v.draw_dataset_dict(dic) - - v = Visualizer(img, self.metadata) - v.draw_dataset_dict(dic) - - def test_draw_rotated_dataset_dict(self): - img = np.random.rand(512, 512, 3) * 255 - dic = { - "annotations": [ - { - "bbox": [ - 368.9946492271106, - 330.891438763377, - 13.148537455410235, - 13.644708680142685, - 45.0, - ], - "bbox_mode": BoxMode.XYWHA_ABS, - "category_id": 0, - "iscrowd": 1, - } - ], - "height": 512, - "image_id": 1, - "width": 512, - } - v = Visualizer(img, self.metadata) - v.draw_dataset_dict(dic) - - def test_overlay_instances(self): - img, boxes, labels, polygons, masks = self._random_data() - - v = Visualizer(img, self.metadata) - output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image() - self.assertEqual(output.shape, img.shape) - - # Test 2x scaling - v = Visualizer(img, self.metadata, scale=2.0) - output = v.overlay_instances(masks=polygons, boxes=boxes, labels=labels).get_image() - self.assertEqual(output.shape[0], img.shape[0] * 2) - - # Test overlay masks - v = Visualizer(img, self.metadata) - output = v.overlay_instances(masks=masks, boxes=boxes, labels=labels).get_image() - self.assertEqual(output.shape, img.shape) - - def test_overlay_instances_no_boxes(self): - img, boxes, labels, polygons, _ = self._random_data() - v = Visualizer(img, self.metadata) - v.overlay_instances(masks=polygons, boxes=None, labels=labels).get_image() - - def test_draw_instance_predictions(self): - img, boxes, _, _, masks = self._random_data() - num_inst = len(boxes) - inst = Instances((img.shape[0], img.shape[1])) - inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) - inst.scores = torch.rand(num_inst) - inst.pred_boxes = torch.from_numpy(boxes) - inst.pred_masks = torch.from_numpy(np.asarray(masks)) - - v = Visualizer(img) - v.draw_instance_predictions(inst) - - v = Visualizer(img, self.metadata) - v.draw_instance_predictions(inst) - - def test_BWmode_nomask(self): - img, boxes, _, _, masks = self._random_data() - num_inst = len(boxes) - inst = Instances((img.shape[0], img.shape[1])) - inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) - inst.scores = torch.rand(num_inst) - 
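As an aside on the dataset dicts used in these tests: boxes are stored as `[x0, y0, w, h]` with `bbox_mode=BoxMode.XYWH_ABS` (or with a trailing angle for `XYWHA_ABS`). If corner coordinates are needed instead, `BoxMode.convert` (already imported in this test module) performs the translation; a small sketch using rounded values from `test_draw_dataset_dict`:

```python
from detectron2.structures import BoxMode

# XYWH_ABS -> XYXY_ABS: (x0, y0, w, h) becomes (x0, y0, x0 + w, y0 + h)
box_xywh = [369.0, 330.9, 13.1, 13.6]   # rounded values from the dict above
box_xyxy = BoxMode.convert(box_xywh, BoxMode.XYWH_ABS, BoxMode.XYXY_ABS)
print(box_xyxy)  # roughly [369.0, 330.9, 382.1, 344.5]
```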
inst.pred_boxes = torch.from_numpy(boxes) - - v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW) - v.draw_instance_predictions(inst) - - # check that output is grayscale - inst = inst[:0] - v = Visualizer(img, self.metadata, instance_mode=ColorMode.IMAGE_BW) - output = v.draw_instance_predictions(inst).get_image() - self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 1])) - self.assertTrue(np.allclose(output[:, :, 0], output[:, :, 2])) - - def test_draw_empty_mask_predictions(self): - img, boxes, _, _, masks = self._random_data() - num_inst = len(boxes) - inst = Instances((img.shape[0], img.shape[1])) - inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) - inst.scores = torch.rand(num_inst) - inst.pred_boxes = torch.from_numpy(boxes) - inst.pred_masks = torch.from_numpy(np.zeros_like(np.asarray(masks))) - - v = Visualizer(img, self.metadata) - v.draw_instance_predictions(inst) - - def test_correct_output_shape(self): - img = np.random.rand(928, 928, 3) * 255 - v = Visualizer(img, self.metadata) - out = v.output.get_image() - self.assertEqual(out.shape, img.shape) - - def test_overlay_rotated_instances(self): - H, W = 100, 150 - img = np.random.rand(H, W, 3) * 255 - num_boxes = 50 - boxes_5d = torch.zeros(num_boxes, 5) - boxes_5d[:, 0] = torch.FloatTensor(num_boxes).uniform_(-0.1 * W, 1.1 * W) - boxes_5d[:, 1] = torch.FloatTensor(num_boxes).uniform_(-0.1 * H, 1.1 * H) - boxes_5d[:, 2] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H)) - boxes_5d[:, 3] = torch.FloatTensor(num_boxes).uniform_(0, max(W, H)) - boxes_5d[:, 4] = torch.FloatTensor(num_boxes).uniform_(-1800, 1800) - rotated_boxes = RotatedBoxes(boxes_5d) - labels = [str(i) for i in range(num_boxes)] - - v = Visualizer(img, self.metadata) - output = v.overlay_instances(boxes=rotated_boxes, labels=labels).get_image() - self.assertEqual(output.shape, img.shape) - - def test_draw_no_metadata(self): - img, boxes, _, _, masks = self._random_data() - num_inst = len(boxes) - inst = Instances((img.shape[0], img.shape[1])) - inst.pred_classes = torch.randint(0, 80, size=(num_inst,)) - inst.scores = torch.rand(num_inst) - inst.pred_boxes = torch.from_numpy(boxes) - inst.pred_masks = torch.from_numpy(np.asarray(masks)) - - v = Visualizer(img, MetadataCatalog.get("asdfasdf")) - v.draw_instance_predictions(inst) - - def test_draw_binary_mask(self): - img, boxes, _, _, masks = self._random_data() - img[:, :, 0] = 0 # remove red color - mask = masks[0] - mask_with_hole = np.zeros_like(mask).astype("uint8") - mask_with_hole = cv2.rectangle(mask_with_hole, (10, 10), (50, 50), 1, 5) - - for m in [mask, mask_with_hole]: - for save in [True, False]: - v = Visualizer(img) - o = v.draw_binary_mask(m, color="red", text="test") - if save: - with tempfile.TemporaryDirectory(prefix="detectron2_viz") as d: - path = os.path.join(d, "output.png") - o.save(path) - o = cv2.imread(path)[:, :, ::-1] - else: - o = o.get_image().astype("float32") - # red color is drawn on the image - self.assertTrue(o[:, :, 0].sum() > 0) - - def test_draw_soft_mask(self): - img = np.random.rand(100, 100, 3) * 255 - img[:, :, 0] = 0 # remove red color - mask = np.zeros((100, 100), dtype=np.float32) - mask[30:50, 40:50] = 1.0 - cv2.GaussianBlur(mask, (21, 21), 10) - - v = Visualizer(img) - o = v.draw_soft_mask(mask, color="red", text="test") - o = o.get_image().astype("float32") - # red color is drawn on the image - self.assertTrue(o[:, :, 0].sum() > 0) - - # test draw empty mask - v = Visualizer(img) - o = v.draw_soft_mask(np.zeros((100, 100), 
dtype=np.float32), color="red", text="test") - o = o.get_image().astype("float32") - - def test_border_mask_with_holes(self): - H, W = 200, 200 - img = np.zeros((H, W, 3)) - img[:, :, 0] = 255.0 - v = Visualizer(img, scale=3) - - mask = np.zeros((H, W)) - mask[:, 100:150] = 1 - # create a hole, to trigger imshow - mask = cv2.rectangle(mask, (110, 110), (130, 130), 0, thickness=-1) - output = v.draw_binary_mask(mask, color="blue") - output = output.get_image()[:, :, ::-1] - - first_row = {tuple(x.tolist()) for x in output[0]} - last_row = {tuple(x.tolist()) for x in output[-1]} - # Check quantization / off-by-1 error: the first and last row must have two colors - self.assertEqual(len(last_row), 2) - self.assertEqual(len(first_row), 2) - self.assertIn((0, 0, 255), last_row) - self.assertIn((0, 0, 255), first_row) - - def test_border_polygons(self): - H, W = 200, 200 - img = np.zeros((H, W, 3)) - img[:, :, 0] = 255.0 - v = Visualizer(img, scale=3) - mask = np.zeros((H, W)) - mask[:, 100:150] = 1 - - output = v.draw_binary_mask(mask, color="blue") - output = output.get_image()[:, :, ::-1] - - first_row = {tuple(x.tolist()) for x in output[0]} - last_row = {tuple(x.tolist()) for x in output[-1]} - # Check quantization / off-by-1 error: - # the first and last row must have >=2 colors, because the polygon - # touches both rows - self.assertGreaterEqual(len(last_row), 2) - self.assertGreaterEqual(len(first_row), 2) - self.assertIn((0, 0, 255), last_row) - self.assertIn((0, 0, 255), first_row) - - -if __name__ == "__main__": - unittest.main() diff --git a/spaces/cat630/ChuanhuChatGPT/README.md b/spaces/cat630/ChuanhuChatGPT/README.md deleted file mode 100644 index d1ae83f73ac14888dedce02615afaaaea7f3d7d5..0000000000000000000000000000000000000000 --- a/spaces/cat630/ChuanhuChatGPT/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: ChuanhuChatGPT -emoji: 🐠 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.19.1 -app_file: app.py -pinned: false -license: mit -duplicated_from: JohnSmith9982/ChuanhuChatGPT ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/chansung/LLM-As-Chatbot/chats/starchat.py b/spaces/chansung/LLM-As-Chatbot/chats/starchat.py deleted file mode 100644 index 0f8ecb4074a07683473d189bd063bfbe008ff627..0000000000000000000000000000000000000000 --- a/spaces/chansung/LLM-As-Chatbot/chats/starchat.py +++ /dev/null @@ -1,112 +0,0 @@ -import torch -from transformers import StoppingCriteria, StoppingCriteriaList - -import copy -import json -import global_vars -from chats import pre, post -from pingpong import PingPong -from gens.batch_gen import get_output_batch - -from pingpong.context import CtxLastWindowStrategy - -class StopOnTokens(StoppingCriteria): - def __call__(self, input_ids: torch.LongTensor, scores: torch.FloatTensor, **kwargs) -> bool: - stop_ids = [49155, 1, 0] - for stop_id in stop_ids: - if input_ids[0][-1] == stop_id: - return True - return False - -def build_prompts(ppmanager, user_message, global_context, win_size=3): - dummy_ppm = copy.deepcopy(ppmanager) - - dummy_ppm.ctx = global_context - for pingpong in dummy_ppm.pingpongs: - pong = pingpong.pong - first_sentence = pong.split("\n")[0] - if first_sentence != "" and \ - pre.contains_image_markdown(first_sentence): - pong = ' '.join(pong.split("\n")[1:]).strip() - pingpong.pong = pong - - lws = CtxLastWindowStrategy(win_size) - - prompt = lws(dummy_ppm) - return prompt - -def text_stream(ppmanager, streamer): - for new_text in streamer: 
- ppmanager.append_pong(new_text) - yield ppmanager, ppmanager.build_uis() - - yield ppmanager, ppmanager.build_uis() - -def summarize( - ppmanager, prompt_to_summarize, win_size, - temperature, top_p, top_k, repetition_penalty, max_new_tokens, - num_beams, use_cache, do_sample, eos_token_id, pad_token_id -): - ctx = ppmanager.ctx - last_pong = ppmanager.pingpongs[-1].pong - ppmanager.add_pingpong(PingPong(prompt_to_summarize, "")) - prompt = ppmanager.build_prompts(from_idx=-win_size) - - _, gen_config_summarization = pre.build_gen_config( - temperature, top_p, top_k, repetition_penalty, max_new_tokens, - num_beams, use_cache, do_sample, eos_token_id, pad_token_id - ) - summarize_output = get_output_batch( - global_vars.model, global_vars.tokenizer, [prompt], gen_config_summarization - )[0].strip() - ppmanager.ctx = summarize_output - ppmanager.pop_pingpong() - return ppmanager - -def chat_stream( - idx, local_data, user_message, state, model_num, - global_context, ctx_num_lconv, ctx_sum_prompt, - res_temp, res_topp, res_topk, res_rpen, res_mnts, res_beams, res_cache, res_sample, res_eosid, res_padid, -): - res = [ - state["ppmanager_type"].from_json(json.dumps(ppm)) - for ppm in local_data - ] - - ppm = res[idx] - - # add_ping returns a prompt structured in Alpaca form - ppm.add_pingpong( - PingPong(user_message, "") - ) - prompt = build_prompts(ppm, user_message, global_context, ctx_num_lconv) - - # prepare text generating streamer & start generating - gen_kwargs, streamer = pre.build( - prompt, - res_temp, res_topp, res_topk, res_rpen, res_mnts, - res_beams, res_cache, res_sample, res_eosid, res_padid, - StoppingCriteriaList([StopOnTokens()]), False - ) - pre.start_gen(gen_kwargs) - - # handling stream - for ppmanager, uis in text_stream(ppm, streamer): - yield "", uis, prompt, str(res) - - ppm = post.strip_pong(ppm) - yield "", ppm.build_uis(), prompt, str(res) - - # summarization - # ppm.add_pingpong( - # PingPong(None, "![](https://i.postimg.cc/ZKNKDPBd/Vanilla-1s-209px.gif)") - # ) - # yield "", ppm.build_uis(), prompt, state - # ppm.pop_pingpong() - - # ppm = summarize( - # ppm, ctx_sum_prompt, ctx_num_lconv, - # sum_temp, sum_topp, sum_topk, sum_rpen, sum_mnts, - # sum_beams, sum_cache, sum_sample, sum_eosid, sum_padid - # ) - yield "", ppm.build_uis(), prompt, str(res) \ No newline at end of file diff --git a/spaces/chendl/compositional_test/transformers/examples/legacy/pytorch-lightning/run_pos.sh b/spaces/chendl/compositional_test/transformers/examples/legacy/pytorch-lightning/run_pos.sh deleted file mode 100644 index 93765366cf3123af5e361c236b46cf36680d90e2..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/legacy/pytorch-lightning/run_pos.sh +++ /dev/null @@ -1,39 +0,0 @@ -#!/usr/bin/env bash -if ! [ -f ./dev.txt ]; then - echo "Download dev dataset...." - curl -L -o ./dev.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-dev.conllu' -fi - -if ! [ -f ./test.txt ]; then - echo "Download test dataset...." - curl -L -o ./test.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-test.conllu' -fi - -if ! [ -f ./train.txt ]; then - echo "Download train dataset...." 
- curl -L -o ./train.txt 'https://github.com/UniversalDependencies/UD_English-EWT/raw/master/en_ewt-ud-train.conllu' -fi - -export MAX_LENGTH=200 -export BERT_MODEL=bert-base-uncased -export OUTPUT_DIR=postagger-model -export BATCH_SIZE=32 -export NUM_EPOCHS=3 -export SAVE_STEPS=750 -export SEED=1 - - -# Add parent directory to python path to access lightning_base.py -export PYTHONPATH="../":"${PYTHONPATH}" - -python3 run_ner.py --data_dir ./ \ ---task_type POS \ ---model_name_or_path $BERT_MODEL \ ---output_dir $OUTPUT_DIR \ ---max_seq_length $MAX_LENGTH \ ---num_train_epochs $NUM_EPOCHS \ ---train_batch_size $BATCH_SIZE \ ---seed $SEED \ ---gpus 1 \ ---do_train \ ---do_predict diff --git a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/callbacks.py b/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/callbacks.py deleted file mode 100644 index 6f6ed5dd58acfd7b053545b6c24c1ff2cb7dbcc8..0000000000000000000000000000000000000000 --- a/spaces/chendl/compositional_test/transformers/examples/research_projects/seq2seq-distillation/callbacks.py +++ /dev/null @@ -1,116 +0,0 @@ -import logging -from pathlib import Path - -import numpy as np -import pytorch_lightning as pl -import torch -from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint -from pytorch_lightning.utilities import rank_zero_only - -from utils import save_json - - -def count_trainable_parameters(model): - model_parameters = filter(lambda p: p.requires_grad, model.parameters()) - params = sum([np.prod(p.size()) for p in model_parameters]) - return params - - -logger = logging.getLogger(__name__) - - -class Seq2SeqLoggingCallback(pl.Callback): - def on_batch_end(self, trainer, pl_module): - lrs = {f"lr_group_{i}": param["lr"] for i, param in enumerate(pl_module.trainer.optimizers[0].param_groups)} - pl_module.logger.log_metrics(lrs) - - @rank_zero_only - def _write_logs( - self, trainer: pl.Trainer, pl_module: pl.LightningModule, type_path: str, save_generations=True - ) -> None: - logger.info(f"***** {type_path} results at step {trainer.global_step:05d} *****") - metrics = trainer.callback_metrics - trainer.logger.log_metrics({k: v for k, v in metrics.items() if k not in ["log", "progress_bar", "preds"]}) - # Log results - od = Path(pl_module.hparams.output_dir) - if type_path == "test": - results_file = od / "test_results.txt" - generations_file = od / "test_generations.txt" - else: - # this never gets hit. I prefer not to save intermediate generations, and results are in metrics.json - # If people want this it will be easy enough to add back. 
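For context, `Seq2SeqLoggingCallback` and the factory helpers defined further down in this module (`get_checkpoint_callback`, `get_early_stopping_callback`) are meant to be handed to a PyTorch Lightning `Trainer`. A minimal wiring sketch — the import path and `Trainer` arguments are assumptions for illustration, not taken from this repo's training script:

```python
# Hypothetical wiring; assumes this module is importable as `callbacks`
# and that a LightningModule named `model` is defined elsewhere.
import pytorch_lightning as pl

from callbacks import (
    Seq2SeqLoggingCallback,
    get_checkpoint_callback,
    get_early_stopping_callback,
)

trainer = pl.Trainer(
    max_epochs=3,
    callbacks=[
        Seq2SeqLoggingCallback(),
        get_checkpoint_callback("output_dir", metric="rouge2", save_top_k=1),
        get_early_stopping_callback(metric="rouge2", patience=3),
    ],
)
# trainer.fit(model)
```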
- results_file = od / f"{type_path}_results/{trainer.global_step:05d}.txt" - generations_file = od / f"{type_path}_generations/{trainer.global_step:05d}.txt" - results_file.parent.mkdir(exist_ok=True) - generations_file.parent.mkdir(exist_ok=True) - with open(results_file, "a+") as writer: - for key in sorted(metrics): - if key in ["log", "progress_bar", "preds"]: - continue - val = metrics[key] - if isinstance(val, torch.Tensor): - val = val.item() - msg = f"{key}: {val:.6f}\n" - writer.write(msg) - - if not save_generations: - return - - if "preds" in metrics: - content = "\n".join(metrics["preds"]) - generations_file.open("w+").write(content) - - @rank_zero_only - def on_train_start(self, trainer, pl_module): - try: - npars = pl_module.model.model.num_parameters() - except AttributeError: - npars = pl_module.model.num_parameters() - - n_trainable_pars = count_trainable_parameters(pl_module) - # mp stands for million parameters - trainer.logger.log_metrics({"n_params": npars, "mp": npars / 1e6, "grad_mp": n_trainable_pars / 1e6}) - - @rank_zero_only - def on_test_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule): - save_json(pl_module.metrics, pl_module.metrics_save_path) - return self._write_logs(trainer, pl_module, "test") - - @rank_zero_only - def on_validation_end(self, trainer: pl.Trainer, pl_module): - save_json(pl_module.metrics, pl_module.metrics_save_path) - # Uncommenting this will save val generations - # return self._write_logs(trainer, pl_module, "valid") - - -def get_checkpoint_callback(output_dir, metric, save_top_k=1, lower_is_better=False): - """Saves the best model by validation ROUGE2 score.""" - if metric == "rouge2": - exp = "{val_avg_rouge2:.4f}-{step_count}" - elif metric == "bleu": - exp = "{val_avg_bleu:.4f}-{step_count}" - elif metric == "loss": - exp = "{val_avg_loss:.4f}-{step_count}" - else: - raise NotImplementedError( - f"seq2seq callbacks only support rouge2, bleu and loss, got {metric}, You can make your own by adding to" - " this function." - ) - - checkpoint_callback = ModelCheckpoint( - dirpath=output_dir, - filename=exp, - monitor=f"val_{metric}", - mode="min" if "loss" in metric else "max", - save_top_k=save_top_k, - ) - return checkpoint_callback - - -def get_early_stopping_callback(metric, patience): - return EarlyStopping( - monitor=f"val_{metric}", # does this need avg? - mode="min" if "loss" in metric else "max", - patience=patience, - verbose=True, - ) diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/PSDraw.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/PSDraw.py deleted file mode 100644 index 13b3048f67e18ac58170c3a1bd25cb18d66b30fe..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/PIL/PSDraw.py +++ /dev/null @@ -1,229 +0,0 @@ -# -# The Python Imaging Library -# $Id$ -# -# Simple PostScript graphics interface -# -# History: -# 1996-04-20 fl Created -# 1999-01-10 fl Added gsave/grestore to image method -# 2005-05-04 fl Fixed floating point issue in image (from Eric Etheridge) -# -# Copyright (c) 1997-2005 by Secret Labs AB. All rights reserved. -# Copyright (c) 1996 by Fredrik Lundh. -# -# See the README file for information on usage and redistribution. -# - -import sys - -from . import EpsImagePlugin - -## -# Simple PostScript graphics interface. - - -class PSDraw: - """ - Sets up printing to the given file. If ``fp`` is omitted, - ``sys.stdout.buffer`` or ``sys.stdout`` is assumed. 
- """ - - def __init__(self, fp=None): - if not fp: - try: - fp = sys.stdout.buffer - except AttributeError: - fp = sys.stdout - self.fp = fp - - def begin_document(self, id=None): - """Set up printing of a document. (Write PostScript DSC header.)""" - # FIXME: incomplete - self.fp.write( - b"%!PS-Adobe-3.0\n" - b"save\n" - b"/showpage { } def\n" - b"%%EndComments\n" - b"%%BeginDocument\n" - ) - # self.fp.write(ERROR_PS) # debugging! - self.fp.write(EDROFF_PS) - self.fp.write(VDI_PS) - self.fp.write(b"%%EndProlog\n") - self.isofont = {} - - def end_document(self): - """Ends printing. (Write PostScript DSC footer.)""" - self.fp.write(b"%%EndDocument\nrestore showpage\n%%End\n") - if hasattr(self.fp, "flush"): - self.fp.flush() - - def setfont(self, font, size): - """ - Selects which font to use. - - :param font: A PostScript font name - :param size: Size in points. - """ - font = bytes(font, "UTF-8") - if font not in self.isofont: - # reencode font - self.fp.write(b"/PSDraw-%s ISOLatin1Encoding /%s E\n" % (font, font)) - self.isofont[font] = 1 - # rough - self.fp.write(b"/F0 %d /PSDraw-%s F\n" % (size, font)) - - def line(self, xy0, xy1): - """ - Draws a line between the two points. Coordinates are given in - PostScript point coordinates (72 points per inch, (0, 0) is the lower - left corner of the page). - """ - self.fp.write(b"%d %d %d %d Vl\n" % (*xy0, *xy1)) - - def rectangle(self, box): - """ - Draws a rectangle. - - :param box: A tuple of four integers, specifying left, bottom, width and - height. - """ - self.fp.write(b"%d %d M 0 %d %d Vr\n" % box) - - def text(self, xy, text): - """ - Draws text at the given position. You must use - :py:meth:`~PIL.PSDraw.PSDraw.setfont` before calling this method. - """ - text = bytes(text, "UTF-8") - text = b"\\(".join(text.split(b"(")) - text = b"\\)".join(text.split(b")")) - xy += (text,) - self.fp.write(b"%d %d M (%s) S\n" % xy) - - def image(self, box, im, dpi=None): - """Draw a PIL image, centered in the given box.""" - # default resolution depends on mode - if not dpi: - if im.mode == "1": - dpi = 200 # fax - else: - dpi = 100 # greyscale - # image size (on paper) - x = im.size[0] * 72 / dpi - y = im.size[1] * 72 / dpi - # max allowed size - xmax = float(box[2] - box[0]) - ymax = float(box[3] - box[1]) - if x > xmax: - y = y * xmax / x - x = xmax - if y > ymax: - x = x * ymax / y - y = ymax - dx = (xmax - x) / 2 + box[0] - dy = (ymax - y) / 2 + box[1] - self.fp.write(b"gsave\n%f %f translate\n" % (dx, dy)) - if (x, y) != im.size: - # EpsImagePlugin._save prints the image at (0,0,xsize,ysize) - sx = x / im.size[0] - sy = y / im.size[1] - self.fp.write(b"%f %f scale\n" % (sx, sy)) - EpsImagePlugin._save(im, self.fp, None, 0) - self.fp.write(b"\ngrestore\n") - - -# -------------------------------------------------------------------- -# PostScript driver - -# -# EDROFF.PS -- PostScript driver for Edroff 2 -# -# History: -# 94-01-25 fl: created (edroff 2.04) -# -# Copyright (c) Fredrik Lundh 1994. 
-# - - -EDROFF_PS = b"""\ -/S { show } bind def -/P { moveto show } bind def -/M { moveto } bind def -/X { 0 rmoveto } bind def -/Y { 0 exch rmoveto } bind def -/E { findfont - dup maxlength dict begin - { - 1 index /FID ne { def } { pop pop } ifelse - } forall - /Encoding exch def - dup /FontName exch def - currentdict end definefont pop -} bind def -/F { findfont exch scalefont dup setfont - [ exch /setfont cvx ] cvx bind def -} bind def -""" - -# -# VDI.PS -- PostScript driver for VDI meta commands -# -# History: -# 94-01-25 fl: created (edroff 2.04) -# -# Copyright (c) Fredrik Lundh 1994. -# - -VDI_PS = b"""\ -/Vm { moveto } bind def -/Va { newpath arcn stroke } bind def -/Vl { moveto lineto stroke } bind def -/Vc { newpath 0 360 arc closepath } bind def -/Vr { exch dup 0 rlineto - exch dup 0 exch rlineto - exch neg 0 rlineto - 0 exch neg rlineto - setgray fill } bind def -/Tm matrix def -/Ve { Tm currentmatrix pop - translate scale newpath 0 0 .5 0 360 arc closepath - Tm setmatrix -} bind def -/Vf { currentgray exch setgray fill setgray } bind def -""" - -# -# ERROR.PS -- Error handler -# -# History: -# 89-11-21 fl: created (pslist 1.10) -# - -ERROR_PS = b"""\ -/landscape false def -/errorBUF 200 string def -/errorNL { currentpoint 10 sub exch pop 72 exch moveto } def -errordict begin /handleerror { - initmatrix /Courier findfont 10 scalefont setfont - newpath 72 720 moveto $error begin /newerror false def - (PostScript Error) show errorNL errorNL - (Error: ) show - /errorname load errorBUF cvs show errorNL errorNL - (Command: ) show - /command load dup type /stringtype ne { errorBUF cvs } if show - errorNL errorNL - (VMstatus: ) show - vmstatus errorBUF cvs show ( bytes available, ) show - errorBUF cvs show ( bytes used at level ) show - errorBUF cvs show errorNL errorNL - (Operand stargck: ) show errorNL /ostargck load { - dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL - } forall errorNL - (Execution stargck: ) show errorNL /estargck load { - dup type /stringtype ne { errorBUF cvs } if 72 0 rmoveto show errorNL - } forall - end showpage -} def end -""" diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/extra_wrappers.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/extra_wrappers.py deleted file mode 100644 index deff3c899552774ed6feb42c71c7abce673beb0d..0000000000000000000000000000000000000000 --- a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/faiss/extra_wrappers.py +++ /dev/null @@ -1,490 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -# @nolint - -# not linting this file because it imports * from swigfaiss, which -# causes a ton of useless warnings. 
- -import numpy as np - -from faiss.loader import * - -import faiss - -########################################### -# Wrapper for a few functions -########################################### - - -def kmin(array, k): - """return k smallest values (and their indices) of the lines of a - float32 array""" - array = np.ascontiguousarray(array, dtype='float32') - m, n = array.shape - I = np.zeros((m, k), dtype='int64') - D = np.zeros((m, k), dtype='float32') - ha = faiss.float_maxheap_array_t() - ha.ids = swig_ptr(I) - ha.val = swig_ptr(D) - ha.nh = m - ha.k = k - ha.heapify() - ha.addn(n, swig_ptr(array)) - ha.reorder() - return D, I - - -def kmax(array, k): - """return k largest values (and their indices) of the lines of a - float32 array""" - array = np.ascontiguousarray(array, dtype='float32') - m, n = array.shape - I = np.zeros((m, k), dtype='int64') - D = np.zeros((m, k), dtype='float32') - ha = faiss.float_minheap_array_t() - ha.ids = swig_ptr(I) - ha.val = swig_ptr(D) - ha.nh = m - ha.k = k - ha.heapify() - ha.addn(n, swig_ptr(array)) - ha.reorder() - return D, I - - -def pairwise_distances(xq, xb, metric=METRIC_L2, metric_arg=0): - """compute the whole pairwise distance matrix between two sets of - vectors""" - xq = np.ascontiguousarray(xq, dtype='float32') - xb = np.ascontiguousarray(xb, dtype='float32') - nq, d = xq.shape - nb, d2 = xb.shape - assert d == d2 - dis = np.empty((nq, nb), dtype='float32') - if metric == METRIC_L2: - pairwise_L2sqr( - d, nq, swig_ptr(xq), - nb, swig_ptr(xb), - swig_ptr(dis)) - elif metric == METRIC_INNER_PRODUCT: - dis[:] = xq @ xb.T - else: - pairwise_extra_distances( - d, nq, swig_ptr(xq), - nb, swig_ptr(xb), - metric, metric_arg, - swig_ptr(dis)) - return dis - - -def rand(n, seed=12345): - res = np.empty(n, dtype='float32') - float_rand(swig_ptr(res), res.size, seed) - return res - - -def randint(n, seed=12345, vmax=None): - res = np.empty(n, dtype='int64') - if vmax is None: - int64_rand(swig_ptr(res), res.size, seed) - else: - int64_rand_max(swig_ptr(res), res.size, vmax, seed) - return res - - -lrand = randint - - -def randn(n, seed=12345): - res = np.empty(n, dtype='float32') - float_randn(swig_ptr(res), res.size, seed) - return res - - -def checksum(a): - """ compute a checksum for quick-and-dirty comparisons of arrays """ - a = a.view('uint8') - n = a.size - n4 = n & ~3 - cs = ivec_checksum(int(n4 / 4), swig_ptr(a[:n4].view('int32'))) - for i in range(n4, n): - cs += x[i] * 33657 - return cs - - -rand_smooth_vectors_c = rand_smooth_vectors - - -def rand_smooth_vectors(n, d, seed=1234): - res = np.empty((n, d), dtype='float32') - rand_smooth_vectors_c(n, d, swig_ptr(res), seed) - return res - - -def eval_intersection(I1, I2): - """ size of intersection between each line of two result tables""" - I1 = np.ascontiguousarray(I1, dtype='int64') - I2 = np.ascontiguousarray(I2, dtype='int64') - n = I1.shape[0] - assert I2.shape[0] == n - k1, k2 = I1.shape[1], I2.shape[1] - ninter = 0 - for i in range(n): - ninter += ranklist_intersection_size( - k1, swig_ptr(I1[i]), k2, swig_ptr(I2[i])) - return ninter - - -def normalize_L2(x): - fvec_renorm_L2(x.shape[1], x.shape[0], swig_ptr(x)) - -bucket_sort_c = bucket_sort - -def bucket_sort(tab, nbucket=None, nt=0): - """Perform a bucket sort on a table of integers. 
- - Parameters - ---------- - tab : array_like - elements to sort, max value nbucket - 1 - nbucket : integer - number of buckets, None if unknown - nt : integer - number of threads to use (0 = use unthreaded codepath) - - Returns - ------- - lims : array_like - cumulative sum of bucket sizes (size vmax + 1) - perm : array_like - perm[lims[i] : lims[i + 1]] contains the indices of bucket #i (size tab.size) - """ - tab = np.ascontiguousarray(tab, dtype="int64") - if nbucket is None: - nbucket = int(tab.max() + 1) - lims = np.empty(nbucket + 1, dtype='int64') - perm = np.empty(tab.size, dtype='int64') - bucket_sort_c( - tab.size, faiss.swig_ptr(tab.view('uint64')), - nbucket, faiss.swig_ptr(lims), faiss.swig_ptr(perm), - nt - ) - return lims, perm - -matrix_bucket_sort_inplace_c = matrix_bucket_sort_inplace - -def matrix_bucket_sort_inplace(tab, nbucket=None, nt=0): - """Perform a bucket sort on a matrix, recording the original - row of each element. - - Parameters - ---------- - tab : array_like - array of size (N, ncol) that contains the bucket ids, maximum - value nbucket - 1. - On output, it the elements are shuffled such that the flat array - tab.ravel()[lims[i] : lims[i + 1]] contains the row numbers - of each bucket entry. - nbucket : integer - number of buckets (the maximum value in tab should be nbucket - 1) - nt : integer - number of threads to use (0 = use unthreaded codepath) - - Returns - ------- - lims : array_like - cumulative sum of bucket sizes (size vmax + 1) - """ - assert tab.dtype == 'int32' or tab.dtype == 'int64' - nrow, ncol = tab.shape - if nbucket is None: - nbucket = int(tab.max() + 1) - lims = np.empty(nbucket + 1, dtype='int64') - matrix_bucket_sort_inplace_c( - nrow, ncol, faiss.swig_ptr(tab), - nbucket, faiss.swig_ptr(lims), - nt - ) - return lims - - -########################################### -# ResultHeap -########################################### - -class ResultHeap: - """Accumulate query results from a sliced dataset. The final result will - be in self.D, self.I.""" - - def __init__(self, nq, k, keep_max=False): - """ - nq: number of query vectors, - k: number of results per query - keep_max: keep the top-k maximum values instead of the minima - """ - self.I = np.zeros((nq, k), dtype='int64') - self.D = np.zeros((nq, k), dtype='float32') - self.nq, self.k = nq, k - if keep_max: - heaps = float_minheap_array_t() - else: - heaps = float_maxheap_array_t() - heaps.k = k - heaps.nh = nq - heaps.val = swig_ptr(self.D) - heaps.ids = swig_ptr(self.I) - heaps.heapify() - self.heaps = heaps - - def add_result(self, D, I): - """ - Add results for all heaps - D, I should be of size (nh, nres) - D, I do not need to be in a particular order (heap or sorted) - """ - nq, kd = D.shape - D = np.ascontiguousarray(D, dtype='float32') - I = np.ascontiguousarray(I, dtype='int64') - assert I.shape == (nq, kd) - assert nq == self.nq - self.heaps.addn_with_ids( - kd, swig_ptr(D), - swig_ptr(I), kd) - - def add_result_subset(self, subset, D, I): - """ - Add results for a subset of heaps. 
- D, I should hold resutls for all the subset - as a special case, if I is 1D, then all ids are assumed to be the same - """ - nsubset, kd = D.shape - assert nsubset == len(subset) - assert ( - I.ndim == 2 and D.shape == I.shape or - I.ndim == 1 and I.shape == (kd, ) - ) - D = np.ascontiguousarray(D, dtype='float32') - I = np.ascontiguousarray(I, dtype='int64') - subset = np.ascontiguousarray(subset, dtype='int64') - id_stride = 0 if I.ndim == 1 else kd - self.heaps.addn_query_subset_with_ids( - nsubset, swig_ptr(subset), - kd, swig_ptr(D), swig_ptr(I), id_stride - ) - - def finalize(self): - self.heaps.reorder() - - -def merge_knn_results(Dall, Iall, keep_max=False): - """ - Merge a set of sorted knn-results obtained from different shards in a dataset - Dall and Iall are of size (nshard, nq, k) each D[i, j] should be sorted - returns D, I of size (nq, k) as the merged result set - """ - assert Iall.shape == Dall.shape - nshard, n, k = Dall.shape - Dnew = np.empty((n, k), dtype=Dall.dtype) - Inew = np.empty((n, k), dtype=Iall.dtype) - func = merge_knn_results_CMax if keep_max else merge_knn_results_CMin - func( - n, k, nshard, - swig_ptr(Dall), swig_ptr(Iall), - swig_ptr(Dnew), swig_ptr(Inew) - ) - return Dnew, Inew - -###################################################### -# KNN function -###################################################### - -def knn(xq, xb, k, metric=METRIC_L2): - """ - Compute the k nearest neighbors of a vector without constructing an index - - - Parameters - ---------- - xq : array_like - Query vectors, shape (nq, d) where d is appropriate for the index. - `dtype` must be float32. - xb : array_like - Database vectors, shape (nb, d) where d is appropriate for the index. - `dtype` must be float32. - k : int - Number of nearest neighbors. - distance_type : MetricType, optional - distance measure to use (either METRIC_L2 or METRIC_INNER_PRODUCT) - - Returns - ------- - D : array_like - Distances of the nearest neighbors, shape (nq, k) - I : array_like - Labels of the nearest neighbors, shape (nq, k) - """ - xq = np.ascontiguousarray(xq, dtype='float32') - xb = np.ascontiguousarray(xb, dtype='float32') - nq, d = xq.shape - nb, d2 = xb.shape - assert d == d2 - - I = np.empty((nq, k), dtype='int64') - D = np.empty((nq, k), dtype='float32') - - if metric == METRIC_L2: - knn_L2sqr( - swig_ptr(xq), swig_ptr(xb), - d, nq, nb, k, swig_ptr(D), swig_ptr(I) - ) - elif metric == METRIC_INNER_PRODUCT: - knn_inner_product( - swig_ptr(xq), swig_ptr(xb), - d, nq, nb, k, swig_ptr(D), swig_ptr(I) - ) - else: - raise NotImplementedError("only L2 and INNER_PRODUCT are supported") - return D, I - - -########################################### -# Kmeans object -########################################### - - -class Kmeans: - """Object that performs k-means clustering and manages the centroids. - The `Kmeans` class is essentially a wrapper around the C++ `Clustering` object. - - Parameters - ---------- - d : int - dimension of the vectors to cluster - k : int - number of clusters - gpu: bool or int, optional - False: don't use GPU - True: use all GPUs - number: use this many GPUs - progressive_dim_steps: - use a progressive dimension clustering (with that number of steps) - - Subsequent parameters are fields of the Clustring object. The most important are: - - niter: int, optional - clustering iterations - nredo: int, optional - redo clustering this many times and keep best - verbose: bool, optional - spherical: bool, optional - do we want normalized centroids? 
- int_centroids: bool, optional - round centroids coordinates to integer - seed: int, optional - seed for the random number generator - - """ - - def __init__(self, d, k, **kwargs): - """d: input dimension, k: nb of centroids. Additional - parameters are passed on the ClusteringParameters object, - including niter=25, verbose=False, spherical = False - """ - self.d = d - self.k = k - self.gpu = False - if "progressive_dim_steps" in kwargs: - self.cp = ProgressiveDimClusteringParameters() - else: - self.cp = ClusteringParameters() - for k, v in kwargs.items(): - if k == 'gpu': - if v == True or v == -1: - v = get_num_gpus() - self.gpu = v - else: - # if this raises an exception, it means that it is a non-existent field - getattr(self.cp, k) - setattr(self.cp, k, v) - self.centroids = None - - def train(self, x, weights=None, init_centroids=None): - """ Perform k-means clustering. - On output of the function call: - - - the centroids are in the centroids field of size (`k`, `d`). - - - the objective value at each iteration is in the array obj (size `niter`) - - - detailed optimization statistics are in the array iteration_stats. - - Parameters - ---------- - x : array_like - Training vectors, shape (n, d), `dtype` must be float32 and n should - be larger than the number of clusters `k`. - weights : array_like - weight associated to each vector, shape `n` - init_centroids : array_like - initial set of centroids, shape (n, d) - - Returns - ------- - final_obj: float - final optimization objective - - """ - x = np.ascontiguousarray(x, dtype='float32') - n, d = x.shape - assert d == self.d - - if self.cp.__class__ == ClusteringParameters: - # regular clustering - clus = Clustering(d, self.k, self.cp) - if init_centroids is not None: - nc, d2 = init_centroids.shape - assert d2 == d - faiss.copy_array_to_vector(init_centroids.ravel(), clus.centroids) - if self.cp.spherical: - self.index = IndexFlatIP(d) - else: - self.index = IndexFlatL2(d) - if self.gpu: - self.index = faiss.index_cpu_to_all_gpus(self.index, ngpu=self.gpu) - clus.train(x, self.index, weights) - else: - # not supported for progressive dim - assert weights is None - assert init_centroids is None - assert not self.cp.spherical - clus = ProgressiveDimClustering(d, self.k, self.cp) - if self.gpu: - fac = GpuProgressiveDimIndexFactory(ngpu=self.gpu) - else: - fac = ProgressiveDimIndexFactory() - clus.train(n, swig_ptr(x), fac) - - centroids = faiss.vector_float_to_array(clus.centroids) - - self.centroids = centroids.reshape(self.k, d) - stats = clus.iteration_stats - stats = [stats.at(i) for i in range(stats.size())] - self.obj = np.array([st.obj for st in stats]) - # copy all the iteration_stats objects to a python array - stat_fields = 'obj time time_search imbalance_factor nsplit'.split() - self.iteration_stats = [ - {field: getattr(st, field) for field in stat_fields} - for st in stats - ] - return self.obj[-1] if self.obj.size > 0 else 0.0 - - def assign(self, x): - x = np.ascontiguousarray(x, dtype='float32') - assert self.centroids is not None, "should train before assigning" - self.index.reset() - self.index.add(self.centroids) - D, I = self.index.search(x, 1) - return D.ravel(), I.ravel() diff --git a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/filetype/__main__.py b/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/filetype/__main__.py deleted file mode 100644 index 7df5c4fc676d62ee23b99c3a3122543400a7e17e..0000000000000000000000000000000000000000 --- 
a/spaces/chuan-hd/law-assistant-chatbot/.venv/lib/python3.11/site-packages/filetype/__main__.py +++ /dev/null @@ -1,37 +0,0 @@ -import sys - -import filetype - - -def guess(path): - kind = filetype.guess(path) - if kind is None: - print('{}: File type determination failure.'.format(path)) - else: - print('{}: {} ({})'.format(path, kind.extension, kind.mime)) - - -def main(): - import argparse - - parser = argparse.ArgumentParser( - prog='filetype', description='Determine type of FILEs.' - ) - parser.add_argument('-f', '--file', nargs='+') - parser.add_argument( - '-v', '--version', action='version', - version='%(prog)s ' + filetype.version, - help='output version information and exit' - ) - - args = parser.parse_args() - if len(sys.argv) < 2: - parser.print_help() - sys.exit(1) - - for i in args.file: - guess(i) - - -if __name__ == '__main__': - main() diff --git a/spaces/cihyFjudo/fairness-paper-search/Comportamiento Organizacional Stephen Robbins 7 Edicion Pdf 29 Un Anlisis Crtico de las Conceptos Controversias y Aplicaciones.md b/spaces/cihyFjudo/fairness-paper-search/Comportamiento Organizacional Stephen Robbins 7 Edicion Pdf 29 Un Anlisis Crtico de las Conceptos Controversias y Aplicaciones.md deleted file mode 100644 index 51f9abf2786abc95a39e266eb821a7115a82b263..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Comportamiento Organizacional Stephen Robbins 7 Edicion Pdf 29 Un Anlisis Crtico de las Conceptos Controversias y Aplicaciones.md +++ /dev/null @@ -1,6 +0,0 @@ -

    one missed call tamil dubbed movie free download


    DOWNLOAD ===> https://tinurli.com/2uwilo



    -
    -
    -

    diff --git a/spaces/cihyFjudo/fairness-paper-search/Nelly - M.O. (2013).torrent Enjoy the hit songs Get Like Me and Hey Porsche from this album..md b/spaces/cihyFjudo/fairness-paper-search/Nelly - M.O. (2013).torrent Enjoy the hit songs Get Like Me and Hey Porsche from this album..md deleted file mode 100644 index 2466a60073510c84505440f6319d61ff586bc840..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Nelly - M.O. (2013).torrent Enjoy the hit songs Get Like Me and Hey Porsche from this album..md +++ /dev/null @@ -1,6 +0,0 @@ -

    Nelly - M.O. (2013).torrent


    Download File ⚙⚙⚙ https://tinurli.com/2uwiw0



    -
    -
    -
    -

    diff --git a/spaces/cihyFjudo/fairness-paper-search/Office 365 Serial Key Generator The Ultimate Guide to Activate Your Product.md b/spaces/cihyFjudo/fairness-paper-search/Office 365 Serial Key Generator The Ultimate Guide to Activate Your Product.md deleted file mode 100644 index 2ab84ce9d9f71ae0ae97a0232afe2b1e9f7cb8e1..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/Office 365 Serial Key Generator The Ultimate Guide to Activate Your Product.md +++ /dev/null @@ -1,18 +0,0 @@ - -

    Tag: Microsoft Office 365 Product Key Free 2021, Microsoft office 365 product key activation free, Microsoft office 365 product key generator, Microsoft office 365 product key free, Microsoft office 365 license key

    -

    Office 365 Serial Key Generator


    DOWNLOAD ✯✯✯ https://tinurli.com/2uwjpY



    -

    Here we have arrived at the last destination, where we will have shared 100% legal Microsoft Office 365 product keys. Activating your product will allow you to use it absolutely free for the rest of your life. Before publishing here, I personally verified each serial and there are no problems.

    -

    The new version of Office 365 Product Key Generator [2023] is the latest and most recent version of office released. It was modified to be user-friendly and has all the features that users could possibly need. The application includes everything a user could possibly need. It is a complete package for customers. The office is a great choice if you need a desktop or laptop application.

    -

    Microsoft Office 365 Product Key Generator used for activation of Microsoft Office product full version free. Microsoft Office is the complete product developed by Microsoft Corporation. Microsoft Office 365 Product Key is a complete all-in-one package of tools that support making the office full version to use its all features easily and freely.

    -

    -

    Up to 5 household members can use office 365 Home with just one subscription. On subscription, each person will get 60 skype minutes monthly with a space of 1TB in the cloud storage. This office 365 will help you do things more easily anywhere on any device you want to use.

    -

    If you have installed Office from Microsoft, then this step is necessary to continue the office 365 activation. But if you have installed it from ISO Volume or a DVD, It is optional. Run this command by copying and pasting into Command Prompt and hitting Enter.

    -

    Office 365 Home Premium Cracked can be activated by using a valid serial number or an activation key. That you can get from Microsoft itself while downloading the office suite setup on your PC or device. If you do not have it then you can also activate your office 365 via the product keys given below.

    -

    Related Keywords: free Microsoft office 365 product key crack, free Microsoft office 365 product key for iPad, get Microsoft office 365 product key, Microsoft action pack office 365 product key Microsoft office 2013 & office 365 product keys, download Microsoft office 365, install Microsoft office 365 64 bit product key.

    -

    Microsoft Office 365 Product Key Cracked is the subscription service that gives the most up-to-date modern tools from Microsoft. It is the necessary tool you want for your medium or large-scale business. The tools include Word, Excel, PowerPoint, SharePoint, OneNote, OneDrive, and other helping tools. also, One more good thing, Office 365 Cracked v2302 latest is fully compatible with Windows 11 all features and MacOS Monterey. This service is available on every platform. Also, it has suitable plans for home and personal use and now Office 365 is getting more popular than other office versions.

    -

    A complete all-in-one package for small businesses, enterprises, schools, or non-profit organizations. This is the best ever office tool for everyone. Moreover, you can work from any device which you want, the same application runs on multiple platforms. The new version has an office 365 planner which is a project management tool that helps in creating plans, assigning tasks and sharing files.

    -

    Now we compare Office 365 with Office 2023 (the latest version). Here Office Office 365 v2302 is a one-time purchase available for both Windows and Mac operating systems. As it is a one-time purchase, which means for every new update, you will have to buy the upgrade version at a full price. With Office 365, you only pay one time and will get all the useful office apps without paying extra money.

    -

    Using Office 365, you will get the latest features, new tools, and security updates. You will always get bug-free office 365 tools but in office 2022, it is difficult to identify the errors nor it provides updates and new tools. Office 2022 has been used for a single Pc or Mac, but office 365 v2302 (Build 16026.20146) gives the freedom to the user to use up to five different devices. Moreover, you can share 365 subscriptions with up to five other people.

    -

    Step 2. View Applications Registry Keys on the right panel. EaseUS Key Finder will show all keys of installed software. Copy the targeted serial numbers. Also, you can click "Print" or "Save".

    -
    -
    \ No newline at end of file diff --git a/spaces/cihyFjudo/fairness-paper-search/The theory of music in arabic writings pdf download discover the secrets of harmony rhythm melody and modes.md b/spaces/cihyFjudo/fairness-paper-search/The theory of music in arabic writings pdf download discover the secrets of harmony rhythm melody and modes.md deleted file mode 100644 index 56a0678e955d5132bb118ce85dab37cf2403dafd..0000000000000000000000000000000000000000 --- a/spaces/cihyFjudo/fairness-paper-search/The theory of music in arabic writings pdf download discover the secrets of harmony rhythm melody and modes.md +++ /dev/null @@ -1,5 +0,0 @@ - -

    1000+ Irish tin whistle (penny whistle) tabs and notes with the sheet music. Learn and play traditional Irish session tunes such as jigs, reels and polkas, all the way to popular songs. Free PDF download available for all tunes & songs.

    -

    the theory of music in arabic writings pdf download


    Download Filehttps://tinurli.com/2uwjzf



    -
    -
    \ No newline at end of file diff --git a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/cffi/pkgconfig.py b/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/cffi/pkgconfig.py deleted file mode 100644 index 5c93f15a60e6f904b2dd108d6e22044a5890bcb4..0000000000000000000000000000000000000000 --- a/spaces/cloudtheboi/Lofi4All/.pythonlibs/lib/python3.10/site-packages/cffi/pkgconfig.py +++ /dev/null @@ -1,121 +0,0 @@ -# pkg-config, https://www.freedesktop.org/wiki/Software/pkg-config/ integration for cffi -import sys, os, subprocess - -from .error import PkgConfigError - - -def merge_flags(cfg1, cfg2): - """Merge values from cffi config flags cfg2 to cf1 - - Example: - merge_flags({"libraries": ["one"]}, {"libraries": ["two"]}) - {"libraries": ["one", "two"]} - """ - for key, value in cfg2.items(): - if key not in cfg1: - cfg1[key] = value - else: - if not isinstance(cfg1[key], list): - raise TypeError("cfg1[%r] should be a list of strings" % (key,)) - if not isinstance(value, list): - raise TypeError("cfg2[%r] should be a list of strings" % (key,)) - cfg1[key].extend(value) - return cfg1 - - -def call(libname, flag, encoding=sys.getfilesystemencoding()): - """Calls pkg-config and returns the output if found - """ - a = ["pkg-config", "--print-errors"] - a.append(flag) - a.append(libname) - try: - pc = subprocess.Popen(a, stdout=subprocess.PIPE, stderr=subprocess.PIPE) - except EnvironmentError as e: - raise PkgConfigError("cannot run pkg-config: %s" % (str(e).strip(),)) - - bout, berr = pc.communicate() - if pc.returncode != 0: - try: - berr = berr.decode(encoding) - except Exception: - pass - raise PkgConfigError(berr.strip()) - - if sys.version_info >= (3,) and not isinstance(bout, str): # Python 3.x - try: - bout = bout.decode(encoding) - except UnicodeDecodeError: - raise PkgConfigError("pkg-config %s %s returned bytes that cannot " - "be decoded with encoding %r:\n%r" % - (flag, libname, encoding, bout)) - - if os.altsep != '\\' and '\\' in bout: - raise PkgConfigError("pkg-config %s %s returned an unsupported " - "backslash-escaped output:\n%r" % - (flag, libname, bout)) - return bout - - -def flags_from_pkgconfig(libs): - r"""Return compiler line flags for FFI.set_source based on pkg-config output - - Usage - ... - ffibuilder.set_source("_foo", pkgconfig = ["libfoo", "libbar >= 1.8.3"]) - - If pkg-config is installed on build machine, then arguments include_dirs, - library_dirs, libraries, define_macros, extra_compile_args and - extra_link_args are extended with an output of pkg-config for libfoo and - libbar. - - Raises PkgConfigError in case the pkg-config call fails. 
- """ - - def get_include_dirs(string): - return [x[2:] for x in string.split() if x.startswith("-I")] - - def get_library_dirs(string): - return [x[2:] for x in string.split() if x.startswith("-L")] - - def get_libraries(string): - return [x[2:] for x in string.split() if x.startswith("-l")] - - # convert -Dfoo=bar to list of tuples [("foo", "bar")] expected by distutils - def get_macros(string): - def _macro(x): - x = x[2:] # drop "-D" - if '=' in x: - return tuple(x.split("=", 1)) # "-Dfoo=bar" => ("foo", "bar") - else: - return (x, None) # "-Dfoo" => ("foo", None) - return [_macro(x) for x in string.split() if x.startswith("-D")] - - def get_other_cflags(string): - return [x for x in string.split() if not x.startswith("-I") and - not x.startswith("-D")] - - def get_other_libs(string): - return [x for x in string.split() if not x.startswith("-L") and - not x.startswith("-l")] - - # return kwargs for given libname - def kwargs(libname): - fse = sys.getfilesystemencoding() - all_cflags = call(libname, "--cflags") - all_libs = call(libname, "--libs") - return { - "include_dirs": get_include_dirs(all_cflags), - "library_dirs": get_library_dirs(all_libs), - "libraries": get_libraries(all_libs), - "define_macros": get_macros(all_cflags), - "extra_compile_args": get_other_cflags(all_cflags), - "extra_link_args": get_other_libs(all_libs), - } - - # merge all arguments together - ret = {} - for libname in libs: - lib_flags = kwargs(libname) - merge_flags(ret, lib_flags) - return ret diff --git a/spaces/codeparrot/incoder-subspace/README.md b/spaces/codeparrot/incoder-subspace/README.md deleted file mode 100644 index 5d8f2663708a6fb4c2b27b28bba6246eb83d4c42..0000000000000000000000000000000000000000 --- a/spaces/codeparrot/incoder-subspace/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Incoder Subspace -emoji: 🌖 -colorFrom: blue -colorTo: red -sdk: gradio -sdk_version: 3.21.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/colakin/video-generater/public/ffmpeg/compat/float/float.h b/spaces/colakin/video-generater/public/ffmpeg/compat/float/float.h deleted file mode 100644 index 1f0d3ab4b507da3094c9658ee66f96242c8c0d76..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/compat/float/float.h +++ /dev/null @@ -1,35 +0,0 @@ -/* - * Work around broken floating point limits on some systems. - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. 
- * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include_next - -#ifdef FLT_MAX -#undef FLT_MAX -#define FLT_MAX 3.40282346638528859812e+38F - -#undef FLT_MIN -#define FLT_MIN 1.17549435082228750797e-38F - -#undef DBL_MAX -#define DBL_MAX ((double)1.79769313486231570815e+308L) - -#undef DBL_MIN -#define DBL_MIN ((double)2.22507385850720138309e-308L) -#endif diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/h264pred_init_arm.c b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/h264pred_init_arm.c deleted file mode 100644 index cc324d7dcac283e57cae6a65406e255cd8e36ded..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/arm/h264pred_init_arm.c +++ /dev/null @@ -1,95 +0,0 @@ -/* - * Copyright (c) 2009 Mans Rullgard - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -#include - -#include "libavutil/attributes.h" -#include "libavutil/arm/cpu.h" -#include "libavcodec/avcodec.h" -#include "libavcodec/h264pred.h" - -void ff_pred16x16_vert_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred16x16_hor_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred16x16_plane_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred16x16_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred16x16_128_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred16x16_left_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred16x16_top_dc_neon(uint8_t *src, ptrdiff_t stride); - -void ff_pred8x8_vert_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_hor_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_plane_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_128_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_left_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_top_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_l0t_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_0lt_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_l00_dc_neon(uint8_t *src, ptrdiff_t stride); -void ff_pred8x8_0l0_dc_neon(uint8_t *src, ptrdiff_t stride); - -static av_cold void h264_pred_init_neon(H264PredContext *h, int codec_id, - const int bit_depth, - const int chroma_format_idc) -{ -#if HAVE_NEON - const int high_depth = bit_depth > 8; - - if (high_depth) - return; - - if (chroma_format_idc <= 1) { - h->pred8x8[VERT_PRED8x8 ] = ff_pred8x8_vert_neon; - h->pred8x8[HOR_PRED8x8 ] = ff_pred8x8_hor_neon; - if (codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) - h->pred8x8[PLANE_PRED8x8] = ff_pred8x8_plane_neon; - h->pred8x8[DC_128_PRED8x8 ] = 
ff_pred8x8_128_dc_neon; - if (codec_id != AV_CODEC_ID_RV40 && codec_id != AV_CODEC_ID_VP7 && - codec_id != AV_CODEC_ID_VP8) { - h->pred8x8[DC_PRED8x8 ] = ff_pred8x8_dc_neon; - h->pred8x8[LEFT_DC_PRED8x8] = ff_pred8x8_left_dc_neon; - h->pred8x8[TOP_DC_PRED8x8 ] = ff_pred8x8_top_dc_neon; - h->pred8x8[ALZHEIMER_DC_L0T_PRED8x8] = ff_pred8x8_l0t_dc_neon; - h->pred8x8[ALZHEIMER_DC_0LT_PRED8x8] = ff_pred8x8_0lt_dc_neon; - h->pred8x8[ALZHEIMER_DC_L00_PRED8x8] = ff_pred8x8_l00_dc_neon; - h->pred8x8[ALZHEIMER_DC_0L0_PRED8x8] = ff_pred8x8_0l0_dc_neon; - } - } - - h->pred16x16[DC_PRED8x8 ] = ff_pred16x16_dc_neon; - h->pred16x16[VERT_PRED8x8 ] = ff_pred16x16_vert_neon; - h->pred16x16[HOR_PRED8x8 ] = ff_pred16x16_hor_neon; - h->pred16x16[LEFT_DC_PRED8x8] = ff_pred16x16_left_dc_neon; - h->pred16x16[TOP_DC_PRED8x8 ] = ff_pred16x16_top_dc_neon; - h->pred16x16[DC_128_PRED8x8 ] = ff_pred16x16_128_dc_neon; - if (codec_id != AV_CODEC_ID_SVQ3 && codec_id != AV_CODEC_ID_RV40 && - codec_id != AV_CODEC_ID_VP7 && codec_id != AV_CODEC_ID_VP8) - h->pred16x16[PLANE_PRED8x8 ] = ff_pred16x16_plane_neon; -#endif // HAVE_NEON -} - -av_cold void ff_h264_pred_init_arm(H264PredContext *h, int codec_id, - int bit_depth, const int chroma_format_idc) -{ - int cpu_flags = av_get_cpu_flags(); - - if (have_neon(cpu_flags)) - h264_pred_init_neon(h, codec_id, bit_depth, chroma_format_idc); -} diff --git a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flac.h b/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flac.h deleted file mode 100644 index 00e631ed20156c568dfb703fb8362d049dcc0648..0000000000000000000000000000000000000000 --- a/spaces/colakin/video-generater/public/ffmpeg/libavcodec/flac.h +++ /dev/null @@ -1,75 +0,0 @@ -/* - * FLAC (Free Lossless Audio Codec) common stuff - * Copyright (c) 2008 Justin Ruggles - * - * This file is part of FFmpeg. - * - * FFmpeg is free software; you can redistribute it and/or - * modify it under the terms of the GNU Lesser General Public - * License as published by the Free Software Foundation; either - * version 2.1 of the License, or (at your option) any later version. - * - * FFmpeg is distributed in the hope that it will be useful, - * but WITHOUT ANY WARRANTY; without even the implied warranty of - * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU - * Lesser General Public License for more details. - * - * You should have received a copy of the GNU Lesser General Public - * License along with FFmpeg; if not, write to the Free Software - * Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA - */ - -/** - * @file - * FLAC (Free Lossless Audio Codec) common stuff - */ - -#ifndef AVCODEC_FLAC_H -#define AVCODEC_FLAC_H - -#include "libavutil/intreadwrite.h" - -#define FLAC_STREAMINFO_SIZE 34 -#define FLAC_MAX_CHANNELS 8 -#define FLAC_MIN_BLOCKSIZE 16 -#define FLAC_MAX_BLOCKSIZE 65535 -#define FLAC_MIN_FRAME_SIZE 10 - -enum { - FLAC_CHMODE_INDEPENDENT = 0, - FLAC_CHMODE_LEFT_SIDE = 1, - FLAC_CHMODE_RIGHT_SIDE = 2, - FLAC_CHMODE_MID_SIDE = 3, -}; - -enum { - FLAC_METADATA_TYPE_STREAMINFO = 0, - FLAC_METADATA_TYPE_PADDING, - FLAC_METADATA_TYPE_APPLICATION, - FLAC_METADATA_TYPE_SEEKTABLE, - FLAC_METADATA_TYPE_VORBIS_COMMENT, - FLAC_METADATA_TYPE_CUESHEET, - FLAC_METADATA_TYPE_PICTURE, - FLAC_METADATA_TYPE_INVALID = 127 -}; - -/** - * Parse the metadata block parameters from the header. 
- * @param[in] block_header header data, at least 4 bytes - * @param[out] last indicator for last metadata block - * @param[out] type metadata block type - * @param[out] size metadata block size - */ -static av_always_inline void flac_parse_block_header(const uint8_t *block_header, - int *last, int *type, int *size) -{ - int tmp = *block_header; - if (last) - *last = tmp & 0x80; - if (type) - *type = tmp & 0x7F; - if (size) - *size = AV_RB24(block_header + 1); -} - -#endif /* AVCODEC_FLAC_H */ diff --git a/spaces/congsaPfin/Manga-OCR/logs/Download mParivahan and Enjoy the Benefits of Virtual RCDL eChallan and More.md b/spaces/congsaPfin/Manga-OCR/logs/Download mParivahan and Enjoy the Benefits of Virtual RCDL eChallan and More.md deleted file mode 100644 index 4d04ac185d0e3f4b1c28655659ef8aa80a273746..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Download mParivahan and Enjoy the Benefits of Virtual RCDL eChallan and More.md +++ /dev/null @@ -1,161 +0,0 @@ - -

    Download mParivahan: A Complete Guide

    -

    If you are a vehicle owner or a driver in India, you might have heard of the mParivahan app. It is a mobile-based application that provides transport service access to citizens through various information, services, and utilities related to the transport sector. It is a genuine government app for all India RTO vehicle registration number search, driving license verification, and other transport-related matters.

    -

    In this article, we will tell you everything you need to know about the mParivahan app, including its features, benefits, download steps, usage tips, and alternatives. By the end of this article, you will be able to decide whether you should download the app or not.

    -

    download mparivahan


    Download File ⇒⇒⇒ https://urlca.com/2uObDy



    -

    What is mParivahan and why you should download it

    -

    mParivahan is an initiative of the Ministry of Road Transport and Highways (MoRTH) and the National Informatics Centre (NIC) to bring convenience to citizens and transparency in the system. It aims to digitize the transport sector and make it more efficient, safe, and user-friendly.

    -

    The app allows you to store and access your driving license (DL) and vehicle registration certificate (RC) digitally, making it easy to show them to authorities when required. You can also verify the details of any vehicle or driver by entering their registration number or DL number. Moreover, you can report road offences and accidents using the app, as well as get notifications about transport-related matters.

    -

    What are the main features and benefits of the app

    -

    The mParivahan app offers several useful features related to vehicles and driving licenses. Here are some of the main features of the app:

    -
      -
    • Driving Licence (DL): The app allows you to store and access your driving license digitally, making it easy to show to authorities when required. You can also verify the details of any driver by entering their DL number.
    • -
    • Registration Certificate (RC): The app allows you to store and access your vehicle registration certificate digitally, making it easy to show to authorities when required. You can also verify the details of any vehicle by entering their registration number.
    • -
    • Virtual RC/DL: The app allows you to create virtual RC and DL on your mobile phone, which are encrypted QR codes that contain all your information. You can scan these QR codes using any QR code scanner or another mParivahan app to access your details.
    • -
    • Information Services: The app provides various information services related to the transport sector, such as RTO/Traffic office locations, transport notifications, FAQs, feedback, etc.
    • -
    • DL/RC Search: The app allows you to search for any DL or RC by entering their number. You can get complete information about any vehicle or driver, such as owner name, registration date, registering authority, make model, fuel type, vehicle age, vehicle class, insurance validity, fitness validity, etc.
    • -
    • Road Offence Reporting: The app allows you to report any road offence or violation by entering the details of the offender's vehicle or DL. You can also upload photos or videos as evidence. The report will be sent to the concerned authority for action.
    • -
    • Road Accident Reporting: The app allows you to report any road accident by entering the details of the vehicles involved, location, date and time, injuries,

      casualties, etc. You can also upload photos or videos as evidence. The report will be sent to the concerned authority for action.

    • -
    • Emergency Services: The app allows you to access emergency services such as ambulance, police, fire brigade, etc. by tapping on the SOS button. You can also share your location with your contacts in case of emergency.
    • -
    -

    Some of the benefits of using the mParivahan app are:

    -
      -
    • Convenience: The app saves you from the hassle of carrying physical documents and showing them to authorities when required. You can also access various information and services related to the transport sector at your fingertips.
    • -
    • Security: The app ensures that your data is secure and encrypted. You can also lock your virtual RC and DL using a PIN or fingerprint. The app also verifies the authenticity of any vehicle or driver by checking their details against the central database.
    • -
    • Safety: The app helps you to report any road offence or accident and get help from emergency services in case of need. You can also get notifications about transport-related matters such as pollution norms, traffic rules, etc.
    • -
    • Citizenship: The app helps you to contribute to the improvement of the transport sector and road safety by reporting any violation or incident. You can also give feedback and suggestions to the authorities through the app.
    • -
    -

    How to download and sign up on the mParivahan app

    -

    The mParivahan app is available for both Android and iOS users. You can download it from the Google Play Store or the App Store for free. Here are the steps to download and sign up on the app:

    -

    Step-by-step instructions for Android users

    -
      -
    1. Open the Google Play Store on your Android device and search for "mParivahan".
    2. -
    3. Select the app from the list of results and tap on "Install".
    4. -
    5. Wait for the app to download and install on your device.
    6. -
    7. Open the app and tap on "Sign Up" at the bottom of the screen.
    8. -
    9. Enter your mobile number and tap on "Generate OTP".
    10. -
    11. Enter the OTP that you receive on your phone and tap on "Verify".
    12. -
    13. Create a password and confirm it. Tap on "Submit".
    14. -
    15. You have successfully signed up on the mParivahan app. You can now use it to access various features and services.
    16. -
    -

    Step-by-step instructions for iOS users

    -
      -
    1. Open the App Store on your iOS device and search for "mParivahan".
    2. -
    3. Select the app from the list of results and tap on "Get".
    4. -
    5. Wait for the app to download and install on your device.
    6. -
    7. Open the app and tap on "Sign Up" at the bottom of the screen.
    8. -
    9. Enter your mobile number and tap on "Generate OTP".
    10. -
    11. Enter the OTP that you receive on your phone and tap on "Verify".
    12. -
    13. Create a password and confirm it. Tap on "Submit".
    14. -
    15. You have successfully signed up on the mParivahan app. You can now use it to access various features and services.
    16. -
    -

    How to create virtual RC and DL on the app

    -

    To create virtual RC and DL on the mParivahan app, you need to follow these steps:

    -
      -
    1. Open the app and tap on "Dashboard" at the bottom of the screen.
    2. -
    3. Select either "RC" or "DL" depending on what you want to create.
    4. -
    5. Enter your RC number or DL number and tap on "Search".
    6. -
    7. You will see your details on the screen. Tap on "Add To Dashboard".
    8. -
    9. You will see a confirmation message that your virtual RC or DL has been created.
    10. -
    11. You can now access your virtual RC or DL from your dashboard anytime.
    12. -
    -

    How to use the mParivahan app for various purposes

    -

    The mParivahan app offers various features and services that you can use for different purposes related to vehicles and driving licenses. Here are some of them:

    -

    How to find details of any vehicle by entering the registration number

    -

    If you want to find out the details of any vehicle by entering its registration number, you can use the DL/RC Search feature of the app. Here is how you can do it:

    -


    -
      -
    1. Open the app and tap on "DL/RC Search" at the bottom of the screen.
    2. -
    3. Enter the registration number of the vehicle that you want to check and tap on "Search".
    4. -
    5. You will see the details of the vehicle on the screen, such as owner name, registration date, registering authority, make model, fuel type, vehicle age, vehicle class, insurance validity, fitness validity, etc.
    6. -
    7. You can also tap on the QR code icon at the top right corner of the screen to generate a virtual RC for the vehicle.
    8. -
    -

    How to verify your car registration and insurance validity

    -

    If you want to verify your car registration and insurance validity, you can use the RC feature of the app. Here is how you can do it:

    -
      -
    1. Open the app and tap on "Dashboard" at the bottom of the screen.
    2. -
    3. Select "RC" from the list of options.
    4. -
    5. Enter your RC number and tap on "Search".
    6. -
    7. You will see your car details on the screen, including your registration and insurance validity dates.
    8. -
    9. You can also tap on "Add To Dashboard" to create a virtual RC for your car.
    10. -
    -

    How to report road offences and accidents using the app

    -

    If you want to report any road offence or accident using the app, you can use the Road Offence Reporting or Road Accident Reporting features of the app. Here is how you can do it:

    -
      -
    1. Open the app and tap on "Road Offence Reporting" or "Road Accident Reporting" at the bottom of the screen.
    2. -
    3. Enter the details of the offender's vehicle or DL or the vehicles involved in the accident, such as registration number, DL number, location, date and time, etc.
    4. -
    5. You can also upload photos or videos as evidence by tapping on the camera icon.
    6. -
    7. Tap on "Submit" to send your report to the concerned authority.
    8. -
    9. You will see a confirmation message that your report has been submitted successfully.
    10. -
    -

    Alternatives to the mParivahan app

    -

    The mParivahan app is not the only app that offers transport-related services. There are some other apps that provide similar features and functions. Here are some of them:

    -

    DigiLocker

    -

    DigiLocker is another initiative of the Government of India that allows you to store and access your documents digitally. You can link your Aadhaar card with DigiLocker and get access to various documents issued by government agencies, such as driving license, vehicle registration certificate, PAN card, etc. You can also upload your own documents and share them with others securely. DigiLocker is similar to mParivahan in terms of storing and accessing your DL and RC digitally, but it also offers other services such as e-signature, document verification, etc.

    -

    RTO Vehicle Information

    -

    RTO Vehicle Information is an app that allows you to find out the details of any vehicle by entering its registration number. You can get information such as owner name, address, age, engine number, chassis number, etc. You can also check the status of your driving license, vehicle insurance, pollution certificate, etc. You can also find the nearest RTO office and contact details. RTO Vehicle Information is similar to mParivahan in terms of finding vehicle and driver details, but it does not offer other features such as virtual RC and DL, road offence reporting, etc.

    -

    DriveSmart

    -

    DriveSmart is an app that helps you to improve your driving skills and habits. It tracks your driving behavior and gives you feedback and tips on how to drive better. It also rewards you with points and badges for safe and efficient driving. You can also compare your driving score with other drivers and get discounts on car insurance. DriveSmart is different from mParivahan in terms of its focus on driving improvement, but it also offers some features such as vehicle details, emergency services, etc.

    -

    Conclusion

    -

    The mParivahan app is a useful and convenient app for vehicle owners and drivers in India. It allows you to store and access your driving license and vehicle registration certificate digitally, as well as verify the details of any vehicle or driver. It also enables you to report road offences and accidents, and access emergency services. The app is secure, safe, and easy to use.

    -

    If you are looking for an app that provides transport-related services, you should download the mParivahan app from the Google Play Store or the App Store. It will make your life easier and help you to contribute to the improvement of the transport sector and road safety in India.

    -

    FAQs

    -

    Here are some frequently asked questions related to the mParivahan app:

    -

    Is the mParivahan app free?

    -

    Yes, the mParivahan app is free to download and use. You do not need to pay any charges or fees to use the app.

    -

    Is the mParivahan app reliable?

    -

    Yes, the mParivahan app is reliable and authentic. It is an initiative of the Government of India and the data is sourced from the central database of the Ministry of Road Transport and Highways (MoRTH) and the National Informatics Centre (NIC). The app also verifies the details of any vehicle or driver by checking them against the database.

    -

    Is the mParivahan app mandatory?

    -

    No, the mParivahan app is not mandatory. You can still use your physical documents such as driving license and vehicle registration certificate as valid proofs. However, using the mParivahan app can save you from the hassle of carrying physical documents and showing them to authorities when required.

    -

    How can I update my details on the mParivahan app?

    -

    If you want to update your details on the mParivahan app, such as your address, phone number, email id, etc., you need to visit your nearest RTO office and submit the required documents. Once your details are updated in the central database, they will reflect on your virtual RC and DL on the app.

    -

    How can I delete my virtual RC or DL from the mParivahan app?

    -

    If you want to delete your virtual RC or DL from the mParivahan app, you need to follow these steps:

    -
      -
    1. Open the app and tap on "Dashboard" at the bottom of the screen.
    2. -
    3. Select either "RC" or "DL" depending on what you want to delete.
    4. -
    5. Tap on the three dots icon at the top right corner of the screen.
    6. -
    7. Select "Delete" from the menu.
    8. -
    9. You will see a confirmation message that your virtual RC or DL has been deleted from the app.
    10. -

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Infinite Flight Simulator APK Hile The Ultimate Flight Simulation Experience for Android Devices.md b/spaces/congsaPfin/Manga-OCR/logs/Infinite Flight Simulator APK Hile The Ultimate Flight Simulation Experience for Android Devices.md deleted file mode 100644 index 817546a0b876e694d37cd00b08516282b4cf636a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Infinite Flight Simulator APK Hile The Ultimate Flight Simulation Experience for Android Devices.md +++ /dev/null @@ -1,52 +0,0 @@ -
    -

    Infinite Flight Simulator APK Hile: How to Unlock All Planes and Features

    -

    Introduction

    -

    Do you love flying planes and exploring the world from the sky? Do you want to experience the most realistic and immersive flight simulation on your mobile device? If you answered yes, then you should try Infinite Flight Simulator, one of the best flight simulator games for Android and iOS. But wait, there's more! You can also use Infinite Flight Simulator APK Hile, a modified version of the app that lets you unlock all planes and features for free. Sounds amazing, right? In this article, we will show you what Infinite Flight Simulator APK Hile is, how to download and install it, and how to use it to enjoy the ultimate flight simulation experience. Let's get started!

    -

What is Infinite Flight Simulator APK Hile and why you should use it

    -

    What is Infinite Flight Simulator?

    -

    Infinite Flight Simulator is a flight simulator game that allows you to fly various aircraft models across different regions and airports around the world. You can choose from a wide range of planes, from single-engine propellers to jets, from airliners to military aircraft. You can also customize your flight plan, weather conditions, time of day, and more. You can even join online multiplayer sessions and interact with other pilots and air traffic controllers. Infinite Flight Simulator is designed to provide you with a realistic and fun flight simulation experience on your mobile device.

    -

    infinite flight simulator apk hile


    Downloadhttps://urlca.com/2uOes8



    -

    What is APK Hile?

    -

    APK Hile is a Turkish term that means "APK cheat" or "APK hack". It refers to a modified version of an app that has been altered to provide some advantages or benefits to the user. For example, an APK Hile may unlock premium features, remove ads, increase coins or gems, or enable cheats or hacks. APK Hile is usually created by third-party developers or hackers who modify the original app's code and distribute it online for free.

    -

    Why use Infinite Flight Simulator APK Hile?

    -

    Infinite Flight Simulator APK Hile is a modified version of Infinite Flight Simulator that unlocks all planes and features for free. This means that you can fly any plane you want, from any airport you want, without paying anything. You can also access all the pro features, such as global flight, live weather, live air traffic control, live cockpit instruments, replay mode, and more. You can also customize your flight experience with various settings and options. Infinite Flight Simulator APK Hile gives you the ultimate freedom and flexibility to enjoy flight simulation on your mobile device.

    -


    -

    How to Download and Install Infinite Flight Simulator APK Hile

    -

    Step 1: Enable Unknown Sources

    -

    Since Infinite Flight Simulator APK Hile is not available on the official app stores, you need to enable unknown sources on your device before installing it. This will allow you to install apps from sources other than the app stores. To do this, go to your device's settings, then security or privacy, then toggle on unknown sources or allow installation from unknown sources.

    -

    Step 2: Download the APK File

    -

    Next, you need to download the APK file of Infinite Flight Simulator APK Hile from a reliable source online. You can search for it on Google or use the link below. Make sure that you download the latest version of the app and that it is compatible with your device.
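    If the site you download from publishes a checksum for the file, it is worth comparing it against what actually landed on your device before installing anything; a mismatch usually means a corrupted or tampered download. The snippet below is only an illustrative sketch in Python: the file name and the expected hash are placeholders, not values from any real release.

```python
import hashlib
import sys

def sha256_of(path, chunk_size=1024 * 1024):
    """Return the SHA-256 hex digest of a file, reading it in chunks."""
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()

if __name__ == "__main__":
    # Placeholder values -- substitute the real file name and the checksum
    # published on the download page, if one is provided.
    apk_path = "infinite-flight-simulator.apk"
    expected = "<sha256 published by the download site>"

    actual = sha256_of(apk_path)
    if actual == expected.lower():
        print("Checksum matches -- the download is intact.")
    else:
        print(f"Checksum mismatch:\n  expected: {expected}\n  actual:   {actual}")
        sys.exit(1)
```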

    -

    Step 3: Install the APK File

    -

    Once you have downloaded the APK file, locate it on your device's file manager or downloads folder. Tap on the file and follow the instructions to install the app on your device. It may take a few minutes to complete the installation process.

    -

    Step 4: Launch the App and Enjoy

    -

    Finally, you can launch the app from your device's app drawer or home screen. You will see that all planes and features are unlocked and available for you to use. You can start flying any plane you want, from any airport you want, and enjoy the realistic and immersive flight simulation experience.

    -

    How to Use Infinite Flight Simulator APK Hile

    -

    How to Unlock All Planes

    -

    One of the main benefits of using Infinite Flight Simulator APK Hile is that you can unlock all planes for free. This means that you can fly any plane you want, from single-engine propellers to jets, from airliners to military aircraft. You can also choose from different liveries and paint schemes for each plane. To unlock all planes, simply go to the main menu of the app, then tap on the planes icon. You will see a list of all planes available in the app, and you can select any plane you want to fly. You can also tap on the information icon next to each plane to see its specifications and features.

    -

    How to Access Pro Features

    -

    Another benefit of using Infinite Flight Simulator APK Hile is that all pro features are available for free. Global flight mode lets you fly anywhere in the world with high-resolution satellite imagery and accurate topography. Live weather updates conditions in real time based on your location and time of day, while live air traffic control lets you communicate with other pilots and controllers in online multiplayer sessions. Live cockpit instruments display realistic, functional gauges, and replay mode lets you review your flight and share it with others. To access these pro features, go to the main menu of the app and tap the settings icon; you will see a list of all pro features, which you can toggle on or off as you wish.

    -

    How to Customize Your Flight Experience

    -

    Infinite Flight Simulator APK Hile also lets you customize your flight experience with various settings and options. You can choose your flight plan, weather conditions, time of day, fuel load, weight and balance, and more, and adjust your camera angle, sound volume, graphics quality, and control sensitivity. Various assists and alerts, such as autopilot, flight director, landing aid, and stall warning, can be enabled or disabled. To customize your flight experience, go to the main menu of the app and tap the settings icon; you will see a list of all settings and options, which you can change according to your preference.

    -

    Conclusion

    -

    Summary of the Main Points

    -

    Infinite Flight Simulator APK Hile is a modified version of Infinite Flight Simulator that unlocks all planes and features for free. It lets you fly any plane you want, from any airport you want, without paying anything. It also lets you access all pro features, such as global flight, live weather, live air traffic control, live cockpit instruments, replay mode, and more. It also lets you customize your flight experience with various settings and options. Infinite Flight Simulator APK Hile gives you the ultimate freedom and flexibility to enjoy flight simulation on your mobile device.

    -

    Call to Action

    -

    If you are a fan of flight simulation games and want to try Infinite Flight Simulator APK Hile, then don't wait any longer. Download it now from the link below and start flying any plane you want, from any airport you want. Experience the most realistic and immersive flight simulation on your mobile device with Infinite Flight Simulator APK Hile.

    -

    Frequently Asked Questions

    -

    Q: Is Infinite Flight Simulator APK Hile safe to use?

    -

    A: Infinite Flight Simulator APK Hile is generally safe to use as long as you download it from a reliable source online. However, since it is a modified version of an app that is not authorized by the official developers or publishers, it may have some risks or issues such as malware, viruses, bugs, errors, crashes, etc. Therefore, we recommend that you use it at your own risk and discretion.

    -

    Q: Is Infinite Flight Simulator APK Hile legal to use?

    -

    A: Infinite Flight Simulator APK Hile is not legal to use as it violates the terms and conditions of the original app. It also infringes on the intellectual property rights of the official developers or publishers. Therefore, we do not endorse or promote the use of Infinite Flight Simulator APK Hile and we advise you to respect the rights and efforts of the original app creators.

    -

    Q: Does Infinite Flight Simulator APK Hile require root access or jailbreak?

    -

    A: No, Infinite Flight Simulator APK Hile does not require root access or jailbreak to work on your device. You can install and use it without any modifications to your device's system.

    -

    Q: Can I use Infinite Flight Simulator APK Hile offline?

    -

    A: Yes, you can use Infinite Flight Simulator APK Hile offline without any internet connection. However, some features such as global flight, live weather, live air traffic control, and multiplayer may not work properly or at all without an internet connection.

    -

    Q: Can I update Infinite Flight Simulator APK Hile to the latest version?

    -

    A: No, you cannot update Infinite Flight Simulator APK Hile to the latest version as it may cause the app to stop working or lose its modifications. If you want to update the app, you need to uninstall the APK Hile version and install the official version from the app stores. However, you will lose all the unlocked planes and features that you had with the APK Hile version.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Link Download College Brawl APK A Game That Combines Violence and Romance.md b/spaces/congsaPfin/Manga-OCR/logs/Link Download College Brawl APK A Game That Combines Violence and Romance.md deleted file mode 100644 index 1ffd0539a52b2833af0d4fcee6362b4e7877d802..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Link Download College Brawl APK A Game That Combines Violence and Romance.md +++ /dev/null @@ -1,109 +0,0 @@ - -

    Link Download College Brawl APK: A Beat'em Up Game for Adults

    -

    If you are looking for a fun and exciting game that will keep you entertained for hours, then you should try College Brawl APK. This is a beat'em up game for adults where you have to fight your way through a college campus full of girls who want to stop you from getting what you want. In this article, we will tell you what College Brawl APK is, how to download and install it on your Android device, and how to play it on your PC.

    -

    What is College Brawl APK?

    -

    College Brawl APK is a 2-dimensional game where you are a boy who bullies everybody. You have to go through different levels and face various challenges, such as fighting against gangs of girls, managing your health, and having hot moments with them. You can also customize your character, unlock new outfits and weapons, and collect coins and gems along the way.

    -

    link download college brawl apk


    Download File ··· https://urlca.com/2uO76G



    -

    The gameplay of College Brawl APK

    -

    The gameplay of College Brawl APK is simple and intuitive. You can control your character using the virtual joystick and buttons on the screen. You can move around, jump, punch, kick, and use special attacks. You can also interact with objects and people in the environment, such as picking up items, talking to NPCs, and entering buildings. You have to complete the objectives of each level, such as reaching a certain point, defeating a boss, or collecting a certain number of items. You have to be careful not to lose all your health or run out of time, or else you will fail the level.

    -

    The features of College Brawl APK

    -

    College Brawl APK has many features that make it an enjoyable and addictive game. Some of these features are:

    -
    • High-quality graphics and sound effects that create a realistic and immersive atmosphere.
    • A variety of levels and scenarios that offer different challenges and surprises.
    • A large number of enemies and bosses that have different skills and behaviors.
    • A wide range of outfits and weapons that you can unlock and use to customize your character.
    • A coin and gem system that allows you to buy items and upgrades in the shop.
    • A rating system that evaluates your performance in each level based on your score, time, health, and coins collected.
    • An online leaderboard that lets you compare your scores with other players around the world.
    -

    How to download and install College Brawl APK on your Android device?

    -

    If you want to play College Brawl APK on your Android device, you need to download and install it first. Here are the requirements and steps for doing so:

    -

    The requirements for College Brawl APK

    -

    To download and install College Brawl APK on your Android device, you need to meet the following requirements:

    -
    • Your device must have Android 4.4 or higher.
    • Your device must have at least 100 MB of free storage space.
    • Your device must have a stable internet connection.
    • You must enable the installation of apps from unknown sources in your device settings.
    -

    The steps to download and install College Brawl APK

    -

    To download and install College Brawl APK on your Android device, you need to follow these steps:

    -
    1. Go to [this link](^1^) or [this link](^2^) or [this link](^3^) or [this link](^4^) or [this link](^5^) using your device browser.
    2. Tap on the download button and wait for the file to be downloaded.
    3. Locate the downloaded file in your device file manager or downloads folder and tap on it to open it.
    4. Tap on the install button and wait for the installation to be completed.
    5. Tap on the open button, or find the app icon on your device home screen and tap on it to launch the game.
    6. Enjoy playing College Brawl APK on your Android device.
    -

    How to play College Brawl APK on your PC?

    -

    If you want to play College Brawl APK on your PC, you need to use an Android emulator. An Android emulator is a software that allows you to run Android apps and games on your PC. Here are the benefits and methods of playing College Brawl APK on your PC:

    -

    The benefits of playing College Brawl APK on your PC

    -

    Playing College Brawl APK on your PC has some advantages over playing it on your Android device. Some of these advantages are:

    -

    -
    • You can enjoy a bigger and better screen resolution that enhances the graphics and details of the game.
    • You can use a keyboard and mouse or a gamepad to control your character more easily and accurately.
    • You can save your device battery and storage space by playing the game on your PC.
    • You can avoid any interruptions or distractions from phone calls, messages, or notifications while playing the game.
    -

    The methods to play College Brawl APK on your PC

    -

    To play College Brawl APK on your PC, you need to use one of the following methods:

    -
    1. Download and install an Android emulator such as [BlueStacks], [NoxPlayer], or [LDPlayer] on your PC.
    2. Launch the emulator and sign in with your Google account.
    3. Download and install College Brawl APK from one of the links mentioned above using the emulator browser (or sideload the APK over ADB, as sketched below).
    4. Locate the installed app in the emulator app drawer and click on it to launch the game.
    5. Enjoy playing College Brawl APK on your PC.
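    Many Android emulators, including the three mentioned above, can also expose an ADB (Android Debug Bridge) interface, usually after enabling it in the emulator's settings. If you already have the APK on your PC, you can sideload it instead of downloading it again inside the emulator browser. The following is a rough sketch that assumes the Android platform tools are installed, `adb` is on your PATH, and the emulator is already running; the APK path is a placeholder.

```python
import subprocess
import sys

APK_PATH = "college-brawl.apk"  # placeholder -- point this at your downloaded file

def run(cmd):
    """Run a command, return its stdout, and raise if it exits non-zero."""
    result = subprocess.run(cmd, capture_output=True, text=True, check=True)
    return result.stdout

if __name__ == "__main__":
    # The running emulator should show up as a device here.
    print(run(["adb", "devices"]))

    # Install the APK into the emulator (-r reinstalls if it is already present).
    try:
        print(run(["adb", "install", "-r", APK_PATH]))
    except subprocess.CalledProcessError as err:
        print("adb install failed:", err.stderr, file=sys.stderr)
        sys.exit(1)
```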
    -

    Conclusion

    -

    College Brawl APK is a beat'em up game for adults that will keep you entertained for hours. You can download and install it on your Android device or play it on your PC using an Android emulator. You can also enjoy its features, such as high-quality graphics, varied levels, diverse enemies, customizable outfits and weapons, coin and gem system, rating system, and online leaderboard. If you are looking for a fun and exciting game that will challenge you and make you laugh, then you should try College Brawl APK today.

    -

    FAQs

    -

    Here are some frequently asked questions about College Brawl APK:

    -

    Is College Brawl APK safe to download and install?

    -

    Yes, College Brawl APK is safe to download and install. It does not contain any viruses, malware, or spyware that can harm your device or data. However, you should always download it from a trusted source and enable the installation of apps from unknown sources in your device settings.

    -

    Is College Brawl APK free to play?

    -

    Yes, College Brawl APK is free to play. You do not need to pay any money to download, install, or play it. However, you can buy items and upgrades in the shop using coins and gems that you can earn in the game or purchase with real money.

    -

    How can I update College Brawl APK?

    -

    To update College Brawl APK, you need to download and install the latest version from one of the links mentioned above. You do not need to uninstall the previous version before installing the new one. Your progress and data will be saved automatically.

    -

    How can I contact the developer of College Brawl APK?

    -

    To contact the developer of College Brawl APK, you can visit their [official website] or their [Facebook page]. You can also send them an email at [email protected] You can also leave a comment or a review on their app page in Google Play Store or App Store.

    -

    What are some similar games to College Brawl APK?

    -

    If you like College Brawl APK, you might also like some similar games, such as:

    -
    • [Anger of Stick 5: Zombie]: A stickman action game where you have to fight against zombies and other enemies using various weapons and skills.
    • [Beat Street]: A retro-style beat'em up game where you have to fight against gangs of thugs using punches, kicks, combos, and special moves.
    • [Dan the Man: Action Platformer]: A platformer game where you have to run, jump, and fight your way through various levels using weapons, power-ups, and costumes.
    -

    I hope you enjoyed this article and found it helpful. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading and have a great day!

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Roblox APK - Explore Millions of Immersive Experiences with Friends 2023.md b/spaces/congsaPfin/Manga-OCR/logs/Roblox APK - Explore Millions of Immersive Experiences with Friends 2023.md deleted file mode 100644 index 0f6d6faa7711240eb0b9d80cb3fcd88d49082b4b..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Roblox APK - Explore Millions of Immersive Experiences with Friends 2023.md +++ /dev/null @@ -1,113 +0,0 @@ -
    -

    Roblox APK Latest Version 2023: Everything You Need to Know

    -

    Roblox is one of the most popular online gaming platforms in the world, with over 500 million users and millions of games to choose from. Whether you want to create your own adventures, compete with others, or just have fun with your friends, Roblox has something for everyone. But what if you want to enjoy Roblox on your Android device without any limitations or restrictions? That's where Roblox APK comes in.

    -

    What is Roblox APK?

    -

    Roblox APK is a modified version of the official Roblox app that allows you to download and install it on your Android device without using Google Play Store. This means that you can access all the features and content of Roblox without having to pay for anything or watch ads. You can also get the latest updates and events before anyone else, as well as improve your gaming performance and security.

    -

    roblox apk latest version 2023


    Download ✺✺✺ https://urlca.com/2uOe6s



    -

    How to download and install Roblox APK on Android devices

    -

    Downloading and installing Roblox APK on your Android device is very easy and simple. Just follow these steps:

    -
    1. Go to a trusted website that offers Roblox APK download links, such as APKCombo, TechSpot, or Roblox.com.
    2. Select the latest version of Roblox APK (2.578.564) and tap on the download button.
    3. Once the download is complete, locate the file in your device's storage and tap on it (a quick way to sanity-check the file first is sketched after this list).
    4. If prompted, enable the installation from unknown sources in your device's settings.
    5. Follow the instructions on the screen and wait for the installation to finish.
    6. Launch the app and log in with your existing Roblox account or create a new one.
    7. Enjoy playing Roblox on your Android device!
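    One quick sanity check before tapping the file: an APK is just a ZIP archive that must contain an AndroidManifest.xml entry, so a truncated or mislabeled download can often be caught without installing it. The sketch below uses only Python's standard library and a placeholder file name; it is a rough integrity check, not a substitute for downloading from a trusted source.

```python
import zipfile

APK_PATH = "roblox.apk"  # placeholder file name

def looks_like_apk(path):
    """Rough check: a readable ZIP archive that contains AndroidManifest.xml."""
    if not zipfile.is_zipfile(path):
        return False
    with zipfile.ZipFile(path) as zf:
        if zf.testzip() is not None:  # returns the first corrupt member, if any
            return False
        return "AndroidManifest.xml" in zf.namelist()

if __name__ == "__main__":
    if looks_like_apk(APK_PATH):
        print("File opens as an APK archive.")
    else:
        print("File does not look like a valid APK -- download it again from another source.")
```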
    -

    What are the features of Roblox APK latest version 2023?

    -

    Roblox APK latest version 2023 has many amazing features that make it stand out from the official app. Here are some of them:

    -

    Create and share your own experiences

    -

    With Roblox APK, you can unleash your creativity and make your own games using the powerful tools and resources available. You can also share your experiences with other players and join a global community of creators. Whether you want to make a simple puzzle game, a complex RPG, or anything in between, you can do it with Roblox APK.

    -

    Explore millions of immersive worlds

    -

    Roblox APK gives you access to millions of games created by other users around the world. You can explore different genres, themes, and styles, from action and adventure to simulation and role-playing. You can also join various events and challenges, such as seasonal festivals, treasure hunts, and more. There's always something new and exciting to discover on Roblox APK.

    -

    Customize your avatar and chat with friends

    -

    Roblox APK lets you express yourself and show off your personality by customizing your avatar with thousands of items, such as hats, shirts, faces, gear, and more. You can also chat with your friends using voice or text messages, as well as join groups and clubs with people who share your interests. You can also play games with your friends and have fun together on Roblox APK.

    -

    Play across different platforms and devices

    -

    Roblox APK is compatible with various platforms and devices, such as Windows, Mac, iOS, Xbox, and more. You can also switch between devices without losing your progress or data. This means that you can play Roblox anytime and anywhere you want, as long as you have an internet connection. You can also sync your account and settings across all your devices with Roblox APK.

    -

    What are the benefits of using Roblox APK?

    -

    Using Roblox APK has many benefits that make it worth trying. Here are some of them:

    -

    Enjoy unlimited access to all games and items

    -

    With Roblox APK, you don't have to worry about paying for anything or watching ads. You can access all the games and items on Roblox for free, without any limitations or restrictions. You can also get unlimited Robux, the virtual currency of Roblox, which you can use to buy more items, upgrade your avatar, or unlock premium features. You can also get free membership to the Builders Club, which gives you more privileges and perks on Roblox.

    -

    Get exclusive updates and events

    -

    With Roblox APK, you can get the latest updates and events before anyone else. You can enjoy new features, improvements, bug fixes, and more as soon as they are released. You can also join exclusive events and challenges that are only available for Roblox APK users. You can also get special rewards and prizes for participating in these events.

    -

    Avoid ads and in-app purchases

    -

    With Roblox APK, you don't have to deal with annoying ads and in-app purchases that interrupt your gaming experience. You can play without any distractions or interruptions, and focus on having fun. You also don't have to spend any real money on Roblox, as you can get everything you need for free with Roblox APK.

    -

    Enhance your gaming performance and security

    -

    With Roblox APK, you can improve your gaming performance and security on your Android device. You can optimize your device's settings and resources to run Roblox smoothly and fast. You can also protect your device from malware and viruses that may harm your data or privacy. You can also encrypt your connection and hide your IP address to avoid hackers and trackers on Roblox.

    -

    What are the drawbacks of using Roblox APK?

    -

    Using Roblox APK also has some drawbacks that you should be aware of. Here are some of them:

    -

    Risk of malware and viruses

    -

    Using Roblox APK may expose your device to malware and viruses that may damage your device or steal your information. Not all websites that offer Roblox APK download links are safe and reliable. Some of them may contain malicious files or links that may infect your device or redirect you to harmful sites. Therefore, you should always be careful when downloading and installing Roblox APK from unknown sources.

    -

    Violation of terms of service and privacy policy

    -

    Using Roblox APK may violate the terms of service and privacy policy of Roblox Corporation, the developer of the official app. By using a modified version of the app, you may be breaking the rules and regulations that govern the use of Roblox. This may result in legal consequences or penalties from the developer or other authorities.

    -

    Potential account suspension or ban

    -

    Using Roblox APK may also put your account at risk of suspension or ban from Roblox. The developer may detect that you are using an unauthorized version of the app, and may take action against you. This may include suspending or banning your account, deleting your data or progress, or blocking your access to Roblox. Therefore, you should always use a backup account or a VPN when using Roblox APK.

    -

    Loss of data and progress

    -

    Using Roblox APK may also cause you to lose your data or progress on Roblox. The modified version of the app may not be compatible with the official version, and may cause errors or glitches that may affect your gameplay. You may also lose your data or progress if your account is suspended or banned from Roblox. Therefore, you should always backup your data or progress before using Roblox APK.

    -

    Conclusion

    -

    Roblox APK is a great way to enjoy Roblox on your Android device without any limitations or restrictions. You can access all the features and content of Roblox for free, as well as get exclusive updates and events. You can also improve your gaming performance and security with Roblox APK.

    -

    However, using Roblox APK also has some drawbacks that you should consider before using it. You may risk getting malware or viruses, violating the terms of service and privacy policy, losing your account or data, or facing legal issues. Therefore, you should always be careful and responsible when using Roblox APK.

    -

    If you want to learn more about Roblox APK latest version 2023, you can check out the following FAQs:

    -

    FAQs

    -

    Q: Is Roblox APK safe to use?

    -

    A: Roblox APK is not officially endorsed or supported by Roblox Corporation, and may contain malware or viruses that may harm your device or data. Therefore, you should always download and install Roblox APK from trusted and reliable sources, and scan it with an antivirus software before using it. You should also use a backup account or a VPN to protect your identity and privacy on Roblox.

    -

    Q: Is Roblox APK legal to use?

    -

    A: Roblox APK may violate the terms of service and privacy policy of Roblox Corporation, as well as the intellectual property rights of the developer and other parties. Therefore, using Roblox APK may result in legal consequences or penalties from the developer or other authorities. You should always respect the rights and rules of the official app, and use Roblox APK at your own risk and discretion.

    -

    Q: How do I update Roblox APK?

    -

    A: Roblox APK may not update automatically like the official app, and may require you to download and install the latest version manually. You can check for updates on the website where you downloaded Roblox APK, or on other websites that offer Roblox APK download links. You should always backup your data or progress before updating Roblox APK, as you may lose them during the process.

    -

    Q: How do I uninstall Roblox APK?

    -

    A: You can uninstall Roblox APK like any other app on your Android device. Just go to your device's settings, find the app manager, select Roblox APK, and tap on the uninstall button. You can also delete the downloaded file from your device's storage. You should also clear your cache and cookies from your browser to remove any traces of Roblox APK.

    -

    Q: Where can I find more information about Roblox APK?

    -

    A: You can find more information about Roblox APK on various websites, blogs, forums, videos, or social media platforms that discuss or review it. You can also ask other users who have used or are using Roblox APK for their opinions or feedback. However, you should always be careful and critical when reading or watching online content about Roblox APK, as some of them may be biased or inaccurate.

    -
    -
    \ No newline at end of file diff --git a/spaces/congsaPfin/Manga-OCR/logs/Woza! by Jay Music The Amapiano Song You Need to Hear (MP3 Download).md b/spaces/congsaPfin/Manga-OCR/logs/Woza! by Jay Music The Amapiano Song You Need to Hear (MP3 Download).md deleted file mode 100644 index 3768bc63cf583c1bc38ff7e59b25fb5f336c5f1a..0000000000000000000000000000000000000000 --- a/spaces/congsaPfin/Manga-OCR/logs/Woza! by Jay Music The Amapiano Song You Need to Hear (MP3 Download).md +++ /dev/null @@ -1,99 +0,0 @@ -
    -

    Download Woza Jay Music MP3: How to Enjoy the Latest Amapiano Hit

    -

    If you are a fan of Amapiano music, you have probably heard of the song Woza! by Jay Music. This catchy and upbeat tune has been making waves in the South African music scene, and many people are looking for ways to download it and listen to it offline. In this article, we will tell you everything you need to know about Woza Jay Music MP3, why you should download it, and how to do it safely and legally.

    -

    What is Woza Jay Music MP3?

    -

    Woza Jay Music MP3 is the digital audio format of the song Woza! by Jay Music, a well-known South African DJ and producer. The song was released in September 2022, and it is one of the most popular Amapiano songs of the year. Amapiano is a genre of music that originated in South Africa, and it combines elements of house, jazz, kwaito, and lounge music. It is characterized by smooth piano melodies, deep bass lines, and percussive rhythms.

    -

    download woza jay music mp3


    DOWNLOAD 🆗 https://urlca.com/2uO5vi



    -

    Who is Jay Music?

    -

    Jay Music is a talented and versatile musician who has been producing and mixing Amapiano songs since 2018. He is also the founder of Jay Music Records, a label that promotes and supports emerging Amapiano artists. Some of his previous hits include Resist, Move On, Lockdown House Party Mix, and Thank You Mr DJ. He has collaborated with other famous Amapiano artists such as Kabza De Small, DJ Maphorisa, Vigro Deep, and MFR Souls.

    -

    What is the song Woza! about?

    -

    The song Woza! is a celebration of life, love, and music. The title means "come" or "come on" in Zulu, and it is a common expression used to invite someone to join in the fun. The lyrics are simple but catchy, and they encourage the listener to dance, sing, and enjoy the moment. The song also features some vocal samples from other popular songs, such as "Woza" by Shota ft. Professor, "Woza La" by DJ Tira ft. Bhekzin Terris & Thakzin, and "Woza Nana" by Heavy K ft. Nokwazi.

    -

    Why should you download Woza Jay Music MP3?

    -

    There are many reasons why you should download Woza Jay Music MP3 and add it to your playlist. Here are some of them:

    -

    The benefits of downloading MP3 files

    -

    MP3 files are compressed audio files that can be easily downloaded and stored on your device. They have several advantages over other audio formats, such as:

    -
    • They take up less space on your device's memory.
    • They can be played on any media player or device that supports MP3.
    • They can be transferred or shared with others without losing quality.
    • They can be edited or customized according to your preferences.
    -

    The popularity and appeal of Amapiano music

    -


    Amapiano music is one of the most popular genres of music in South Africa and beyond. It has a unique sound that appeals to people of different ages, backgrounds, and tastes. It is also a versatile genre that can be enjoyed in various settings, such as clubs, parties, weddings, or even at home. Amapiano music is a reflection of the South African culture and spirit, and it expresses joy, resilience, and creativity.

    -

    The positive reviews and feedback for Woza!

    -

    Woza! by Jay Music has received a lot of positive reviews and feedback from fans and critics alike. The song has been praised for its catchy melody, uplifting lyrics, and energetic vibe. It has also been nominated for several awards, such as the South African Music Awards (SAMA), the African Muzik Magazine Awards (AFRIMMA), and the MTV Africa Music Awards (MAMA). The song has also been featured on various radio stations, TV shows, and online platforms, such as YouTube, Spotify, Apple Music, and Deezer.

    -

    How to download Woza Jay Music MP3?

    -

    Now that you know why you should download Woza Jay Music MP3, you might be wondering how to do it. There are many sources and platforms that offer MP3 downloads, but not all of them are reliable or legal. Here are some of the best sources and platforms for downloading MP3 files, as well as some steps and tips for downloading them safely and legally.

    -

    The best sources and platforms for downloading MP3 files

    -

    There are many websites and apps that allow you to download MP3 files for free or for a fee. However, not all of them are trustworthy or authorized. Some of them may contain viruses, malware, or spyware that can harm your device or compromise your privacy. Some of them may also violate the artist's rights or the copyright laws. Therefore, you should always use reputable and legal sources and platforms for downloading MP3 files. Here are some of them:

    -

    Bamoza.com

    -

    Bamoza.com is one of the leading websites for downloading Amapiano music and other South African genres. It offers high-quality MP3 files that are updated regularly and categorized by artists, albums, genres, and playlists. It also provides information about the songs, such as the release date, the duration, the lyrics, and the producer. You can download MP3 files from Bamoza.com for free by clicking on the download button below each song.

    -

    Fakaza.com

    -

    Fakaza.com is another popular website for downloading Amapiano music and other South African genres. It also offers high-quality MP3 files that are updated daily and categorized by artists, albums, genres, and playlists. It also provides information about the songs, such as the release date, the duration, the lyrics, and the producer. You can download MP3 files from Fakaza.com for free by clicking on the download button below each song.

    -

    Zamusic.org

    -

    Zamusic.org is a website that specializes in Amapiano music and other South African genres. It also offers high-quality MP3 files that are updated frequently and categorized by artists, albums, genres, and playlists. It also provides information about the songs, such as the release date, the duration, the lyrics, and the producer. You can download MP3 files from Zamusic.org for free by clicking on the download button below each song.

    -

    The steps and tips for downloading MP3 files safely and legally

    -

    Downloading MP3 files from reputable and legal sources and platforms is not enough to ensure your safety and legality. You also need to follow some steps and tips to avoid any problems or issues that may arise from downloading MP3 files. Here are some of them:

    -

    Check the file size and quality

    -

    Before you download any MP3 file, you should always check its size and quality. The size of an MP3 file depends on its bitrate, which is the amount of data that is encoded per second. The higher the bitrate, the larger the file size and the better the quality. However, a higher bitrate also means a longer download time and more storage space required on your device. Therefore, you should choose an MP3 file that has a reasonable size and quality according to your needs and preferences.
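    As a rule of thumb, size in bytes ≈ bitrate (bits per second) × duration (seconds) ÷ 8. For a 6-minute-13-second track (373 seconds), that works out to roughly 6 MB at 128 kbps and about 15 MB at 320 kbps. The short sketch below just turns that arithmetic into a helper; the bitrates listed are common MP3 presets, not figures taken from any particular download site.

```python
def estimated_mp3_size_mb(duration_seconds, bitrate_kbps):
    """Approximate MP3 file size in megabytes from duration and bitrate."""
    bits = bitrate_kbps * 1000 * duration_seconds  # bitrate is bits per second
    return bits / 8 / 1_000_000                    # 8 bits per byte, 10^6 bytes per MB

if __name__ == "__main__":
    duration = 6 * 60 + 13  # Woza! runs 6 minutes and 13 seconds
    for kbps in (128, 192, 320):
        print(f"{kbps} kbps -> ~{estimated_mp3_size_mb(duration, kbps):.1f} MB")
```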

    -

    Avoid clicking on ads and pop-ups

    -

    When you visit any website or app that offers MP3 downloads, you may encounter ads and pop-ups that may appear on your screen or redirect you to another page. These ads and pop-ups may be annoying or misleading, but they may also be dangerous or illegal.

    They may contain viruses, malware, or spyware that can harm your device or compromise your privacy. They may also violate the artist's rights or the copyright laws. Therefore, you should avoid clicking on ads and pop-ups as much as possible, and use an ad-blocker or a pop-up blocker to prevent them from appearing on your screen or redirecting you to another page.

    -

    Respect the artist's rights and support their work

    -

    Downloading MP3 files for free may seem convenient and cost-effective, but it may also have negative consequences for the artist and the music industry. Downloading MP3 files for free may deprive the artist of their income and recognition, and it may also discourage them from creating more music. Therefore, you should respect the artist's rights and support their work by purchasing their music legally, streaming their music on authorized platforms, or donating to their cause. You should also give credit to the artist when you share their music with others, and avoid distributing their music without their permission.

    -

    Conclusion

    -

    Woza Jay Music MP3 is a great song that you should download and enjoy. It is a catchy and upbeat Amapiano tune that celebrates life, love, and music. It is also a high-quality MP3 file that you can download from reputable and legal sources and platforms, such as Bamoza.com, Fakaza.com, or Zamusic.org. However, you should also follow some steps and tips to download MP3 files safely and legally, such as checking the file size and quality, avoiding clicking on ads and pop-ups, and respecting the artist's rights and supporting their work. By doing so, you can enjoy Woza Jay Music MP3 without any worries or regrets.

    -

    FAQs

    -

    Here are some frequently asked questions about Woza Jay Music MP3:

    -
    • Q: How long is Woza Jay Music MP3?
      A: Woza Jay Music MP3 is 6 minutes and 13 seconds long.
    • Q: How many downloads does Woza Jay Music MP3 have?
      A: Woza Jay Music MP3 has over 1 million downloads on various websites and apps.
    • Q: Is Woza Jay Music MP3 available on YouTube?
      A: Yes, Woza Jay Music MP3 is available on YouTube. You can watch the official video here: [text].
    • Q: Is Woza Jay Music MP3 suitable for children?
      A: Yes, Woza Jay Music MP3 is suitable for children. It does not contain any explicit or offensive language or content.
    • Q: What are some other songs by Jay Music that I can download?
      A: Some other songs by Jay Music that you can download are Resist, Move On, Lockdown House Party Mix, Thank You Mr DJ, Amapiano Vol. 1, Amapiano Vol. 2, Amapiano Vol. 3, Amapiano Vol. 4, Amapiano Vol. 5, Amapiano Vol. 6, Amapiano Vol. 7, Amapiano Vol. 8, Amapiano Vol. 9, Amapiano Vol. 10.

    -
    -
    \ No newline at end of file diff --git a/spaces/contluForse/HuggingGPT/assets/Bobcat T40170 Telescopic Handler Operation and Maintenance Manual Essential Information for Safe and Proper Use.md b/spaces/contluForse/HuggingGPT/assets/Bobcat T40170 Telescopic Handler Operation and Maintenance Manual Essential Information for Safe and Proper Use.md deleted file mode 100644 index 9c7c62faf9eda6a49fc720039affc8bf6225841d..0000000000000000000000000000000000000000 --- a/spaces/contluForse/HuggingGPT/assets/Bobcat T40170 Telescopic Handler Operation and Maintenance Manual Essential Information for Safe and Proper Use.md +++ /dev/null @@ -1,8 +0,0 @@ -
    -

    The Bobcat T40140, T40170, T40180 service manual instructions are necessary before operating or servicing the machine. Read and understand the Operation & Maintenance Manual, the Operator's Handbook, and the signs (decals) on the machine. Follow the warnings and instructions in the manuals when making repairs, adjustments or servicing, and check for correct function after adjustments, repairs or service. Untrained operators and failure to follow instructions can cause injury or death.

    -

    This manual is for the Bobcat T40140, T40170, T40180 Telescopic Handler. It provides the necessary servicing and adjustment procedures for the Bobcat T40140, T40170, T40180 Telescopic Handler and its component parts and systems. Refer to the Operation & Maintenance Manual and the service manual for operating instructions, repairs, starting procedure, daily checks, etc.

    -

    Bobcat T40170 Manual


    DOWNLOAD ⚹⚹⚹ https://ssurll.com/2uzx6S



    -

    If you are looking for a specific manual and cannot find it in our store, contact our customer support team with details of the required manual, and we will do our best to acquire it for you.

    -

    This manual contains original instructions, verified by the manufacturer (or their authorized representative)
    This is the original factory PDF manual. The PDF workshop manual is very clear, 100% printable, and contains high-quality images and diagrams.

    -
    -
    \ No newline at end of file diff --git a/spaces/cymic/Waifu_Diffusion_Webui/scripts/sd_upscale.py b/spaces/cymic/Waifu_Diffusion_Webui/scripts/sd_upscale.py deleted file mode 100644 index ed57153349a35c46f0084189e7f4eb639a0286f6..0000000000000000000000000000000000000000 --- a/spaces/cymic/Waifu_Diffusion_Webui/scripts/sd_upscale.py +++ /dev/null @@ -1,97 +0,0 @@ -import math - -import modules.scripts as scripts -import gradio as gr -from PIL import Image - -from modules import processing, shared, sd_samplers, images, devices -from modules.processing import Processed -from modules.shared import opts, cmd_opts, state - - -class Script(scripts.Script): - def title(self): - return "SD upscale" - - def show(self, is_img2img): - return is_img2img - - def ui(self, is_img2img): - info = gr.HTML("

    Will upscale the image to twice the dimensions; use width and height sliders to set tile size

    ") - overlap = gr.Slider(minimum=0, maximum=256, step=16, label='Tile overlap', value=64, visible=False) - upscaler_index = gr.Radio(label='Upscaler', choices=[x.name for x in shared.sd_upscalers], value=shared.sd_upscalers[0].name, type="index", visible=False) - - return [info, overlap, upscaler_index] - - def run(self, p, _, overlap, upscaler_index): - processing.fix_seed(p) - upscaler = shared.sd_upscalers[upscaler_index] - - p.extra_generation_params["SD upscale overlap"] = overlap - p.extra_generation_params["SD upscale upscaler"] = upscaler.name - - initial_info = None - seed = p.seed - - init_img = p.init_images[0] - - if(upscaler.name != "None"): - img = upscaler.scaler.upscale(init_img, 2, upscaler.data_path) - else: - img = init_img - - devices.torch_gc() - - grid = images.split_grid(img, tile_w=p.width, tile_h=p.height, overlap=overlap) - - batch_size = p.batch_size - upscale_count = p.n_iter - p.n_iter = 1 - p.do_not_save_grid = True - p.do_not_save_samples = True - - work = [] - - for y, h, row in grid.tiles: - for tiledata in row: - work.append(tiledata[2]) - - batch_count = math.ceil(len(work) / batch_size) - state.job_count = batch_count * upscale_count - - print(f"SD upscaling will process a total of {len(work)} images tiled as {len(grid.tiles[0][2])}x{len(grid.tiles)} per upscale in a total of {state.job_count} batches.") - - result_images = [] - for n in range(upscale_count): - start_seed = seed + n - p.seed = start_seed - - work_results = [] - for i in range(batch_count): - p.batch_size = batch_size - p.init_images = work[i*batch_size:(i+1)*batch_size] - - state.job = f"Batch {i + 1 + n * batch_count} out of {state.job_count}" - processed = processing.process_images(p) - - if initial_info is None: - initial_info = processed.info - - p.seed = processed.seed + 1 - work_results += processed.images - - image_index = 0 - for y, h, row in grid.tiles: - for tiledata in row: - tiledata[2] = work_results[image_index] if image_index < len(work_results) else Image.new("RGB", (p.width, p.height)) - image_index += 1 - - combined_image = images.combine_grid(grid) - result_images.append(combined_image) - - if opts.samples_save: - images.save_image(combined_image, p.outpath_samples, "", start_seed, p.prompt, opts.samples_format, info=initial_info, p=p) - - processed = Processed(p, result_images, seed, initial_info) - - return processed diff --git a/spaces/davidfischer/ea-classifier/app.py b/spaces/davidfischer/ea-classifier/app.py deleted file mode 100644 index 8c18d657e59f5e1a0090e8afeb8f90075f96c6b9..0000000000000000000000000000000000000000 --- a/spaces/davidfischer/ea-classifier/app.py +++ /dev/null @@ -1,30 +0,0 @@ -from pprint import pformat - -import spacy -import gradio as gr -from textacy import preprocessing - -import en_ethicalads_topics - - -ea_nlp = en_ethicalads_topics.load() - -preprocessor = preprocessing.make_pipeline( - preprocessing.normalize.unicode, - preprocessing.remove.punctuation, - preprocessing.normalize.whitespace, -) - - -def classify(input_text): - processed_input = preprocessor(input_text) - ea_output = ea_nlp(processed_input) - return pformat(sorted(ea_output.cats.items(), key=lambda x: x[1], reverse=True)) - - -iface = gr.Interface( - fn=classify, - inputs=gr.Textbox(lines=5, placeholder="Input text to detect the topic classification. 
Works best on inputs of 100+ words."), - outputs="text", -) -iface.launch() diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/charset_normalizer/cli/__init__.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/charset_normalizer/cli/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/conftest.py b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/conftest.py deleted file mode 100644 index 6874a42c4895c3c7b973dc5d63fd4488a4e60b44..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/fsspec/conftest.py +++ /dev/null @@ -1,55 +0,0 @@ -import os -import shutil -import subprocess -import sys -import time - -import pytest - -import fsspec -from fsspec.implementations.cached import CachingFileSystem - - -@pytest.fixture() -def m(): - """ - Fixture providing a memory filesystem. - """ - m = fsspec.filesystem("memory") - m.store.clear() - m.pseudo_dirs.clear() - m.pseudo_dirs.append("") - try: - yield m - finally: - m.store.clear() - m.pseudo_dirs.clear() - m.pseudo_dirs.append("") - - -@pytest.fixture -def ftp_writable(tmpdir): - """ - Fixture providing a writable FTP filesystem. - """ - pytest.importorskip("pyftpdlib") - from fsspec.implementations.ftp import FTPFileSystem - - FTPFileSystem.clear_instance_cache() # remove lingering connections - CachingFileSystem.clear_instance_cache() - d = str(tmpdir) - with open(os.path.join(d, "out"), "wb") as f: - f.write(b"hello" * 10000) - P = subprocess.Popen( - [sys.executable, "-m", "pyftpdlib", "-d", d, "-u", "user", "-P", "pass", "-w"] - ) - try: - time.sleep(1) - yield "localhost", 2121, "user", "pass" - finally: - P.terminate() - P.wait() - try: - shutil.rmtree(tmpdir) - except Exception: - pass diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/StaticTabs-26fecbee.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/StaticTabs-26fecbee.js deleted file mode 100644 index 1462d2b7c0184842f29550dcc972d4a8c1ba263f..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/StaticTabs-26fecbee.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as H,e as J,s as M,I as C,a9 as O,m as T,o as S,g,Y as I,h as v,j as k,J as R,ay as V,ab as P,ac as U,ad as Y,w as y,u as w,k as j,a7 as A,a1 as E,C as z,U as W,P as N,aA as B,t as K,p as X,x as L,N as Z,O as x,F as $,G as ee,T as te,H as le,E as D}from"./index-39fce9e2.js";function F(l,e,s){const t=l.slice();return t[14]=e[s],t[16]=s,t}function se(l){let e,s=l[14].name+"",t,_,o,n;function i(){return l[12](l[14],l[16])}return{c(){e=T("button"),t=K(s),_=S(),g(e,"class","svelte-kqij2n")},m(u,r){v(u,e,r),k(e,t),k(e,_),o||(n=X(e,"click",i),o=!0)},p(u,r){l=u,r&8&&s!==(s=l[14].name+"")&&L(t,s)},d(u){u&&j(e),o=!1,n()}}}function ne(l){let e,s=l[14].name+"",t,_;return{c(){e=T("button"),t=K(s),_=S(),g(e,"class","selected svelte-kqij2n")},m(o,n){v(o,e,n),k(e,t),k(e,_)},p(o,n){n&8&&s!==(s=o[14].name+"")&&L(t,s)},d(o){o&&j(e)}}}function G(l,e){let s,t;function _(i,u){return i[14].id===i[4]?ne:se}let 
o=_(e),n=o(e);return{key:l,first:null,c(){s=N(),n.c(),t=N(),this.first=s},m(i,u){v(i,s,u),n.m(i,u),v(i,t,u)},p(i,u){e=i,o===(o=_(e))&&n?n.p(e,u):(n.d(1),n=o(e),n&&(n.c(),n.m(t.parentNode,t)))},d(i){i&&(j(s),j(t)),n.d(i)}}}function ie(l){let e,s,t=[],_=new Map,o,n,i,u=C(l[3]);const r=a=>a[14].id;for(let a=0;as(4,_=f));const c=A(0);E(l,c,f=>s(13,t=f));const b=z();W(ae,{register_tab:f=>(d.push({name:f.name,id:f.id}),a.update(h=>h??f.id),s(3,d),d.length-1),unregister_tab:f=>{const h=d.findIndex(p=>p.id===f.id);d.splice(h,1),a.update(p=>p===f.id?d[h]?.id||d[d.length-1]?.id:p)},selected_tab:a,selected_tab_index:c});function q(f){s(9,m=f),B(a,_=f,_),B(c,t=d.findIndex(h=>h.id===f),t),b("change")}const Q=(f,h)=>{q(f.id),b("select",{value:f.name,index:h})};return l.$$set=f=>{"visible"in f&&s(0,i=f.visible),"elem_id"in f&&s(1,u=f.elem_id),"elem_classes"in f&&s(2,r=f.elem_classes),"selected"in f&&s(9,m=f.selected),"$$scope"in f&&s(10,n=f.$$scope)},l.$$.update=()=>{l.$$.dirty&512&&m!==null&&q(m)},[i,u,r,d,_,a,c,b,q,m,n,o,Q]}class _e extends H{constructor(e){super(),J(this,e,ce,ie,M,{visible:0,elem_id:1,elem_classes:2,selected:9})}}function ue(l){let e;const s=l[4].default,t=O(s,l,l[8],null);return{c(){t&&t.c()},m(_,o){t&&t.m(_,o),e=!0},p(_,o){t&&t.p&&(!e||o&256)&&P(t,s,_,_[8],e?Y(s,_[8],o,null):U(_[8]),null)},i(_){e||(y(t,_),e=!0)},o(_){w(t,_),e=!1},d(_){t&&t.d(_)}}}function oe(l){let e,s,t;function _(n){l[5](n)}let o={visible:l[1],elem_id:l[2],elem_classes:l[3],$$slots:{default:[ue]},$$scope:{ctx:l}};return l[0]!==void 0&&(o.selected=l[0]),e=new _e({props:o}),Z.push(()=>x(e,"selected",_)),e.$on("change",l[6]),e.$on("select",l[7]),{c(){$(e.$$.fragment)},m(n,i){ee(e,n,i),t=!0},p(n,[i]){const u={};i&2&&(u.visible=n[1]),i&4&&(u.elem_id=n[2]),i&8&&(u.elem_classes=n[3]),i&256&&(u.$$scope={dirty:i,ctx:n}),!s&&i&1&&(s=!0,u.selected=n[0],te(()=>s=!1)),e.$set(u)},i(n){t||(y(e.$$.fragment,n),t=!0)},o(n){w(e.$$.fragment,n),t=!1},d(n){le(e,n)}}}function fe(l,e,s){let{$$slots:t={},$$scope:_}=e;const o=z();let{visible:n=!0}=e,{elem_id:i=""}=e,{elem_classes:u=[]}=e,{selected:r}=e;function m(c){r=c,s(0,r)}function d(c){D.call(this,l,c)}function a(c){D.call(this,l,c)}return l.$$set=c=>{"visible"in c&&s(1,n=c.visible),"elem_id"in c&&s(2,i=c.elem_id),"elem_classes"in c&&s(3,u=c.elem_classes),"selected"in c&&s(0,r=c.selected),"$$scope"in c&&s(8,_=c.$$scope)},l.$$.update=()=>{l.$$.dirty&1&&o("prop_change",{selected:r})},[r,n,i,u,t,m,d,a,_]}class de extends H{constructor(e){super(),J(this,e,fe,oe,M,{visible:1,elem_id:2,elem_classes:3,selected:0})}}const me=de;export{me as S,ae as T}; -//# sourceMappingURL=StaticTabs-26fecbee.js.map diff --git a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-ea4a5a13.js b/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-ea4a5a13.js deleted file mode 100644 index 0373e13c562e23673a8db8c876ae2fc448eb5f32..0000000000000000000000000000000000000000 --- a/spaces/dcarpintero/nlp-summarizer-pegasus/.venv/lib/python3.9/site-packages/gradio/templates/frontend/assets/index-ea4a5a13.js +++ /dev/null @@ -1,2 +0,0 @@ -import{S as ie,e as ne,s as se,m as p,F as q,o as ue,g as d,Y as de,h as z,G as y,j as Le,r as x,u as k,v as $,w as v,k as H,H as F,C as Ye,am as qe,t as ye,x as Fe,a4 as we,E as w,N as j,ap as R,aj as Ge,p as T,b as Ie,B as ae,P as ge,n as le,y as Oe,z as Pe,ak as b,O as U,T as V,V as Ee,ae as Ne,Q as Be,R as Ce}from"./index-39fce9e2.js";import{f as Qe,B as 
Se}from"./Button-79f6e3bf.js";import{B as Re}from"./BlockTitle-fa702e63.js";import{C as Ue,a as Ve}from"./Copy-77b3f70c.js";function Ae(t){let e;return{c(){e=ye(t[3])},m(l,a){z(l,e,a)},p(l,a){a[0]&8&&Fe(e,l[3])},d(l){l&&H(e)}}}function Je(t){let e,l,a,s,n,_,g,u,o=t[6]&&t[10]&&ve(t);return{c(){o&&o.c(),e=ue(),l=p("textarea"),d(l,"data-testid","textbox"),d(l,"class","scroll-hide svelte-1f354aw"),d(l,"dir",a=t[11]?"rtl":"ltr"),d(l,"placeholder",t[2]),d(l,"rows",t[1]),l.disabled=t[5],l.autofocus=t[12],d(l,"style",s=t[13]?"text-align: "+t[13]:"")},m(i,h){o&&o.m(i,h),z(i,e,h),z(i,l,h),R(l,t[0]),t[36](l),_=!0,t[12]&&l.focus(),g||(u=[Ge(n=t[19].call(null,l,t[0])),T(l,"input",t[35]),T(l,"keypress",t[18]),T(l,"blur",t[27]),T(l,"select",t[17]),T(l,"focus",t[28])],g=!0)},p(i,h){i[6]&&i[10]?o?(o.p(i,h),h[0]&1088&&v(o,1)):(o=ve(i),o.c(),v(o,1),o.m(e.parentNode,e)):o&&(x(),k(o,1,1,()=>{o=null}),$()),(!_||h[0]&2048&&a!==(a=i[11]?"rtl":"ltr"))&&d(l,"dir",a),(!_||h[0]&4)&&d(l,"placeholder",i[2]),(!_||h[0]&2)&&d(l,"rows",i[1]),(!_||h[0]&32)&&(l.disabled=i[5]),(!_||h[0]&4096)&&(l.autofocus=i[12]),(!_||h[0]&8192&&s!==(s=i[13]?"text-align: "+i[13]:""))&&d(l,"style",s),n&&Ie(n.update)&&h[0]&1&&n.update.call(null,i[0]),h[0]&1&&R(l,i[0])},i(i){_||(v(o),_=!0)},o(i){k(o),_=!1},d(i){i&&(H(e),H(l)),o&&o.d(i),t[36](null),g=!1,ae(u)}}}function Me(t){let e;function l(n,_){if(n[9]==="text")return xe;if(n[9]==="password")return pe;if(n[9]==="email")return Ze}let a=l(t),s=a&&a(t);return{c(){s&&s.c(),e=ge()},m(n,_){s&&s.m(n,_),z(n,e,_)},p(n,_){a===(a=l(n))&&s?s.p(n,_):(s&&s.d(1),s=a&&a(n),s&&(s.c(),s.m(e.parentNode,e)))},i:le,o:le,d(n){n&&H(e),s&&s.d(n)}}}function ve(t){let e,l,a,s;const n=[Xe,We],_=[];function g(u,o){return u[15]?0:1}return e=g(t),l=_[e]=n[e](t),{c(){l.c(),a=ge()},m(u,o){_[e].m(u,o),z(u,a,o),s=!0},p(u,o){let i=e;e=g(u),e===i?_[e].p(u,o):(x(),k(_[i],1,1,()=>{_[i]=null}),$(),l=_[e],l?l.p(u,o):(l=_[e]=n[e](u),l.c()),v(l,1),l.m(a.parentNode,a))},i(u){s||(v(l),s=!0)},o(u){k(l),s=!1},d(u){u&&H(a),_[e].d(u)}}}function We(t){let e,l,a,s,n;return l=new Ue({}),{c(){e=p("button"),q(l.$$.fragment),d(e,"class","copy-text svelte-1f354aw")},m(_,g){z(_,e,g),y(l,e,null),a=!0,s||(n=T(e,"click",t[16]),s=!0)},p:le,i(_){a||(v(l.$$.fragment,_),a=!0)},o(_){k(l.$$.fragment,_),a=!1},d(_){_&&H(e),F(l),s=!1,n()}}}function Xe(t){let e,l,a,s;return l=new Ve({}),{c(){e=p("button"),q(l.$$.fragment),d(e,"class","svelte-1f354aw")},m(n,_){z(n,e,_),y(l,e,null),s=!0},p:le,i(n){s||(v(l.$$.fragment,n),n&&(a||Oe(()=>{a=Pe(e,Qe,{duration:300}),a.start()})),s=!0)},o(n){k(l.$$.fragment,n),s=!1},d(n){n&&H(e),F(l)}}}function Ze(t){let e,l,a;return{c(){e=p("input"),d(e,"data-testid","textbox"),d(e,"type","email"),d(e,"class","scroll-hide svelte-1f354aw"),d(e,"placeholder",t[2]),e.disabled=t[5],e.autofocus=t[12],d(e,"autocomplete","email")},m(s,n){z(s,e,n),R(e,t[0]),t[34](e),t[12]&&e.focus(),l||(a=[T(e,"input",t[33]),T(e,"keypress",t[18]),T(e,"blur",t[25]),T(e,"select",t[17]),T(e,"focus",t[26])],l=!0)},p(s,n){n[0]&4&&d(e,"placeholder",s[2]),n[0]&32&&(e.disabled=s[5]),n[0]&4096&&(e.autofocus=s[12]),n[0]&1&&e.value!==s[0]&&R(e,s[0])},d(s){s&&H(e),t[34](null),l=!1,ae(a)}}}function pe(t){let e,l,a;return{c(){e=p("input"),d(e,"data-testid","password"),d(e,"type","password"),d(e,"class","scroll-hide 
svelte-1f354aw"),d(e,"placeholder",t[2]),e.disabled=t[5],e.autofocus=t[12],d(e,"autocomplete","")},m(s,n){z(s,e,n),R(e,t[0]),t[32](e),t[12]&&e.focus(),l||(a=[T(e,"input",t[31]),T(e,"keypress",t[18]),T(e,"blur",t[23]),T(e,"select",t[17]),T(e,"focus",t[24])],l=!0)},p(s,n){n[0]&4&&d(e,"placeholder",s[2]),n[0]&32&&(e.disabled=s[5]),n[0]&4096&&(e.autofocus=s[12]),n[0]&1&&e.value!==s[0]&&R(e,s[0])},d(s){s&&H(e),t[32](null),l=!1,ae(a)}}}function xe(t){let e,l,a,s,n;return{c(){e=p("input"),d(e,"data-testid","textbox"),d(e,"type","text"),d(e,"class","scroll-hide svelte-1f354aw"),d(e,"dir",l=t[11]?"rtl":"ltr"),d(e,"placeholder",t[2]),e.disabled=t[5],e.autofocus=t[12],d(e,"style",a=t[13]?"text-align: "+t[13]:"")},m(_,g){z(_,e,g),R(e,t[0]),t[30](e),t[12]&&e.focus(),s||(n=[T(e,"input",t[29]),T(e,"keypress",t[18]),T(e,"blur",t[21]),T(e,"select",t[17]),T(e,"focus",t[22])],s=!0)},p(_,g){g[0]&2048&&l!==(l=_[11]?"rtl":"ltr")&&d(e,"dir",l),g[0]&4&&d(e,"placeholder",_[2]),g[0]&32&&(e.disabled=_[5]),g[0]&4096&&(e.autofocus=_[12]),g[0]&8192&&a!==(a=_[13]?"text-align: "+_[13]:"")&&d(e,"style",a),g[0]&1&&e.value!==_[0]&&R(e,_[0])},d(_){_&&H(e),t[30](null),s=!1,ae(n)}}}function $e(t){let e,l,a,s,n,_;l=new Re({props:{show_label:t[6],info:t[4],$$slots:{default:[Ae]},$$scope:{ctx:t}}});const g=[Me,Je],u=[];function o(i,h){return i[1]===1&&i[8]===1?0:1}return s=o(t),n=u[s]=g[s](t),{c(){e=p("label"),q(l.$$.fragment),a=ue(),n.c(),d(e,"class","svelte-1f354aw"),de(e,"container",t[7])},m(i,h){z(i,e,h),y(l,e,null),Le(e,a),u[s].m(e,null),_=!0},p(i,h){const m={};h[0]&64&&(m.show_label=i[6]),h[0]&16&&(m.info=i[4]),h[0]&8|h[1]&2048&&(m.$$scope={dirty:h,ctx:i}),l.$set(m);let C=s;s=o(i),s===C?u[s].p(i,h):(x(),k(u[C],1,1,()=>{u[C]=null}),$(),n=u[s],n?n.p(i,h):(n=u[s]=g[s](i),n.c()),v(n,1),n.m(e,null)),(!_||h[0]&128)&&de(e,"container",i[7])},i(i){_||(v(l.$$.fragment,i),v(n),_=!0)},o(i){k(l.$$.fragment,i),k(n),_=!1},d(i){i&&H(e),F(l),u[s].d()}}}function et(t,e,l){let{value:a=""}=e,{value_is_output:s=!1}=e,{lines:n=1}=e,{placeholder:_="Type here..."}=e,{label:g}=e,{info:u=void 0}=e,{disabled:o=!1}=e,{show_label:i=!0}=e,{container:h=!0}=e,{max_lines:m}=e,{type:C="text"}=e,{show_copy_button:D=!1}=e,{rtl:K=!1}=e,{autofocus:L=!1}=e,{text_align:Y=void 0}=e,E,N=!1,B;const S=Ye();function G(){S("change",a),s||S("input")}qe(()=>{l(20,s=!1)});async function P(){"clipboard"in navigator&&(await navigator.clipboard.writeText(a),A())}function A(){l(15,N=!0),B&&clearTimeout(B),B=setTimeout(()=>{l(15,N=!1)},1e3)}function J(r){const O=r.target,ee=O.value,Q=[O.selectionStart,O.selectionEnd];S("select",{value:ee.substring(...Q),index:Q})}async function M(r){await we(),(r.key==="Enter"&&r.shiftKey&&n>1||r.key==="Enter"&&!r.shiftKey&&n===1&&m>=1)&&(r.preventDefault(),S("submit"))}async function I(r){if(await we(),n===m||!h)return;let O=m===void 0?!1:m===void 0?21*11:21*(m+1),ee=21*(n+1);const Q=r.target;Q.style.height="1px";let te;O&&Q.scrollHeight>O?te=O:Q.scrollHeightr.removeEventListener("input",I)}}function X(r){w.call(this,t,r)}function Z(r){w.call(this,t,r)}function f(r){w.call(this,t,r)}function _e(r){w.call(this,t,r)}function fe(r){w.call(this,t,r)}function oe(r){w.call(this,t,r)}function he(r){w.call(this,t,r)}function ce(r){w.call(this,t,r)}function re(){a=this.value,l(0,a)}function be(r){j[r?"unshift":"push"](()=>{E=r,l(14,E)})}function me(){a=this.value,l(0,a)}function c(r){j[r?"unshift":"push"](()=>{E=r,l(14,E)})}function He(){a=this.value,l(0,a)}function je(r){j[r?"unshift":"push"](()=>{E=r,l(14,E)})}function 
De(){a=this.value,l(0,a)}function Ke(r){j[r?"unshift":"push"](()=>{E=r,l(14,E)})}return t.$$set=r=>{"value"in r&&l(0,a=r.value),"value_is_output"in r&&l(20,s=r.value_is_output),"lines"in r&&l(1,n=r.lines),"placeholder"in r&&l(2,_=r.placeholder),"label"in r&&l(3,g=r.label),"info"in r&&l(4,u=r.info),"disabled"in r&&l(5,o=r.disabled),"show_label"in r&&l(6,i=r.show_label),"container"in r&&l(7,h=r.container),"max_lines"in r&&l(8,m=r.max_lines),"type"in r&&l(9,C=r.type),"show_copy_button"in r&&l(10,D=r.show_copy_button),"rtl"in r&&l(11,K=r.rtl),"autofocus"in r&&l(12,L=r.autofocus),"text_align"in r&&l(13,Y=r.text_align)},t.$$.update=()=>{t.$$.dirty[0]&1&&a===null&&l(0,a=""),t.$$.dirty[0]&16643&&E&&n!==m&&I({target:E}),t.$$.dirty[0]&1&&G()},[a,n,_,g,u,o,i,h,m,C,D,K,L,Y,E,N,P,J,M,W,s,X,Z,f,_e,fe,oe,he,ce,re,be,me,c,He,je,De,Ke]}let ze=class extends ie{constructor(e){super(),ne(this,e,et,$e,se,{value:0,value_is_output:20,lines:1,placeholder:2,label:3,info:4,disabled:5,show_label:6,container:7,max_lines:8,type:9,show_copy_button:10,rtl:11,autofocus:12,text_align:13},null,[-1,-1])}};function ke(t){let e,l;const a=[t[16]];let s={};for(let n=0;nU(l,"value",g)),j.push(()=>U(l,"value_is_output",u)),l.$on("change",t[22]),l.$on("input",t[23]),l.$on("submit",t[24]),l.$on("blur",t[25]),l.$on("select",t[26]),l.$on("focus",t[27]),{c(){_&&_.c(),e=ue(),q(l.$$.fragment)},m(i,h){_&&_.m(i,h),z(i,e,h),y(l,i,h),n=!0},p(i,h){i[16]?_?(_.p(i,h),h&65536&&v(_,1)):(_=ke(i),_.c(),v(_,1),_.m(e.parentNode,e)):_&&(x(),k(_,1,1,()=>{_=null}),$());const m={};h&4&&(m.label=i[2]),h&8&&(m.info=i[3]),h&512&&(m.show_label=i[9]),h&128&&(m.lines=i[7]),h&2048&&(m.type=i[11]),h&131072&&(m.rtl=i[17]),h&262144&&(m.text_align=i[18]),h&1152&&(m.max_lines=i[10]?i[10]:i[7]+1),h&256&&(m.placeholder=i[8]),h&32768&&(m.show_copy_button=i[15]),h&524288&&(m.autofocus=i[19]),h&4096&&(m.container=i[12]),!a&&h&1&&(a=!0,m.value=i[0],V(()=>a=!1)),!s&&h&2&&(s=!0,m.value_is_output=i[1],V(()=>s=!1)),l.$set(m)},i(i){n||(v(_),v(l.$$.fragment,i),n=!0)},o(i){k(_),k(l.$$.fragment,i),n=!1},d(i){i&&H(e),_&&_.d(i),F(l,i)}}}function lt(t){let e,l;return e=new Se({props:{visible:t[6],elem_id:t[4],elem_classes:t[5],scale:t[13],min_width:t[14],allow_overflow:!1,padding:t[12],$$slots:{default:[tt]},$$scope:{ctx:t}}}),{c(){q(e.$$.fragment)},m(a,s){y(e,a,s),l=!0},p(a,[s]){const n={};s&64&&(n.visible=a[6]),s&16&&(n.elem_id=a[4]),s&32&&(n.elem_classes=a[5]),s&8192&&(n.scale=a[13]),s&16384&&(n.min_width=a[14]),s&4096&&(n.padding=a[12]),s&269459343&&(n.$$scope={dirty:s,ctx:a}),e.$set(n)},i(a){l||(v(e.$$.fragment,a),l=!0)},o(a){k(e.$$.fragment,a),l=!1},d(a){F(e,a)}}}function it(t,e,l){let{label:a="Textbox"}=e,{info:s=void 0}=e,{elem_id:n=""}=e,{elem_classes:_=[]}=e,{visible:g=!0}=e,{value:u=""}=e,{lines:o}=e,{placeholder:i=""}=e,{show_label:h}=e,{max_lines:m}=e,{type:C="text"}=e,{container:D=!0}=e,{scale:K=null}=e,{min_width:L=void 0}=e,{show_copy_button:Y=!1}=e,{loading_status:E=void 0}=e,{value_is_output:N=!1}=e,{rtl:B=!1}=e,{text_align:S=void 0}=e,{autofocus:G=!1}=e;function P(f){u=f,l(0,u)}function A(f){N=f,l(1,N)}function J(f){w.call(this,t,f)}function M(f){w.call(this,t,f)}function I(f){w.call(this,t,f)}function W(f){w.call(this,t,f)}function X(f){w.call(this,t,f)}function Z(f){w.call(this,t,f)}return t.$$set=f=>{"label"in f&&l(2,a=f.label),"info"in f&&l(3,s=f.info),"elem_id"in f&&l(4,n=f.elem_id),"elem_classes"in f&&l(5,_=f.elem_classes),"visible"in f&&l(6,g=f.visible),"value"in f&&l(0,u=f.value),"lines"in f&&l(7,o=f.lines),"placeholder"in 
f&&l(8,i=f.placeholder),"show_label"in f&&l(9,h=f.show_label),"max_lines"in f&&l(10,m=f.max_lines),"type"in f&&l(11,C=f.type),"container"in f&&l(12,D=f.container),"scale"in f&&l(13,K=f.scale),"min_width"in f&&l(14,L=f.min_width),"show_copy_button"in f&&l(15,Y=f.show_copy_button),"loading_status"in f&&l(16,E=f.loading_status),"value_is_output"in f&&l(1,N=f.value_is_output),"rtl"in f&&l(17,B=f.rtl),"text_align"in f&&l(18,S=f.text_align),"autofocus"in f&&l(19,G=f.autofocus)},[u,N,a,s,n,_,g,o,i,h,m,C,D,K,L,Y,E,B,S,G,P,A,J,M,I,W,X,Z]}class nt extends ie{constructor(e){super(),ne(this,e,it,lt,se,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,lines:7,placeholder:8,show_label:9,max_lines:10,type:11,container:12,scale:13,min_width:14,show_copy_button:15,loading_status:16,value_is_output:1,rtl:17,text_align:18,autofocus:19})}get label(){return this.$$.ctx[2]}set label(e){this.$$set({label:e}),b()}get info(){return this.$$.ctx[3]}set info(e){this.$$set({info:e}),b()}get elem_id(){return this.$$.ctx[4]}set elem_id(e){this.$$set({elem_id:e}),b()}get elem_classes(){return this.$$.ctx[5]}set elem_classes(e){this.$$set({elem_classes:e}),b()}get visible(){return this.$$.ctx[6]}set visible(e){this.$$set({visible:e}),b()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),b()}get lines(){return this.$$.ctx[7]}set lines(e){this.$$set({lines:e}),b()}get placeholder(){return this.$$.ctx[8]}set placeholder(e){this.$$set({placeholder:e}),b()}get show_label(){return this.$$.ctx[9]}set show_label(e){this.$$set({show_label:e}),b()}get max_lines(){return this.$$.ctx[10]}set max_lines(e){this.$$set({max_lines:e}),b()}get type(){return this.$$.ctx[11]}set type(e){this.$$set({type:e}),b()}get container(){return this.$$.ctx[12]}set container(e){this.$$set({container:e}),b()}get scale(){return this.$$.ctx[13]}set scale(e){this.$$set({scale:e}),b()}get min_width(){return this.$$.ctx[14]}set min_width(e){this.$$set({min_width:e}),b()}get show_copy_button(){return this.$$.ctx[15]}set show_copy_button(e){this.$$set({show_copy_button:e}),b()}get loading_status(){return this.$$.ctx[16]}set loading_status(e){this.$$set({loading_status:e}),b()}get value_is_output(){return this.$$.ctx[1]}set value_is_output(e){this.$$set({value_is_output:e}),b()}get rtl(){return this.$$.ctx[17]}set rtl(e){this.$$set({rtl:e}),b()}get text_align(){return this.$$.ctx[18]}set text_align(e){this.$$set({text_align:e}),b()}get autofocus(){return this.$$.ctx[19]}set autofocus(e){this.$$set({autofocus:e}),b()}}function Te(t){let e,l;const a=[t[16]];let s={};for(let n=0;nU(l,"value",g)),j.push(()=>U(l,"value_is_output",u)),l.$on("change",t[22]),l.$on("input",t[23]),l.$on("submit",t[24]),l.$on("blur",t[25]),l.$on("select",t[26]),l.$on("focus",t[27]),{c(){_&&_.c(),e=ue(),q(l.$$.fragment)},m(i,h){_&&_.m(i,h),z(i,e,h),y(l,i,h),n=!0},p(i,h){i[16]?_?(_.p(i,h),h&65536&&v(_,1)):(_=Te(i),_.c(),v(_,1),_.m(e.parentNode,e)):_&&(x(),k(_,1,1,()=>{_=null}),$());const m={};h&4&&(m.label=i[2]),h&8&&(m.info=i[3]),h&512&&(m.show_label=i[9]),h&128&&(m.lines=i[7]),h&2048&&(m.type=i[11]),h&131072&&(m.rtl=i[17]),h&262144&&(m.text_align=i[18]),h&1152&&(m.max_lines=i[10]?i[10]:i[7]+1),h&256&&(m.placeholder=i[8]),h&32768&&(m.show_copy_button=i[15]),h&524288&&(m.autofocus=i[19]),h&4096&&(m.container=i[12]),!a&&h&1&&(a=!0,m.value=i[0],V(()=>a=!1)),!s&&h&2&&(s=!0,m.value_is_output=i[1],V(()=>s=!1)),l.$set(m)},i(i){n||(v(_),v(l.$$.fragment,i),n=!0)},o(i){k(_),k(l.$$.fragment,i),n=!1},d(i){i&&H(e),_&&_.d(i),F(l,i)}}}function ut(t){let e,l;return e=new 
Se({props:{visible:t[6],elem_id:t[4],elem_classes:t[5],scale:t[13],min_width:t[14],allow_overflow:!1,padding:t[12],$$slots:{default:[st]},$$scope:{ctx:t}}}),{c(){q(e.$$.fragment)},m(a,s){y(e,a,s),l=!0},p(a,[s]){const n={};s&64&&(n.visible=a[6]),s&16&&(n.elem_id=a[4]),s&32&&(n.elem_classes=a[5]),s&8192&&(n.scale=a[13]),s&16384&&(n.min_width=a[14]),s&4096&&(n.padding=a[12]),s&269459343&&(n.$$scope={dirty:s,ctx:a}),e.$set(n)},i(a){l||(v(e.$$.fragment,a),l=!0)},o(a){k(e.$$.fragment,a),l=!1},d(a){F(e,a)}}}function at(t,e,l){let{label:a="Textbox"}=e,{info:s=void 0}=e,{elem_id:n=""}=e,{elem_classes:_=[]}=e,{visible:g=!0}=e,{value:u=""}=e,{lines:o}=e,{placeholder:i=""}=e,{show_label:h}=e,{max_lines:m}=e,{type:C="text"}=e,{container:D=!0}=e,{scale:K=null}=e,{min_width:L=void 0}=e,{show_copy_button:Y=!1}=e,{loading_status:E=void 0}=e,{value_is_output:N=!1}=e,{rtl:B=!1}=e,{text_align:S=void 0}=e,{autofocus:G=!1}=e;function P(f){u=f,l(0,u)}function A(f){N=f,l(1,N)}function J(f){w.call(this,t,f)}function M(f){w.call(this,t,f)}function I(f){w.call(this,t,f)}function W(f){w.call(this,t,f)}function X(f){w.call(this,t,f)}function Z(f){w.call(this,t,f)}return t.$$set=f=>{"label"in f&&l(2,a=f.label),"info"in f&&l(3,s=f.info),"elem_id"in f&&l(4,n=f.elem_id),"elem_classes"in f&&l(5,_=f.elem_classes),"visible"in f&&l(6,g=f.visible),"value"in f&&l(0,u=f.value),"lines"in f&&l(7,o=f.lines),"placeholder"in f&&l(8,i=f.placeholder),"show_label"in f&&l(9,h=f.show_label),"max_lines"in f&&l(10,m=f.max_lines),"type"in f&&l(11,C=f.type),"container"in f&&l(12,D=f.container),"scale"in f&&l(13,K=f.scale),"min_width"in f&&l(14,L=f.min_width),"show_copy_button"in f&&l(15,Y=f.show_copy_button),"loading_status"in f&&l(16,E=f.loading_status),"value_is_output"in f&&l(1,N=f.value_is_output),"rtl"in f&&l(17,B=f.rtl),"text_align"in f&&l(18,S=f.text_align),"autofocus"in f&&l(19,G=f.autofocus)},[u,N,a,s,n,_,g,o,i,h,m,C,D,K,L,Y,E,B,S,G,P,A,J,M,I,W,X,Z]}class _t extends ie{constructor(e){super(),ne(this,e,at,ut,se,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,lines:7,placeholder:8,show_label:9,max_lines:10,type:11,container:12,scale:13,min_width:14,show_copy_button:15,loading_status:16,value_is_output:1,rtl:17,text_align:18,autofocus:19})}get label(){return this.$$.ctx[2]}set label(e){this.$$set({label:e}),b()}get info(){return this.$$.ctx[3]}set info(e){this.$$set({info:e}),b()}get elem_id(){return this.$$.ctx[4]}set elem_id(e){this.$$set({elem_id:e}),b()}get elem_classes(){return this.$$.ctx[5]}set elem_classes(e){this.$$set({elem_classes:e}),b()}get visible(){return this.$$.ctx[6]}set visible(e){this.$$set({visible:e}),b()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),b()}get lines(){return this.$$.ctx[7]}set lines(e){this.$$set({lines:e}),b()}get placeholder(){return this.$$.ctx[8]}set placeholder(e){this.$$set({placeholder:e}),b()}get show_label(){return this.$$.ctx[9]}set show_label(e){this.$$set({show_label:e}),b()}get max_lines(){return this.$$.ctx[10]}set max_lines(e){this.$$set({max_lines:e}),b()}get type(){return this.$$.ctx[11]}set type(e){this.$$set({type:e}),b()}get container(){return this.$$.ctx[12]}set container(e){this.$$set({container:e}),b()}get scale(){return this.$$.ctx[13]}set scale(e){this.$$set({scale:e}),b()}get min_width(){return this.$$.ctx[14]}set min_width(e){this.$$set({min_width:e}),b()}get show_copy_button(){return this.$$.ctx[15]}set show_copy_button(e){this.$$set({show_copy_button:e}),b()}get loading_status(){return this.$$.ctx[16]}set 
loading_status(e){this.$$set({loading_status:e}),b()}get value_is_output(){return this.$$.ctx[1]}set value_is_output(e){this.$$set({value_is_output:e}),b()}get rtl(){return this.$$.ctx[17]}set rtl(e){this.$$set({rtl:e}),b()}get text_align(){return this.$$.ctx[18]}set text_align(e){this.$$set({text_align:e}),b()}get autofocus(){return this.$$.ctx[19]}set autofocus(e){this.$$set({autofocus:e}),b()}}function ft(t){let e,l,a,s;function n(u){t[29](u)}function _(u){t[30](u)}let g={label:t[2],info:t[3],elem_id:t[4],elem_classes:t[5],visible:t[6],lines:t[7],placeholder:t[8],show_label:t[9],max_lines:t[10],type:t[11],container:t[12],scale:t[13],min_width:t[14],show_copy_button:t[15],loading_status:t[16],rtl:t[18],text_align:t[19],autofocus:t[20]};return t[0]!==void 0&&(g.value=t[0]),t[1]!==void 0&&(g.value_is_output=t[1]),e=new _t({props:g}),j.push(()=>U(e,"value",n)),j.push(()=>U(e,"value_is_output",_)),e.$on("change",t[31]),e.$on("input",t[32]),e.$on("submit",t[33]),e.$on("blur",t[34]),e.$on("select",t[35]),e.$on("focus",t[36]),{c(){q(e.$$.fragment)},m(u,o){y(e,u,o),s=!0},p(u,o){const i={};o[0]&4&&(i.label=u[2]),o[0]&8&&(i.info=u[3]),o[0]&16&&(i.elem_id=u[4]),o[0]&32&&(i.elem_classes=u[5]),o[0]&64&&(i.visible=u[6]),o[0]&128&&(i.lines=u[7]),o[0]&256&&(i.placeholder=u[8]),o[0]&512&&(i.show_label=u[9]),o[0]&1024&&(i.max_lines=u[10]),o[0]&2048&&(i.type=u[11]),o[0]&4096&&(i.container=u[12]),o[0]&8192&&(i.scale=u[13]),o[0]&16384&&(i.min_width=u[14]),o[0]&32768&&(i.show_copy_button=u[15]),o[0]&65536&&(i.loading_status=u[16]),o[0]&262144&&(i.rtl=u[18]),o[0]&524288&&(i.text_align=u[19]),o[0]&1048576&&(i.autofocus=u[20]),!l&&o[0]&1&&(l=!0,i.value=u[0],V(()=>l=!1)),!a&&o[0]&2&&(a=!0,i.value_is_output=u[1],V(()=>a=!1)),e.$set(i)},i(u){s||(v(e.$$.fragment,u),s=!0)},o(u){k(e.$$.fragment,u),s=!1},d(u){F(e,u)}}}function ot(t){let e,l,a,s;function n(u){t[21](u)}function _(u){t[22](u)}let g={label:t[2],info:t[3],elem_id:t[4],elem_classes:t[5],visible:t[6],lines:t[7],placeholder:t[8],show_label:t[9],max_lines:t[10],type:t[11],container:t[12],scale:t[13],min_width:t[14],show_copy_button:t[15],loading_status:t[16],rtl:t[18],text_align:t[19],autofocus:t[20]};return t[0]!==void 0&&(g.value=t[0]),t[1]!==void 0&&(g.value_is_output=t[1]),e=new nt({props:g}),j.push(()=>U(e,"value",n)),j.push(()=>U(e,"value_is_output",_)),e.$on("change",t[23]),e.$on("input",t[24]),e.$on("submit",t[25]),e.$on("blur",t[26]),e.$on("select",t[27]),e.$on("focus",t[28]),{c(){q(e.$$.fragment)},m(u,o){y(e,u,o),s=!0},p(u,o){const i={};o[0]&4&&(i.label=u[2]),o[0]&8&&(i.info=u[3]),o[0]&16&&(i.elem_id=u[4]),o[0]&32&&(i.elem_classes=u[5]),o[0]&64&&(i.visible=u[6]),o[0]&128&&(i.lines=u[7]),o[0]&256&&(i.placeholder=u[8]),o[0]&512&&(i.show_label=u[9]),o[0]&1024&&(i.max_lines=u[10]),o[0]&2048&&(i.type=u[11]),o[0]&4096&&(i.container=u[12]),o[0]&8192&&(i.scale=u[13]),o[0]&16384&&(i.min_width=u[14]),o[0]&32768&&(i.show_copy_button=u[15]),o[0]&65536&&(i.loading_status=u[16]),o[0]&262144&&(i.rtl=u[18]),o[0]&524288&&(i.text_align=u[19]),o[0]&1048576&&(i.autofocus=u[20]),!l&&o[0]&1&&(l=!0,i.value=u[0],V(()=>l=!1)),!a&&o[0]&2&&(a=!0,i.value_is_output=u[1],V(()=>a=!1)),e.$set(i)},i(u){s||(v(e.$$.fragment,u),s=!0)},o(u){k(e.$$.fragment,u),s=!1},d(u){F(e,u)}}}function ht(t){let e,l,a,s;const n=[ot,ft],_=[];function g(u,o){return u[17]==="static"?0:1}return e=g(t),l=_[e]=n[e](t),{c(){l.c(),a=ge()},m(u,o){_[e].m(u,o),z(u,a,o),s=!0},p(u,o){let 
i=e;e=g(u),e===i?_[e].p(u,o):(x(),k(_[i],1,1,()=>{_[i]=null}),$(),l=_[e],l?l.p(u,o):(l=_[e]=n[e](u),l.c()),v(l,1),l.m(a.parentNode,a))},i(u){s||(v(l),s=!0)},o(u){k(l),s=!1},d(u){u&&H(a),_[e].d(u)}}}function ct(t,e,l){let{label:a="Textbox"}=e,{info:s=void 0}=e,{elem_id:n=""}=e,{elem_classes:_=[]}=e,{visible:g=!0}=e,{value:u=""}=e,{lines:o}=e,{placeholder:i=""}=e,{show_label:h}=e,{max_lines:m}=e,{type:C="text"}=e,{container:D=!0}=e,{scale:K=null}=e,{min_width:L=void 0}=e,{show_copy_button:Y=!1}=e,{loading_status:E=void 0}=e,{mode:N}=e,{value_is_output:B=!1}=e,{rtl:S=!1}=e,{text_align:G=void 0}=e,{autofocus:P=!1}=e;function A(c){u=c,l(0,u)}function J(c){B=c,l(1,B)}function M(c){w.call(this,t,c)}function I(c){w.call(this,t,c)}function W(c){w.call(this,t,c)}function X(c){w.call(this,t,c)}function Z(c){w.call(this,t,c)}function f(c){w.call(this,t,c)}function _e(c){u=c,l(0,u)}function fe(c){B=c,l(1,B)}function oe(c){w.call(this,t,c)}function he(c){w.call(this,t,c)}function ce(c){w.call(this,t,c)}function re(c){w.call(this,t,c)}function be(c){w.call(this,t,c)}function me(c){w.call(this,t,c)}return t.$$set=c=>{"label"in c&&l(2,a=c.label),"info"in c&&l(3,s=c.info),"elem_id"in c&&l(4,n=c.elem_id),"elem_classes"in c&&l(5,_=c.elem_classes),"visible"in c&&l(6,g=c.visible),"value"in c&&l(0,u=c.value),"lines"in c&&l(7,o=c.lines),"placeholder"in c&&l(8,i=c.placeholder),"show_label"in c&&l(9,h=c.show_label),"max_lines"in c&&l(10,m=c.max_lines),"type"in c&&l(11,C=c.type),"container"in c&&l(12,D=c.container),"scale"in c&&l(13,K=c.scale),"min_width"in c&&l(14,L=c.min_width),"show_copy_button"in c&&l(15,Y=c.show_copy_button),"loading_status"in c&&l(16,E=c.loading_status),"mode"in c&&l(17,N=c.mode),"value_is_output"in c&&l(1,B=c.value_is_output),"rtl"in c&&l(18,S=c.rtl),"text_align"in c&&l(19,G=c.text_align),"autofocus"in c&&l(20,P=c.autofocus)},[u,B,a,s,n,_,g,o,i,h,m,C,D,K,L,Y,E,N,S,G,P,A,J,M,I,W,X,Z,f,_e,fe,oe,he,ce,re,be,me]}class wt extends ie{constructor(e){super(),ne(this,e,ct,ht,se,{label:2,info:3,elem_id:4,elem_classes:5,visible:6,value:0,lines:7,placeholder:8,show_label:9,max_lines:10,type:11,container:12,scale:13,min_width:14,show_copy_button:15,loading_status:16,mode:17,value_is_output:1,rtl:18,text_align:19,autofocus:20},null,[-1,-1])}get label(){return this.$$.ctx[2]}set label(e){this.$$set({label:e}),b()}get info(){return this.$$.ctx[3]}set info(e){this.$$set({info:e}),b()}get elem_id(){return this.$$.ctx[4]}set elem_id(e){this.$$set({elem_id:e}),b()}get elem_classes(){return this.$$.ctx[5]}set elem_classes(e){this.$$set({elem_classes:e}),b()}get visible(){return this.$$.ctx[6]}set visible(e){this.$$set({visible:e}),b()}get value(){return this.$$.ctx[0]}set value(e){this.$$set({value:e}),b()}get lines(){return this.$$.ctx[7]}set lines(e){this.$$set({lines:e}),b()}get placeholder(){return this.$$.ctx[8]}set placeholder(e){this.$$set({placeholder:e}),b()}get show_label(){return this.$$.ctx[9]}set show_label(e){this.$$set({show_label:e}),b()}get max_lines(){return this.$$.ctx[10]}set max_lines(e){this.$$set({max_lines:e}),b()}get type(){return this.$$.ctx[11]}set type(e){this.$$set({type:e}),b()}get container(){return this.$$.ctx[12]}set container(e){this.$$set({container:e}),b()}get scale(){return this.$$.ctx[13]}set scale(e){this.$$set({scale:e}),b()}get min_width(){return this.$$.ctx[14]}set min_width(e){this.$$set({min_width:e}),b()}get show_copy_button(){return this.$$.ctx[15]}set show_copy_button(e){this.$$set({show_copy_button:e}),b()}get loading_status(){return this.$$.ctx[16]}set 
loading_status(e){this.$$set({loading_status:e}),b()}get mode(){return this.$$.ctx[17]}set mode(e){this.$$set({mode:e}),b()}get value_is_output(){return this.$$.ctx[1]}set value_is_output(e){this.$$set({value_is_output:e}),b()}get rtl(){return this.$$.ctx[18]}set rtl(e){this.$$set({rtl:e}),b()}get text_align(){return this.$$.ctx[19]}set text_align(e){this.$$set({text_align:e}),b()}get autofocus(){return this.$$.ctx[20]}set autofocus(e){this.$$set({autofocus:e}),b()}}export{wt as T}; -//# sourceMappingURL=index-ea4a5a13.js.map diff --git a/spaces/declare-lab/tango/diffusers/docker/diffusers-pytorch-cuda/Dockerfile b/spaces/declare-lab/tango/diffusers/docker/diffusers-pytorch-cuda/Dockerfile deleted file mode 100644 index 8087be4299967c535e9d34590118113f001721bd..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/docker/diffusers-pytorch-cuda/Dockerfile +++ /dev/null @@ -1,42 +0,0 @@ -FROM nvidia/cuda:11.7.1-cudnn8-runtime-ubuntu20.04 -LABEL maintainer="Hugging Face" -LABEL repository="diffusers" - -ENV DEBIAN_FRONTEND=noninteractive - -RUN apt update && \ - apt install -y bash \ - build-essential \ - git \ - git-lfs \ - curl \ - ca-certificates \ - libsndfile1-dev \ - python3.8 \ - python3-pip \ - python3.8-venv && \ - rm -rf /var/lib/apt/lists - -# make sure to use venv -RUN python3 -m venv /opt/venv -ENV PATH="/opt/venv/bin:$PATH" - -# pre-install the heavy dependencies (these can later be overridden by the deps from setup.py) -RUN python3 -m pip install --no-cache-dir --upgrade pip && \ - python3 -m pip install --no-cache-dir \ - torch \ - torchvision \ - torchaudio \ - python3 -m pip install --no-cache-dir \ - accelerate \ - datasets \ - hf-doc-builder \ - huggingface-hub \ - Jinja2 \ - librosa \ - numpy \ - scipy \ - tensorboard \ - transformers - -CMD ["/bin/bash"] diff --git a/spaces/declare-lab/tango/diffusers/src/diffusers/models/transformer_2d.py b/spaces/declare-lab/tango/diffusers/src/diffusers/models/transformer_2d.py deleted file mode 100644 index d590b1d0978104719e987504244ec60c53882539..0000000000000000000000000000000000000000 --- a/spaces/declare-lab/tango/diffusers/src/diffusers/models/transformer_2d.py +++ /dev/null @@ -1,321 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-from dataclasses import dataclass -from typing import Any, Dict, Optional - -import torch -import torch.nn.functional as F -from torch import nn - -from ..configuration_utils import ConfigMixin, register_to_config -from ..models.embeddings import ImagePositionalEmbeddings -from ..utils import BaseOutput, deprecate -from .attention import BasicTransformerBlock -from .embeddings import PatchEmbed -from .modeling_utils import ModelMixin - - -@dataclass -class Transformer2DModelOutput(BaseOutput): - """ - Args: - sample (`torch.FloatTensor` of shape `(batch_size, num_channels, height, width)` or `(batch size, num_vector_embeds - 1, num_latent_pixels)` if [`Transformer2DModel`] is discrete): - Hidden states conditioned on `encoder_hidden_states` input. If discrete, returns probability distributions - for the unnoised latent pixels. - """ - - sample: torch.FloatTensor - - -class Transformer2DModel(ModelMixin, ConfigMixin): - """ - Transformer model for image-like data. Takes either discrete (classes of vector embeddings) or continuous (actual - embeddings) inputs. - - When input is continuous: First, project the input (aka embedding) and reshape to b, t, d. Then apply standard - transformer action. Finally, reshape to image. - - When input is discrete: First, input (classes of latent pixels) is converted to embeddings and has positional - embeddings applied, see `ImagePositionalEmbeddings`. Then apply standard transformer action. Finally, predict - classes of unnoised image. - - Note that it is assumed one of the input classes is the masked latent pixel. The predicted classes of the unnoised - image do not contain a prediction for the masked pixel as the unnoised image cannot be masked. - - Parameters: - num_attention_heads (`int`, *optional*, defaults to 16): The number of heads to use for multi-head attention. - attention_head_dim (`int`, *optional*, defaults to 88): The number of channels in each head. - in_channels (`int`, *optional*): - Pass if the input is continuous. The number of channels in the input and output. - num_layers (`int`, *optional*, defaults to 1): The number of layers of Transformer blocks to use. - dropout (`float`, *optional*, defaults to 0.0): The dropout probability to use. - cross_attention_dim (`int`, *optional*): The number of encoder_hidden_states dimensions to use. - sample_size (`int`, *optional*): Pass if the input is discrete. The width of the latent images. - Note that this is fixed at training time as it is used for learning a number of position embeddings. See - `ImagePositionalEmbeddings`. - num_vector_embeds (`int`, *optional*): - Pass if the input is discrete. The number of classes of the vector embeddings of the latent pixels. - Includes the class for the masked latent pixel. - activation_fn (`str`, *optional*, defaults to `"geglu"`): Activation function to be used in feed-forward. - num_embeds_ada_norm ( `int`, *optional*): Pass if at least one of the norm_layers is `AdaLayerNorm`. - The number of diffusion steps used during training. Note that this is fixed at training time as it is used - to learn a number of embeddings that are added to the hidden states. During inference, you can denoise for - up to but not more than steps than `num_embeds_ada_norm`. - attention_bias (`bool`, *optional*): - Configure if the TransformerBlocks' attention should contain a bias parameter. 
- """ - - @register_to_config - def __init__( - self, - num_attention_heads: int = 16, - attention_head_dim: int = 88, - in_channels: Optional[int] = None, - out_channels: Optional[int] = None, - num_layers: int = 1, - dropout: float = 0.0, - norm_num_groups: int = 32, - cross_attention_dim: Optional[int] = None, - attention_bias: bool = False, - sample_size: Optional[int] = None, - num_vector_embeds: Optional[int] = None, - patch_size: Optional[int] = None, - activation_fn: str = "geglu", - num_embeds_ada_norm: Optional[int] = None, - use_linear_projection: bool = False, - only_cross_attention: bool = False, - upcast_attention: bool = False, - norm_type: str = "layer_norm", - norm_elementwise_affine: bool = True, - ): - super().__init__() - self.use_linear_projection = use_linear_projection - self.num_attention_heads = num_attention_heads - self.attention_head_dim = attention_head_dim - inner_dim = num_attention_heads * attention_head_dim - - # 1. Transformer2DModel can process both standard continuous images of shape `(batch_size, num_channels, width, height)` as well as quantized image embeddings of shape `(batch_size, num_image_vectors)` - # Define whether input is continuous or discrete depending on configuration - self.is_input_continuous = (in_channels is not None) and (patch_size is None) - self.is_input_vectorized = num_vector_embeds is not None - self.is_input_patches = in_channels is not None and patch_size is not None - - if norm_type == "layer_norm" and num_embeds_ada_norm is not None: - deprecation_message = ( - f"The configuration file of this model: {self.__class__} is outdated. `norm_type` is either not set or" - " incorrectly set to `'layer_norm'`.Make sure to set `norm_type` to `'ada_norm'` in the config." - " Please make sure to update the config accordingly as leaving `norm_type` might led to incorrect" - " results in future versions. If you have downloaded this checkpoint from the Hugging Face Hub, it" - " would be very nice if you could open a Pull request for the `transformer/config.json` file" - ) - deprecate("norm_type!=num_embeds_ada_norm", "1.0.0", deprecation_message, standard_warn=False) - norm_type = "ada_norm" - - if self.is_input_continuous and self.is_input_vectorized: - raise ValueError( - f"Cannot define both `in_channels`: {in_channels} and `num_vector_embeds`: {num_vector_embeds}. Make" - " sure that either `in_channels` or `num_vector_embeds` is None." - ) - elif self.is_input_vectorized and self.is_input_patches: - raise ValueError( - f"Cannot define both `num_vector_embeds`: {num_vector_embeds} and `patch_size`: {patch_size}. Make" - " sure that either `num_vector_embeds` or `num_patches` is None." - ) - elif not self.is_input_continuous and not self.is_input_vectorized and not self.is_input_patches: - raise ValueError( - f"Has to define `in_channels`: {in_channels}, `num_vector_embeds`: {num_vector_embeds}, or patch_size:" - f" {patch_size}. Make sure that `in_channels`, `num_vector_embeds` or `num_patches` is not None." - ) - - # 2. 
Define input layers - if self.is_input_continuous: - self.in_channels = in_channels - - self.norm = torch.nn.GroupNorm(num_groups=norm_num_groups, num_channels=in_channels, eps=1e-6, affine=True) - if use_linear_projection: - self.proj_in = nn.Linear(in_channels, inner_dim) - else: - self.proj_in = nn.Conv2d(in_channels, inner_dim, kernel_size=1, stride=1, padding=0) - elif self.is_input_vectorized: - assert sample_size is not None, "Transformer2DModel over discrete input must provide sample_size" - assert num_vector_embeds is not None, "Transformer2DModel over discrete input must provide num_embed" - - self.height = sample_size - self.width = sample_size - self.num_vector_embeds = num_vector_embeds - self.num_latent_pixels = self.height * self.width - - self.latent_image_embedding = ImagePositionalEmbeddings( - num_embed=num_vector_embeds, embed_dim=inner_dim, height=self.height, width=self.width - ) - elif self.is_input_patches: - assert sample_size is not None, "Transformer2DModel over patched input must provide sample_size" - - self.height = sample_size - self.width = sample_size - - self.patch_size = patch_size - self.pos_embed = PatchEmbed( - height=sample_size, - width=sample_size, - patch_size=patch_size, - in_channels=in_channels, - embed_dim=inner_dim, - ) - - # 3. Define transformers blocks - self.transformer_blocks = nn.ModuleList( - [ - BasicTransformerBlock( - inner_dim, - num_attention_heads, - attention_head_dim, - dropout=dropout, - cross_attention_dim=cross_attention_dim, - activation_fn=activation_fn, - num_embeds_ada_norm=num_embeds_ada_norm, - attention_bias=attention_bias, - only_cross_attention=only_cross_attention, - upcast_attention=upcast_attention, - norm_type=norm_type, - norm_elementwise_affine=norm_elementwise_affine, - ) - for d in range(num_layers) - ] - ) - - # 4. Define output layers - self.out_channels = in_channels if out_channels is None else out_channels - if self.is_input_continuous: - # TODO: should use out_channels for continuous projections - if use_linear_projection: - self.proj_out = nn.Linear(inner_dim, in_channels) - else: - self.proj_out = nn.Conv2d(inner_dim, in_channels, kernel_size=1, stride=1, padding=0) - elif self.is_input_vectorized: - self.norm_out = nn.LayerNorm(inner_dim) - self.out = nn.Linear(inner_dim, self.num_vector_embeds - 1) - elif self.is_input_patches: - self.norm_out = nn.LayerNorm(inner_dim, elementwise_affine=False, eps=1e-6) - self.proj_out_1 = nn.Linear(inner_dim, 2 * inner_dim) - self.proj_out_2 = nn.Linear(inner_dim, patch_size * patch_size * self.out_channels) - - def forward( - self, - hidden_states: torch.Tensor, - encoder_hidden_states: Optional[torch.Tensor] = None, - timestep: Optional[torch.LongTensor] = None, - class_labels: Optional[torch.LongTensor] = None, - cross_attention_kwargs: Dict[str, Any] = None, - attention_mask: Optional[torch.Tensor] = None, - encoder_attention_mask: Optional[torch.Tensor] = None, - return_dict: bool = True, - ): - """ - Args: - hidden_states ( When discrete, `torch.LongTensor` of shape `(batch size, num latent pixels)`. - When continuous, `torch.FloatTensor` of shape `(batch size, channel, height, width)`): Input - hidden_states - encoder_hidden_states ( `torch.LongTensor` of shape `(batch size, encoder_hidden_states dim)`, *optional*): - Conditional embeddings for cross attention layer. If not given, cross-attention defaults to - self-attention. - timestep ( `torch.LongTensor`, *optional*): - Optional timestep to be applied as an embedding in AdaLayerNorm's. 
Used to indicate denoising step. - class_labels ( `torch.LongTensor` of shape `(batch size, num classes)`, *optional*): - Optional class labels to be applied as an embedding in AdaLayerZeroNorm. Used to indicate class labels - conditioning. - attention_mask ( `torch.Tensor` of shape (batch size, num latent pixels), *optional* ). - Bias to add to attention scores. - encoder_attention_mask ( `torch.Tensor` of shape (batch size, num encoder tokens), *optional* ). - Bias to add to cross-attention scores. - return_dict (`bool`, *optional*, defaults to `True`): - Whether or not to return a [`models.unet_2d_condition.UNet2DConditionOutput`] instead of a plain tuple. - - Returns: - [`~models.transformer_2d.Transformer2DModelOutput`] or `tuple`: - [`~models.transformer_2d.Transformer2DModelOutput`] if `return_dict` is True, otherwise a `tuple`. When - returning a tuple, the first element is the sample tensor. - """ - # 1. Input - if self.is_input_continuous: - batch, _, height, width = hidden_states.shape - residual = hidden_states - - hidden_states = self.norm(hidden_states) - if not self.use_linear_projection: - hidden_states = self.proj_in(hidden_states) - inner_dim = hidden_states.shape[1] - hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) - else: - inner_dim = hidden_states.shape[1] - hidden_states = hidden_states.permute(0, 2, 3, 1).reshape(batch, height * width, inner_dim) - hidden_states = self.proj_in(hidden_states) - elif self.is_input_vectorized: - hidden_states = self.latent_image_embedding(hidden_states) - elif self.is_input_patches: - hidden_states = self.pos_embed(hidden_states) - - # 2. Blocks - for block in self.transformer_blocks: - hidden_states = block( - hidden_states, - attention_mask=attention_mask, - encoder_hidden_states=encoder_hidden_states, - encoder_attention_mask=encoder_attention_mask, - timestep=timestep, - cross_attention_kwargs=cross_attention_kwargs, - class_labels=class_labels, - ) - - # 3. Output - if self.is_input_continuous: - if not self.use_linear_projection: - hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() - hidden_states = self.proj_out(hidden_states) - else: - hidden_states = self.proj_out(hidden_states) - hidden_states = hidden_states.reshape(batch, height, width, inner_dim).permute(0, 3, 1, 2).contiguous() - - output = hidden_states + residual - elif self.is_input_vectorized: - hidden_states = self.norm_out(hidden_states) - logits = self.out(hidden_states) - # (batch, self.num_vector_embeds - 1, self.num_latent_pixels) - logits = logits.permute(0, 2, 1) - - # log(p(x_0)) - output = F.log_softmax(logits.double(), dim=1).float() - elif self.is_input_patches: - # TODO: cleanup! 
- conditioning = self.transformer_blocks[0].norm1.emb( - timestep, class_labels, hidden_dtype=hidden_states.dtype - ) - shift, scale = self.proj_out_1(F.silu(conditioning)).chunk(2, dim=1) - hidden_states = self.norm_out(hidden_states) * (1 + scale[:, None]) + shift[:, None] - hidden_states = self.proj_out_2(hidden_states) - - # unpatchify - height = width = int(hidden_states.shape[1] ** 0.5) - hidden_states = hidden_states.reshape( - shape=(-1, height, width, self.patch_size, self.patch_size, self.out_channels) - ) - hidden_states = torch.einsum("nhwpqc->nchpwq", hidden_states) - output = hidden_states.reshape( - shape=(-1, self.out_channels, height * self.patch_size, width * self.patch_size) - ) - - if not return_dict: - return (output,) - - return Transformer2DModelOutput(sample=output) diff --git a/spaces/derful/Chatgpt-academic/config.py b/spaces/derful/Chatgpt-academic/config.py deleted file mode 100644 index e7b0bbe19d49a38d21019a83213777d28f0abcd4..0000000000000000000000000000000000000000 --- a/spaces/derful/Chatgpt-academic/config.py +++ /dev/null @@ -1,43 +0,0 @@ -# API_KEY = "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" 此key无效 -API_KEY = "sk-此处填API秘钥" -API_URL = "https://api.openai.com/v1/chat/completions" - -# 改为True应用代理 -USE_PROXY = False -if USE_PROXY: - - # 填写格式是 [协议]:// [地址] :[端口] , - # 例如 "socks5h://localhost:11284" - # [协议] 常见协议无非socks5h/http,例如 v2*** 和 s** 的默认本地协议是socks5h,cl**h 的默认本地协议是http - # [地址] 懂的都懂,不懂就填localhost或者127.0.0.1肯定错不了(localhost意思是代理软件安装在本机上) - # [端口] 在代理软件的设置里,不同的代理软件界面不一样,但端口号都应该在最显眼的位置上 - - # 代理网络的地址,打开你的科学上网软件查看代理的协议(socks5/http)、地址(localhost)和端口(11284) - proxies = { "http": "socks5h://localhost:11284", "https": "socks5h://localhost:11284", } - print('网络代理状态:运行。') -else: - proxies = None - print('网络代理状态:未配置。无代理状态下很可能无法访问。') - -# 发送请求到OpenAI后,等待多久判定为超时 -TIMEOUT_SECONDS = 25 - -# 网页的端口, -1代表随机端口 -WEB_PORT = -1 - -# 如果OpenAI不响应(网络卡顿、代理失败、KEY失效),重试的次数限制 -MAX_RETRY = 2 - -# 选择的OpenAI模型是(gpt4现在只对申请成功的人开放) -LLM_MODEL = "gpt-3.5-turbo" - -# 设置并行使用的线程数 -CONCURRENT_COUNT = 100 - -# 设置用户名和密码 -AUTHENTICATION = [] # [("username", "password"), ("username2", "password2"), ...] - -# 检查一下是不是忘了改config -# if len(API_KEY) != 51: -# assert False, "正确的API_KEY密钥是51位,请在config文件中修改API密钥, 添加海外代理之后再运行。" + \ -# "(如果您刚更新过代码,请确保旧版config_private文件中没有遗留任何新增键值)" diff --git a/spaces/diacanFperku/AutoGPT/3dsmax 2013 Vray 2.40.03.x86x64 Free Download Torretns LINK.md b/spaces/diacanFperku/AutoGPT/3dsmax 2013 Vray 2.40.03.x86x64 Free Download Torretns LINK.md deleted file mode 100644 index e8cb0ea2b3acb1a1e15d92d30804845493d62dc8..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/3dsmax 2013 Vray 2.40.03.x86x64 Free Download Torretns LINK.md +++ /dev/null @@ -1,118 +0,0 @@ -
    -

    3dsmax 2013 Vray 2.40.03.x86x64 Free Download Torretns: How to Get the Best Rendering Plugin for 3D Studio Max

    - -

    If you are a 3D artist or designer, you probably know how important it is to have a powerful and realistic rendering plugin for your 3D software. Rendering is the process of creating photorealistic images and animations from your 3D models and scenes. Rendering can make or break your project, as it determines the quality and realism of your final output.

    -

    3dsmax 2013 vray 2.40.03.x86x64 free download torretns


    Download ✶✶✶ https://gohhs.com/2uFU0P



    - -

    One of the most popular and widely used rendering plugins for 3D Studio Max is V-Ray. V-Ray is a product of Chaos Group, a company that specializes in developing advanced software technology for graphics, audio and video editing. V-Ray is a versatile and flexible plugin that allows you to quickly and easily create stunning images and animations with full control over the 3D production process.

    - -

    V-Ray has many features and options that can help you achieve the best results for your project. Some of these features include:

    - -
      -
    • Global illumination: V-Ray can simulate realistic lighting effects such as indirect illumination, caustics, ambient occlusion, color bleeding and more.
    • -
    • Physical materials: V-Ray has a library of physically based materials that can mimic the properties of real-world objects such as metal, glass, wood, fabric, etc.
    • -
    • Ray tracing: V-Ray can render accurate reflections, refractions, shadows and depth of field using ray tracing algorithms.
    • -
    • Image-based lighting: V-Ray can use high dynamic range images (HDRI) as light sources to create natural and realistic lighting conditions.
    • -
    • Distributed rendering: V-Ray can use multiple computers to speed up the rendering process and reduce the render time.
    • -
    - -

    If you want to get V-Ray for your 3D Studio Max 2013, you can download it for free from various torretns sites. However, you need to be careful about the source and quality of the files you download, as some of them may contain viruses or malware that can harm your computer. You also need to make sure that you have a compatible version of 3D Studio Max installed on your system, as V-Ray may not work with older or newer versions.

    - -

    One of the best sources to download V-Ray for 3D Studio Max 2013 is Get Into PC, a website that provides free downloads of various software applications. You can find the link to download V-Ray for 3D Studio Max 2009 / 2010 / 2011 / 2012 from this site in the web search results above. This download includes both the x86 and x64 versions of V-Ray, as well as a crack file that you need to apply after installing the plugin.

    -

    - -

    To install V-Ray for 3D Studio Max 2013, you need to follow these steps:

    - -
      -
    1. Download the V-Ray_2.40.03_for_3ds_Max.rar file from Get Into PC or any other torretns site.
    2. -
    3. Extract the file using WinRAR or any other file compression software.
    4. -
    5. Run the installer for your version of 3D Studio Max (e.g., VRay_2.40.03_for_3dsMax_2013_x64.exe).
    6. -
    7. Follow the instructions on the screen and complete the installation.
    8. -
    9. Copy the vray.dll file from the crack folder and paste it into the installation directory (e.g., C:\Program Files\Autodesk\3ds Max 2013\).
    10. -
    11. Restart your computer and launch 3D Studio Max.
    12. -
    13. Enjoy using V-Ray for your rendering projects!
    14. -
    - -

    V-Ray is a powerful and versatile rendering plugin that can help you create amazing images and animations with 3D Studio Max. By downloading it from torretns sites, you can get it for free and use it without any limitations. However, you need to be careful about the source and quality of the files you download, as well as the compatibility of the plugin with your version of 3D Studio Max. If you follow these tips, you can get V-Ray for 3D Studio Max 2013 x86/x64 free download torretns and enjoy its benefits.

    -

    How to Use V-Ray for 3ds Max 2013 x86/x64

    - -

    Once you have installed V-Ray for 3ds Max 2013 x86/x64, you can start using it for your rendering projects. V-Ray integrates seamlessly with 3ds Max and provides you with a variety of tools and settings to customize your render output. You can access V-Ray from the main menu, the toolbar, or the render setup dialog.

    - -

    V-Ray has two main modes of rendering: production and interactive. Production mode is used for final quality renders, while interactive mode is used for quick previews and feedback. You can switch between the modes from the render setup dialog or the V-Ray toolbar.

    - -

    V-Ray also has two main engines for rendering: CPU and GPU. CPU engine uses your computer's processor to calculate the render, while GPU engine uses your graphics card to accelerate the render. You can choose the engine from the render setup dialog or the V-Ray toolbar.

    - -

    V-Ray has many parameters and options that you can adjust to fine-tune your render output. Some of the most important ones are:

    - -
      -
    • Camera: V-Ray has its own physical camera that simulates real-world camera behavior. You can control the exposure, depth of field, motion blur, white balance, and lens effects of the camera.
    • -
    • Lights: V-Ray has a variety of light types that you can use to illuminate your scene. You can use standard 3ds Max lights or V-Ray specific lights such as dome light, sun and sky, IES light, etc.
    • -
    • Materials: V-Ray has a powerful material system that allows you to create realistic and complex materials for your objects. You can use standard 3ds Max materials or V-Ray specific materials such as VRayMtl, VRayFastSSS2, VRayCarPaintMtl, etc.
    • -
    • Global illumination: V-Ray can calculate realistic global illumination effects such as indirect illumination, caustics, ambient occlusion, color bleeding, etc. You can control the quality and speed of the global illumination from the render setup dialog.
    • -
    • Render elements: V-Ray can output various render elements that you can use for post-processing and compositing. You can enable and disable the render elements from the render setup dialog.
    • -
    - -

    How to Download V-Ray for 3ds Max 2013 x86/x64 Free from Torretns

    - -

    If you want to download V-Ray for 3ds Max 2013 x86/x64 free from torretns, you need to have a torretn client installed on your computer. A torretn client is a software that allows you to download files from other users who are sharing them on a peer-to-peer network.

    - -

    One of the most popular and reliable torretn clients is uTorrent. You can download uTorrent from its official website or from any other trusted source. Once you have installed uTorrent, you can use it to download V-Ray for 3ds Max 2013 x86/x64 free from torretns.

    - -

    To download V-Ray for 3ds Max 2013 x86/x64 free from torretns, you need to find a torretn file that contains the information about the files you want to download. A torretn file is a small file that has a .torretn extension. You can find torretn files for V-Ray for 3ds Max 2013 x86/x64 on various websites that host torretns such as The Pirate Bay, Kickass Torretns, RARBG, etc.

    - -

    Once you have found a torretn file for V-Ray for 3ds Max 2013 x86/x64, you need to open it with uTorrent. uTorrent will then connect to other users who are sharing the files and start downloading them to your computer. You can monitor the progress and speed of the download from uTorrent's interface.

    - -

    When the download is complete, you will have a folder that contains all the files you need to install and use V-Ray for 3ds Max 2013 x86/x64. You can follow the installation steps mentioned above to install V-Ray for 3ds Max 2013 x86/x64 on your system.

    - -

    Conclusion

    - -

    V-Ray is a powerful and versatile rendering plugin that can help you create amazing images and animations with 3ds Max. By downloading it from torretns sites, you can get it for free and use it without any limitations. However, you need to be careful about the source and quality of the files you download, as well as the compatibility of the plugin with your version of 3ds Max. If you follow these tips, you can get V-Ray for 3ds Max 2013 x86/x64 free download torretns and enjoy its benefits.

    -

    How to Troubleshoot V-Ray for 3ds Max 2013 x86/x64

    - -

    Although V-Ray for 3ds Max 2013 x86/x64 is a reliable and stable plugin, you may encounter some issues or errors while using it. Some of the common problems that users face are:

    - -
      -
    • V-Ray does not load or crashes 3ds Max.
    • -
    • V-Ray renders are noisy or blotchy.
    • -
    • V-Ray renders are too dark or too bright.
    • -
    • V-Ray renders have artifacts or missing elements.
    • -
    • V-Ray renders take too long or consume too much memory.
    • -
    - -

    If you face any of these problems, you can try some of the following solutions:

    - -
      -
    • Check your system requirements and make sure that your computer meets the minimum specifications for running V-Ray and 3ds Max.
    • -
    • Update your graphics card drivers and make sure that they are compatible with V-Ray and 3ds Max.
    • -
    • Update your V-Ray and 3ds Max to the latest versions and make sure that they are compatible with each other.
    • -
    • Check your V-Ray license and make sure that it is valid and activated.
    • -
    • Check your V-Ray settings and make sure that they are appropriate for your scene and render output.
    • -
    • Check your 3ds Max scene and make sure that there are no errors or corrupted objects.
    • -
    • Check your render elements and make sure that they are enabled and configured correctly.
    • -
    • Check your output format and resolution and make sure that they are supported by V-Ray and 3ds Max.
    • -
    - -

    If none of these solutions work, you can contact the V-Ray support team or visit the V-Ray forums for more help and advice.

    - -

    How to Learn V-Ray for 3ds Max 2013 x86/x64

    - -

    If you want to learn how to use V-Ray for 3ds Max 2013 x86/x64 effectively and efficiently, you can take advantage of various resources and tutorials available online. Some of the best sources to learn V-Ray are:

    - -
      -
    • The official V-Ray website: This is the main source of information and documentation for V-Ray. You can find user guides, FAQs, videos, webinars, forums, blogs, etc. on this site.
    • -
    • The official V-Ray YouTube channel: This is the best place to watch video tutorials and demos for V-Ray. You can find videos on various topics such as lighting, materials, global illumination, rendering, etc. on this channel.
    • -
    • The official V-Ray Facebook page: This is a great place to interact with other V-Ray users and get updates on the latest news and events related to V-Ray. You can also post your questions and feedback on this page.
    • -
    • The CG Persia website: This is a popular website that provides free downloads of various software applications related to computer graphics. You can find torretns for V-Ray for 3ds Max 2013 x86/x64 on this site, as well as other plugins, tools, models, etc.
    • -
    • The CG Archives website: This is another popular website that provides free downloads of various software applications related to computer graphics. You can find direct links for V-Ray for 3ds Max 2013 x86/x64 on this site, as well as other plugins, tools, models, etc.
    • -
    - -

    By using these resources and tutorials, you can learn how to use V-Ray for 3ds Max 2013 x86/x64 like a pro and create stunning images and animations with ease.

    - -

    Conclusion

    - -

    V-Ray is a powerful and versatile rendering plugin that can help you create amazing images and animations with 3ds Max. By downloading it from torretns sites, you can get it for free and use it without any limitations. However, you need to be careful about the source and quality of the files you download, as well as the compatibility of the plugin with your version of 3ds Max. If you follow these tips, you can get V-Ray for 3ds Max 2013 x86/x64 free download torretns and enjoy its benefits. You can also troubleshoot any issues or errors that you may encounter while using V-Ray, as well as learn how to use it effectively and efficiently from various resources and tutorials available online.

    -


    3cee63e6c2
    -
    -
    \ No newline at end of file diff --git a/spaces/diacanFperku/AutoGPT/Descargar Historia De Chile Walterio Millar Pdf 70.md b/spaces/diacanFperku/AutoGPT/Descargar Historia De Chile Walterio Millar Pdf 70.md deleted file mode 100644 index a5b6c23cf029a4a6e56c4a4a9b39542eb3c9459f..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Descargar Historia De Chile Walterio Millar Pdf 70.md +++ /dev/null @@ -1,6 +0,0 @@ -

    descargar historia de chile walterio millar pdf 70


    Download https://gohhs.com/2uFU2r



    -
    -.EXE Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5 ..EXE ( 70, 2015 ).Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5. Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5. Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5. ( 70, 2015 n Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.EXE : i.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5. Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5. ( 70, 2015 n HOT! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5. ( 70, 2015 n W Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.W Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.W ( 70, 2015 ) Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.W Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.W Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5.W Hot! Descargar Historia De Chile Walterio Millar Pdf 70 5 4fefd39f24
    -
    -
    -

    diff --git a/spaces/diacanFperku/AutoGPT/Msi 2ab4 Driver Download !EXCLUSIVE!.md b/spaces/diacanFperku/AutoGPT/Msi 2ab4 Driver Download !EXCLUSIVE!.md deleted file mode 100644 index b4d74b4eb22f9c9849e292852e8c0e44807a4b03..0000000000000000000000000000000000000000 --- a/spaces/diacanFperku/AutoGPT/Msi 2ab4 Driver Download !EXCLUSIVE!.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Msi 2ab4 Driver Download


    Download >>> https://gohhs.com/2uFTZz



    - -This site maintains the list of MSI Drivers available for Download. Just browse our organized database and find a driver that fits your needs. If you have any Drivers ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/dineshreddy/WALT/mmdet/core/visualization/__init__.py b/spaces/dineshreddy/WALT/mmdet/core/visualization/__init__.py deleted file mode 100644 index 4ff995c0861490941f8cfc19ebbd41a2ee7e2d65..0000000000000000000000000000000000000000 --- a/spaces/dineshreddy/WALT/mmdet/core/visualization/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -from .image import (color_val_matplotlib, imshow_det_bboxes, - imshow_gt_det_bboxes) - -__all__ = ['imshow_det_bboxes', 'imshow_gt_det_bboxes', 'color_val_matplotlib'] diff --git a/spaces/dinhminh20521597/OCR_DEMO/configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py b/spaces/dinhminh20521597/OCR_DEMO/configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py deleted file mode 100644 index d4a9c642307466c86f667d64bbeb4057db571b66..0000000000000000000000000000000000000000 --- a/spaces/dinhminh20521597/OCR_DEMO/configs/textdet/fcenet/fcenet_r50_fpn_1500e_icdar2015.py +++ /dev/null @@ -1,33 +0,0 @@ -_base_ = [ - '../../_base_/default_runtime.py', - '../../_base_/schedules/schedule_sgd_1500e.py', - '../../_base_/det_models/fcenet_r50_fpn.py', - '../../_base_/det_datasets/icdar2015.py', - '../../_base_/det_pipelines/fcenet_pipeline.py' -] - -train_list = {{_base_.train_list}} -test_list = {{_base_.test_list}} - -train_pipeline_icdar2015 = {{_base_.train_pipeline_icdar2015}} -test_pipeline_icdar2015 = {{_base_.test_pipeline_icdar2015}} - -data = dict( - samples_per_gpu=8, - workers_per_gpu=2, - val_dataloader=dict(samples_per_gpu=1), - test_dataloader=dict(samples_per_gpu=1), - train=dict( - type='UniformConcatDataset', - datasets=train_list, - pipeline=train_pipeline_icdar2015), - val=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline_icdar2015), - test=dict( - type='UniformConcatDataset', - datasets=test_list, - pipeline=test_pipeline_icdar2015)) - -evaluation = dict(interval=10, metric='hmean-iou') diff --git a/spaces/dirge/voicevox/README.md b/spaces/dirge/voicevox/README.md deleted file mode 100644 index e60210408513fc5dadb055ce754600da3916bc98..0000000000000000000000000000000000000000 --- a/spaces/dirge/voicevox/README.md +++ /dev/null @@ -1,580 +0,0 @@ ---- -license: lgpl-3.0 -title: voicevox -sdk: docker -emoji: 🐢 -colorFrom: blue -colorTo: pink -pinned: true -duplicated_from: 2ndelement/voicevox ---- -# VOICEVOX ENGINE - -[![build](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build.yml) -[![releases](https://img.shields.io/github/v/release/VOICEVOX/voicevox_engine)](https://github.com/VOICEVOX/voicevox_engine/releases) -[![discord](https://img.shields.io/discord/879570910208733277?color=5865f2&label=&logo=discord&logoColor=ffffff)](https://discord.gg/WMwWetrzuh) - -[![test](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/test.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/test.yml) -[![Coverage Status](https://coveralls.io/repos/github/VOICEVOX/voicevox_engine/badge.svg)](https://coveralls.io/github/VOICEVOX/voicevox_engine) - -[![build-docker](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build-docker.yml/badge.svg)](https://github.com/VOICEVOX/voicevox_engine/actions/workflows/build-docker.yml) -[![docker](https://img.shields.io/docker/pulls/voicevox/voicevox_engine)](https://hub.docker.com/r/voicevox/voicevox_engine) - -[VOICEVOX](https://voicevox.hiroshiba.jp/) のエンジンです。 -実態は HTTP サーバーなので、リクエストを送信すればテキスト音声合成できます。 - -(エディターは 
[VOICEVOX](https://github.com/VOICEVOX/voicevox/) 、 -コアは [VOICEVOX CORE](https://github.com/VOICEVOX/voicevox_core/) 、 -全体構成は [こちら](https://github.com/VOICEVOX/voicevox/blob/main/docs/%E5%85%A8%E4%BD%93%E6%A7%8B%E6%88%90.md) に詳細があります。) - -## ダウンロード - -[こちら](https://github.com/VOICEVOX/voicevox_engine/releases/latest)から対応するエンジンをダウンロードしてください。 - -## API ドキュメント - -[API ドキュメント](https://voicevox.github.io/voicevox_engine/api/)をご参照ください。 - -VOICEVOX エンジンもしくはエディタを起動した状態で http://127.0.0.1:50021/docs にアクセスすると、起動中のエンジンのドキュメントも確認できます。 -今後の方針などについては [VOICEVOX 音声合成エンジンとの連携](./docs/VOICEVOX音声合成エンジンとの連携.md) も参考になるかもしれません。 - -リクエスト・レスポンスの文字コードはすべて UTF-8 です。 - -### HTTP リクエストで音声合成するサンプルコード - -```bash -echo -n "こんにちは、音声合成の世界へようこそ" >text.txt - -curl -s \ - -X POST \ - "127.0.0.1:50021/audio_query?speaker=1"\ - --get --data-urlencode text@text.txt \ - > query.json - -curl -s \ - -H "Content-Type: application/json" \ - -X POST \ - -d @query.json \ - "127.0.0.1:50021/synthesis?speaker=1" \ - > audio.wav -``` - -生成される音声はサンプリングレートが 24000Hz と少し特殊なため、音声プレーヤーによっては再生できない場合があります。 - -`speaker` に指定する値は `/speakers` エンドポイントで得られる `style_id` です。互換性のために `speaker` という名前になっています。 - -### 読み方を AquesTalk 記法で取得・修正するサンプルコード - -`/audio_query`のレスポンスにはエンジンが判断した読み方が AquesTalk ライクな記法([本家の記法](https://www.a-quest.com/archive/manual/siyo_onseikigou.pdf)とは一部異なります)で記録されています。 -記法は次のルールに従います。 - -- 全てのカナはカタカナで記述される -- アクセント句は`/`または`、`で区切る。`、`で区切った場合に限り無音区間が挿入される。 -- カナの手前に`_`を入れるとそのカナは無声化される -- アクセント位置を`'`で指定する。全てのアクセント句にはアクセント位置を 1 つ指定する必要がある。 -- アクセント句末に`?`(全角)を入れることにより疑問文の発音ができる - -```bash -# 読ませたい文章をutf-8でtext.txtに書き出す -echo -n "ディープラーニングは万能薬ではありません" >text.txt - -curl -s \ - -X POST \ - "127.0.0.1:50021/audio_query?speaker=1" \ - --get --data-urlencode text@text.txt \ - > query.json - -cat query.json | grep -o -E "\"kana\":\".*\"" -# 結果... 
"kana":"ディ'イプ/ラ'アニングワ/バンノオヤクデワアリマセ'ン" - -# "ディイプラ'アニングワ/バンノ'オヤクデワ/アリマセ'ン"と読ませたいので、 -# is_kana=trueをつけてイントネーションを取得しnewphrases.jsonに保存 -echo -n "ディイプラ'アニングワ/バンノ'オヤクデワ/アリマセ'ン" > kana.txt -curl -s \ - -X POST \ - "127.0.0.1:50021/accent_phrases?speaker=1&is_kana=true" \ - --get --data-urlencode text@kana.txt \ - > newphrases.json - -# query.jsonの"accent_phrases"の内容をnewphrases.jsonの内容に置き換える -cat query.json | sed -e "s/\[{.*}\]/$(cat newphrases.json)/g" > newquery.json - -curl -s \ - -H "Content-Type: application/json" \ - -X POST \ - -d @newquery.json \ - "127.0.0.1:50021/synthesis?speaker=1" \ - > audio.wav -``` - -### ユーザー辞書機能について - -APIからユーザー辞書の参照、単語の追加、編集、削除を行うことができます。 - -#### 参照 - -`/user_dict`にGETリクエストを投げることでユーザー辞書の一覧を取得することができます。 - -```bash -curl -s -X GET "127.0.0.1:50021/user_dict" -``` - -#### 単語追加 - -`/user_dict_word`にPOSTリクエストを投げる事でユーザー辞書に単語を追加することができます。 -URLパラメータとして、以下が必要です。 -- surface (辞書に登録する単語) -- pronunciation (カタカナでの読み方) -- accent_type (アクセント核位置、整数) - -アクセント核位置については、こちらの文章が参考になるかと思います。 -〇型となっている数字の部分がアクセント核位置になります。 -https://tdmelodic.readthedocs.io/ja/latest/pages/introduction.html - -成功した場合の返り値は単語に割り当てられるUUIDの文字列になります。 - -```bash -surface="test" -pronunciation="テスト" -accent_type="1" - -curl -s -X POST "127.0.0.1:50021/user_dict_word" \ - --get \ - --data-urlencode "surface=$surface" \ - --data-urlencode "pronunciation=$pronunciation" \ - --data-urlencode "accent_type=$accent_type" -``` - -#### 単語修正 - -`/user_dict_word/{word_uuid}`にPUTリクエストを投げる事でユーザー辞書の単語を修正することができます。 -URLパラメータとして、以下が必要です。 -- surface (辞書に登録するワード) -- pronunciation (カタカナでの読み方) -- accent_type (アクセント核位置、整数) - -word_uuidは単語追加時に確認できるほか、ユーザー辞書を参照することでも確認できます。 -成功した場合の返り値は`204 No Content`になります。 - -```bash -surface="test2" -pronunciation="テストツー" -accent_type="2" -# 環境によってword_uuidは適宜書き換えてください -word_uuid="cce59b5f-86ab-42b9-bb75-9fd3407f1e2d" - -curl -s -X PUT "127.0.0.1:50021/user_dict_word/$word_uuid" \ - --get \ - --data-urlencode "surface=$surface" \ - --data-urlencode "pronunciation=$pronunciation" \ - --data-urlencode "accent_type=$accent_type" -``` - -#### 単語削除 - -`/user_dict_word/{word_uuid}`にDELETEリクエストを投げる事でユーザー辞書の単語を削除することができます。 - -word_uuidは単語追加時に確認できるほか、ユーザー辞書を参照することでも確認できます。 -成功した場合の返り値は`204 No Content`になります。 - -```bash -# 環境によってword_uuidは適宜書き換えてください -word_uuid="cce59b5f-86ab-42b9-bb75-9fd3407f1e2d" - -curl -s -X DELETE "127.0.0.1:50021/user_dict_word/$word_uuid" -``` - -### プリセット機能について - -`presets.yaml`を編集することで話者や話速などのプリセットを使うことができます。 - -```bash -echo -n "プリセットをうまく活用すれば、サードパーティ間で同じ設定を使うことができます" >text.txt - -# プリセット情報を取得 -curl -s -X GET "127.0.0.1:50021/presets" > presets.json - -preset_id=$(cat presets.json | sed -r 's/^.+"id"\:\s?([0-9]+?).+$/\1/g') -style_id=$(cat presets.json | sed -r 's/^.+"style_id"\:\s?([0-9]+?).+$/\1/g') - -# AudioQueryの取得 -curl -s \ - -X POST \ - "127.0.0.1:50021/audio_query_from_preset?preset_id=$preset_id"\ - --get --data-urlencode text@text.txt \ - > query.json - -# 音声合成 -curl -s \ - -H "Content-Type: application/json" \ - -X POST \ - -d @query.json \ - "127.0.0.1:50021/synthesis?speaker=$style_id" \ - > audio.wav -``` - -- `speaker_uuid`は、`/speakers`で確認できます -- `id`は重複してはいけません -- エンジン起動後にファイルを書き換えるとエンジンに反映されます - -### 2 人の話者でモーフィングするサンプルコード - -`/synthesis_morphing`では、2 人の話者でそれぞれ合成された音声を元に、モーフィングした音声を生成します。 - -```bash -echo -n "モーフィングを利用することで、2つの声を混ぜることができます。" > text.txt - -curl -s \ - -X POST \ - "127.0.0.1:50021/audio_query?speaker=0"\ - --get --data-urlencode text@text.txt \ - > query.json - -# 元の話者での合成結果 -curl -s \ - -H "Content-Type: application/json" \ - -X POST \ - -d 
@query.json \ - "127.0.0.1:50021/synthesis?speaker=0" \ - > audio.wav - -export MORPH_RATE=0.5 - -# 話者2人分の音声合成+WORLDによる音声分析が入るため時間が掛かるので注意 -curl -s \ - -H "Content-Type: application/json" \ - -X POST \ - -d @query.json \ - "127.0.0.1:50021/synthesis_morphing?base_speaker=0&target_speaker=1&morph_rate=$MORPH_RATE" \ - > audio.wav - -export MORPH_RATE=0.9 - -# query、base_speaker、target_speakerが同じ場合はキャッシュが使用されるため比較的高速に生成される -curl -s \ - -H "Content-Type: application/json" \ - -X POST \ - -d @query.json \ - "127.0.0.1:50021/synthesis_morphing?base_speaker=0&target_speaker=1&morph_rate=$MORPH_RATE" \ - > audio.wav -``` - -### 話者の追加情報を取得するサンプルコード - -追加情報の中の portrait.png を取得するコードです。 -([jq](https://stedolan.github.io/jq/)を使用して json をパースしています。) - -```bash -curl -s -X GET "127.0.0.1:50021/speaker_info?speaker_uuid=7ffcb7ce-00ec-4bdc-82cd-45a8889e43ff" \ - | jq -r ".portrait" \ - | base64 -d \ - > portrait.png -``` - -### キャンセル可能な音声合成 - -`/cancellable_synthesis`では通信を切断した場合に即座に計算リソースが開放されます。 -(`/synthesis`では通信を切断しても最後まで音声合成の計算が行われます) -この API は実験的機能であり、エンジン起動時に引数で`--enable_cancellable_synthesis`を指定しないと有効化されません。 -音声合成に必要なパラメータは`/synthesis`と同様です。 - -### CORS設定 - -VOICEVOXではセキュリティ保護のため`localhost`・`127.0.0.1`・`app://`・Originなし以外のOriginからリクエストを受け入れないようになっています。 -そのため、一部のサードパーティアプリからのレスポンスを受け取れない可能性があります。 -これを回避する方法として、エンジンから設定できるUIを用意しています。 - -#### 設定方法 - -1. にアクセスします。 -2. 利用するアプリに合わせて設定を変更、追加してください。 -3. 保存ボタンを押して、変更を確定してください。 -4. 設定の適用にはエンジンの再起動が必要です。必要に応じて再起動をしてください。 - -## アップデート - -エンジンディレクトリ内にあるファイルを全て消去し、新しいものに置き換えてください。 - -## Docker イメージ - -### CPU - -```bash -docker pull voicevox/voicevox_engine:cpu-ubuntu20.04-latest -docker run --rm -p '127.0.0.1:50021:50021' voicevox/voicevox_engine:cpu-ubuntu20.04-latest -``` - -### GPU - -```bash -docker pull voicevox/voicevox_engine:nvidia-ubuntu20.04-latest -docker run --rm --gpus all -p '127.0.0.1:50021:50021' voicevox/voicevox_engine:nvidia-ubuntu20.04-latest -``` - -#### トラブルシューティング -GPU版を利用する場合、環境によってエラーが発生することがあります。その場合、`--runtime=nvidia`を`docker run`につけて実行すると解決できることがあります。 - -## 貢献者の方へ - -Issue を解決するプルリクエストを作成される際は、別の方と同じ Issue に取り組むことを避けるため、 -Issue 側で取り組み始めたことを伝えるか、最初に Draft プルリクエストを作成してください。 - -[VOICEVOX 非公式 Discord サーバー](https://discord.gg/WMwWetrzuh)にて、開発の議論や雑談を行っています。気軽にご参加ください。 - -## 環境構築 - -`Python 3.11.3` を用いて開発されています。 -インストールするには、各 OS ごとの C/C++ コンパイラ、CMake が必要になります。 - -```bash -# 開発に必要なライブラリのインストール -python -m pip install -r requirements-dev.txt -r requirements-test.txt - -# とりあえず実行したいだけなら代わりにこちら -python -m pip install -r requirements.txt -``` - -## 実行 - -コマンドライン引数の詳細は以下のコマンドで確認してください。 - -```bash -python run.py --help -``` - -```bash -# 製品版 VOICEVOX でサーバーを起動 -VOICEVOX_DIR="C:/path/to/voicevox" # 製品版 VOICEVOX ディレクトリのパス -python run.py --voicevox_dir=$VOICEVOX_DIR -``` - - - -```bash -# モックでサーバー起動 -python run.py --enable_mock -``` - -```bash -# ログをUTF8に変更 -python run.py --output_log_utf8 -# もしくは VV_OUTPUT_LOG_UTF8=1 python run.py -``` - -### CPU スレッド数を指定する - -CPU スレッド数が未指定の場合は、論理コア数の半分か物理コア数が使われます。(殆どの CPU で、これは全体の処理能力の半分です) -もし IaaS 上で実行していたり、専用サーバーで実行している場合など、 -エンジンが使う処理能力を調節したい場合は、CPU スレッド数を指定することで実現できます。 - -- 実行時引数で指定する - - ```bash - python run.py --voicevox_dir=$VOICEVOX_DIR --cpu_num_threads=4 - ``` - -- 環境変数で指定する - ```bash - export VV_CPU_NUM_THREADS=4 - python run.py --voicevox_dir=$VOICEVOX_DIR - ``` - -### 過去のバージョンのコアを使う -VOICEVOX Core 0.5.4以降のコアを使用する事が可能です。 -Macでのlibtorch版コアのサポートはしていません。 - -#### 過去のバイナリを指定する -製品版VOICEVOXもしくはコンパイル済みエンジンのディレクトリを`--voicevox_dir`引数で指定すると、そのバージョンのコアが使用されます。 -```bash -python run.py 
--voicevox_dir="/path/to/voicevox" -``` -Macでは、`DYLD_LIBRARY_PATH`の指定が必要です。 -```bash -DYLD_LIBRARY_PATH="/path/to/voicevox" python run.py --voicevox_dir="/path/to/voicevox" -``` - -#### 音声ライブラリを直接指定する -[VOICEVOX Coreのzipファイル](https://github.com/VOICEVOX/voicevox_core/releases)を解凍したディレクトリを`--voicelib_dir`引数で指定します。 -また、コアのバージョンに合わせて、[libtorch](https://pytorch.org/)や[onnxruntime](https://github.com/microsoft/onnxruntime)のディレクトリを`--runtime_dir`引数で指定します。 -ただし、システムの探索パス上にlibtorch、onnxruntimeがある場合、`--runtime_dir`引数の指定は不要です。 -`--voicelib_dir`引数、`--runtime_dir`引数は複数回使用可能です。 -APIエンドポイントでコアのバージョンを指定する場合は`core_version`引数を指定してください。(未指定の場合は最新のコアが使用されます) -```bash -python run.py --voicelib_dir="/path/to/voicevox_core" --runtime_dir="/path/to/libtorch_or_onnx" -``` -Macでは、`--runtime_dir`引数の代わりに`DYLD_LIBRARY_PATH`の指定が必要です。 -```bash -DYLD_LIBRARY_PATH="/path/to/onnx" python run.py --voicelib_dir="/path/to/voicevox_core" -``` - -## コードフォーマット - -このソフトウェアでは、リモートにプッシュする前にコードフォーマットを確認する仕組み(静的解析ツール)を利用できます。 -利用するには、開発に必要なライブラリのインストールに加えて、以下のコマンドを実行してください。 -プルリクエストを作成する際は、利用することを推奨します。 - -```bash -pre-commit install -t pre-push -``` - -エラーが出た際は、以下のコマンドで修正することが可能です。なお、完全に修正できるわけではないので注意してください。 - -```bash -pysen run format lint -``` - -## タイポチェック - -[typos](https://github.com/crate-ci/typos) を使ってタイポのチェックを行っています。 -[typos をインストール](https://github.com/crate-ci/typos#install) した後 - -```bash -typos -``` - -でタイポチェックを行えます。 -もし誤判定やチェックから除外すべきファイルがあれば -[設定ファイルの説明](https://github.com/crate-ci/typos#false-positives) に従って`_typos.toml`を編集してください。 - -## API ドキュメントの確認 - -[API ドキュメント](https://voicevox.github.io/voicevox_engine/api/)(実体は`docs/api/index.html`)は自動で更新されます。 -次のコマンドで API ドキュメントを手動で作成することができます。 - -```bash -python make_docs.py -``` - -## ビルド - -この方法でビルドしたものは、リリースで公開されているものとは異なります。 -また、GPUで利用するにはcuDNNやCUDA、DirectMLなどのライブラリが追加で必要となります。 - -```bash -python -m pip install -r requirements-dev.txt - -OUTPUT_LICENSE_JSON_PATH=licenses.json \ -bash build_util/create_venv_and_generate_licenses.bash - -# ビルド自体はLIBCORE_PATH及びLIBONNXRUNTIME_PATHの指定がなくても可能です -LIBCORE_PATH="/path/to/libcore" \ - LIBONNXRUNTIME_PATH="/path/to/libonnxruntime" \ - pyinstaller --noconfirm run.spec -``` - -## 依存関係 - -### 更新 - -[Poetry](https://python-poetry.org/) を用いて依存ライブラリのバージョンを固定しています。 -以下のコマンドで操作できます: - -```bash -# パッケージを追加する場合 -poetry add `パッケージ名` -poetry add --group dev `パッケージ名` # 開発依存の追加 -poetry add --group test `パッケージ名` # テスト依存の追加 - -# パッケージをアップデートする場合 -poetry update `パッケージ名` -poetry update # 全部更新 - -# requirements.txtの更新 -poetry export --without-hashes -o requirements.txt # こちらを更新する場合は下3つも更新する必要があります。 -poetry export --without-hashes --with dev -o requirements-dev.txt -poetry export --without-hashes --with test -o requirements-test.txt -poetry export --without-hashes --with license -o requirements-license.txt -``` - -### ライセンス - -依存ライブラリは「コアビルド時にリンクして一体化しても、コア部のコード非公開 OK」なライセンスを持つ必要があります。 -主要ライセンスの可否は以下の通りです。 - -- MIT/Apache/BSD-3: OK -- LGPL: OK (コアと動的分離されているため) -- GPL: NG (全関連コードの公開が必要なため) - -## ユーザー辞書の更新について - -以下のコマンドで openjtalk のユーザー辞書をコンパイルできます。 - -```bash -python -c "import pyopenjtalk; pyopenjtalk.create_user_dict('default.csv','user.dic')" -``` - -## マルチエンジン機能に関して - -VOICEVOX エディターでは、複数のエンジンを同時に起動することができます。 -この機能を利用することで、自作の音声合成エンジンや既存の音声合成エンジンを VOICEVOX エディター上で動かすことが可能です。 - - - -
    - -### マルチエンジン機能の仕組み - -VOICEVOX API に準拠した複数のエンジンの Web API をポートを分けて起動し、統一的に扱うことでマルチエンジン機能を実現しています。 -エディターがそれぞれのエンジンを実行バイナリ経由で起動し、EngineID と結びつけて設定や状態を個別管理します。 - -### マルチエンジン機能への対応方法 - -VOICEVOX API 準拠エンジンを起動する実行バイナリを作ることで対応が可能です。 -VOICEVOX ENGINE リポジトリを fork し、一部の機能を改造するのが簡単です。 - -改造すべき点はエンジン情報・キャラクター情報・音声合成の3点です。 - -エンジンの情報はエンジンマニフェスト(`engine_manifest.json`)で管理されています。 -マニフェストファイル内の情報を見て適宜変更してください。 -音声合成手法によっては、例えばモーフィング機能など、VOICEVOX と同じ機能を持つことができない場合があります。 -その場合はマニフェストファイル内の`supported_features`内の情報を適宜変更してください。 - -キャラクター情報は`speaker_info`ディレクトリ内のファイルで管理されています。 -ダミーのアイコンなどが用意されているので適宜変更してください。 - -音声合成は`voicevox_engine/synthesis_engine/synthesis_engine.py`で行われています。 -VOICEVOX API での音声合成は、エンジン側で音声合成クエリ`AudioQuery`の初期値を作成してユーザーに返し、ユーザーが必要に応じてクエリを編集したあと、エンジンがクエリに従って音声合成することで実現しています。 -クエリ作成は`/audio_query`エンドポイントで、音声合成は`/synthesis`エンドポイントで行っており、最低この2つに対応すれば VOICEVOX API に準拠したことになります。 - -### マルチエンジン機能対応エンジンの配布方法 - -VVPP ファイルとして配布するのがおすすめです。 -VVPP は「VOICEVOX プラグインパッケージ」の略で、中身はビルドしたエンジンなどを含んだディレクトリの Zip ファイルです。 -拡張子を`.vvpp`にすると、ダブルクリックで VOICEVOX エディターにインストールできます。 - -エディター側は受け取った VVPP ファイルをローカルディスク上に Zip 展開したあと、ルートの直下にある`engine_manifest.json`に従ってファイルを探査します。 -VOICEVOX エディターにうまく読み込ませられないときは、エディターのエラーログを参照してください。 - -また、`xxx.vvpp`は分割して連番を付けた`xxx.0.vvppp`ファイルとして配布することも可能です。 -これはファイル容量が大きくて配布が困難な場合に有用です。 - -
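For reference, the request flow documented above (`/audio_query` followed by `/synthesis`, the two endpoints this section names as the minimum for VOICEVOX API compliance) can also be driven from Python. This is only a sketch under the same assumptions as the curl samples: an engine listening on 127.0.0.1:50021, the third-party `requests` library installed, and style id `1` used purely as the example value from the samples above.

```python
# Minimal sketch of the query -> synthesis flow shown in the curl samples above.
# Assumes a VOICEVOX-API-compatible engine on 127.0.0.1:50021 and `pip install requests`.
import requests

BASE_URL = "http://127.0.0.1:50021"
STYLE_ID = 1  # a `style_id` returned by /speakers, passed as the `speaker` parameter


def synthesize(text: str, out_path: str = "audio.wav") -> None:
    # 1. Ask the engine to build the default AudioQuery for the text.
    query_resp = requests.post(
        f"{BASE_URL}/audio_query",
        params={"text": text, "speaker": STYLE_ID},
    )
    query_resp.raise_for_status()
    query = query_resp.json()

    # (The query can be edited here, e.g. accent phrases or speed, before synthesis.)

    # 2. Render the (possibly edited) query to a 24000 Hz WAV.
    wav_resp = requests.post(
        f"{BASE_URL}/synthesis",
        params={"speaker": STYLE_ID},
        json=query,
    )
    wav_resp.raise_for_status()
    with open(out_path, "wb") as f:
        f.write(wav_resp.content)


if __name__ == "__main__":
    synthesize("こんにちは、音声合成の世界へようこそ")
```
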
    - -## GitHub Actions - -### Variables - -| name | description | -| :----------------- | :---------------------------------------------------------------------- | -| DOCKERHUB_USERNAME | Docker Hub ユーザ名 | - -### Secrets - -| name | description | -| :----------------- | :---------------------------------------------------------------------- | -| DOCKERHUB_TOKEN | [Docker Hub アクセストークン](https://hub.docker.com/settings/security) | - -## 事例紹介 - -**[voicevox-client](https://github.com/tuna2134/voicevox-client) [@tuna2134](https://github.com/tuna2134)** ・・・ VOICEVOX ENGINE のためのPythonラッパー - -## ライセンス - -LGPL v3 と、ソースコードの公開が不要な別ライセンスのデュアルライセンスです。 -別ライセンスを取得したい場合は、ヒホ(twitter: @hiho_karuta)に求めてください。 \ No newline at end of file diff --git a/spaces/dragao-elastico/RVC_V2/lib/infer_pack/modules.py b/spaces/dragao-elastico/RVC_V2/lib/infer_pack/modules.py deleted file mode 100644 index c83289df7c79a4810dacd15c050148544ba0b6a9..0000000000000000000000000000000000000000 --- a/spaces/dragao-elastico/RVC_V2/lib/infer_pack/modules.py +++ /dev/null @@ -1,522 +0,0 @@ -import copy -import math -import numpy as np -import scipy -import torch -from torch import nn -from torch.nn import functional as F - -from torch.nn import Conv1d, ConvTranspose1d, AvgPool1d, Conv2d -from torch.nn.utils import weight_norm, remove_weight_norm - -from lib.infer_pack import commons -from lib.infer_pack.commons import init_weights, get_padding -from lib.infer_pack.transforms import piecewise_rational_quadratic_transform - - -LRELU_SLOPE = 0.1 - - -class LayerNorm(nn.Module): - def __init__(self, channels, eps=1e-5): - super().__init__() - self.channels = channels - self.eps = eps - - self.gamma = nn.Parameter(torch.ones(channels)) - self.beta = nn.Parameter(torch.zeros(channels)) - - def forward(self, x): - x = x.transpose(1, -1) - x = F.layer_norm(x, (self.channels,), self.gamma, self.beta, self.eps) - return x.transpose(1, -1) - - -class ConvReluNorm(nn.Module): - def __init__( - self, - in_channels, - hidden_channels, - out_channels, - kernel_size, - n_layers, - p_dropout, - ): - super().__init__() - self.in_channels = in_channels - self.hidden_channels = hidden_channels - self.out_channels = out_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - assert n_layers > 1, "Number of layers should be larger than 0." 
- - self.conv_layers = nn.ModuleList() - self.norm_layers = nn.ModuleList() - self.conv_layers.append( - nn.Conv1d( - in_channels, hidden_channels, kernel_size, padding=kernel_size // 2 - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.relu_drop = nn.Sequential(nn.ReLU(), nn.Dropout(p_dropout)) - for _ in range(n_layers - 1): - self.conv_layers.append( - nn.Conv1d( - hidden_channels, - hidden_channels, - kernel_size, - padding=kernel_size // 2, - ) - ) - self.norm_layers.append(LayerNorm(hidden_channels)) - self.proj = nn.Conv1d(hidden_channels, out_channels, 1) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask): - x_org = x - for i in range(self.n_layers): - x = self.conv_layers[i](x * x_mask) - x = self.norm_layers[i](x) - x = self.relu_drop(x) - x = x_org + self.proj(x) - return x * x_mask - - -class DDSConv(nn.Module): - """ - Dialted and Depth-Separable Convolution - """ - - def __init__(self, channels, kernel_size, n_layers, p_dropout=0.0): - super().__init__() - self.channels = channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.p_dropout = p_dropout - - self.drop = nn.Dropout(p_dropout) - self.convs_sep = nn.ModuleList() - self.convs_1x1 = nn.ModuleList() - self.norms_1 = nn.ModuleList() - self.norms_2 = nn.ModuleList() - for i in range(n_layers): - dilation = kernel_size**i - padding = (kernel_size * dilation - dilation) // 2 - self.convs_sep.append( - nn.Conv1d( - channels, - channels, - kernel_size, - groups=channels, - dilation=dilation, - padding=padding, - ) - ) - self.convs_1x1.append(nn.Conv1d(channels, channels, 1)) - self.norms_1.append(LayerNorm(channels)) - self.norms_2.append(LayerNorm(channels)) - - def forward(self, x, x_mask, g=None): - if g is not None: - x = x + g - for i in range(self.n_layers): - y = self.convs_sep[i](x * x_mask) - y = self.norms_1[i](y) - y = F.gelu(y) - y = self.convs_1x1[i](y) - y = self.norms_2[i](y) - y = F.gelu(y) - y = self.drop(y) - x = x + y - return x * x_mask - - -class WN(torch.nn.Module): - def __init__( - self, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - gin_channels=0, - p_dropout=0, - ): - super(WN, self).__init__() - assert kernel_size % 2 == 1 - self.hidden_channels = hidden_channels - self.kernel_size = (kernel_size,) - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.gin_channels = gin_channels - self.p_dropout = p_dropout - - self.in_layers = torch.nn.ModuleList() - self.res_skip_layers = torch.nn.ModuleList() - self.drop = nn.Dropout(p_dropout) - - if gin_channels != 0: - cond_layer = torch.nn.Conv1d( - gin_channels, 2 * hidden_channels * n_layers, 1 - ) - self.cond_layer = torch.nn.utils.weight_norm(cond_layer, name="weight") - - for i in range(n_layers): - dilation = dilation_rate**i - padding = int((kernel_size * dilation - dilation) / 2) - in_layer = torch.nn.Conv1d( - hidden_channels, - 2 * hidden_channels, - kernel_size, - dilation=dilation, - padding=padding, - ) - in_layer = torch.nn.utils.weight_norm(in_layer, name="weight") - self.in_layers.append(in_layer) - - # last one is not necessary - if i < n_layers - 1: - res_skip_channels = 2 * hidden_channels - else: - res_skip_channels = hidden_channels - - res_skip_layer = torch.nn.Conv1d(hidden_channels, res_skip_channels, 1) - res_skip_layer = torch.nn.utils.weight_norm(res_skip_layer, name="weight") - self.res_skip_layers.append(res_skip_layer) - - def forward(self, x, x_mask, g=None, **kwargs): - output = torch.zeros_like(x) - 
n_channels_tensor = torch.IntTensor([self.hidden_channels]) - - if g is not None: - g = self.cond_layer(g) - - for i in range(self.n_layers): - x_in = self.in_layers[i](x) - if g is not None: - cond_offset = i * 2 * self.hidden_channels - g_l = g[:, cond_offset : cond_offset + 2 * self.hidden_channels, :] - else: - g_l = torch.zeros_like(x_in) - - acts = commons.fused_add_tanh_sigmoid_multiply(x_in, g_l, n_channels_tensor) - acts = self.drop(acts) - - res_skip_acts = self.res_skip_layers[i](acts) - if i < self.n_layers - 1: - res_acts = res_skip_acts[:, : self.hidden_channels, :] - x = (x + res_acts) * x_mask - output = output + res_skip_acts[:, self.hidden_channels :, :] - else: - output = output + res_skip_acts - return output * x_mask - - def remove_weight_norm(self): - if self.gin_channels != 0: - torch.nn.utils.remove_weight_norm(self.cond_layer) - for l in self.in_layers: - torch.nn.utils.remove_weight_norm(l) - for l in self.res_skip_layers: - torch.nn.utils.remove_weight_norm(l) - - -class ResBlock1(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3, 5)): - super(ResBlock1, self).__init__() - self.convs1 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[2], - padding=get_padding(kernel_size, dilation[2]), - ) - ), - ] - ) - self.convs1.apply(init_weights) - - self.convs2 = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=1, - padding=get_padding(kernel_size, 1), - ) - ), - ] - ) - self.convs2.apply(init_weights) - - def forward(self, x, x_mask=None): - for c1, c2 in zip(self.convs1, self.convs2): - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c1(xt) - xt = F.leaky_relu(xt, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c2(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs1: - remove_weight_norm(l) - for l in self.convs2: - remove_weight_norm(l) - - -class ResBlock2(torch.nn.Module): - def __init__(self, channels, kernel_size=3, dilation=(1, 3)): - super(ResBlock2, self).__init__() - self.convs = nn.ModuleList( - [ - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[0], - padding=get_padding(kernel_size, dilation[0]), - ) - ), - weight_norm( - Conv1d( - channels, - channels, - kernel_size, - 1, - dilation=dilation[1], - padding=get_padding(kernel_size, dilation[1]), - ) - ), - ] - ) - self.convs.apply(init_weights) - - def forward(self, x, x_mask=None): - for c in self.convs: - xt = F.leaky_relu(x, LRELU_SLOPE) - if x_mask is not None: - xt = xt * x_mask - xt = c(xt) - x = xt + x - if x_mask is not None: - x = x * x_mask - return x - - def remove_weight_norm(self): - for l in self.convs: - remove_weight_norm(l) - - -class Log(nn.Module): - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = 
torch.log(torch.clamp_min(x, 1e-5)) * x_mask - logdet = torch.sum(-y, [1, 2]) - return y, logdet - else: - x = torch.exp(x) * x_mask - return x - - -class Flip(nn.Module): - def forward(self, x, *args, reverse=False, **kwargs): - x = torch.flip(x, [1]) - if not reverse: - logdet = torch.zeros(x.size(0)).to(dtype=x.dtype, device=x.device) - return x, logdet - else: - return x - - -class ElementwiseAffine(nn.Module): - def __init__(self, channels): - super().__init__() - self.channels = channels - self.m = nn.Parameter(torch.zeros(channels, 1)) - self.logs = nn.Parameter(torch.zeros(channels, 1)) - - def forward(self, x, x_mask, reverse=False, **kwargs): - if not reverse: - y = self.m + torch.exp(self.logs) * x - y = y * x_mask - logdet = torch.sum(self.logs * x_mask, [1, 2]) - return y, logdet - else: - x = (x - self.m) * torch.exp(-self.logs) * x_mask - return x - - -class ResidualCouplingLayer(nn.Module): - def __init__( - self, - channels, - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=0, - gin_channels=0, - mean_only=False, - ): - assert channels % 2 == 0, "channels should be divisible by 2" - super().__init__() - self.channels = channels - self.hidden_channels = hidden_channels - self.kernel_size = kernel_size - self.dilation_rate = dilation_rate - self.n_layers = n_layers - self.half_channels = channels // 2 - self.mean_only = mean_only - - self.pre = nn.Conv1d(self.half_channels, hidden_channels, 1) - self.enc = WN( - hidden_channels, - kernel_size, - dilation_rate, - n_layers, - p_dropout=p_dropout, - gin_channels=gin_channels, - ) - self.post = nn.Conv1d(hidden_channels, self.half_channels * (2 - mean_only), 1) - self.post.weight.data.zero_() - self.post.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) * x_mask - h = self.enc(h, x_mask, g=g) - stats = self.post(h) * x_mask - if not self.mean_only: - m, logs = torch.split(stats, [self.half_channels] * 2, 1) - else: - m = stats - logs = torch.zeros_like(m) - - if not reverse: - x1 = m + x1 * torch.exp(logs) * x_mask - x = torch.cat([x0, x1], 1) - logdet = torch.sum(logs, [1, 2]) - return x, logdet - else: - x1 = (x1 - m) * torch.exp(-logs) * x_mask - x = torch.cat([x0, x1], 1) - return x - - def remove_weight_norm(self): - self.enc.remove_weight_norm() - - -class ConvFlow(nn.Module): - def __init__( - self, - in_channels, - filter_channels, - kernel_size, - n_layers, - num_bins=10, - tail_bound=5.0, - ): - super().__init__() - self.in_channels = in_channels - self.filter_channels = filter_channels - self.kernel_size = kernel_size - self.n_layers = n_layers - self.num_bins = num_bins - self.tail_bound = tail_bound - self.half_channels = in_channels // 2 - - self.pre = nn.Conv1d(self.half_channels, filter_channels, 1) - self.convs = DDSConv(filter_channels, kernel_size, n_layers, p_dropout=0.0) - self.proj = nn.Conv1d( - filter_channels, self.half_channels * (num_bins * 3 - 1), 1 - ) - self.proj.weight.data.zero_() - self.proj.bias.data.zero_() - - def forward(self, x, x_mask, g=None, reverse=False): - x0, x1 = torch.split(x, [self.half_channels] * 2, 1) - h = self.pre(x0) - h = self.convs(h, x_mask, g=g) - h = self.proj(h) * x_mask - - b, c, t = x0.shape - h = h.reshape(b, c, -1, t).permute(0, 1, 3, 2) # [b, cx?, t] -> [b, c, t, ?] 
- - unnormalized_widths = h[..., : self.num_bins] / math.sqrt(self.filter_channels) - unnormalized_heights = h[..., self.num_bins : 2 * self.num_bins] / math.sqrt( - self.filter_channels - ) - unnormalized_derivatives = h[..., 2 * self.num_bins :] - - x1, logabsdet = piecewise_rational_quadratic_transform( - x1, - unnormalized_widths, - unnormalized_heights, - unnormalized_derivatives, - inverse=reverse, - tails="linear", - tail_bound=self.tail_bound, - ) - - x = torch.cat([x0, x1], 1) * x_mask - logdet = torch.sum(logabsdet * x_mask, [1, 2]) - if not reverse: - return x, logdet - else: - return x diff --git a/spaces/dragonSwing/video2slide/utils.py b/spaces/dragonSwing/video2slide/utils.py deleted file mode 100644 index c31881d7526841f68a6412a727f8e1ca34cbd56e..0000000000000000000000000000000000000000 --- a/spaces/dragonSwing/video2slide/utils.py +++ /dev/null @@ -1,66 +0,0 @@ -import os -import re -import cv2 -import shutil -import img2pdf -from imutils import paths - -# PIL can also be used to convert the image set into PDFs. -# However, using PIL requires opening each of the images in the set. -# Hence img2pdf package was used, which is able to convert the entire image set into a PDF -# without opening at once. - - -def sanitize_file_name(string): - sanitized_string = re.sub(r'[^\w ]+', '', string) - sanitized_string = re.sub(r'\s+', ' ', sanitized_string) - sanitized_string = sanitized_string.strip() - - return sanitized_string - - -def resize_image_frame(frame, resize_width): - ht, wd, _ = frame.shape - new_height = resize_width * ht / wd - frame = cv2.resize( - frame, (resize_width, int(new_height)), interpolation=cv2.INTER_AREA - ) - - return frame - - -def create_output_directory(video_path, output_path, type_bgsub): - vid_file_name = video_path.rsplit(os.sep)[-1].split(".")[0] - output_dir_path = os.path.join(output_path, vid_file_name, type_bgsub) - - # Remove the output directory if there is already one. - if os.path.exists(output_dir_path): - shutil.rmtree(output_dir_path) - - # Create output directory. 
- os.makedirs(output_dir_path, exist_ok=True) - print("Output directory created...") - print("Path:", output_dir_path) - print("***" * 10, "\n") - - return output_dir_path - - -def convert_slides_to_pdf(img_dir, output_path=None): - if not os.path.isdir(img_dir): - print("The image directory doesn't exist!") - return - - if output_path == None: - pdf_file_name = os.path.basename(img_dir) + ".pdf" - output_path = os.path.join(img_dir, pdf_file_name) - print("Output PDF Path:", output_path) - - print("Converting captured slide images to PDF...") - with open(output_path, "wb") as f: - f.write(img2pdf.convert(sorted(paths.list_images(img_dir)))) - - print("PDF Created!") - print("***" * 10, "\n") - - return output_path diff --git a/spaces/editing-images/ledtisplusplus/utils.py b/spaces/editing-images/ledtisplusplus/utils.py deleted file mode 100644 index a9a7ec323d63265f418432fad13d3f98e0ea9f14..0000000000000000000000000000000000000000 --- a/spaces/editing-images/ledtisplusplus/utils.py +++ /dev/null @@ -1,114 +0,0 @@ -import PIL -from PIL import Image, ImageDraw ,ImageFont -from matplotlib import pyplot as plt -import torchvision.transforms as T -import os -import torch -import yaml - -def show_torch_img(img): - img = to_np_image(img) - plt.imshow(img) - plt.axis("off") - -def to_np_image(all_images): - all_images = (all_images.permute(0, 2, 3, 1) * 127.5 + 128).clamp(0, 255).to(torch.uint8).cpu().numpy()[0] - return all_images - -def tensor_to_pil(tensor_imgs): - if type(tensor_imgs) == list: - tensor_imgs = torch.cat(tensor_imgs) - tensor_imgs = (tensor_imgs / 2 + 0.5).clamp(0, 1) - to_pil = T.ToPILImage() - pil_imgs = [to_pil(img) for img in tensor_imgs] - return pil_imgs - -def pil_to_tensor(pil_imgs): - to_torch = T.ToTensor() - if type(pil_imgs) == PIL.Image.Image: - tensor_imgs = to_torch(pil_imgs).unsqueeze(0)*2-1 - elif type(pil_imgs) == list: - tensor_imgs = torch.cat([to_torch(pil_imgs).unsqueeze(0)*2-1 for img in pil_imgs]).to(device) - else: - raise Exception("Input need to be PIL.Image or list of PIL.Image") - return tensor_imgs - - -## TODO implement this -# n = 10 -# num_rows = 4 -# num_col = n // num_rows -# num_col = num_col + 1 if n % num_rows else num_col -# num_col -def add_margin(pil_img, top = 0, right = 0, bottom = 0, - left = 0, color = (255,255,255)): - width, height = pil_img.size - new_width = width + right + left - new_height = height + top + bottom - result = Image.new(pil_img.mode, (new_width, new_height), color) - - result.paste(pil_img, (left, top)) - return result - -def image_grid(imgs, rows = 1, cols = None, - size = None, - titles = None, text_pos = (0, 0)): - if type(imgs) == list and type(imgs[0]) == torch.Tensor: - imgs = torch.cat(imgs) - if type(imgs) == torch.Tensor: - imgs = tensor_to_pil(imgs) - - if not size is None: - imgs = [img.resize((size,size)) for img in imgs] - if cols is None: - cols = len(imgs) - assert len(imgs) >= rows*cols - - top=20 - w, h = imgs[0].size - delta = 0 - if len(imgs)> 1 and not imgs[1].size[1] == h: - delta = top - h = imgs[1].size[1] - if not titles is None: - font = ImageFont.truetype("/usr/share/fonts/truetype/freefont/FreeMono.ttf", - size = 20, encoding="unic") - h = top + h - grid = Image.new('RGB', size=(cols*w, rows*h+delta)) - for i, img in enumerate(imgs): - - if not titles is None: - img = add_margin(img, top = top, bottom = 0,left=0) - draw = ImageDraw.Draw(img) - draw.text(text_pos, titles[i],(0,0,0), - font = font) - if not delta == 0 and i > 0: - grid.paste(img, box=(i%cols*w, i//cols*h+delta)) - else: - 
grid.paste(img, box=(i%cols*w, i//cols*h)) - - return grid - - -""" -input_folder - dataset folder -""" -def load_dataset(input_folder): - # full_file_names = glob.glob(input_folder) - # class_names = [x[0] for x in os.walk(input_folder)] - class_names = next(os.walk(input_folder))[1] - class_names[:] = [d for d in class_names if not d[0] == '.'] - file_names=[] - for class_name in class_names: - cur_path = os.path.join(input_folder, class_name) - filenames = next(os.walk(cur_path), (None, None, []))[2] - filenames = [f for f in filenames if not f[0] == '.'] - file_names.append(filenames) - return class_names, file_names - - -def dataset_from_yaml(yaml_location): - with open(yaml_location, 'r') as stream: - data_loaded = yaml.safe_load(stream) - - return data_loaded \ No newline at end of file diff --git a/spaces/emre/garanti-mybankconcept-img-gen/README.md b/spaces/emre/garanti-mybankconcept-img-gen/README.md deleted file mode 100644 index 084b5346611e93764fba573b0f2f96ecdd8351bd..0000000000000000000000000000000000000000 --- a/spaces/emre/garanti-mybankconcept-img-gen/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Garanti Mybankconcept Img Gen -emoji: 📉 -colorFrom: yellow -colorTo: green -sdk: gradio -sdk_version: 3.13.0 -app_file: app.py -pinned: false -license: creativeml-openrail-m ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/ennet/ChatDev/camel/prompts/base.py b/spaces/ennet/ChatDev/camel/prompts/base.py deleted file mode 100644 index 6b0aec105fb652d826da34a85b7c17a087c7dd8b..0000000000000000000000000000000000000000 --- a/spaces/ennet/ChatDev/camel/prompts/base.py +++ /dev/null @@ -1,233 +0,0 @@ -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -# Licensed under the Apache License, Version 2.0 (the “License”); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an “AS IS” BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -# =========== Copyright 2023 @ CAMEL-AI.org. All Rights Reserved. =========== -import inspect -from typing import Any, Callable, Dict, Optional, Set, Tuple, TypeVar, Union - -from camel.typing import RoleType - -T = TypeVar('T') - - -def return_prompt_wrapper( - cls: T, - func: Callable, -) -> Callable[..., Union[T, tuple]]: - r"""Wrapper that converts the return value of a function to an input - class instance if it's a string. - - Args: - cls (type): The class to convert to. - func (Callable): The function to decorate. - - Returns: - Callable[..., Union[T, tuple]]: Decorated function that - returns the decorated class instance if the return value is a - string. - """ - - def wrapper(*args: Any, **kwargs: Any) -> Union[T, tuple]: - r"""Wrapper function that performs the conversion to :obj:`TextPrompt` - instance. - - Args: - *args (Any): Variable length argument list. - **kwargs (Any): Arbitrary keyword arguments. - - Returns: - Union[TextPrompt, tuple]: The converted return value. 
- """ - result = func(*args, **kwargs) - if isinstance(result, str) and not isinstance(result, cls): - return cls(result) - elif isinstance(result, tuple): - new_result = tuple( - cls(item) if isinstance(item, str) - and not isinstance(item, cls) else item for item in result) - return new_result - return result - - # # Preserve the original function's attributes - wrapper.__name__ = func.__name__ - wrapper.__doc__ = func.__doc__ - - return wrapper - - -def wrap_prompt_functions(cls: T) -> T: - r"""Decorator that wraps functions of a class inherited from :obj:`str` - with the :obj:`return_text_prompt` decorator. - - Args: - cls (type): The class to decorate. - - Returns: - type: Decorated class with wrapped functions. - """ - excluded_attrs = {'__init__', '__new__', '__str__', '__repr__'} - for attr_name in dir(cls): - attr_value = getattr(cls, attr_name) - if callable(attr_value) and attr_name not in excluded_attrs: - if inspect.isroutine(attr_value): - setattr(cls, attr_name, return_prompt_wrapper(cls, attr_value)) - return cls - - -@wrap_prompt_functions -class TextPrompt(str): - r"""A class that represents a text prompt. The :obj:`TextPrompt` class - extends the built-in :obj:`str` class to provide a property for retrieving - the set of key words in the prompt. - - Attributes: - key_words (set): A set of strings representing the key words in the - prompt. - """ - - @property - def key_words(self) -> Set[str]: - r"""Returns a set of strings representing the key words in the prompt. - """ - from camel.utils import get_prompt_template_key_words - return get_prompt_template_key_words(self) - - def format(self, *args: Any, **kwargs: Any) -> 'TextPrompt': - r"""Overrides the built-in :obj:`str.format` method to allow for - default values in the format string. This is used to allow formatting - the partial string. - - Args: - *args (Any): Variable length argument list. - **kwargs (Any): Arbitrary keyword arguments. - - Returns: - TextPrompt: A new :obj:`TextPrompt` object with the format string - replaced with the formatted string. - """ - default_kwargs = {key: '{' + f'{key}' + '}' for key in self.key_words} - default_kwargs.update(kwargs) - return TextPrompt(super().format(*args, **default_kwargs)) - - -@wrap_prompt_functions -class CodePrompt(TextPrompt): - r"""A class that represents a code prompt. It extends the :obj:`TextPrompt` - class with a :obj:`code_type` property. - - Args: - code_string (str): The code string for the prompt. - code_type (str, optional): The type of code. Defaults to None. - """ - - def __new__(cls, *args: Any, **kwargs: Any) -> 'CodePrompt': - r"""Creates a new instance of the :obj:`CodePrompt` class. - - Args: - *args (Any): Positional arguments. - **kwargs (Any): Keyword arguments. - - Returns: - CodePrompt: The created :obj:`CodePrompt` instance. - """ - code_type = kwargs.pop('code_type', None) - instance = super().__new__(cls, *args, **kwargs) - instance._code_type = code_type - return instance - - @property - def code_type(self) -> Optional[str]: - r"""Returns the type of code. - - Returns: - Optional[str]: The type of code. - """ - return self._code_type - - def set_code_type(self, code_type: str) -> None: - r"""Sets the type of code. - - Args: - code_type (str): The type of code. - """ - self._code_type = code_type - - def execute( - self, - global_vars: Optional[Dict] = None) -> Tuple[str, Optional[Dict]]: - r"""Executes the code string. If there is an error, the error is caught - and the traceback is returned. 
Otherwise, the output string and local - variables are returned. - - Args: - global_vars (Dict, optional): Global variables to be used during - code execution. (default: :obj:`None`) - - Returns: - Tuple[str, Optional[Dict]]: A tuple containing the output string - and local variables. - """ - # NOTE: Only supports Python code for now. - try: - # Execute the code string - import io - import sys - output_str = io.StringIO() - sys.stdout = output_str - - global_vars = global_vars or globals() - local_vars = {} - exec( - self, - global_vars, - local_vars, - ) - sys.stdout = sys.__stdout__ - output_str.seek(0) - - # If there was no error, return the output and local variables - return output_str.read(), local_vars - - except Exception: - import traceback - traceback_str = traceback.format_exc() - sys.stdout = sys.__stdout__ - # If there was an error, return the traceback - return traceback_str, None - - -# flake8: noqa :E501 -class TextPromptDict(Dict[Any, TextPrompt]): - r"""A dictionary class that maps from key to :obj:`TextPrompt` object. - """ - EMBODIMENT_PROMPT = TextPrompt( - """You are the physical embodiment of the {role} who is working on solving a task: {task}. -You can do things in the physical world including browsing the Internet, reading documents, drawing images, creating videos, executing code and so on. -Your job is to perform the physical actions necessary to interact with the physical world. -You will receive thoughts from the {role} and you will need to perform the actions described in the thoughts. -You can write a series of simple commands in Python to act. -You can perform a set of actions by calling the available Python functions. -You should perform actions based on the descriptions of the functions. - -Here is your action space: -{action_space} - -You should only perform actions in the action space. -You can perform multiple actions. -You can perform actions in any order. -First, explain the actions you will perform and your reasons, then write Python code to implement your actions. -If you decide to perform actions, you must write Python code to implement the actions. -You may print intermediate results if necessary.""") - - def __init__(self, *args: Any, **kwargs: Any) -> None: - super().__init__(*args, **kwargs) - self.update({RoleType.EMBODIMENT: self.EMBODIMENT_PROMPT}) diff --git a/spaces/eson/kplug/draft/bert_corrector_test.py b/spaces/eson/kplug/draft/bert_corrector_test.py deleted file mode 100644 index 14eb8030cc2d595f2e4a356dffcd8bc3de14ac9b..0000000000000000000000000000000000000000 --- a/spaces/eson/kplug/draft/bert_corrector_test.py +++ /dev/null @@ -1,10 +0,0 @@ -# coding=utf-8 -# author: xusong -# time: 2022/8/25 15:49 - - -from transformers import pipeline - -if __name__ == "__main__": - classifier = pipeline("fill-mask") - classifier("Paris is the of France.") \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/Adobe Premiere Pro 2020 14.0.0.572 (x64) Multilingual.md b/spaces/falterWliame/Face_Mask_Detection/Adobe Premiere Pro 2020 14.0.0.572 (x64) Multilingual.md deleted file mode 100644 index ecd4ce89015db9ad3ee9182832af77c98a916574..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Adobe Premiere Pro 2020 14.0.0.572 (x64) Multilingual.md +++ /dev/null @@ -1,6 +0,0 @@ -

    Adobe Premiere Pro 2020 14.0.0.572 (x64) Multilingual


    Download Ziphttps://urlca.com/2uDcqo



    -
    -Adobe Premiere Pro 2018 is a powerful, customizable editor for non-linear editing, with which you ... Requirements: MacOS 10.13 or later 64-bit ... 4d29de3e1b
    -
    -
    -

    diff --git a/spaces/falterWliame/Face_Mask_Detection/Erowid Methamphetamine Shake N Bake [HOT].md b/spaces/falterWliame/Face_Mask_Detection/Erowid Methamphetamine Shake N Bake [HOT].md deleted file mode 100644 index 61cab95cda2a2d9fc3dc50d8f20f546109ee0200..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/Erowid Methamphetamine Shake N Bake [HOT].md +++ /dev/null @@ -1,34 +0,0 @@ -
    -

    What is Erowid Methamphetamine Shake N Bake and How Dangerous Is It?

    - -

    Methamphetamine is a powerful stimulant drug that can be made from common household chemicals and pseudoephedrine, a decongestant found in some cold medicines. One of the methods used to produce methamphetamine is called "shake n bake" or "one pot", which involves mixing the ingredients in a plastic bottle and shaking it while venting the gas. This method is popular among clandestine chemists because it is fast, cheap, and easy to do with minimal equipment and skills.

    - -

    However, this method is also very dangerous, as it can result in explosions, fires, burns, toxic fumes, and contamination of the environment. According to the Drug Enforcement Administration (DEA), shake n bake meth labs accounted for 80% of all meth lab incidents in 2010. The DEA also warns that shake n bake meth is often impure and may contain harmful byproducts and residues that can cause serious health problems for users and bystanders.

    -

    erowid methamphetamine shake n bake


    Download File ->>> https://urlca.com/2uDdu7



    - -

    Erowid is a website that provides information about psychoactive substances, including methamphetamine. Erowid has a section dedicated to methamphetamine, where users can find facts, research, experiences, media coverage, and other resources related to the drug. Erowid also has a FAQ page that answers some common questions about methamphetamine, such as its effects, chemistry, synthesis, history, and legal status.

    - -

    One of the questions that Erowid addresses is about shake n bake meth. Erowid explains that shake n bake meth is made by combining pseudoephedrine with ammonium nitrate (from cold packs), sodium hydroxide (from drain cleaner), lithium (from batteries), and water in a plastic bottle. The bottle is then shaken and vented periodically to release the pressure. The reaction produces methamphetamine and ammonia gas, which are separated by adding hydrochloric acid (from muriatic acid) to the mixture. The methamphetamine is then filtered out and dried.

    - -

    Erowid cautions that shake n bake meth is not a safe or reliable way to make methamphetamine, as it can result in explosions, injuries, poisoning, and arrest. Erowid also notes that shake n bake meth may contain traces of lithium, ammonia, sodium hydroxide, hydrochloric acid, pseudoephedrine, and other chemicals that can harm the user's body and brain. Erowid advises users to avoid shake n bake meth and seek professional help if they are addicted to methamphetamine.

    - -

    Conclusion

    - -

    Erowid methamphetamine shake n bake is a term that refers to a method of making methamphetamine using pseudoephedrine and household chemicals in a plastic bottle. This method is dangerous and produces low-quality methamphetamine that can have serious health consequences for users and others. Erowid provides information about methamphetamine and other psychoactive substances on its website, where users can learn more about the risks and effects of these drugs.

    - -

    How Does Methamphetamine Affect the Brain and Body?

    - -

    Methamphetamine is a highly addictive drug that stimulates the central nervous system, increasing the levels of dopamine, norepinephrine, and serotonin in the brain. These neurotransmitters are responsible for regulating mood, motivation, reward, attention, and arousal. Methamphetamine can produce feelings of euphoria, alertness, confidence, energy, and sociability in users. However, these effects are short-lived and often followed by a crash, where users experience fatigue, depression, anxiety, irritability, and cravings for more methamphetamine.

    - -

    Methamphetamine can also have negative effects on the brain and body over time. Chronic methamphetamine use can cause neurotoxicity, which is damage to the brain cells and nerve endings. This can result in cognitive impairment, memory loss, learning difficulties, mood disorders, psychosis, paranoia, hallucinations, and aggression. Methamphetamine can also affect the cardiovascular system, increasing the heart rate, blood pressure, and body temperature. This can lead to arrhythmias, chest pain, stroke, heart attack, and death. Methamphetamine can also damage the respiratory system, causing shortness of breath, coughing, wheezing, and lung infections. Methamphetamine can also harm the liver, kidneys, skin, teeth, and immune system.

    - -

    How Is Methamphetamine Used and Abused?

    - -

    Methamphetamine can be taken in different ways depending on its form and purity. The most common forms of methamphetamine are powder (speed), crystals (ice), and pills (yaba). Powder methamphetamine can be snorted, swallowed, or injected. Crystal methamphetamine can be smoked or injected. Pills methamphetamine can be swallowed or crushed and snorted. The route of administration affects how quickly and intensely the drug reaches the brain and how long it lasts in the body.

    - -

    Methamphetamine is often used in a binge-and-crash pattern, where users take repeated doses of the drug to maintain the high and avoid the crash. This can result in tolerance, dependence, and addiction. Tolerance means that users need more of the drug to achieve the same effects. Dependence means that users need the drug to function normally and avoid withdrawal symptoms. Addiction means that users lose control over their drug use and continue to use it despite negative consequences.

    -

    - -

    Methamphetamine is often abused in combination with other drugs such as alcohol, cocaine, heroin, marijuana, ecstasy, or prescription drugs. This can increase the risks and harms of methamphetamine use and cause unpredictable interactions and effects.

    d5da3c52bf
    -
    -
    \ No newline at end of file diff --git a/spaces/falterWliame/Face_Mask_Detection/HD Online Player (Table No. 21 3 Movie Download Kickas) [NEW].md b/spaces/falterWliame/Face_Mask_Detection/HD Online Player (Table No. 21 3 Movie Download Kickas) [NEW].md deleted file mode 100644 index ae86fdeb2151dc2a5c46e900a21646ebb3d3c69c..0000000000000000000000000000000000000000 --- a/spaces/falterWliame/Face_Mask_Detection/HD Online Player (Table No. 21 3 Movie Download Kickas) [NEW].md +++ /dev/null @@ -1,6 +0,0 @@ -

    HD Online Player (Table No. 21 3 movie download kickas)


    Download Zip >>> https://urlca.com/2uDcWl



    - -Panipat Full Hindi Movie 2019 Part 2 - video dailymotion. ... Panipat hd online free. ... this movie from a historic episode point of view and not just a 3 hour ... this time she's on the trail of a 21 year old merciless villain who targets women. ... is inspired from the life of a national level Kabbadi player from India. 1fdad05405
    -
    -
    -

    diff --git a/spaces/fartsmellalmao/combined-GI-RVC-models/lib/infer_pack/modules/F0Predictor/F0Predictor.py b/spaces/fartsmellalmao/combined-GI-RVC-models/lib/infer_pack/modules/F0Predictor/F0Predictor.py deleted file mode 100644 index f56e49e7f0e6eab3babf0711cae2933371b9f9cc..0000000000000000000000000000000000000000 --- a/spaces/fartsmellalmao/combined-GI-RVC-models/lib/infer_pack/modules/F0Predictor/F0Predictor.py +++ /dev/null @@ -1,16 +0,0 @@ -class F0Predictor(object): - def compute_f0(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length] - """ - pass - - def compute_f0_uv(self, wav, p_len): - """ - input: wav:[signal_length] - p_len:int - output: f0:[signal_length//hop_length],uv:[signal_length//hop_length] - """ - pass diff --git a/spaces/fatiXbelha/sd/Dolphin Emulator 5.0 A Powerful and Stable Emulator for 32 Bit Android Devices.md b/spaces/fatiXbelha/sd/Dolphin Emulator 5.0 A Powerful and Stable Emulator for 32 Bit Android Devices.md deleted file mode 100644 index 951060eb0bf62b00a64073090821c8385a6102d8..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Dolphin Emulator 5.0 A Powerful and Stable Emulator for 32 Bit Android Devices.md +++ /dev/null @@ -1,169 +0,0 @@ - -

    How to Play Nintendo Games on Your Android Device with Dolphin Emulator

    -

    Do you love Nintendo games but don't have a GameCube or Wii console? Do you want to enjoy your favorite titles in high definition and with enhanced features on your Android device? If so, you might want to try dolphin emulator, a powerful and versatile program that lets you play Nintendo games on your smartphone or tablet.

    -

    What is dolphin emulator and why use it?

    -

    Dolphin emulator is an open-source emulator for the Nintendo GameCube and Wii. It allows you to play games for these two consoles in full HD (1080p) with several enhancements: compatibility with all Android controllers, turbo speed, networked multiplayer, and even more. Dolphin emulator is not affiliated with Nintendo in any way.

    -

    dolphin emulator 32 bits android apk


    DOWNLOAD >> https://urllie.com/2uNFFU



    -

    Dolphin emulator is a great way to enjoy Nintendo games on your Android device for several reasons:

    -
      -
    • You can play hundreds of games from the GameCube and Wii library, including classics like Super Mario Sunshine, The Legend of Zelda: Twilight Princess, Metroid Prime, Super Smash Bros. Melee, and more.
    • -
    • You can customize the graphics and audio settings to suit your preferences and device capabilities.
    • -
    • You can use cheat codes, save states, screenshots, and other features to enhance your gaming experience.
    • -
    • You can connect with other players online or locally using Netplay or Bluetooth.
    • -
    • You can use various types of controllers, including touch screen, keyboard, gamepad, or even a real Wii Remote.
    • -
    -

    What are the main features and benefits of dolphin emulator?

    -

    Dolphin emulator has many features and benefits that make it one of the best emulators for Nintendo games. Here are some of them:

    -
      -
    • Video Backend: Dolphin emulator supports three video backends: OpenGL ES, Vulkan, and Software Renderer. OpenGL ES is the default backend that works on most devices. Vulkan is a newer backend that offers better performance and compatibility on some devices. Software Renderer is a fallback backend that renders everything on the CPU, which is very slow but can be useful for debugging purposes.
    • -
    • Shader Compilation: Dolphin emulator uses shaders to render graphics, which are programs that run on the GPU. Shaders need to be compiled before they can be used, which can cause stuttering or freezing during gameplay. Dolphin emulator offers several options to deal with shader compilation: Synchronous (Ubershaders), Synchronous (Skip Drawing), Asynchronous (Ubershaders), Asynchronous (Skip Rendering), or Hybrid Mode. Each option has its own advantages and disadvantages depending on your device and game.
    • -
    • Enhancements: Dolphin emulator allows you to enhance the graphics of your games by changing the internal resolution, anti-aliasing, anisotropic filtering, post-processing effects, texture scaling, stereoscopic 3D mode, widescreen hack, and more. You can also enable or disable enhancements per game via their GameINI file.
    • -
    • Hacks: Dolphin emulator has some hacks that can improve performance or compatibility in some games. These include Skip EFB Access from CPU, Ignore Format Changes, Store EFB Copies to Texture Only, Defer EFB Copies to RAM, Texture Cache Accuracy, External Frame Buffer (XFB), Fast Depth Calculation, Disable Bounding Box, Force Texture Filtering, Disable Fog, Disable Copy Filter, Arbitrary Mipmap Detection, and Force 24-bit Color.
    • -

    What are the system requirements and compatibility issues of dolphin emulator?

      -

      Dolphin emulator is a demanding program that requires a powerful device to run smoothly. The minimum system requirements for dolphin emulator are as follows:

      -
        -
      • Android 5.0 (Lollipop) or higher
      • -
      • A 64-bit processor and operating system
      • -
      • At least 2 GB of RAM
      • -
      • A GPU that supports OpenGL ES 3.0 or Vulkan
      • -
      • At least 2 GB of free storage space
      • -
      -

      However, these are only the minimum requirements and do not guarantee a good performance. Some games may require higher specifications or specific settings to run properly. Dolphin emulator is constantly being updated and improved, so compatibility and performance may vary depending on the version you are using and the game you are playing.

      -

      To check the compatibility of a game with dolphin emulator, you can visit the official compatibility list or the user ratings on the dolphin emulator website. You can also search for online guides or videos that show how to configure dolphin emulator for a specific game.

      -

      How to download, install, and configure dolphin emulator on your device?

      -

      To download and install dolphin emulator on your device, you need to follow these steps:

      -
        -
      1. Go to the official website of dolphin emulator and download the latest APK file for Android.
      2. -
      3. Enable the installation of apps from unknown sources on your device. You can do this by going to Settings > Security > Unknown Sources and toggling it on.
      4. -
      5. Locate the downloaded APK file on your device and tap on it to install it.
      6. -
      7. Launch dolphin emulator and grant it the necessary permissions to access your storage, microphone, and location.
      8. -
      9. Agree to the terms of service and privacy policy of dolphin emulator.
      10. -
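      If you would rather sideload the APK from a computer than tap through the steps above, the short sketch below installs it over USB with adb. It assumes the Android platform tools (adb) are installed, USB debugging is enabled on the device, and the file name and download location are placeholders for wherever you saved the APK.

```python
# Minimal sketch: sideload a downloaded Dolphin APK from a computer via adb.
# Assumes `adb` is on PATH and USB debugging is enabled; the APK path below
# is a placeholder for wherever you saved the download.
import subprocess
from pathlib import Path

apk_path = Path.home() / "Downloads" / "dolphin-emu.apk"  # placeholder file name

def install_apk(apk: Path) -> None:
    if not apk.is_file():
        raise FileNotFoundError(f"APK not found: {apk}")
    # `adb install -r` replaces an existing install while keeping its data.
    result = subprocess.run(
        ["adb", "install", "-r", str(apk)],
        capture_output=True, text=True, check=False,
    )
    print(result.stdout or result.stderr)

if __name__ == "__main__":
    install_apk(apk_path)
```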
      -

      To configure dolphin emulator on your device, you need to follow these steps:

      -


      -
        -
      1. Tap on the menu icon (three horizontal bars) on the top left corner of the screen and select Settings.
      2. -
      3. Adjust the settings according to your preferences and device capabilities. You can change the video backend, shader compilation mode, enhancements, hacks, controller options, audio options, network options, and more.
      4. -
      5. Save your settings and exit the menu.
      6. -
      7. Tap on the plus icon (+) on the bottom right corner of the screen and browse your device for the game files you want to add to dolphin emulator. You can use ISO, GCM, WBFS, DOL, ELF, or WAD files.
      8. -
      9. Select the game files you want to add and tap on OK. The games will appear on the main screen of dolphin emulator.
      10. -
      11. Tap on a game to launch it and enjoy playing it with dolphin emulator.
      12. -
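      Step 7 above lists the file types the emulator accepts (ISO, GCM, WBFS, DOL, ELF, WAD). If your dumps are spread across a folder, a small script like the sketch below can list the candidates before you add them in the app. The folder path is an assumption; the script only inspects file names and does not talk to dolphin emulator itself.

```python
# Sketch: list files whose extensions match the formats named in step 7,
# so you know what to point the in-app file browser at.
from pathlib import Path

SUPPORTED = {".iso", ".gcm", ".wbfs", ".dol", ".elf", ".wad"}

def find_games(root: Path) -> list[Path]:
    # rglob("*") walks the folder tree; keep only files with supported extensions.
    return sorted(p for p in root.rglob("*")
                  if p.is_file() and p.suffix.lower() in SUPPORTED)

if __name__ == "__main__":
    roms = Path("/storage/emulated/0/ROMs")  # assumed location of your game dumps
    for game in find_games(roms):
        print(game)
```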

      How to fix common problems and improve your gaming experience with dolphin emulator?

      -

      Dolphin emulator is a complex program that may encounter some problems or issues while running on your device. Here are some of the common problems and their possible solutions:

      -
        -
      • Game crashes or freezes: This may be caused by a corrupted game file, an incompatible setting, a low battery, or a lack of RAM. To fix this, you can try the following:
          -
        • Check the integrity of your game file and make sure it is not damaged or incomplete.
        • -
        • Change the video backend, shader compilation mode, or other settings that may affect the game's performance.
        • -
        • Charge your device or plug it into a power source.
        • -
        • Close other apps or processes that may be consuming your RAM or CPU.
        • -
        -
      • -
      • Game runs slowly or lags: This may be caused by a low-end device, an inappropriate resolution, a high enhancement level, or a busy network. To fix this, you can try the following:
          -
        • Lower the internal resolution, anti-aliasing, anisotropic filtering, or other enhancements that may strain your GPU.
        • -
        • Enable some hacks that may improve performance, such as Skip EFB Access from CPU, Ignore Format Changes, Store EFB Copies to Texture Only, Fast Depth Calculation, or Disable Fog.
        • -
        • Use a wired connection or a stable Wi-Fi network if you are playing online or using Netplay.
        • -
        -
      • -
      • Game has graphical glitches or artifacts: This may be caused by a faulty GPU, an incorrect enhancement, a wrong hack, or a missing shader. To fix this, you can try the following:
          -
        • Update your GPU drivers or firmware to the latest version.
        • -
        • Disable any enhancements that may cause graphical errors, such as texture scaling, stereoscopic 3D mode, widescreen hack, or post-processing effects.
        • -
        • Disable any hacks that may cause graphical errors, such as Skip EFB Access from CPU, Ignore Format Changes, Store EFB Copies to Texture Only, Defer EFB Copies to RAM, Texture Cache Accuracy, External Frame Buffer (XFB), Disable Bounding Box, Force Texture Filtering, Disable Copy Filter, Arbitrary Mipmap Detection, Force 24-bit Color
        • -
        • Use a different video backend or shader compilation mode that may fix the graphical glitches.
        • -
        -
      • -
      • Game has audio issues or no sound: This may be caused by a muted device, a low volume level, an incompatible audio backend, or a corrupted game file. To fix this, you can try the following:
          -
        • Unmute your device and increase the volume level.
        • -
        • Change the audio backend to either OpenSL ES (default) or Cubeb. You can also change the audio stretching option to either Off (default), Low Latency (faster but lower quality), or High Latency (slower but higher quality).
        • -
        • Check the integrity of your game file and make sure it is not damaged or incomplete.
        • -
        -
      • -
      -

      To improve your gaming experience with dolphin emulator, you can also try the following tips:

      -
        -
      • Use a controller: Playing with a touch screen can be difficult and uncomfortable for some games. You can use a controller to have a better control and feel of the game. Dolphin emulator supports various types of controllers, including keyboard, gamepad, and even a real Wii Remote. You can connect your controller via USB, Bluetooth, or Wi-Fi and configure it in the Settings menu.
      • -
      • Use cheat codes: If you want to have some fun or challenge yourself with your games, you can use cheat codes to modify various aspects of the game. Dolphin emulator supports Action Replay codes and Gecko codes for GameCube and Wii games. You can find cheat codes online or create your own using a hex editor. You can enable or disable cheat codes per game via their GameINI file; a sketch of such an entry follows this list.
      • -
      • Use save states: If you want to save your progress at any point in the game without using the in-game save feature, you can use save states. Dolphin emulator allows you to create up to four save states per game and load them whenever you want. You can also export and import save states from other devices or users. You can access the save state feature by tapping on the menu icon (three horizontal bars) on the top left corner of the screen while playing a game.
      • -
      • Use screenshots: If you want to capture a memorable moment or a funny glitch in the game, you can use screenshots. Dolphin emulator allows you to take screenshots of the game and save them to your device or share them with others. You can access the screenshot feature by tapping on the menu icon (three horizontal bars) on the top left corner of the screen while playing a game.
      • -
      • Use Netplay: If you want to play with other players online or locally, you can use Netplay. Dolphin emulator supports Netplay for both GameCube and Wii games, allowing you to join or host a game session with up to four players. You can also chat with other players using the built-in chat feature. You can access the Netplay feature by tapping on the menu icon (three horizontal bars) on the top left corner of the screen and selecting Netplay.
      • -
      -
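      As a companion to the cheat-code tip above, here is a rough sketch of generating a per-game INI that defines and enables one Gecko code. The game ID, cheat name, code line, and output folder are placeholders, and the [Gecko]/[Gecko_Enabled] section layout follows the convention of Dolphin's desktop GameSettings files; check the file your installation actually creates before relying on this exact format.

```python
# Sketch: write a per-game settings INI that defines and enables one Gecko code.
# Game ID, cheat name, code line, and output folder are placeholders; the
# section layout mirrors Dolphin's desktop GameSettings convention.
from pathlib import Path

GAME_ID = "GALE01"                  # placeholder six-character game ID
CHEAT_NAME = "Example Cheat"        # placeholder cheat name
CODE_LINES = ["04123456 00000000"]  # placeholder Gecko code (address/value pairs)

def build_ini() -> str:
    lines = ["[Gecko]", f"${CHEAT_NAME}", *CODE_LINES,
             "", "[Gecko_Enabled]", f"${CHEAT_NAME}", ""]
    return "\n".join(lines)

if __name__ == "__main__":
    out_dir = Path("GameSettings")  # assumed destination folder
    out_dir.mkdir(parents=True, exist_ok=True)
    target = out_dir / f"{GAME_ID}.ini"
    target.write_text(build_ini())
    print(f"Wrote {target}")
```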

      Conclusion

      -

      Dolphin emulator is an amazing program that lets you play Nintendo GameCube and Wii games on your Android device. It has many features and benefits that make it one of the best emulators for Nintendo games. It also has some problems and issues that can be fixed or avoided with some solutions and tips. Dolphin emulator is constantly being updated and improved, so you can expect more games and features in the future.

      -

      If you are a fan of Nintendo games and want to enjoy them on your Android device, you should definitely give dolphin emulator a try. You can download it from the official website and follow the instructions in this article to install, configure, and troubleshoot it. You can also visit the dolphin emulator website for more information, guides, forums, and support.

      -

      Have fun playing Nintendo games on your Android device with dolphin emulator!

      -

      FAQs

      -

      Here are some frequently asked questions and answers about dolphin emulator:

      -
        -
      1. Is dolphin emulator legal?
      2. -

        Dolphin emulator is legal as long as you own the original game discs and do not distribute or download any copyrighted material. Dolphin emulator does not come with any games or BIOS files, so you need to dump them from your own console or purchase them legally.

        -
      3. Is dolphin emulator safe?
      4. -

        Dolphin emulator is safe as long as you download it from the official website and do not install any malicious software or malware. Dolphin emulator does not contain any viruses or spyware, but you should always scan any file you download from the internet with an antivirus program.

        -
      5. How to update dolphin emulator?
      6. -

        Dolphin emulator is updated regularly with new features, bug fixes, and compatibility improvements. You can update dolphin emulator by downloading the latest APK file from the official website and installing it over the existing version. You can also enable auto-update in the Settings menu to receive notifications when a new version is available.

        -
      7. How to uninstall dolphin emulator?
      8. -

        If you want to uninstall dolphin emulator from your device, you can do so by following these steps:

        -
          -
        • Go to Settings > Apps > Dolphin Emulator and tap on Uninstall.
        • -
        • Confirm your action and wait for the process to finish.
        • -
        • Delete any leftover files or folders related to dolphin emulator from your device.
        • -
        -
      9. How to contact dolphin emulator developers or support?
      10. -

        If you want to contact dolphin emulator developers or support, you can do so through the contact options listed on the official website.

        -

        197e85843d
        -
        -
        \ No newline at end of file diff --git a/spaces/fatiXbelha/sd/Farming Simulator 23 How to Build Your Own Production Chains on Android.md b/spaces/fatiXbelha/sd/Farming Simulator 23 How to Build Your Own Production Chains on Android.md deleted file mode 100644 index ac0803d6f75a06d40d2948f59487babdb97bf75c..0000000000000000000000000000000000000000 --- a/spaces/fatiXbelha/sd/Farming Simulator 23 How to Build Your Own Production Chains on Android.md +++ /dev/null @@ -1,114 +0,0 @@ -
        -

        Farming Simulator Android: A Guide for Beginners

        -

        If you have ever dreamed of becoming a farmer, but don't have the time, money or land to do so in real life, then farming simulator android is the perfect game for you. Farming simulator android is a series of simulation games that let you experience the joys and challenges of running your own farm on your mobile device. You can grow crops, raise animals, drive realistic vehicles and tools, sell your products in a dynamic market, and much more.

        -

        Farming simulator android is not only fun and relaxing, but also educational and rewarding. You can learn about different types of farming activities, machinery and crops, as well as the economic and environmental aspects of agriculture. You can also develop your strategic thinking, planning and management skills as you expand your farm and business.

        -

        farming simulator android


        DOWNLOAD ❤❤❤ https://urllie.com/2uNHVo



        -

        In this article, we will give you a comprehensive guide on how to download and install farming simulator android on your device, how to start your farming career, how to make money and expand your farm, how to use mods and co-op mode, and how to enjoy the farming lifestyle. Whether you are a beginner or a veteran farmer, you will find something useful and interesting in this article.

        -

        How to Download and Install Farming Simulator Android

        -

        The first step to playing farming simulator android is to download and install the game on your device. There are several farming simulator games available for android devices, but the most recent ones are Farming Simulator 20 and Farming Simulator 23 Mobile. Here are the steps to download and install them:

        -
          -
        1. Go to the Google Play Store on your device and search for "farming simulator".
        2. -
        3. Select the game you want to download. Farming Simulator 20 costs $6.99 USD while Farming Simulator 23 Mobile costs $7.99 USD. Both games also have in-app purchases for extra content.
        4. -
        5. Tap on the "Buy" or "Install" button and follow the instructions to complete the payment process.
        6. -
        7. Wait for the game to download and install on your device. The file size may vary depending on the game version and your device model.
        8. -
        9. Once the game is installed, tap on the game icon to launch it.
        10. -
        -

        Congratulations! You have successfully downloaded and installed farming simulator android on your device. Now you are ready to start your farming career.

        -


        -

        How to Start Your Farming Career

        -

        When you launch farming simulator android for the first time, you will be greeted by a tutorial that will teach you the basics of the game. You can skip the tutorial if you want, but we recommend that you complete it to get familiar with the controls and mechanics of the game.

        -

        The tutorial will guide you through the following steps:

        -
          -
        • Choosing your farm location: You can choose between three different maps, each with its own climate, terrain, and crops. The maps are North American, European, and Alpine. You can also change the map later if you want.
        • -
        • Choosing your vehicles and tools: You can select from a variety of vehicles and tools to help you with your farming tasks. You can choose from tractors, harvesters, trailers, plows, cultivators, seeders, sprayers, mowers, balers, and more. You can also buy new vehicles and tools later from the shop.
        • -
        • Choosing your crops and animals: You can decide what kind of crops and animals you want to grow and raise on your farm. You can choose from wheat, barley, oats, canola, sunflower, soybean, corn, potatoes, sugar beet, cotton, and poplar for crops. You can choose from cows, sheep, pigs, chickens, and horses for animals. You can also sell your crops and animals later in the market.
        • -
        • Learning how to perform farming tasks: You can learn how to plow, cultivate, sow, fertilize, harvest, transport, store, sell, and buy your products. You can also learn how to feed, water, clean, shear, milk, and ride your animals. You can also learn how to use the GPS system, the cruise control, the camera modes, the map view, and the menu options.
        -

        After completing the tutorial, you will have a basic understanding of how to play farming simulator android. You can then start your own career mode or play in free mode. In career mode, you will have a limited budget and a set of missions to complete. In free mode, you will have unlimited money and no missions. You can also adjust the difficulty level and the game settings according to your preference.

        -

        Farming Simulator 20 vs Farming Simulator 23 Mobile

        -

        If you are wondering which farming simulator game to download for your android device, you may want to compare the features of Farming Simulator 20 and Farming Simulator 23 Mobile. Both games are developed by Giants Software and published by Focus Home Interactive. Both games have similar gameplay and graphics quality. However, there are some differences between them that may affect your decision.

        -

        Here is a table that compares some of the main features of Farming Simulator 20 and Farming Simulator 23 Mobile:

        | Feature | Farming Simulator 20 | Farming Simulator 23 Mobile |
        | --- | --- | --- |
        | Release date | December 3rd 2019 | November 22nd 2022 |
        | Price | $6.99 USD | $7.99 USD |
        | File size | 600 MB | 1 GB |
        | Number of maps | 1 (North American) | 3 (North American, European, Alpine) |
        | Number of vehicles and tools | Over 100 from 31 brands | Over 200 from 50 brands |
        | Number of crops | 10 | 11 |
        | Number of animals | 5 | 5 |
        | New features | John Deere brand; cotton and oat crops; horse riding; Android TV support; improved graphics; new vehicles and tools | CLAAS brand; sunflower and poplar crops; fishing; dynamic weather; improved physics; new vehicles and tools |
        | User rating on Google Play Store | 4.1 out of 5 stars (based on over 40k reviews) | 4.4 out of 5 stars (based on over 10k reviews) |


        As you can see from the table above, Farming Simulator 23 Mobile has more content and features than Farming Simulator 20, but it also costs more and requires more storage space. Farming Simulator 23 Mobile also has better user ratings and reviews than Farming Simulator 20. However, both games are highly rated and praised by the players for their realism, variety, and fun factor. Ultimately, the choice between Farming Simulator 20 and Farming Simulator 23 Mobile depends on your personal preference, budget, and device compatibility. You can try both games and see which one suits you better. You can also check out the official websites, trailers, and forums of the games for more information and updates.

        How to Make Money and Expand Your Farm

        -

        One of the main goals of playing farming simulator android is to make money and expand your farm. Money is essential for buying new vehicles, tools, land, buildings, animals, and crops. Expanding your farm allows you to increase your production, diversify your products, and improve your efficiency.

        -

        There are several ways to make money and expand your farm in farming simulator android. Here are some of them:

        -
          -
        • Sell your products: The most obvious way to make money is to sell your crops and animal products in the market. You can check the prices of different products on the menu or on the map. You can also see the demand and supply of each product, which affects the price; a toy illustration of this effect appears after this list. You can sell your products by driving them to the selling points or by using a train or a boat. You can also store your products in silos or sheds until the price is favorable.
        • -
        • Complete missions: Another way to make money is to complete missions for other farmers or contractors. You can find missions on the menu or on the map. Missions usually involve performing a specific task with a specific vehicle or tool within a time limit. You can earn money and reputation by completing missions. Reputation affects the difficulty and reward of future missions.
        • -
        • Use forestry: Forestry is a lucrative activity that involves cutting down trees, processing them into logs or wood chips, and selling them in the market. You can buy a chainsaw or use a harvester to cut down trees. You can use a trailer or a crane to transport the logs or wood chips. You can also use a wood chipper to turn logs into wood chips. You can sell logs or wood chips at the sawmill or the biomass heating plant.
        • -
        • Use renewable energy: Renewable energy is a sustainable way to make money and reduce your expenses. You can buy solar panels, wind turbines, or biogas plants to generate electricity or gas from natural resources. You can sell the excess electricity or gas to the grid or use it for your own needs. You can also use biogas plants to turn slurry or manure into fertilizer or fuel.
        • -
        • Buy new land: Buying new land is a way to expand your farm and increase your production capacity. You can buy new land from the menu or on the map. You can see the size, price, and ownership status of each plot of land. You can also see what kind of crops are growing on each plot of land. You can buy land that is already cultivated or plowed, or you can buy land that is covered with grass or trees.
        • -
        • Buy new machinery and buildings: Buying new machinery and buildings is a way to improve your efficiency and diversify your products. You can buy new machinery and buildings from the shop or on the map. You can see the specifications, price, and maintenance cost of each vehicle, tool, or building. You can also customize some vehicles and tools with different colors, wheels, engines, or attachments.
        • -
        -
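        The "Sell your products" tip above points out that demand and supply move prices. The toy model below only illustrates that idea: the price rises when demand outstrips supply and falls otherwise. The base price and sensitivity are invented for the example and are not the game's actual pricing logic.

```python
# Toy illustration of the supply-and-demand effect on a sale price.
# All numbers are invented for the example; the game's real market logic differs.
def adjusted_price(base_price: float, demand: float, supply: float,
                   sensitivity: float = 0.5) -> float:
    if supply <= 0:
        raise ValueError("supply must be positive")
    ratio = demand / supply  # > 1 means demand exceeds supply
    return base_price * (1 + sensitivity * (ratio - 1))

if __name__ == "__main__":
    # Example: base price 800, demand running 30% ahead of supply -> 920.0
    print(adjusted_price(800, demand=1.3, supply=1.0))
```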

        These are some of the ways to make money and expand your farm in farming simulator android. However, you should also be careful about your expenses and debts. You should keep track of your income and expenditure on the menu or on the map. You should also pay attention to your loan amount and interest rate on the menu or on the bank screen. You should try to balance your budget and pay off your loan as soon as possible.
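        To make the advice about loans and interest concrete, here is a toy calculation of how long a loan takes to clear when interest accrues each period. The principal, rate, and repayment below are illustrative numbers, not values taken from the game.

```python
# Toy illustration: periods needed to clear a loan when interest accrues first
# and a fixed repayment is made each period. Numbers are invented for the example.
def periods_to_repay(principal: float, rate_per_period: float, payment: float) -> int:
    balance = principal
    periods = 0
    while balance > 0:
        balance += balance * rate_per_period  # interest is added first
        balance -= payment                    # then the repayment is applied
        periods += 1
        if periods > 10_000:                  # guard: payment too small to ever clear the loan
            raise ValueError("payment never clears the interest")
    return periods

if __name__ == "__main__":
    # Example: 100,000 borrowed at 1% per period, repaying 5,000 per period.
    print(periods_to_repay(100_000, 0.01, 5_000))
```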

        -

        How to Use Mods and Co-Op Mode

        -

        If you want to enhance your gaming experience and add more variety and fun to farming simulator android, you may want to use mods and co-op mode.

        -

        Mods are modifications that change some aspects of the game, such as adding new vehicles, tools, crops, animals, maps, buildings, features, or graphics.

        -

        Co-op mode is a multiplayer mode that allows you to play with friends online or offline.

        -

        Here is how to use mods and co-op mode in farming simulator android:

        -
          -
        • Use mods: To use mods in farming simulator android,
        • You can buy new buildings, decorations, fences, signs, etc. to make your farm look more attractive and functional. You can also change the color and design of your vehicles and tools to match your farm theme.

        These are some of the ways to enjoy the farming lifestyle in farming simulator android. However, you can also create your own ways to have fun and express yourself in the game. You can experiment with different combinations of vehicles, tools, crops, animals, etc. You can also challenge yourself with different goals and scenarios. You can also share your farm and achievements with other players online or offline.

          Conclusion

          -

          Farming simulator android is a game that allows you to experience the joys and challenges of running your own farm on your mobile device. You can grow crops, raise animals, drive realistic vehicles and tools, sell your products in a dynamic market, and much more.

          -

          Farming simulator android is not only fun and relaxing, but also educational and rewarding. You can learn about different types of farming activities, machinery and crops, as well as the economic and environmental aspects of agriculture. You can also develop your strategic thinking, planning and management skills as you expand your farm and business.

          -

          In this article, we have given you a comprehensive guide on how to download and install farming simulator android on your device, how to start your farming career, how to make money and expand your farm, how to use mods and co-op mode, and how to enjoy the farming lifestyle. We hope that this article has helped you to get started with farming simulator android and to have a great time playing it.

          -

          FAQs

          -

          Here are some frequently asked questions and answers about farming simulator android:

          -
            -
          1. Q: What are the system requirements for farming simulator android?
          2. -
          3. A: Farming simulator android requires Android 7.0 or higher and at least 1 GB of RAM. The file size may vary depending on the game version and your device model.
          4. -
          5. Q: How can I save my game progress in farming simulator android?
          6. -
          7. A: Farming simulator android automatically saves your game progress every time you exit the game or switch to another app. You can also manually save your game progress by tapping on the menu button and then on the save button.
          8. -
          9. Q: How can I reset my game progress in farming simulator android?
          10. -
          11. A: To reset your game progress in farming simulator android, you need to delete your save file from your device. To do this, go to Android/data/com.giantssoftware.fs22/files/savegame1 (or savegame2 or savegame3 depending on which slot you used) and delete the file inside. Note that this will erase all your game data and cannot be undone.
          12. -
          13. Q: How can I get more money in farming simulator android?
          14. -
          15. A: There are several ways to get more money in farming simulator android, such as selling your products, completing missions, using forestry, using renewable energy, etc. You can also use cheats or mods to get unlimited money, but this may affect your gameplay experience and achievements.
          16. -
          17. Q: How can I contact the developers or report a bug in farming simulator android?
          18. -
          19. A: You can contact the developers or report a bug in farming simulator android by visiting their official website or their social media pages. You can also send them an email at support@giants-software.com or leave a review on the Google Play Store.
          20. -
          Official website: https://www.farming-simulator.com/ · Mods: https://www.farming-simulator.com/mods.php · Facebook: https://www.facebook.com/giants.farming.simulator · Twitter: https://twitter.com/farmingsim

          401be4b1e0
          -
          -
          \ No newline at end of file diff --git a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/configs/glint360k_r18.py b/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/configs/glint360k_r18.py deleted file mode 100644 index 7a8db34cd547e8e667103c93585296e47a894e97..0000000000000000000000000000000000000000 --- a/spaces/fb700/chatglm-fitness-RLHF/src/face3d/models/arcface_torch/configs/glint360k_r18.py +++ /dev/null @@ -1,26 +0,0 @@ -from easydict import EasyDict as edict - -# make training faster -# our RAM is 256G -# mount -t tmpfs -o size=140G tmpfs /train_tmp - -config = edict() -config.loss = "cosface" -config.network = "r18" -config.resume = False -config.output = None -config.embedding_size = 512 -config.sample_rate = 1.0 -config.fp16 = True -config.momentum = 0.9 -config.weight_decay = 5e-4 -config.batch_size = 128 -config.lr = 0.1 # batch size is 512 - -config.rec = "/train_tmp/glint360k" -config.num_classes = 360232 -config.num_image = 17091657 -config.num_epoch = 20 -config.warmup_epoch = -1 -config.decay_epoch = [8, 12, 15, 18] -config.val_targets = ["lfw", "cfp_fp", "agedb_30"] diff --git a/spaces/fclong/summary/fengshen/examples/pretrain_taiyi_clip/test.py b/spaces/fclong/summary/fengshen/examples/pretrain_taiyi_clip/test.py deleted file mode 100644 index c5927a8688618678c8838162bf0c42fac6067e19..0000000000000000000000000000000000000000 --- a/spaces/fclong/summary/fengshen/examples/pretrain_taiyi_clip/test.py +++ /dev/null @@ -1,36 +0,0 @@ -from pytorch_lightning import ( - Trainer, -) -from fengshen.models.model_utils import ( - add_module_args, -) -import argparse -from fengshen.data.universal_datamodule import UniversalDataModule -from fengshen.utils.universal_checkpoint import UniversalCheckpoint -from fengshen.examples.pretrain_taiyi_clip.pretrain import ( - TaiyiCLIP, - Collator, -) -from fengshen.data.fs_datasets import load_dataset -from torch.utils.data import DataLoader - -if __name__ == '__main__': - args_parser = argparse.ArgumentParser() - args_parser = add_module_args(args_parser) - args_parser = UniversalDataModule.add_data_specific_args(args_parser) - args_parser = Trainer.add_argparse_args(args_parser) - args_parser = TaiyiCLIP.add_module_specific_args(args_parser) - args_parser = UniversalCheckpoint.add_argparse_args(args_parser) - args = args_parser.parse_args() - checkpoint_callback = UniversalCheckpoint(args) - trainer = Trainer.from_argparse_args(args, callbacks=[ - checkpoint_callback - ]) - - model = TaiyiCLIP(args) - processor = model.processor - collate_fn = Collator(processor) - datasets = load_dataset(args.datasets_name) - dataloader = DataLoader(datasets[args.test_datasets_field], - batch_size=args.test_batchsize, num_workers=2, collate_fn=collate_fn) - trainer.validate(model, dataloaders=dataloader, ckpt_path=args.load_ckpt_path) diff --git a/spaces/felixrosberg/face-swap/options/swap_options.py b/spaces/felixrosberg/face-swap/options/swap_options.py deleted file mode 100644 index 2a90c349bb7078823ddd99ed96700cb2569579cd..0000000000000000000000000000000000000000 --- a/spaces/felixrosberg/face-swap/options/swap_options.py +++ /dev/null @@ -1,43 +0,0 @@ -import argparse - - -class SwapOptions(): - def __init__(self): - self.parser = argparse.ArgumentParser() - self.initialized = False - - def initialize(self): - # paths (data, models, etc...) - self.parser.add_argument('--arcface_path', type=str, - default="arcface_model/arcface/arc_res50.h5", - help='path to arcface model. 
Used to extract identity from source.') - - # Video/Image necessary models - self.parser.add_argument('--retina_path', type=str, - default="retinaface/retinaface_res50.h5", - help='path to retinaface model.') - self.parser.add_argument('--compare', type=bool, - default=True, - help='If true, concatenates the frame with the manipulated frame') - - self.parser.add_argument('--load', type=int, - default=30, - help='int of number to load checkpoint weights.') - self.parser.add_argument('--device_id', type=int, default=0, - help='which device to use') - - # logging and checkpointing - self.parser.add_argument('--log_dir', type=str, default='logs/runs/', - help='logging directory') - self.parser.add_argument('--log_name', type=str, default='affa_f', - help='name of the run, change this to track several experiments') - - self.parser.add_argument('--chkp_dir', type=str, default='checkpoints/', - help='checkpoint directory (will use same name as log_name!)') - self.initialized = True - - def parse(self): - if not self.initialized: - self.initialize() - self.opt = self.parser.parse_args() - return self.opt \ No newline at end of file diff --git a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/vm.d.ts b/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/vm.d.ts deleted file mode 100644 index c96513a50555debf6fd50aa0e414a18d1d342efb..0000000000000000000000000000000000000000 --- a/spaces/fffiloni/controlnet-animation-doodle/node_modules/@types/node/vm.d.ts +++ /dev/null @@ -1,509 +0,0 @@ -/** - * The `vm` module enables compiling and running code within V8 Virtual - * Machine contexts. - * - * **The `vm` module is not a security** - * **mechanism. Do not use it to run untrusted code.** - * - * JavaScript code can be compiled and run immediately or - * compiled, saved, and run later. - * - * A common use case is to run the code in a different V8 Context. This means - * invoked code has a different global object than the invoking code. - * - * One can provide the context by `contextifying` an - * object. The invoked code treats any property in the context like a - * global variable. Any changes to global variables caused by the invoked - * code are reflected in the context object. - * - * ```js - * const vm = require('vm'); - * - * const x = 1; - * - * const context = { x: 2 }; - * vm.createContext(context); // Contextify the object. - * - * const code = 'x += 40; var y = 17;'; - * // `x` and `y` are global variables in the context. - * // Initially, x has the value 2 because that is the value of context.x. - * vm.runInContext(code, context); - * - * console.log(context.x); // 42 - * console.log(context.y); // 17 - * - * console.log(x); // 1; y is not defined. - * ``` - * @see [source](https://github.com/nodejs/node/blob/v18.0.0/lib/vm.js) - */ -declare module 'vm' { - interface Context extends NodeJS.Dict {} - interface BaseOptions { - /** - * Specifies the filename used in stack traces produced by this script. - * Default: `''`. - */ - filename?: string | undefined; - /** - * Specifies the line number offset that is displayed in stack traces produced by this script. - * Default: `0`. - */ - lineOffset?: number | undefined; - /** - * Specifies the column number offset that is displayed in stack traces produced by this script. 
- * @default 0 - */ - columnOffset?: number | undefined; - } - interface ScriptOptions extends BaseOptions { - displayErrors?: boolean | undefined; - timeout?: number | undefined; - cachedData?: Buffer | undefined; - /** @deprecated in favor of `script.createCachedData()` */ - produceCachedData?: boolean | undefined; - } - interface RunningScriptOptions extends BaseOptions { - /** - * When `true`, if an `Error` occurs while compiling the `code`, the line of code causing the error is attached to the stack trace. - * Default: `true`. - */ - displayErrors?: boolean | undefined; - /** - * Specifies the number of milliseconds to execute code before terminating execution. - * If execution is terminated, an `Error` will be thrown. This value must be a strictly positive integer. - */ - timeout?: number | undefined; - /** - * If `true`, the execution will be terminated when `SIGINT` (Ctrl+C) is received. - * Existing handlers for the event that have been attached via `process.on('SIGINT')` will be disabled during script execution, but will continue to work after that. - * If execution is terminated, an `Error` will be thrown. - * Default: `false`. - */ - breakOnSigint?: boolean | undefined; - /** - * If set to `afterEvaluate`, microtasks will be run immediately after the script has run. - */ - microtaskMode?: 'afterEvaluate' | undefined; - } - interface CompileFunctionOptions extends BaseOptions { - /** - * Provides an optional data with V8's code cache data for the supplied source. - */ - cachedData?: Buffer | undefined; - /** - * Specifies whether to produce new cache data. - * Default: `false`, - */ - produceCachedData?: boolean | undefined; - /** - * The sandbox/context in which the said function should be compiled in. - */ - parsingContext?: Context | undefined; - /** - * An array containing a collection of context extensions (objects wrapping the current scope) to be applied while compiling - */ - contextExtensions?: Object[] | undefined; - } - interface CreateContextOptions { - /** - * Human-readable name of the newly created context. - * @default 'VM Context i' Where i is an ascending numerical index of the created context. - */ - name?: string | undefined; - /** - * Corresponds to the newly created context for display purposes. - * The origin should be formatted like a `URL`, but with only the scheme, host, and port (if necessary), - * like the value of the `url.origin` property of a URL object. - * Most notably, this string should omit the trailing slash, as that denotes a path. - * @default '' - */ - origin?: string | undefined; - codeGeneration?: - | { - /** - * If set to false any calls to eval or function constructors (Function, GeneratorFunction, etc) - * will throw an EvalError. - * @default true - */ - strings?: boolean | undefined; - /** - * If set to false any attempt to compile a WebAssembly module will throw a WebAssembly.CompileError. - * @default true - */ - wasm?: boolean | undefined; - } - | undefined; - /** - * If set to `afterEvaluate`, microtasks will be run immediately after the script has run. 
- */ - microtaskMode?: 'afterEvaluate' | undefined; - } - type MeasureMemoryMode = 'summary' | 'detailed'; - interface MeasureMemoryOptions { - /** - * @default 'summary' - */ - mode?: MeasureMemoryMode | undefined; - context?: Context | undefined; - } - interface MemoryMeasurement { - total: { - jsMemoryEstimate: number; - jsMemoryRange: [number, number]; - }; - } - /** - * Instances of the `vm.Script` class contain precompiled scripts that can be - * executed in specific contexts. - * @since v0.3.1 - */ - class Script { - constructor(code: string, options?: ScriptOptions); - /** - * Runs the compiled code contained by the `vm.Script` object within the given`contextifiedObject` and returns the result. Running code does not have access - * to local scope. - * - * The following example compiles code that increments a global variable, sets - * the value of another global variable, then execute the code multiple times. - * The globals are contained in the `context` object. - * - * ```js - * const vm = require('vm'); - * - * const context = { - * animal: 'cat', - * count: 2 - * }; - * - * const script = new vm.Script('count += 1; name = "kitty";'); - * - * vm.createContext(context); - * for (let i = 0; i < 10; ++i) { - * script.runInContext(context); - * } - * - * console.log(context); - * // Prints: { animal: 'cat', count: 12, name: 'kitty' } - * ``` - * - * Using the `timeout` or `breakOnSigint` options will result in new event loops - * and corresponding threads being started, which have a non-zero performance - * overhead. - * @since v0.3.1 - * @param contextifiedObject A `contextified` object as returned by the `vm.createContext()` method. - * @return the result of the very last statement executed in the script. - */ - runInContext(contextifiedObject: Context, options?: RunningScriptOptions): any; - /** - * First contextifies the given `contextObject`, runs the compiled code contained - * by the `vm.Script` object within the created context, and returns the result. - * Running code does not have access to local scope. - * - * The following example compiles code that sets a global variable, then executes - * the code multiple times in different contexts. The globals are set on and - * contained within each individual `context`. - * - * ```js - * const vm = require('vm'); - * - * const script = new vm.Script('globalVar = "set"'); - * - * const contexts = [{}, {}, {}]; - * contexts.forEach((context) => { - * script.runInNewContext(context); - * }); - * - * console.log(contexts); - * // Prints: [{ globalVar: 'set' }, { globalVar: 'set' }, { globalVar: 'set' }] - * ``` - * @since v0.3.1 - * @param contextObject An object that will be `contextified`. If `undefined`, a new object will be created. - * @return the result of the very last statement executed in the script. - */ - runInNewContext(contextObject?: Context, options?: RunningScriptOptions): any; - /** - * Runs the compiled code contained by the `vm.Script` within the context of the - * current `global` object. Running code does not have access to local scope, but _does_ have access to the current `global` object. 
- * - * The following example compiles code that increments a `global` variable then - * executes that code multiple times: - * - * ```js - * const vm = require('vm'); - * - * global.globalVar = 0; - * - * const script = new vm.Script('globalVar += 1', { filename: 'myfile.vm' }); - * - * for (let i = 0; i < 1000; ++i) { - * script.runInThisContext(); - * } - * - * console.log(globalVar); - * - * // 1000 - * ``` - * @since v0.3.1 - * @return the result of the very last statement executed in the script. - */ - runInThisContext(options?: RunningScriptOptions): any; - /** - * Creates a code cache that can be used with the `Script` constructor's`cachedData` option. Returns a `Buffer`. This method may be called at any - * time and any number of times. - * - * ```js - * const script = new vm.Script(` - * function add(a, b) { - * return a + b; - * } - * - * const x = add(1, 2); - * `); - * - * const cacheWithoutX = script.createCachedData(); - * - * script.runInThisContext(); - * - * const cacheWithX = script.createCachedData(); - * ``` - * @since v10.6.0 - */ - createCachedData(): Buffer; - /** @deprecated in favor of `script.createCachedData()` */ - cachedDataProduced?: boolean | undefined; - cachedDataRejected?: boolean | undefined; - cachedData?: Buffer | undefined; - } - /** - * If given a `contextObject`, the `vm.createContext()` method will `prepare - * that object` so that it can be used in calls to {@link runInContext} or `script.runInContext()`. Inside such scripts, - * the `contextObject` will be the global object, retaining all of its existing - * properties but also having the built-in objects and functions any standard [global object](https://es5.github.io/#x15.1) has. Outside of scripts run by the vm module, global variables - * will remain unchanged. - * - * ```js - * const vm = require('vm'); - * - * global.globalVar = 3; - * - * const context = { globalVar: 1 }; - * vm.createContext(context); - * - * vm.runInContext('globalVar *= 2;', context); - * - * console.log(context); - * // Prints: { globalVar: 2 } - * - * console.log(global.globalVar); - * // Prints: 3 - * ``` - * - * If `contextObject` is omitted (or passed explicitly as `undefined`), a new, - * empty `contextified` object will be returned. - * - * The `vm.createContext()` method is primarily useful for creating a single - * context that can be used to run multiple scripts. For instance, if emulating a - * web browser, the method can be used to create a single context representing a - * window's global object, then run all ` -""" -with open(os.path.join(gr.networking.STATIC_TEMPLATE_LIB, "frontend", "index.html"), "a") as f: - f.write(SCRIPT) - - - -repo = Repository( - local_dir="data", clone_from=DATASET_REPO_URL, use_auth_token=HF_TOKEN -) - - -def generate_html() -> str: - with open(DATA_FILE) as csvfile: - reader = csv.DictReader(csvfile) - rows = [] - for row in reader: - rows.append(row) - rows.reverse() - if len(rows) == 0: - return "no messages yet" - else: - html = "
          " - for row in rows: - html += "
          " - html += f"{row['name']}" - html += f"{row['message']}" - html += "
          " - html += "
          " - return html - - -def store_message(name: str, message: str): - if name and message: - with open(DATA_FILE, "a") as csvfile: - writer = csv.DictWriter(csvfile, fieldnames=["name", "message", "time"]) - writer.writerow( - {"name": name, "message": message, "time": str(datetime.now())} - ) - commit_url = repo.push_to_hub() - print(commit_url) - - return generate_html() - - -iface = gr.Interface( - store_message, - [ - inputs.Textbox(placeholder="Your name"), - inputs.Textbox(placeholder="Your message", lines=2), - ], - "html", - css=""" - .message {background-color:cornflowerblue;color:white; padding:4px;margin:4px;border-radius:4px; } - """, - title="Reading/writing to a HuggingFace dataset repo from Spaces", - description=f"This is a demo of how to do simple *shared data persistence* in a Gradio Space, backed by a dataset repo.", - article=f"The dataset repo is [{DATASET_REPO_URL}]({DATASET_REPO_URL}) (open in new tab)", -) - -iface.launch() diff --git a/spaces/justest/gpt4free/g4f/.v1/testing/theb_test.py b/spaces/justest/gpt4free/g4f/.v1/testing/theb_test.py deleted file mode 100644 index 5fa80908c401a98afe362c261344c0b8624f94e9..0000000000000000000000000000000000000000 --- a/spaces/justest/gpt4free/g4f/.v1/testing/theb_test.py +++ /dev/null @@ -1,5 +0,0 @@ -from gpt4free import theb - -for token in theb.Completion.create('hello world'): - print(token, end='', flush=True) - print('asdsos') diff --git a/spaces/jy46604790/Fake-News-Recognition/Part4.md b/spaces/jy46604790/Fake-News-Recognition/Part4.md deleted file mode 100644 index b8aea823261bb4cd144f7a0975affb1a877b1ecf..0000000000000000000000000000000000000000 --- a/spaces/jy46604790/Fake-News-Recognition/Part4.md +++ /dev/null @@ -1,10 +0,0 @@ -## Critical Analysis - -1. The current model can only take first 500 words in the text due to training resources. It might cause problems when a long text is entered into the model which context is highly relevant. - -2. When the input text is too short, it often judges it as fake news due to our strategy of truncating first 500 words and padding short text. - - ### Further improvement - - 1. We may add more features such as title, author information to improve the model. - 2. The current pre-trained model RoBERTa is still quite slow when trainning on big datasets. Probably we can try other models and compare the runtime and performance of the models. 
\ No newline at end of file diff --git a/spaces/jyseo/3DFuse/cldm/logger.py b/spaces/jyseo/3DFuse/cldm/logger.py deleted file mode 100644 index 6a8803846f2a8979f87f3cf9ea5b12869439e62f..0000000000000000000000000000000000000000 --- a/spaces/jyseo/3DFuse/cldm/logger.py +++ /dev/null @@ -1,76 +0,0 @@ -import os - -import numpy as np -import torch -import torchvision -from PIL import Image -from pytorch_lightning.callbacks import Callback -from pytorch_lightning.utilities.distributed import rank_zero_only - - -class ImageLogger(Callback): - def __init__(self, batch_frequency=2000, max_images=4, clamp=True, increase_log_steps=True, - rescale=True, disabled=False, log_on_batch_idx=False, log_first_step=False, - log_images_kwargs=None): - super().__init__() - self.rescale = rescale - self.batch_freq = batch_frequency - self.max_images = max_images - if not increase_log_steps: - self.log_steps = [self.batch_freq] - self.clamp = clamp - self.disabled = disabled - self.log_on_batch_idx = log_on_batch_idx - self.log_images_kwargs = log_images_kwargs if log_images_kwargs else {} - self.log_first_step = log_first_step - - @rank_zero_only - def log_local(self, save_dir, split, images, global_step, current_epoch, batch_idx): - root = os.path.join(save_dir, "image_log", split) - for k in images: - grid = torchvision.utils.make_grid(images[k], nrow=4) - if self.rescale: - grid = (grid + 1.0) / 2.0 # -1,1 -> 0,1; c,h,w - grid = grid.transpose(0, 1).transpose(1, 2).squeeze(-1) - grid = grid.numpy() - grid = (grid * 255).astype(np.uint8) - filename = "{}_gs-{:06}_e-{:06}_b-{:06}.png".format(k, global_step, current_epoch, batch_idx) - path = os.path.join(root, filename) - os.makedirs(os.path.split(path)[0], exist_ok=True) - Image.fromarray(grid).save(path) - - def log_img(self, pl_module, batch, batch_idx, split="train"): - check_idx = batch_idx # if self.log_on_batch_idx else pl_module.global_step - if (self.check_frequency(check_idx) and # batch_idx % self.batch_freq == 0 - hasattr(pl_module, "log_images") and - callable(pl_module.log_images) and - self.max_images > 0): - logger = type(pl_module.logger) - - is_train = pl_module.training - if is_train: - pl_module.eval() - - with torch.no_grad(): - images = pl_module.log_images(batch, split=split, **self.log_images_kwargs) - - for k in images: - N = min(images[k].shape[0], self.max_images) - images[k] = images[k][:N] - if isinstance(images[k], torch.Tensor): - images[k] = images[k].detach().cpu() - if self.clamp: - images[k] = torch.clamp(images[k], -1., 1.) - - self.log_local(pl_module.logger.save_dir, split, images, - pl_module.global_step, pl_module.current_epoch, batch_idx) - - if is_train: - pl_module.train() - - def check_frequency(self, check_idx): - return check_idx % self.batch_freq == 0 - - def on_train_batch_end(self, trainer, pl_module, outputs, batch, batch_idx, dataloader_idx): - if not self.disabled: - self.log_img(pl_module, batch, batch_idx, split="train") diff --git a/spaces/kangjian99/Panel_PDF_QA/Dockerfile b/spaces/kangjian99/Panel_PDF_QA/Dockerfile deleted file mode 100644 index 473cf11d7894728d53fadf84e63b9ad292ce1596..0000000000000000000000000000000000000000 --- a/spaces/kangjian99/Panel_PDF_QA/Dockerfile +++ /dev/null @@ -1,16 +0,0 @@ -FROM python:3.9 - -WORKDIR /code - -COPY ./requirements.txt /code/requirements.txt -RUN python3 -m pip install --no-cache-dir --upgrade pip -RUN python3 -m pip install --no-cache-dir --upgrade -r /code/requirements.txt - -COPY . . 
- -CMD ["panel", "serve", "/code/LangChain_QA_Panel_App.ipynb", "--address", "0.0.0.0", "--port", "7860", "--allow-websocket-origin", "kangjian99-panel-pdf-qa.hf.space", "--allow-websocket-origin", "0.0.0.0:7860"] - -RUN mkdir /.cache -RUN chmod 777 /.cache -RUN mkdir .chroma -RUN chmod 777 .chroma \ No newline at end of file diff --git a/spaces/kaushalya/medclip-roco/tools/create_embeddings.py b/spaces/kaushalya/medclip-roco/tools/create_embeddings.py deleted file mode 100644 index 6f07374aa19976706e1ed821ec0efb9cdf0a92ff..0000000000000000000000000000000000000000 --- a/spaces/kaushalya/medclip-roco/tools/create_embeddings.py +++ /dev/null @@ -1,39 +0,0 @@ -import os -import jax - -from transformers import AutoTokenizer, CLIPProcessor -from configuration_hybrid_clip import HybridCLIPConfig -from modeling_hybrid_clip import FlaxHybridCLIP -from PIL import Image - -import matplotlib.pyplot as plt -import torch -import torchvision -from torchvision.transforms.functional import InterpolationMode -from torchvision.transforms import Resize, Normalize, ConvertImageDtype, ToTensor -import numpy as np -import pandas as pd - - -def main(): - model = FlaxHybridCLIP.from_pretrained("flax-community/medclip-roco") - vision_model_name = "openai/clip-vit-base-patch32" - img_dir = "/Users/kaumad/Documents/coding/hf-flax/demo/medclip-roco/images" - - processor = CLIPProcessor.from_pretrained(vision_model_name) - - img_list = os.listdir(img_dir) - embeddings = [] - - for idx, img_path in enumerate(img_list): - if idx % 10 == 0: - print(f"{idx} images processed") - img = Image.open(os.path.join(img_dir, img_path)).convert('RGB') - inputs = processor(images=img, return_tensors="jax", padding=True) - inputs['pixel_values'] = inputs['pixel_values'].transpose(0, 2, 3, 1) - img_vec = model.get_image_features(**inputs) - img_vec = np.array(img_vec).reshape(-1).tolist() - embeddings.append(img_vec) - -if __name__=='__main__': - main() \ No newline at end of file diff --git a/spaces/kazuk/youtube-whisper-00/README.md b/spaces/kazuk/youtube-whisper-00/README.md deleted file mode 100644 index c3180680339155aaf1d27f629129b68d12cac021..0000000000000000000000000000000000000000 --- a/spaces/kazuk/youtube-whisper-00/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Youtube Whisper -emoji: ⚡ -colorFrom: green -colorTo: red -sdk: gradio -sdk_version: 3.16.2 -app_file: app.py -pinned: false -license: unknown -duplicated_from: kazuk/youtube-whisper ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/keras-io/TabTransformer_Classification/README.md b/spaces/keras-io/TabTransformer_Classification/README.md deleted file mode 100644 index 3bbcb7ab5be10ec6b8ebda19725d209c9a27f1b7..0000000000000000000000000000000000000000 --- a/spaces/keras-io/TabTransformer_Classification/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: TabTransformer Classification -emoji: 👨‍💻 -colorFrom: yellow -colorTo: purple -sdk: gradio -sdk_version: 3.0.15 -app_file: app.py -pinned: false -license: apache-2.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/kevinwang676/Bark-with-Voice-Cloning/bark/hubert/pre_kmeans_hubert.py b/spaces/kevinwang676/Bark-with-Voice-Cloning/bark/hubert/pre_kmeans_hubert.py deleted file mode 100644 index 5208bd2792dd32e7f761ae787927a70bdcb2e5d6..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/Bark-with-Voice-Cloning/bark/hubert/pre_kmeans_hubert.py +++ 
/dev/null @@ -1,107 +0,0 @@ -""" -Modified HuBERT model without kmeans. -Original author: https://github.com/lucidrains/ -Modified by: https://www.github.com/gitmylo/ -License: MIT -""" - -# Modified code from https://github.com/lucidrains/audiolm-pytorch/blob/main/audiolm_pytorch/hubert_kmeans.py - -from pathlib import Path - -import torch -from torch import nn -from einops import pack, unpack - -import fairseq - -from torchaudio.functional import resample - -from audiolm_pytorch.utils import curtail_to_multiple - -import logging -logging.root.setLevel(logging.ERROR) - - -def exists(val): - return val is not None - - -def default(val, d): - return val if exists(val) else d - - -class CustomHubert(nn.Module): - """ - checkpoint and kmeans can be downloaded at https://github.com/facebookresearch/fairseq/tree/main/examples/hubert - or you can train your own - """ - - def __init__( - self, - checkpoint_path, - target_sample_hz=16000, - seq_len_multiple_of=None, - output_layer=9, - device=None - ): - super().__init__() - self.target_sample_hz = target_sample_hz - self.seq_len_multiple_of = seq_len_multiple_of - self.output_layer = output_layer - - if device is not None: - self.to(device) - - model_path = Path(checkpoint_path) - - assert model_path.exists(), f'path {checkpoint_path} does not exist' - - print(f"Loading Hubert {checkpoint_path}") - checkpoint = torch.load(checkpoint_path) - load_model_input = {checkpoint_path: checkpoint} - model, *_ = fairseq.checkpoint_utils.load_model_ensemble_and_task(load_model_input) - - if device is not None: - model[0].to(device) - - self.model = model[0] - self.model.eval() - - @property - def groups(self): - return 1 - - @torch.no_grad() - def forward( - self, - wav_input, - flatten=True, - input_sample_hz=None - ): - device = wav_input.device - - if exists(input_sample_hz): - wav_input = resample(wav_input, input_sample_hz, self.target_sample_hz) - - if exists(self.seq_len_multiple_of): - wav_input = curtail_to_multiple(wav_input, self.seq_len_multiple_of) - - embed = self.model( - wav_input, - features_only=True, - mask=False, # thanks to @maitycyrus for noticing that mask is defaulted to True in the fairseq code - output_layer=self.output_layer - ) - - embed, packed_shape = pack([embed['x']], '* d') - - # codebook_indices = self.kmeans.predict(embed.cpu().detach().numpy()) - - codebook_indices = torch.from_numpy(embed.cpu().detach().numpy()).to(device) # .long() - - if flatten: - return codebook_indices - - codebook_indices, = unpack(codebook_indices, packed_shape, '*') - return codebook_indices diff --git a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/mel_processing.py b/spaces/kevinwang676/ChatGLM2-SadTalker-VC/mel_processing.py deleted file mode 100644 index 99c5b35beb83f3b288af0fac5b49ebf2c69f062c..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/ChatGLM2-SadTalker-VC/mel_processing.py +++ /dev/null @@ -1,112 +0,0 @@ -import math -import os -import random -import torch -from torch import nn -import torch.nn.functional as F -import torch.utils.data -import numpy as np -import librosa -import librosa.util as librosa_util -from librosa.util import normalize, pad_center, tiny -from scipy.signal import get_window -from scipy.io.wavfile import read -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def 
dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - -mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=num_mels, fmin=fmin, fmax=fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/kevinwang676/SadTalker/src/facerender/modules/make_animation.py b/spaces/kevinwang676/SadTalker/src/facerender/modules/make_animation.py deleted file mode 100644 index 3360c53501a064f35d7db21a5361f89aa9658b42..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/SadTalker/src/facerender/modules/make_animation.py +++ /dev/null @@ -1,170 
+0,0 @@ -from scipy.spatial import ConvexHull -import torch -import torch.nn.functional as F -import numpy as np -from tqdm import tqdm - -def normalize_kp(kp_source, kp_driving, kp_driving_initial, adapt_movement_scale=False, - use_relative_movement=False, use_relative_jacobian=False): - if adapt_movement_scale: - source_area = ConvexHull(kp_source['value'][0].data.cpu().numpy()).volume - driving_area = ConvexHull(kp_driving_initial['value'][0].data.cpu().numpy()).volume - adapt_movement_scale = np.sqrt(source_area) / np.sqrt(driving_area) - else: - adapt_movement_scale = 1 - - kp_new = {k: v for k, v in kp_driving.items()} - - if use_relative_movement: - kp_value_diff = (kp_driving['value'] - kp_driving_initial['value']) - kp_value_diff *= adapt_movement_scale - kp_new['value'] = kp_value_diff + kp_source['value'] - - if use_relative_jacobian: - jacobian_diff = torch.matmul(kp_driving['jacobian'], torch.inverse(kp_driving_initial['jacobian'])) - kp_new['jacobian'] = torch.matmul(jacobian_diff, kp_source['jacobian']) - - return kp_new - -def headpose_pred_to_degree(pred): - device = pred.device - idx_tensor = [idx for idx in range(66)] - idx_tensor = torch.FloatTensor(idx_tensor).type_as(pred).to(device) - pred = F.softmax(pred) - degree = torch.sum(pred*idx_tensor, 1) * 3 - 99 - return degree - -def get_rotation_matrix(yaw, pitch, roll): - yaw = yaw / 180 * 3.14 - pitch = pitch / 180 * 3.14 - roll = roll / 180 * 3.14 - - roll = roll.unsqueeze(1) - pitch = pitch.unsqueeze(1) - yaw = yaw.unsqueeze(1) - - pitch_mat = torch.cat([torch.ones_like(pitch), torch.zeros_like(pitch), torch.zeros_like(pitch), - torch.zeros_like(pitch), torch.cos(pitch), -torch.sin(pitch), - torch.zeros_like(pitch), torch.sin(pitch), torch.cos(pitch)], dim=1) - pitch_mat = pitch_mat.view(pitch_mat.shape[0], 3, 3) - - yaw_mat = torch.cat([torch.cos(yaw), torch.zeros_like(yaw), torch.sin(yaw), - torch.zeros_like(yaw), torch.ones_like(yaw), torch.zeros_like(yaw), - -torch.sin(yaw), torch.zeros_like(yaw), torch.cos(yaw)], dim=1) - yaw_mat = yaw_mat.view(yaw_mat.shape[0], 3, 3) - - roll_mat = torch.cat([torch.cos(roll), -torch.sin(roll), torch.zeros_like(roll), - torch.sin(roll), torch.cos(roll), torch.zeros_like(roll), - torch.zeros_like(roll), torch.zeros_like(roll), torch.ones_like(roll)], dim=1) - roll_mat = roll_mat.view(roll_mat.shape[0], 3, 3) - - rot_mat = torch.einsum('bij,bjk,bkm->bim', pitch_mat, yaw_mat, roll_mat) - - return rot_mat - -def keypoint_transformation(kp_canonical, he, wo_exp=False): - kp = kp_canonical['value'] # (bs, k, 3) - yaw, pitch, roll= he['yaw'], he['pitch'], he['roll'] - yaw = headpose_pred_to_degree(yaw) - pitch = headpose_pred_to_degree(pitch) - roll = headpose_pred_to_degree(roll) - - if 'yaw_in' in he: - yaw = he['yaw_in'] - if 'pitch_in' in he: - pitch = he['pitch_in'] - if 'roll_in' in he: - roll = he['roll_in'] - - rot_mat = get_rotation_matrix(yaw, pitch, roll) # (bs, 3, 3) - - t, exp = he['t'], he['exp'] - if wo_exp: - exp = exp*0 - - # keypoint rotation - kp_rotated = torch.einsum('bmp,bkp->bkm', rot_mat, kp) - - # keypoint translation - t[:, 0] = t[:, 0]*0 - t[:, 2] = t[:, 2]*0 - t = t.unsqueeze(1).repeat(1, kp.shape[1], 1) - kp_t = kp_rotated + t - - # add expression deviation - exp = exp.view(exp.shape[0], -1, 3) - kp_transformed = kp_t + exp - - return {'value': kp_transformed} - - - -def make_animation(source_image, source_semantics, target_semantics, - generator, kp_detector, he_estimator, mapping, - yaw_c_seq=None, pitch_c_seq=None, roll_c_seq=None, - use_exp=True, 
use_half=False): - with torch.no_grad(): - predictions = [] - - kp_canonical = kp_detector(source_image) - he_source = mapping(source_semantics) - kp_source = keypoint_transformation(kp_canonical, he_source) - - for frame_idx in tqdm(range(target_semantics.shape[1]), 'Face Renderer:'): - # still check the dimension - # print(target_semantics.shape, source_semantics.shape) - target_semantics_frame = target_semantics[:, frame_idx] - he_driving = mapping(target_semantics_frame) - if yaw_c_seq is not None: - he_driving['yaw_in'] = yaw_c_seq[:, frame_idx] - if pitch_c_seq is not None: - he_driving['pitch_in'] = pitch_c_seq[:, frame_idx] - if roll_c_seq is not None: - he_driving['roll_in'] = roll_c_seq[:, frame_idx] - - kp_driving = keypoint_transformation(kp_canonical, he_driving) - - kp_norm = kp_driving - out = generator(source_image, kp_source=kp_source, kp_driving=kp_norm) - ''' - source_image_new = out['prediction'].squeeze(1) - kp_canonical_new = kp_detector(source_image_new) - he_source_new = he_estimator(source_image_new) - kp_source_new = keypoint_transformation(kp_canonical_new, he_source_new, wo_exp=True) - kp_driving_new = keypoint_transformation(kp_canonical_new, he_driving, wo_exp=True) - out = generator(source_image_new, kp_source=kp_source_new, kp_driving=kp_driving_new) - ''' - predictions.append(out['prediction']) - predictions_ts = torch.stack(predictions, dim=1) - return predictions_ts - -class AnimateModel(torch.nn.Module): - """ - Merge all generator related updates into single model for better multi-gpu usage - """ - - def __init__(self, generator, kp_extractor, mapping): - super(AnimateModel, self).__init__() - self.kp_extractor = kp_extractor - self.generator = generator - self.mapping = mapping - - self.kp_extractor.eval() - self.generator.eval() - self.mapping.eval() - - def forward(self, x): - - source_image = x['source_image'] - source_semantics = x['source_semantics'] - target_semantics = x['target_semantics'] - yaw_c_seq = x['yaw_c_seq'] - pitch_c_seq = x['pitch_c_seq'] - roll_c_seq = x['roll_c_seq'] - - predictions_video = make_animation(source_image, source_semantics, target_semantics, - self.generator, self.kp_extractor, - self.mapping, use_exp = True, - yaw_c_seq=yaw_c_seq, pitch_c_seq=pitch_c_seq, roll_c_seq=roll_c_seq) - - return predictions_video \ No newline at end of file diff --git a/spaces/kevinwang676/VoiceChangers/src/face3d/models/losses.py b/spaces/kevinwang676/VoiceChangers/src/face3d/models/losses.py deleted file mode 100644 index 09d6a85870af1ef2b857e4a3fdd4b2f7fc991317..0000000000000000000000000000000000000000 --- a/spaces/kevinwang676/VoiceChangers/src/face3d/models/losses.py +++ /dev/null @@ -1,113 +0,0 @@ -import numpy as np -import torch -import torch.nn as nn -from kornia.geometry import warp_affine -import torch.nn.functional as F - -def resize_n_crop(image, M, dsize=112): - # image: (b, c, h, w) - # M : (b, 2, 3) - return warp_affine(image, M, dsize=(dsize, dsize), align_corners=True) - -### perceptual level loss -class PerceptualLoss(nn.Module): - def __init__(self, recog_net, input_size=112): - super(PerceptualLoss, self).__init__() - self.recog_net = recog_net - self.preprocess = lambda x: 2 * x - 1 - self.input_size=input_size - def forward(imageA, imageB, M): - """ - 1 - cosine distance - Parameters: - imageA --torch.tensor (B, 3, H, W), range (0, 1) , RGB order - imageB --same as imageA - """ - - imageA = self.preprocess(resize_n_crop(imageA, M, self.input_size)) - imageB = self.preprocess(resize_n_crop(imageB, M, self.input_size)) 
- - # freeze bn - self.recog_net.eval() - - id_featureA = F.normalize(self.recog_net(imageA), dim=-1, p=2) - id_featureB = F.normalize(self.recog_net(imageB), dim=-1, p=2) - cosine_d = torch.sum(id_featureA * id_featureB, dim=-1) - # assert torch.sum((cosine_d > 1).float()) == 0 - return torch.sum(1 - cosine_d) / cosine_d.shape[0] - -def perceptual_loss(id_featureA, id_featureB): - cosine_d = torch.sum(id_featureA * id_featureB, dim=-1) - # assert torch.sum((cosine_d > 1).float()) == 0 - return torch.sum(1 - cosine_d) / cosine_d.shape[0] - -### image level loss -def photo_loss(imageA, imageB, mask, eps=1e-6): - """ - l2 norm (with sqrt, to ensure backward stabililty, use eps, otherwise Nan may occur) - Parameters: - imageA --torch.tensor (B, 3, H, W), range (0, 1), RGB order - imageB --same as imageA - """ - loss = torch.sqrt(eps + torch.sum((imageA - imageB) ** 2, dim=1, keepdims=True)) * mask - loss = torch.sum(loss) / torch.max(torch.sum(mask), torch.tensor(1.0).to(mask.device)) - return loss - -def landmark_loss(predict_lm, gt_lm, weight=None): - """ - weighted mse loss - Parameters: - predict_lm --torch.tensor (B, 68, 2) - gt_lm --torch.tensor (B, 68, 2) - weight --numpy.array (1, 68) - """ - if not weight: - weight = np.ones([68]) - weight[28:31] = 20 - weight[-8:] = 20 - weight = np.expand_dims(weight, 0) - weight = torch.tensor(weight).to(predict_lm.device) - loss = torch.sum((predict_lm - gt_lm)**2, dim=-1) * weight - loss = torch.sum(loss) / (predict_lm.shape[0] * predict_lm.shape[1]) - return loss - - -### regulization -def reg_loss(coeffs_dict, opt=None): - """ - l2 norm without the sqrt, from yu's implementation (mse) - tf.nn.l2_loss https://www.tensorflow.org/api_docs/python/tf/nn/l2_loss - Parameters: - coeffs_dict -- a dict of torch.tensors , keys: id, exp, tex, angle, gamma, trans - - """ - # coefficient regularization to ensure plausible 3d faces - if opt: - w_id, w_exp, w_tex = opt.w_id, opt.w_exp, opt.w_tex - else: - w_id, w_exp, w_tex = 1, 1, 1, 1 - creg_loss = w_id * torch.sum(coeffs_dict['id'] ** 2) + \ - w_exp * torch.sum(coeffs_dict['exp'] ** 2) + \ - w_tex * torch.sum(coeffs_dict['tex'] ** 2) - creg_loss = creg_loss / coeffs_dict['id'].shape[0] - - # gamma regularization to ensure a nearly-monochromatic light - gamma = coeffs_dict['gamma'].reshape([-1, 3, 9]) - gamma_mean = torch.mean(gamma, dim=1, keepdims=True) - gamma_loss = torch.mean((gamma - gamma_mean) ** 2) - - return creg_loss, gamma_loss - -def reflectance_loss(texture, mask): - """ - minimize texture variance (mse), albedo regularization to ensure an uniform skin albedo - Parameters: - texture --torch.tensor, (B, N, 3) - mask --torch.tensor, (N), 1 or 0 - - """ - mask = mask.reshape([1, mask.shape[0], 1]) - texture_mean = torch.sum(mask * texture, dim=1, keepdims=True) / torch.sum(mask) - loss = torch.sum(((texture - texture_mean) * mask)**2) / (texture.shape[0] * torch.sum(mask)) - return loss - diff --git a/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/preprocessing/__init__.py b/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/preprocessing/__init__.py deleted file mode 100644 index 6264236915a7269a4d920ee8213004374dd86a9a..0000000000000000000000000000000000000000 --- a/spaces/koajoel/PolyFormer/fairseq/examples/speech_synthesis/preprocessing/__init__.py +++ /dev/null @@ -1,4 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
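The docstrings in the losses.py deletion above describe a masked photometric L2 term and a weighted landmark MSE. The standalone PyTorch sketch below restates just those two terms on toy tensors so the shapes and weighting are easier to see; the 20x weights on indices 28:31 and the last 8 landmarks follow the deleted docstrings and code, while the tensor sizes and random inputs are purely illustrative assumptions.

```python
import torch

def photo_loss(image_a, image_b, mask, eps=1e-6):
    # Masked L2 photometric distance; eps keeps the sqrt stable (no NaN in backward).
    per_pixel = torch.sqrt(eps + torch.sum((image_a - image_b) ** 2, dim=1, keepdim=True)) * mask
    return per_pixel.sum() / torch.clamp(mask.sum(), min=1.0)

def landmark_loss(pred_lm, gt_lm):
    # Weighted MSE over 68 landmarks; indices 28:31 and the last 8 points get 20x weight,
    # mirroring the weighting in the deleted code.
    weight = torch.ones(68)
    weight[28:31] = 20.0
    weight[-8:] = 20.0
    loss = torch.sum((pred_lm - gt_lm) ** 2, dim=-1) * weight
    return loss.sum() / (pred_lm.shape[0] * pred_lm.shape[1])

# Toy usage on random tensors: batch of 2, 3x64x64 images, 68 two-dimensional landmarks.
img_a, img_b = torch.rand(2, 3, 64, 64), torch.rand(2, 3, 64, 64)
mask = torch.ones(2, 1, 64, 64)
lm_pred, lm_gt = torch.rand(2, 68, 2), torch.rand(2, 68, 2)
print(photo_loss(img_a, img_b, mask).item(), landmark_loss(lm_pred, lm_gt).item())
```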
diff --git a/spaces/kurianbenoy/audioclassification/article.md b/spaces/kurianbenoy/audioclassification/article.md deleted file mode 100644 index 492077ead07ce661656739dfbd488e0cc6043ff3..0000000000000000000000000000000000000000 --- a/spaces/kurianbenoy/audioclassification/article.md +++ /dev/null @@ -1,44 +0,0 @@ -> Note: The examples provided don't work in Safari, in case people are trying to access the demo on a Mac. Please try it in a different browser. - -During the first lesson of the Practical Deep Learning for Coders course, Jeremy mentioned how, by being a bit creative, a simple computer vision model can be used to build a state-of-the-art model that classifies audio with the same image classification approach. I was curious about how I could train a music classifier, as I had never worked on audio data problems before. - - -[You can find how I trained this music genre classifier using fast.ai in this blogpost.](https://kurianbenoy.com/posts/2022/2022-05-01-audiocnndemo.html) - -## Dataset - -1. [The competition data](https://www.kaggle.com/competitions/kaggle-pog-series-s01e02/data) -2. [Image data generated by converting audio to mel spectrograms in the form of images](https://www.kaggle.com/datasets/dienhoa/music-genre-spectrogram-pogchamps) - - -## Training - -Fast.ai was used to train this classifier with a ResNet50 vision learner for 10 epochs. - -| epoch | train_loss | valid_loss | error_rate | time | -|-------|---------------|---------------|---------------|-------| -|0 | 2.312176 | 1.843815 | 0.558654 | 02:07 | -|1 | 2.102361 | 1.719162 | 0.539061 | 02:08 | -|2 | 1.867139 | 1.623988 | 0.527003 | 02:08 | -|3 | 1.710557 | 1.527913 | 0.507661 | 02:07 | -|4 | 1.629478 | 1.456836 | 0.479779 | 02:05 | -|5 | 1.519305 | 1.433036 | 0.474253 | 02:05 | -|6 | 1.457465 | 1.379757 | 0.464456 | 02:05 | -|7 | 1.396283 | 1.369344 | 0.457925 | 02:05 | -|8 | 1.359388 | 1.367973 | 0.453655 | 02:05 | -|9 | 1.364363 | 1.368887 | 0.456167 | 02:04 | - - -## Examples - -The example images provided in the demo are from the validation split of the Kaggle competition data, which was not used during training. - -## Credits - -Thanks [Dien Hoa Truong](https://twitter.com/DienhoaT) for providing the [inference code](https://www.kaggle.com/code/dienhoa/inference-submission-music-genre) for the end-to-end pipeline of loading audio, converting it to mel spectrograms, and then making predictions. - -Thanks [@suvash](https://twitter.com/suvash) for helping me get started with Hugging Face -Spaces and for his [excellent space](https://huggingface.co/spaces/suvash/food-101-resnet50), which was a reference for this work. - -Thanks [@strickvl](https://twitter.com/strickvl) for reporting the issue in the Safari browser -and for trying this space out. 
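The pipeline the article describes (audio clip to mel spectrogram image to an ordinary image classifier) can be sketched in a few lines. The snippet below is a minimal illustration using librosa and matplotlib rather than the exact preprocessing behind the linked Kaggle dataset; the file name, sample rate, and mel parameters are assumed values.

```python
import librosa
import librosa.display
import matplotlib.pyplot as plt
import numpy as np

# Hypothetical input clip; any short audio file readable by librosa will do.
wav_path = "example_clip.ogg"

# Load the audio and compute a 128-band mel spectrogram in decibels.
y, sr = librosa.load(wav_path, sr=22050)
mel = librosa.feature.melspectrogram(y=y, sr=sr, n_mels=128)
mel_db = librosa.power_to_db(mel, ref=np.max)

# Save the spectrogram as a plain image that a vision learner (e.g. ResNet50) can consume.
fig, ax = plt.subplots(figsize=(4, 4))
librosa.display.specshow(mel_db, sr=sr, ax=ax)
ax.axis("off")
fig.savefig("example_clip.png", bbox_inches="tight", pad_inches=0)
plt.close(fig)
```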
diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/colorLib/errors.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/colorLib/errors.py deleted file mode 100644 index 18cbebbaf91ff7d5a515321a006be3eb1d83faaf..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/colorLib/errors.py +++ /dev/null @@ -1,2 +0,0 @@ -class ColorLibError(Exception): - pass diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/subset/cff.py b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/subset/cff.py deleted file mode 100644 index dd79f6db37a482891b6f151159ef4c9b89475b8e..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/fontTools/subset/cff.py +++ /dev/null @@ -1,536 +0,0 @@ -from fontTools.misc import psCharStrings -from fontTools import ttLib -from fontTools.pens.basePen import NullPen -from fontTools.misc.roundTools import otRound -from fontTools.misc.loggingTools import deprecateFunction -from fontTools.subset.util import _add_method, _uniq_sort - - -class _ClosureGlyphsT2Decompiler(psCharStrings.SimpleT2Decompiler): - def __init__(self, components, localSubrs, globalSubrs): - psCharStrings.SimpleT2Decompiler.__init__(self, localSubrs, globalSubrs) - self.components = components - - def op_endchar(self, index): - args = self.popall() - if len(args) >= 4: - from fontTools.encodings.StandardEncoding import StandardEncoding - - # endchar can do seac accent bulding; The T2 spec says it's deprecated, - # but recent software that shall remain nameless does output it. - adx, ady, bchar, achar = args[-4:] - baseGlyph = StandardEncoding[bchar] - accentGlyph = StandardEncoding[achar] - self.components.add(baseGlyph) - self.components.add(accentGlyph) - - -@_add_method(ttLib.getTableClass("CFF ")) -def closure_glyphs(self, s): - cff = self.cff - assert len(cff) == 1 - font = cff[cff.keys()[0]] - glyphSet = font.CharStrings - - decompose = s.glyphs - while decompose: - components = set() - for g in decompose: - if g not in glyphSet: - continue - gl = glyphSet[g] - - subrs = getattr(gl.private, "Subrs", []) - decompiler = _ClosureGlyphsT2Decompiler(components, subrs, gl.globalSubrs) - decompiler.execute(gl) - components -= s.glyphs - s.glyphs.update(components) - decompose = components - - -def _empty_charstring(font, glyphName, isCFF2, ignoreWidth=False): - c, fdSelectIndex = font.CharStrings.getItemAndSelector(glyphName) - if isCFF2 or ignoreWidth: - # CFF2 charstrings have no widths nor 'endchar' operators - c.setProgram([] if isCFF2 else ["endchar"]) - else: - if hasattr(font, "FDArray") and font.FDArray is not None: - private = font.FDArray[fdSelectIndex].Private - else: - private = font.Private - dfltWdX = private.defaultWidthX - nmnlWdX = private.nominalWidthX - pen = NullPen() - c.draw(pen) # this will set the charstring's width - if c.width != dfltWdX: - c.program = [c.width - nmnlWdX, "endchar"] - else: - c.program = ["endchar"] - - -@_add_method(ttLib.getTableClass("CFF ")) -def prune_pre_subset(self, font, options): - cff = self.cff - # CFF table must have one font only - cff.fontNames = cff.fontNames[:1] - - if options.notdef_glyph and not options.notdef_outline: - isCFF2 = cff.major > 1 - for fontname in cff.keys(): - font = cff[fontname] - _empty_charstring(font, ".notdef", isCFF2=isCFF2) - - # Clear useless Encoding - 
for fontname in cff.keys(): - font = cff[fontname] - # https://github.com/fonttools/fonttools/issues/620 - font.Encoding = "StandardEncoding" - - return True # bool(cff.fontNames) - - -@_add_method(ttLib.getTableClass("CFF ")) -def subset_glyphs(self, s): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - glyphs = s.glyphs.union(s.glyphs_emptied) - - # Load all glyphs - for g in font.charset: - if g not in glyphs: - continue - c, _ = cs.getItemAndSelector(g) - - if cs.charStringsAreIndexed: - indices = [i for i, g in enumerate(font.charset) if g in glyphs] - csi = cs.charStringsIndex - csi.items = [csi.items[i] for i in indices] - del csi.file, csi.offsets - if hasattr(font, "FDSelect"): - sel = font.FDSelect - sel.format = None - sel.gidArray = [sel.gidArray[i] for i in indices] - newCharStrings = {} - for indicesIdx, charsetIdx in enumerate(indices): - g = font.charset[charsetIdx] - if g in cs.charStrings: - newCharStrings[g] = indicesIdx - cs.charStrings = newCharStrings - else: - cs.charStrings = {g: v for g, v in cs.charStrings.items() if g in glyphs} - font.charset = [g for g in font.charset if g in glyphs] - font.numGlyphs = len(font.charset) - - if s.options.retain_gids: - isCFF2 = cff.major > 1 - for g in s.glyphs_emptied: - _empty_charstring(font, g, isCFF2=isCFF2, ignoreWidth=True) - - return True # any(cff[fontname].numGlyphs for fontname in cff.keys()) - - -@_add_method(psCharStrings.T2CharString) -def subset_subroutines(self, subrs, gsubrs): - p = self.program - for i in range(1, len(p)): - if p[i] == "callsubr": - assert isinstance(p[i - 1], int) - p[i - 1] = subrs._used.index(p[i - 1] + subrs._old_bias) - subrs._new_bias - elif p[i] == "callgsubr": - assert isinstance(p[i - 1], int) - p[i - 1] = ( - gsubrs._used.index(p[i - 1] + gsubrs._old_bias) - gsubrs._new_bias - ) - - -@_add_method(psCharStrings.T2CharString) -def drop_hints(self): - hints = self._hints - - if hints.deletions: - p = self.program - for idx in reversed(hints.deletions): - del p[idx - 2 : idx] - - if hints.has_hint: - assert not hints.deletions or hints.last_hint <= hints.deletions[0] - self.program = self.program[hints.last_hint :] - if not self.program: - # TODO CFF2 no need for endchar. 
- self.program.append("endchar") - if hasattr(self, "width"): - # Insert width back if needed - if self.width != self.private.defaultWidthX: - # For CFF2 charstrings, this should never happen - assert ( - self.private.defaultWidthX is not None - ), "CFF2 CharStrings must not have an initial width value" - self.program.insert(0, self.width - self.private.nominalWidthX) - - if hints.has_hintmask: - i = 0 - p = self.program - while i < len(p): - if p[i] in ["hintmask", "cntrmask"]: - assert i + 1 <= len(p) - del p[i : i + 2] - continue - i += 1 - - assert len(self.program) - - del self._hints - - -class _MarkingT2Decompiler(psCharStrings.SimpleT2Decompiler): - def __init__(self, localSubrs, globalSubrs, private): - psCharStrings.SimpleT2Decompiler.__init__( - self, localSubrs, globalSubrs, private - ) - for subrs in [localSubrs, globalSubrs]: - if subrs and not hasattr(subrs, "_used"): - subrs._used = set() - - def op_callsubr(self, index): - self.localSubrs._used.add(self.operandStack[-1] + self.localBias) - psCharStrings.SimpleT2Decompiler.op_callsubr(self, index) - - def op_callgsubr(self, index): - self.globalSubrs._used.add(self.operandStack[-1] + self.globalBias) - psCharStrings.SimpleT2Decompiler.op_callgsubr(self, index) - - -class _DehintingT2Decompiler(psCharStrings.T2WidthExtractor): - class Hints(object): - def __init__(self): - # Whether calling this charstring produces any hint stems - # Note that if a charstring starts with hintmask, it will - # have has_hint set to True, because it *might* produce an - # implicit vstem if called under certain conditions. - self.has_hint = False - # Index to start at to drop all hints - self.last_hint = 0 - # Index up to which we know more hints are possible. - # Only relevant if status is 0 or 1. - self.last_checked = 0 - # The status means: - # 0: after dropping hints, this charstring is empty - # 1: after dropping hints, there may be more hints - # continuing after this, or there might be - # other things. Not clear yet. - # 2: no more hints possible after this charstring - self.status = 0 - # Has hintmask instructions; not recursive - self.has_hintmask = False - # List of indices of calls to empty subroutines to remove. - self.deletions = [] - - pass - - def __init__( - self, css, localSubrs, globalSubrs, nominalWidthX, defaultWidthX, private=None - ): - self._css = css - psCharStrings.T2WidthExtractor.__init__( - self, localSubrs, globalSubrs, nominalWidthX, defaultWidthX - ) - self.private = private - - def execute(self, charString): - old_hints = charString._hints if hasattr(charString, "_hints") else None - charString._hints = self.Hints() - - psCharStrings.T2WidthExtractor.execute(self, charString) - - hints = charString._hints - - if hints.has_hint or hints.has_hintmask: - self._css.add(charString) - - if hints.status != 2: - # Check from last_check, make sure we didn't have any operators. 
- for i in range(hints.last_checked, len(charString.program) - 1): - if isinstance(charString.program[i], str): - hints.status = 2 - break - else: - hints.status = 1 # There's *something* here - hints.last_checked = len(charString.program) - - if old_hints: - assert hints.__dict__ == old_hints.__dict__ - - def op_callsubr(self, index): - subr = self.localSubrs[self.operandStack[-1] + self.localBias] - psCharStrings.T2WidthExtractor.op_callsubr(self, index) - self.processSubr(index, subr) - - def op_callgsubr(self, index): - subr = self.globalSubrs[self.operandStack[-1] + self.globalBias] - psCharStrings.T2WidthExtractor.op_callgsubr(self, index) - self.processSubr(index, subr) - - def op_hstem(self, index): - psCharStrings.T2WidthExtractor.op_hstem(self, index) - self.processHint(index) - - def op_vstem(self, index): - psCharStrings.T2WidthExtractor.op_vstem(self, index) - self.processHint(index) - - def op_hstemhm(self, index): - psCharStrings.T2WidthExtractor.op_hstemhm(self, index) - self.processHint(index) - - def op_vstemhm(self, index): - psCharStrings.T2WidthExtractor.op_vstemhm(self, index) - self.processHint(index) - - def op_hintmask(self, index): - rv = psCharStrings.T2WidthExtractor.op_hintmask(self, index) - self.processHintmask(index) - return rv - - def op_cntrmask(self, index): - rv = psCharStrings.T2WidthExtractor.op_cntrmask(self, index) - self.processHintmask(index) - return rv - - def processHintmask(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hintmask = True - if hints.status != 2: - # Check from last_check, see if we may be an implicit vstem - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - else: - # We are an implicit vstem - hints.has_hint = True - hints.last_hint = index + 1 - hints.status = 0 - hints.last_checked = index + 1 - - def processHint(self, index): - cs = self.callingStack[-1] - hints = cs._hints - hints.has_hint = True - hints.last_hint = index - hints.last_checked = index - - def processSubr(self, index, subr): - cs = self.callingStack[-1] - hints = cs._hints - subr_hints = subr._hints - - # Check from last_check, make sure we didn't have - # any operators. 
- if hints.status != 2: - for i in range(hints.last_checked, index - 1): - if isinstance(cs.program[i], str): - hints.status = 2 - break - hints.last_checked = index - - if hints.status != 2: - if subr_hints.has_hint: - hints.has_hint = True - - # Decide where to chop off from - if subr_hints.status == 0: - hints.last_hint = index - else: - hints.last_hint = index - 2 # Leave the subr call in - - elif subr_hints.status == 0: - hints.deletions.append(index) - - hints.status = max(hints.status, subr_hints.status) - - -@_add_method(ttLib.getTableClass("CFF ")) -def prune_post_subset(self, ttfFont, options): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - - # Drop unused FontDictionaries - if hasattr(font, "FDSelect"): - sel = font.FDSelect - indices = _uniq_sort(sel.gidArray) - sel.gidArray = [indices.index(ss) for ss in sel.gidArray] - arr = font.FDArray - arr.items = [arr[i] for i in indices] - del arr.file, arr.offsets - - # Desubroutinize if asked for - if options.desubroutinize: - cff.desubroutinize() - - # Drop hints if not needed - if not options.hinting: - self.remove_hints() - elif not options.desubroutinize: - self.remove_unused_subroutines() - return True - - -def _delete_empty_subrs(private_dict): - if hasattr(private_dict, "Subrs") and not private_dict.Subrs: - if "Subrs" in private_dict.rawDict: - del private_dict.rawDict["Subrs"] - del private_dict.Subrs - - -@deprecateFunction( - "use 'CFFFontSet.desubroutinize()' instead", category=DeprecationWarning -) -@_add_method(ttLib.getTableClass("CFF ")) -def desubroutinize(self): - self.cff.desubroutinize() - - -@_add_method(ttLib.getTableClass("CFF ")) -def remove_hints(self): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - # This can be tricky, but doesn't have to. What we do is: - # - # - Run all used glyph charstrings and recurse into subroutines, - # - For each charstring (including subroutines), if it has any - # of the hint stem operators, we mark it as such. - # Upon returning, for each charstring we note all the - # subroutine calls it makes that (recursively) contain a stem, - # - Dropping hinting then consists of the following two ops: - # * Drop the piece of the program in each charstring before the - # last call to a stem op or a stem-calling subroutine, - # * Drop all hintmask operations. - # - It's trickier... A hintmask right after hints and a few numbers - # will act as an implicit vstemhm. As such, we track whether - # we have seen any non-hint operators so far and do the right - # thing, recursively... 
Good luck understanding that :( - css = set() - for g in font.charset: - c, _ = cs.getItemAndSelector(g) - c.decompile() - subrs = getattr(c.private, "Subrs", []) - decompiler = _DehintingT2Decompiler( - css, - subrs, - c.globalSubrs, - c.private.nominalWidthX, - c.private.defaultWidthX, - c.private, - ) - decompiler.execute(c) - c.width = decompiler.width - for charstring in css: - charstring.drop_hints() - del css - - # Drop font-wide hinting values - all_privs = [] - if hasattr(font, "FDArray"): - all_privs.extend(fd.Private for fd in font.FDArray) - else: - all_privs.append(font.Private) - for priv in all_privs: - for k in [ - "BlueValues", - "OtherBlues", - "FamilyBlues", - "FamilyOtherBlues", - "BlueScale", - "BlueShift", - "BlueFuzz", - "StemSnapH", - "StemSnapV", - "StdHW", - "StdVW", - "ForceBold", - "LanguageGroup", - "ExpansionFactor", - ]: - if hasattr(priv, k): - setattr(priv, k, None) - self.remove_unused_subroutines() - - -@_add_method(ttLib.getTableClass("CFF ")) -def remove_unused_subroutines(self): - cff = self.cff - for fontname in cff.keys(): - font = cff[fontname] - cs = font.CharStrings - # Renumber subroutines to remove unused ones - - # Mark all used subroutines - for g in font.charset: - c, _ = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - decompiler = _MarkingT2Decompiler(subrs, c.globalSubrs, c.private) - decompiler.execute(c) - - all_subrs = [font.GlobalSubrs] - if hasattr(font, "FDArray"): - all_subrs.extend( - fd.Private.Subrs - for fd in font.FDArray - if hasattr(fd.Private, "Subrs") and fd.Private.Subrs - ) - elif hasattr(font.Private, "Subrs") and font.Private.Subrs: - all_subrs.append(font.Private.Subrs) - - subrs = set(subrs) # Remove duplicates - - # Prepare - for subrs in all_subrs: - if not hasattr(subrs, "_used"): - subrs._used = set() - subrs._used = _uniq_sort(subrs._used) - subrs._old_bias = psCharStrings.calcSubrBias(subrs) - subrs._new_bias = psCharStrings.calcSubrBias(subrs._used) - - # Renumber glyph charstrings - for g in font.charset: - c, _ = cs.getItemAndSelector(g) - subrs = getattr(c.private, "Subrs", []) - c.subset_subroutines(subrs, font.GlobalSubrs) - - # Renumber subroutines themselves - for subrs in all_subrs: - if subrs == font.GlobalSubrs: - if not hasattr(font, "FDArray") and hasattr(font.Private, "Subrs"): - local_subrs = font.Private.Subrs - else: - local_subrs = [] - else: - local_subrs = subrs - - subrs.items = [subrs.items[i] for i in subrs._used] - if hasattr(subrs, "file"): - del subrs.file - if hasattr(subrs, "offsets"): - del subrs.offsets - - for subr in subrs.items: - subr.subset_subroutines(local_subrs, font.GlobalSubrs) - - # Delete local SubrsIndex if empty - if hasattr(font, "FDArray"): - for fd in font.FDArray: - _delete_empty_subrs(fd.Private) - else: - _delete_empty_subrs(font.Private) - - # Cleanup - for subrs in all_subrs: - del subrs._used, subrs._old_bias, subrs._new_bias diff --git a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-ab710fed.css b/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-ab710fed.css deleted file mode 100644 index 871a31e7497cf8bef56ead42f8051f89bbe7d759..0000000000000000000000000000000000000000 --- a/spaces/ky2k/Toxicity_Classifier_POC/.venv/lib/python3.9/site-packages/gradio/templates/cdn/assets/index-ab710fed.css +++ /dev/null @@ -1 +0,0 @@ 
-.output-class.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{display:flex;justify-content:center;align-items:center;padding:var(--size-6) var(--size-4);color:var(--body-text-color);font-weight:var(--weight-bold);font-size:var(--text-xxl)}.confidence-set.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{display:flex;justify-content:space-between;align-items:flex-start;margin-bottom:var(--size-2);color:var(--body-text-color);line-height:var(--line-none);font-family:var(--font-mono)}.confidence-set.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5:last-child{margin-bottom:0}.inner-wrap.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{flex:1 1 0%}.bar.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{margin-bottom:var(--size-1);border-radius:var(--radius-md);background:var(--stat-background-fill);height:var(--size-1)}.label.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{display:flex;align-items:baseline}.label.svelte-1mylvt5>.svelte-1mylvt5+.svelte-1mylvt5{margin-left:var(--size-2)}.confidence-set.svelte-1mylvt5:hover .label.svelte-1mylvt5.svelte-1mylvt5{color:var(--color-accent)}.text.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{line-height:var(--line-md)}.line.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{flex:1 1 0%;border:1px dashed var(--border-color-primary);padding-right:var(--size-4);padding-left:var(--size-4)}.confidence.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{margin-left:auto;text-align:right}.selectable.svelte-1mylvt5.svelte-1mylvt5.svelte-1mylvt5{cursor:pointer} diff --git a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/train.py b/spaces/leafShen/CodeFormer/CodeFormer/basicsr/train.py deleted file mode 100644 index a01c0dfccdb8b02283100ec5b792c33afaf22f5e..0000000000000000000000000000000000000000 --- a/spaces/leafShen/CodeFormer/CodeFormer/basicsr/train.py +++ /dev/null @@ -1,225 +0,0 @@ -import argparse -import datetime -import logging -import math -import copy -import random -import time -import torch -from os import path as osp - -from basicsr.data import build_dataloader, build_dataset -from basicsr.data.data_sampler import EnlargedSampler -from basicsr.data.prefetch_dataloader import CPUPrefetcher, CUDAPrefetcher -from basicsr.models import build_model -from basicsr.utils import (MessageLogger, check_resume, get_env_info, get_root_logger, init_tb_logger, - init_wandb_logger, make_exp_dirs, mkdir_and_rename, set_random_seed) -from basicsr.utils.dist_util import get_dist_info, init_dist -from basicsr.utils.options import dict2str, parse - -import warnings -# ignore UserWarning: Detected call of `lr_scheduler.step()` before `optimizer.step()`. 
-warnings.filterwarnings("ignore", category=UserWarning) - -def parse_options(root_path, is_train=True): - parser = argparse.ArgumentParser() - parser.add_argument('-opt', type=str, required=True, help='Path to option YAML file.') - parser.add_argument('--launcher', choices=['none', 'pytorch', 'slurm'], default='none', help='job launcher') - parser.add_argument('--local_rank', type=int, default=0) - args = parser.parse_args() - opt = parse(args.opt, root_path, is_train=is_train) - - # distributed settings - if args.launcher == 'none': - opt['dist'] = False - print('Disable distributed.', flush=True) - else: - opt['dist'] = True - if args.launcher == 'slurm' and 'dist_params' in opt: - init_dist(args.launcher, **opt['dist_params']) - else: - init_dist(args.launcher) - - opt['rank'], opt['world_size'] = get_dist_info() - - # random seed - seed = opt.get('manual_seed') - if seed is None: - seed = random.randint(1, 10000) - opt['manual_seed'] = seed - set_random_seed(seed + opt['rank']) - - return opt - - -def init_loggers(opt): - log_file = osp.join(opt['path']['log'], f"train_{opt['name']}.log") - logger = get_root_logger(logger_name='basicsr', log_level=logging.INFO, log_file=log_file) - logger.info(get_env_info()) - logger.info(dict2str(opt)) - - # initialize wandb logger before tensorboard logger to allow proper sync: - if (opt['logger'].get('wandb') is not None) and (opt['logger']['wandb'].get('project') is not None): - assert opt['logger'].get('use_tb_logger') is True, ('should turn on tensorboard when using wandb') - init_wandb_logger(opt) - tb_logger = None - if opt['logger'].get('use_tb_logger'): - tb_logger = init_tb_logger(log_dir=osp.join('tb_logger', opt['name'])) - return logger, tb_logger - - -def create_train_val_dataloader(opt, logger): - # create train and val dataloaders - train_loader, val_loader = None, None - for phase, dataset_opt in opt['datasets'].items(): - if phase == 'train': - dataset_enlarge_ratio = dataset_opt.get('dataset_enlarge_ratio', 1) - train_set = build_dataset(dataset_opt) - train_sampler = EnlargedSampler(train_set, opt['world_size'], opt['rank'], dataset_enlarge_ratio) - train_loader = build_dataloader( - train_set, - dataset_opt, - num_gpu=opt['num_gpu'], - dist=opt['dist'], - sampler=train_sampler, - seed=opt['manual_seed']) - - num_iter_per_epoch = math.ceil( - len(train_set) * dataset_enlarge_ratio / (dataset_opt['batch_size_per_gpu'] * opt['world_size'])) - total_iters = int(opt['train']['total_iter']) - total_epochs = math.ceil(total_iters / (num_iter_per_epoch)) - logger.info('Training statistics:' - f'\n\tNumber of train images: {len(train_set)}' - f'\n\tDataset enlarge ratio: {dataset_enlarge_ratio}' - f'\n\tBatch size per gpu: {dataset_opt["batch_size_per_gpu"]}' - f'\n\tWorld size (gpu number): {opt["world_size"]}' - f'\n\tRequire iter number per epoch: {num_iter_per_epoch}' - f'\n\tTotal epochs: {total_epochs}; iters: {total_iters}.') - - elif phase == 'val': - val_set = build_dataset(dataset_opt) - val_loader = build_dataloader( - val_set, dataset_opt, num_gpu=opt['num_gpu'], dist=opt['dist'], sampler=None, seed=opt['manual_seed']) - logger.info(f'Number of val images/folders in {dataset_opt["name"]}: ' f'{len(val_set)}') - else: - raise ValueError(f'Dataset phase {phase} is not recognized.') - - return train_loader, train_sampler, val_loader, total_epochs, total_iters - - -def train_pipeline(root_path): - # parse options, set distributed setting, set ramdom seed - opt = parse_options(root_path, is_train=True) - - 
torch.backends.cudnn.benchmark = True - # torch.backends.cudnn.deterministic = True - - # load resume states if necessary - if opt['path'].get('resume_state'): - device_id = torch.cuda.current_device() - resume_state = torch.load( - opt['path']['resume_state'], map_location=lambda storage, loc: storage.cuda(device_id)) - else: - resume_state = None - - # mkdir for experiments and logger - if resume_state is None: - make_exp_dirs(opt) - if opt['logger'].get('use_tb_logger') and opt['rank'] == 0: - mkdir_and_rename(osp.join('tb_logger', opt['name'])) - - # initialize loggers - logger, tb_logger = init_loggers(opt) - - # create train and validation dataloaders - result = create_train_val_dataloader(opt, logger) - train_loader, train_sampler, val_loader, total_epochs, total_iters = result - - # create model - if resume_state: # resume training - check_resume(opt, resume_state['iter']) - model = build_model(opt) - model.resume_training(resume_state) # handle optimizers and schedulers - logger.info(f"Resuming training from epoch: {resume_state['epoch']}, " f"iter: {resume_state['iter']}.") - start_epoch = resume_state['epoch'] - current_iter = resume_state['iter'] - else: - model = build_model(opt) - start_epoch = 0 - current_iter = 0 - - # create message logger (formatted outputs) - msg_logger = MessageLogger(opt, current_iter, tb_logger) - - # dataloader prefetcher - prefetch_mode = opt['datasets']['train'].get('prefetch_mode') - if prefetch_mode is None or prefetch_mode == 'cpu': - prefetcher = CPUPrefetcher(train_loader) - elif prefetch_mode == 'cuda': - prefetcher = CUDAPrefetcher(train_loader, opt) - logger.info(f'Use {prefetch_mode} prefetch dataloader') - if opt['datasets']['train'].get('pin_memory') is not True: - raise ValueError('Please set pin_memory=True for CUDAPrefetcher.') - else: - raise ValueError(f'Wrong prefetch_mode {prefetch_mode}.' 
"Supported ones are: None, 'cuda', 'cpu'.") - - # training - logger.info(f'Start training from epoch: {start_epoch}, iter: {current_iter+1}') - data_time, iter_time = time.time(), time.time() - start_time = time.time() - - for epoch in range(start_epoch, total_epochs + 1): - train_sampler.set_epoch(epoch) - prefetcher.reset() - train_data = prefetcher.next() - - while train_data is not None: - data_time = time.time() - data_time - - current_iter += 1 - if current_iter > total_iters: - break - # update learning rate - model.update_learning_rate(current_iter, warmup_iter=opt['train'].get('warmup_iter', -1)) - # training - model.feed_data(train_data) - model.optimize_parameters(current_iter) - iter_time = time.time() - iter_time - # log - if current_iter % opt['logger']['print_freq'] == 0: - log_vars = {'epoch': epoch, 'iter': current_iter} - log_vars.update({'lrs': model.get_current_learning_rate()}) - log_vars.update({'time': iter_time, 'data_time': data_time}) - log_vars.update(model.get_current_log()) - msg_logger(log_vars) - - # save models and training states - if current_iter % opt['logger']['save_checkpoint_freq'] == 0: - logger.info('Saving models and training states.') - model.save(epoch, current_iter) - - # validation - if opt.get('val') is not None and opt['datasets'].get('val') is not None \ - and (current_iter % opt['val']['val_freq'] == 0): - model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img']) - - data_time = time.time() - iter_time = time.time() - train_data = prefetcher.next() - # end of iter - - # end of epoch - - consumed_time = str(datetime.timedelta(seconds=int(time.time() - start_time))) - logger.info(f'End of training. Time consumed: {consumed_time}') - logger.info('Save the latest model.') - model.save(epoch=-1, current_iter=-1) # -1 stands for the latest - if opt.get('val') is not None and opt['datasets'].get('val'): - model.validation(val_loader, current_iter, tb_logger, opt['val']['save_img']) - if tb_logger: - tb_logger.close() - - -if __name__ == '__main__': - root_path = osp.abspath(osp.join(__file__, osp.pardir, osp.pardir)) - train_pipeline(root_path) diff --git a/spaces/librarian-bots/tutorials/README.md b/spaces/librarian-bots/tutorials/README.md deleted file mode 100644 index 43e87e975bf5a3fc0191cb29ee01cf8da3e75618..0000000000000000000000000000000000000000 --- a/spaces/librarian-bots/tutorials/README.md +++ /dev/null @@ -1,10 +0,0 @@ ---- -title: Tutorials -emoji: 🐢 -colorFrom: blue -colorTo: yellow -sdk: static -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/limcheekin/WizardCoder-Python-13B-V1.0-GGUF/Dockerfile b/spaces/limcheekin/WizardCoder-Python-13B-V1.0-GGUF/Dockerfile deleted file mode 100644 index ecbca32cee0dd28d29c3febc5b56fc6c64db8b53..0000000000000000000000000000000000000000 --- a/spaces/limcheekin/WizardCoder-Python-13B-V1.0-GGUF/Dockerfile +++ /dev/null @@ -1,35 +0,0 @@ -# Grab a fresh copy of the Python image -FROM python:3.10-slim - -# Install build and runtime dependencies -RUN apt-get update && \ - apt-get install -y \ - libopenblas-dev \ - ninja-build \ - build-essential \ - pkg-config \ - curl - -RUN pip install -U pip setuptools wheel && \ - CMAKE_ARGS="-DLLAMA_BLAS=ON -DLLAMA_BLAS_VENDOR=OpenBLAS" FORCE_CMAKE=1 pip install --verbose llama-cpp-python[server] - -# Download model -RUN mkdir model && \ - curl -L 
https://huggingface.co/TheBloke/WizardCoder-Python-13B-V1.0-GGUF/resolve/main/wizardcoder-python-13b-v1.0.Q5_K_M.gguf -o model/gguf-model.bin - -COPY ./start_server.sh ./ -COPY ./main.py ./ -COPY ./index.html ./ - -# Make the server start script executable -RUN chmod +x ./start_server.sh - -# Set environment variable for the host -ENV HOST=0.0.0.0 -ENV PORT=7860 - -# Expose a port for the server -EXPOSE ${PORT} - -# Run the server start script -CMD ["/bin/sh", "./start_server.sh"] \ No newline at end of file diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Assassins.Creed.IV.Black.Flag.Crack.Only.V8.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Assassins.Creed.IV.Black.Flag.Crack.Only.V8.md deleted file mode 100644 index 688b181d7d0f0b587c3be40db8214613e720693c..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Assassins.Creed.IV.Black.Flag.Crack.Only.V8.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Assassin's.Creed.IV.Black.Flag.Crack.Only.V8


          Download »»» https://bytlly.com/2uGwnt



          -
          - d5da3c52bf
          -
          -
          -

          diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Blq Socotec 2012 [UPDATED].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Blq Socotec 2012 [UPDATED].md deleted file mode 100644 index 61085742d92031911d84aae0896246a706eabe34..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Blq Socotec 2012 [UPDATED].md +++ /dev/null @@ -1,6 +0,0 @@ -

          blq socotec 2012


          Download ……… https://bytlly.com/2uGwLo



          - -Fіlе: blq socotec 2012. Downloads: 5232. Nick: storsikca. Sрeеd: 17 Mb/s. Lаtеst Rеlеаsе: 22.09.2012. Amount: 54.69 MB Сompасtiоn: ехе. 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Efilm 3.4 Keygen.md b/spaces/lincquiQcaudo/Top-20-Diffusion/Efilm 3.4 Keygen.md deleted file mode 100644 index 08807b196e4eb64754ec83f7207dc312733ccc34..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Efilm 3.4 Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Efilm 3.4 Keygen


          Download Ziphttps://bytlly.com/2uGyuG



          -
          - 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/lincquiQcaudo/Top-20-Diffusion/Lpeconnectfixzip [BETTER].md b/spaces/lincquiQcaudo/Top-20-Diffusion/Lpeconnectfixzip [BETTER].md deleted file mode 100644 index f51c30da87d5b75e327085e7a02be8eab9288b5a..0000000000000000000000000000000000000000 --- a/spaces/lincquiQcaudo/Top-20-Diffusion/Lpeconnectfixzip [BETTER].md +++ /dev/null @@ -1,6 +0,0 @@ -

          lpeconnectfixzip


          Download Zip ––– https://bytlly.com/2uGvDJ



          -
          -Yeh Dil Aashiqanaa 2002 Hindi 720p DvDRip x264 Full Movie Download. 40f0e43ec1. netsupport school 12 keygen 12 · lpe connect fix.zip 4d29de3e1b
          -
          -
          -

          diff --git a/spaces/lithiumice/SadTalker/run.sh b/spaces/lithiumice/SadTalker/run.sh deleted file mode 100644 index f2738305aaf8ec6f28bd9445a8a4c1879ea6a6b2..0000000000000000000000000000000000000000 --- a/spaces/lithiumice/SadTalker/run.sh +++ /dev/null @@ -1 +0,0 @@ -python app.py \ No newline at end of file diff --git a/spaces/ljjggr/bingo/src/pages/api/sydney.ts b/spaces/ljjggr/bingo/src/pages/api/sydney.ts deleted file mode 100644 index 0e7bbf23d77c2e1a6635185a060eeee58b8c8e66..0000000000000000000000000000000000000000 --- a/spaces/ljjggr/bingo/src/pages/api/sydney.ts +++ /dev/null @@ -1,62 +0,0 @@ -import { NextApiRequest, NextApiResponse } from 'next' -import { WebSocket, debug } from '@/lib/isomorphic' -import { BingWebBot } from '@/lib/bots/bing' -import { websocketUtils } from '@/lib/bots/bing/utils' -import { WatchDog, createHeaders } from '@/lib/utils' - - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - const conversationContext = req.body - const headers = createHeaders(req.cookies) - debug(headers) - res.setHeader('Content-Type', 'text/stream; charset=UTF-8') - - const ws = new WebSocket('wss://sydney.bing.com/sydney/ChatHub', { - headers: { - ...headers, - 'accept-language': 'zh-CN,zh;q=0.9', - 'cache-control': 'no-cache', - 'x-ms-useragent': 'azsdk-js-api-client-factory/1.0.0-beta.1 core-rest-pipeline/1.10.0 OS/Win32', - pragma: 'no-cache', - } - }) - - const closeDog = new WatchDog() - const timeoutDog = new WatchDog() - ws.onmessage = (event) => { - timeoutDog.watch(() => { - ws.send(websocketUtils.packMessage({ type: 6 })) - }, 1500) - closeDog.watch(() => { - ws.close() - }, 10000) - res.write(event.data) - if (/\{"type":([367])\}/.test(String(event.data))) { - const type = parseInt(RegExp.$1, 10) - debug('connection type', type) - if (type === 3) { - ws.close() - } else { - ws.send(websocketUtils.packMessage({ type })) - } - } - } - - ws.onclose = () => { - timeoutDog.reset() - closeDog.reset() - debug('connection close') - res.end() - } - - await new Promise((resolve) => ws.onopen = resolve) - ws.send(websocketUtils.packMessage({ protocol: 'json', version: 1 })) - ws.send(websocketUtils.packMessage({ type: 6 })) - ws.send(websocketUtils.packMessage(BingWebBot.buildChatRequest(conversationContext!))) - req.socket.once('close', () => { - ws.close() - if (!res.closed) { - res.end() - } - }) -} diff --git a/spaces/lunarflu/HF-QA-Demo-3/data/hugging_face_docs_dataset.py b/spaces/lunarflu/HF-QA-Demo-3/data/hugging_face_docs_dataset.py deleted file mode 100644 index 27f80fc7a2dd72b551f63d382ada8ff218f20273..0000000000000000000000000000000000000000 --- a/spaces/lunarflu/HF-QA-Demo-3/data/hugging_face_docs_dataset.py +++ /dev/null @@ -1,190 +0,0 @@ -import glob -import json -import os -import re -import subprocess -from typing import List - -import requests -import pandas as pd -from bs4 import BeautifulSoup -from markdown import markdown -import nbformat -from nbconvert import MarkdownExporter -from nbconvert.preprocessors import Preprocessor, ClearOutputPreprocessor -from tqdm import tqdm - - -VALIDATE_URLS = False - - -def download_repositories(repo_urls_file: str, repo_dir: str): - """ - Downloads the Hugging Face repositories. 
- """ - if not os.path.exists(repo_dir): - os.makedirs(repo_dir) - with open(repo_urls_file, "r") as f: - repositories_urls = json.load(f)["urls"] - print(f'Downloading {len(repositories_urls)} repositories') - for url in repositories_urls: - try: - subprocess.run(["git", "clone", url], cwd=repo_dir) - except subprocess.CalledProcessError as e: - print("Command failed with error:", e.stderr) - - -class EmptyCellPreprocessor(Preprocessor): - def preprocess_cell(self, cell, resources, index): - if cell.source.strip() == '': - cell.source = '' - cell.cell_type = 'raw' - return cell, resources - - -def convert_notebook_to_txt(filename: str): - """ - Converts a notebook to a markdown file. - """ - with open(filename) as f: - notebook = nbformat.read(f, as_version=4) - # id validation error fix - for cell in notebook['cells']: - cell['id'] = str(cell['id']) - - clear_output = ClearOutputPreprocessor() - notebook, resources = clear_output.preprocess(notebook, {}) - - exporter = MarkdownExporter() - exporter.register_preprocessor(EmptyCellPreprocessor, enabled=True) - output_notebook_text, resources = exporter.from_notebook_node(notebook) - - new_filename = filename.replace('.ipynb', '_ipynb.md') - with open(new_filename, 'w') as f: - f.write(output_notebook_text) - return new_filename - - -def extract_files_from_directories( - repo_urls_file: str, - repo_dir: str, - docs_dir: str, - files_extensions: List[str] -) -> None: - - """ - This function reads markdown and markdownx files from the repositories directory, - filters out non-English files, and adds the source GitHub URL as the first line of each file. - The resulting files are saved in the docs_dir. - """ - languages = pd.read_csv("language-codes.csv").loc[:,"alpha2"].tolist() - languages.remove("en") - - files = [ - filename - for extension in files_extensions - for filename in glob.glob(repo_dir + f"**/*{extension}", recursive=True) - ] - print(f'Used extensions: {", ".join(files_extensions)}') - print(f'Found {len(files)} files') - - repo_urls = [] - with open(repo_urls_file, "r") as f: - repo_urls = json.load(f)["urls"] - - # filter out the files that are not in english - filtered_files = [] - for filename in files: - sep_file = filename.split("/") - for seq in sep_file: - if seq in languages: - break - else: - filtered_files.append(filename) - print(f'Found {len(filtered_files)} files in English') - - # generate a GitHub URL for a file based on its name and a list of possible repository URLs - def get_github_url(filename: str, repo_urls: str, repo_dir: str) -> str: - source = filename.replace(repo_dir, '') - repo_name, file_path = source.split('/', 1) - repo_url_prefix = None - for repo_url in repo_urls: - if repo_name == repo_url.split('/')[-1]: - repo_url_prefix = repo_url - break - if not repo_url_prefix: - raise ValueError(f"Repo URL not found for {repo_name}") - url = f'{repo_url_prefix}/blob/main/{file_path}' - if VALIDATE_URLS: - try: - response = requests.get(url) - response.raise_for_status() - except: - print(f'filename: {filename}') - print(f'repo: {repo_name}, file: {file_path}') - print(f'url: {url}') - raise - return url - - # creates a valid filename by replacing certain characters and removing the repo_dir path - def create_filename_from_path(filename: str, repo_dir: str) -> str: - filename = filename.replace(repo_dir, '') - chars_to_replace = ['/', '{', '}', '-', '.'] - filename = ''.join(['_' if c in chars_to_replace else c for c in filename]) - return filename - - # copy the files with the source added in the first 
line - if not os.path.exists(docs_dir): - os.makedirs(docs_dir) - copied_files = [] - for filename in tqdm(filtered_files): - source_url = get_github_url(filename, repo_urls, repo_dir) - data = f"source: {source_url}\n\n" - # convert jupyter notebooks to txt files - try: - if filename.endswith('.ipynb'): - filename = convert_notebook_to_txt(filename) - # rename and copy files - with open(filename, 'r') as f: - data += f.read() - output_filename = docs_dir + create_filename_from_path(filename, repo_dir) - with open(output_filename, 'w') as f: - f.write(data) - if not os.path.isfile(output_filename): - raise ValueError(f"Failed to create the output file: {output_filename}") - copied_files.append(output_filename) - except Exception as ex: - print(f'Failed to copy file {filename}: {ex}') - - print(f'Successfully copied {len(set(copied_files))}/{len(filtered_files)} files') - - -def markdown_cleaner(data: str): - """ - Clean markdown text. - - Args: - data (str): The markdown text to be cleaned. - - Returns: - str: The cleaned markdown text. - """ - soupped = BeautifulSoup(markdown(data), "html.parser") - raw_text = ''.join(soupped.findAll(string=True)) - clean_text = re.sub(r"", "", raw_text, flags=re.DOTALL) - # remove any special tokens e.g <|endoftext|> - clean_text = re.sub(r"<\|endoftext\|>", "", clean_text, flags=re.DOTALL) - # discard non english text - clean_text = re.sub(r"[^a-zA-Z0-9\s]", "", clean_text, flags=re.DOTALL) - return "\n".join([t for t in clean_text.split("\n") if t]) - - -if __name__ == '__main__': - repo_urls_file = "./datasets/hf_repositories_urls.json" - repo_dir = "./datasets/huggingface_repositories/" - docs_dir = "./datasets/huggingface_docs/" - download_repositories(repo_urls_file, repo_dir) - extract_files_from_directories( - repo_urls_file, repo_dir, docs_dir, - files_extensions=['.md', '.mdx', '.ipynb'] - ) diff --git a/spaces/luxuedong/lxd/src/pages/api/proxy.ts b/spaces/luxuedong/lxd/src/pages/api/proxy.ts deleted file mode 100644 index 240b5fb5561d993c6381649bf4544ce12f3cdab2..0000000000000000000000000000000000000000 --- a/spaces/luxuedong/lxd/src/pages/api/proxy.ts +++ /dev/null @@ -1,24 +0,0 @@ -'use server' - -import { NextApiRequest, NextApiResponse } from 'next' -import { fetch } from '@/lib/isomorphic' - -export default async function handler(req: NextApiRequest, res: NextApiResponse) { - try { - const { url, headers, method = 'GET', body } = req.body - if (!url) { - return res.end('ok') - } - const response = await fetch(url, { headers, method, body, redirect: 'manual' }) - const text = await response.text() - res.writeHead(200, { - 'Content-Type': 'application/text', - 'x-url': response.url, - 'x-status': response.status, - }) - res.end(text) - } catch (e) { - console.log(e) - return res.end(e) - } -} diff --git a/spaces/ma-xu/LIVE/thrust/thrust/scan.h b/spaces/ma-xu/LIVE/thrust/thrust/scan.h deleted file mode 100644 index 5b79af04895ddab6df64b3080f713ac43e60173b..0000000000000000000000000000000000000000 --- a/spaces/ma-xu/LIVE/thrust/thrust/scan.h +++ /dev/null @@ -1,1564 +0,0 @@ -/* - * Copyright 2008-2013 NVIDIA Corporation - * - * Licensed under the Apache License, Version 2.0 (the "License"); - * you may not use this file except in compliance with the License. 
- * You may obtain a copy of the License at - * - * http://www.apache.org/licenses/LICENSE-2.0 - * - * Unless required by applicable law or agreed to in writing, software - * distributed under the License is distributed on an "AS IS" BASIS, - * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - * See the License for the specific language governing permissions and - * limitations under the License. - */ - - -/*! \file scan.h - * \brief Functions for computing prefix sums - */ - -#pragma once - -#include <thrust/detail/config.h> -#include <thrust/detail/execution_policy.h> - -namespace thrust -{ - - -/*! \addtogroup algorithms - */ - - -/*! \addtogroup prefixsums Prefix Sums - * \ingroup algorithms - * \{ - */ - - -/*! \p inclusive_scan computes an inclusive prefix sum operation. The - * term 'inclusive' means that each result includes the corresponding - * input operand in the partial sum. More precisely, *first is - * assigned to *result and the sum of *first and - * *(first + 1) is assigned to *(result + 1), and so on. - * This version of \p inclusive_scan assumes plus as the associative operator. - * When the input and output sequences are the same, the scan is performed - * in-place. - - * \p inclusive_scan is similar to \c std::partial_sum in the STL. The primary - * difference between the two functions is that \c std::partial_sum guarantees - * a serial summation order, while \p inclusive_scan requires associativity of - * the binary operation to parallelize the prefix sum. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \return The end of the output sequence. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's - * \c value_type, then x + y is defined. If \c T is - * \c OutputIterator's \c value_type, then T(0) is - * defined. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p inclusive_scan to compute an in-place - * prefix sum using the \p thrust::host execution policy for parallelization: - * - * \code - * #include <thrust/scan.h> - * #include <thrust/execution_policy.h> - * ... - * - * int data[6] = {1, 0, 2, 2, 1, 3}; - * - * thrust::inclusive_scan(thrust::host, data, data + 6, data); // in-place scan - * - * // data is now {1, 1, 3, 5, 6, 9} - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - * - */ -template<typename DerivedPolicy, typename InputIterator, typename OutputIterator> -__host__ __device__ - OutputIterator inclusive_scan(const thrust::detail::execution_policy_base<DerivedPolicy> &exec, - InputIterator first, - InputIterator last, - OutputIterator result); - - -/*! \p inclusive_scan computes an inclusive prefix sum operation. The - * term 'inclusive' means that each result includes the corresponding - * input operand in the partial sum. More precisely, *first is - * assigned to *result and the sum of *first and - * *(first + 1) is assigned to *(result + 1), and so on. - * This version of \p inclusive_scan assumes plus as the associative operator.
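A minimal Python sketch of the inclusive-scan semantics documented above (illustrative only; inclusive_scan here is a local helper built on itertools.accumulate, not the Thrust API), reusing the sample data from the preceding snippet:

from itertools import accumulate

def inclusive_scan(seq, op=lambda a, b: a + b):
    # every output element includes its own input element in the partial sum
    return list(accumulate(seq, op))

data = [1, 0, 2, 2, 1, 3]
assert inclusive_scan(data) == [1, 1, 3, 5, 6, 9]  # same result as the Doxygen example
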
- * When the input and output sequences are the same, the scan is performed - * in-place. - - * \p inclusive_scan is similar to \c std::partial_sum in the STL. The primary - * difference between the two functions is that \c std::partial_sum guarantees - * a serial summation order, while \p inclusive_scan requires associativity of - * the binary operation to parallelize the prefix sum. - * - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \return The end of the output sequence. - * - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's - * \c value_type, then x + y is defined. If \c T is - * \c OutputIterator's \c value_type, then T(0) is - * defined. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p inclusive_scan - * - * \code - * #include - * - * int data[6] = {1, 0, 2, 2, 1, 3}; - * - * thrust::inclusive_scan(data, data + 6, data); // in-place scan - * - * // data is now {1, 1, 3, 5, 6, 9} - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - * - */ -template - OutputIterator inclusive_scan(InputIterator first, - InputIterator last, - OutputIterator result); - - -/*! \p inclusive_scan computes an inclusive prefix sum operation. The - * term 'inclusive' means that each result includes the corresponding - * input operand in the partial sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * \p inclusive_scan is similar to \c std::partial_sum in the STL. The primary - * difference between the two functions is that \c std::partial_sum guarantees - * a serial summation order, while \p inclusive_scan requires associativity of - * the binary operation to parallelize the prefix sum. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \param binary_op The associatve operator used to 'sum' values. - * \return The end of the output sequence. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator - * and \c OutputIterator's \c value_type is convertible to - * both \c AssociativeOperator's \c first_argument_type and - * \c second_argument_type. - * \tparam AssociativeOperator is a model of Binary Function - * and \c AssociativeOperator's \c result_type is - * convertible to \c OutputIterator's \c value_type. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. 
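A minimal Python sketch of the binary_op overload's behaviour (illustrative helper, not the Thrust API), using max as the associative operator and the same data as the example that follows:

from itertools import accumulate

def inclusive_scan_with(seq, op):
    # the supplied associative operator replaces plus
    return list(accumulate(seq, op))

data = [-5, 0, 2, -3, 2, 4, 0, -1, 2, 8]
assert inclusive_scan_with(data, max) == [-5, 0, 2, 2, 2, 4, 4, 4, 4, 8]
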
- * - * The following code snippet demonstrates how to use \p inclusive_scan to compute an in-place - * prefix sum using the \p thrust::host execution policy for parallelization: - * - * \code - * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; - * - * thrust::maximum binary_op; - * - * thrust::inclusive_scan(thrust::host, data, data + 10, data, binary_op); // in-place scan - * - * // data is now {-5, 0, 2, 2, 2, 4, 4, 4, 4, 8} - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - */ -template -__host__ __device__ - OutputIterator inclusive_scan(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - AssociativeOperator binary_op); - - -/*! \p inclusive_scan computes an inclusive prefix sum operation. The - * term 'inclusive' means that each result includes the corresponding - * input operand in the partial sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * \p inclusive_scan is similar to \c std::partial_sum in the STL. The primary - * difference between the two functions is that \c std::partial_sum guarantees - * a serial summation order, while \p inclusive_scan requires associativity of - * the binary operation to parallelize the prefix sum. - * - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \param binary_op The associatve operator used to 'sum' values. - * \return The end of the output sequence. - * - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator - * and \c OutputIterator's \c value_type is convertible to - * both \c AssociativeOperator's \c first_argument_type and - * \c second_argument_type. - * \tparam AssociativeOperator is a model of Binary Function - * and \c AssociativeOperator's \c result_type is - * convertible to \c OutputIterator's \c value_type. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p inclusive_scan - * - * \code - * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; - * - * thrust::maximum binary_op; - * - * thrust::inclusive_scan(data, data + 10, data, binary_op); // in-place scan - * - * // data is now {-5, 0, 2, 2, 2, 4, 4, 4, 4, 8} - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - */ -template - OutputIterator inclusive_scan(InputIterator first, - InputIterator last, - OutputIterator result, - AssociativeOperator binary_op); - - -/*! \p exclusive_scan computes an exclusive prefix sum operation. The - * term 'exclusive' means that each result does not include the - * corresponding input operand in the partial sum. More precisely, - * 0 is assigned to *result and the sum of - * 0 and *first is assigned to *(result + 1), - * and so on. This version of \p exclusive_scan assumes plus as the - * associative operator and \c 0 as the initial value. When the input and - * output sequences are the same, the scan is performed in-place. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. 
- * \param result The beginning of the output sequence. - * \return The end of the output sequence. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's - * \c value_type, then x + y is defined. If \c T is - * \c OutputIterator's \c value_type, then T(0) is - * defined. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan to compute an in-place - * prefix sum using the \p thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * ... - * - * int data[6] = {1, 0, 2, 2, 1, 3}; - * - * thrust::exclusive_scan(thrust::host, data, data + 6, data); // in-place scan - * - * // data is now {0, 1, 1, 3, 5, 6} - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - */ -template -__host__ __device__ - OutputIterator exclusive_scan(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result); - - -/*! \p exclusive_scan computes an exclusive prefix sum operation. The - * term 'exclusive' means that each result does not include the - * corresponding input operand in the partial sum. More precisely, - * 0 is assigned to *result and the sum of - * 0 and *first is assigned to *(result + 1), - * and so on. This version of \p exclusive_scan assumes plus as the - * associative operator and \c 0 as the initial value. When the input and - * output sequences are the same, the scan is performed in-place. - * - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \return The end of the output sequence. - * - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's - * \c value_type, then x + y is defined. If \c T is - * \c OutputIterator's \c value_type, then T(0) is - * defined. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan - * - * \code - * #include - * - * int data[6] = {1, 0, 2, 2, 1, 3}; - * - * thrust::exclusive_scan(data, data + 6, data); // in-place scan - * - * // data is now {0, 1, 1, 3, 5, 6} - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - */ -template - OutputIterator exclusive_scan(InputIterator first, - InputIterator last, - OutputIterator result); - - -/*! \p exclusive_scan computes an exclusive prefix sum operation. The - * term 'exclusive' means that each result does not include the - * corresponding input operand in the partial sum. More precisely, - * \p init is assigned to *result and the sum of \p init and - * *first is assigned to *(result + 1), and so on. - * This version of \p exclusive_scan assumes plus as the associative - * operator but requires an initial value \p init. 
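A minimal Python sketch of the exclusive-scan semantics (illustrative helper, not the Thrust API): the output starts at the initial value, each element is excluded from its own position, and a custom associative operator may replace plus. The expected values match the documentation's examples above and below.

from itertools import accumulate
from operator import add

def exclusive_scan(seq, init=0, op=add):
    # seed with init, then drop the final (total) value so each output
    # position excludes its own input element
    return list(accumulate(seq, op, initial=init))[:-1]

data = [1, 0, 2, 2, 1, 3]
assert exclusive_scan(data) == [0, 1, 1, 3, 5, 6]           # default initial value of 0
assert exclusive_scan(data, init=4) == [4, 5, 5, 7, 9, 10]  # explicit init, as in the example below

vals = [-5, 0, 2, -3, 2, 4, 0, -1, 2, 8]
assert exclusive_scan(vals, init=1, op=max) == [1, 1, 1, 2, 2, 2, 4, 4, 4, 4]  # init plus a custom operator
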
When the input and - * output sequences are the same, the scan is performed in-place. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \param init The initial value. - * \return The end of the output sequence. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's - * \c value_type, then x + y is defined. - * \tparam T is convertible to \c OutputIterator's \c value_type. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan to compute an in-place - * prefix sum using the \p thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * - * int data[6] = {1, 0, 2, 2, 1, 3}; - * - * thrust::exclusive_scan(thrust::host, data, data + 6, data, 4); // in-place scan - * - * // data is now {4, 5, 5, 7, 9, 10} - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - */ -template -__host__ __device__ - OutputIterator exclusive_scan(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - T init); - - -/*! \p exclusive_scan computes an exclusive prefix sum operation. The - * term 'exclusive' means that each result does not include the - * corresponding input operand in the partial sum. More precisely, - * \p init is assigned to *result and the sum of \p init and - * *first is assigned to *(result + 1), and so on. - * This version of \p exclusive_scan assumes plus as the associative - * operator but requires an initial value \p init. When the input and - * output sequences are the same, the scan is performed in-place. - * - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \param init The initial value. - * \return The end of the output sequence. - * - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's - * \c value_type, then x + y is defined. - * \tparam T is convertible to \c OutputIterator's \c value_type. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan - * - * \code - * #include - * - * int data[6] = {1, 0, 2, 2, 1, 3}; - * - * thrust::exclusive_scan(data, data + 6, data, 4); // in-place scan - * - * // data is now {4, 5, 5, 7, 9, 10} - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - */ -template - OutputIterator exclusive_scan(InputIterator first, - InputIterator last, - OutputIterator result, - T init); - - -/*! 
\p exclusive_scan computes an exclusive prefix sum operation. The - * term 'exclusive' means that each result does not include the - * corresponding input operand in the partial sum. More precisely, - * \p init is assigned to \*result and the value - * binary_op(init, \*first) is assigned to \*(result + 1), - * and so on. This version of the function requires both an associative - * operator and an initial value \p init. When the input and output - * sequences are the same, the scan is performed in-place. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \param init The initial value. - * \param binary_op The associatve operator used to 'sum' values. - * \return The end of the output sequence. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator - * and \c OutputIterator's \c value_type is convertible to - * both \c AssociativeOperator's \c first_argument_type and - * \c second_argument_type. - * \tparam T is convertible to \c OutputIterator's \c value_type. - * \tparam AssociativeOperator is a model of Binary Function - * and \c AssociativeOperator's \c result_type is - * convertible to \c OutputIterator's \c value_type. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan to compute an in-place - * prefix sum using the \p thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * ... - * - * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; - * - * thrust::maximum binary_op; - * - * thrust::exclusive_scan(thrust::host, data, data + 10, data, 1, binary_op); // in-place scan - * - * // data is now {1, 1, 1, 2, 2, 2, 4, 4, 4, 4 } - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - */ -template -__host__ __device__ - OutputIterator exclusive_scan(const thrust::detail::execution_policy_base &exec, - InputIterator first, - InputIterator last, - OutputIterator result, - T init, - AssociativeOperator binary_op); - - -/*! \p exclusive_scan computes an exclusive prefix sum operation. The - * term 'exclusive' means that each result does not include the - * corresponding input operand in the partial sum. More precisely, - * \p init is assigned to \*result and the value - * binary_op(init, \*first) is assigned to \*(result + 1), - * and so on. This version of the function requires both an associative - * operator and an initial value \p init. When the input and output - * sequences are the same, the scan is performed in-place. - * - * \param first The beginning of the input sequence. - * \param last The end of the input sequence. - * \param result The beginning of the output sequence. - * \param init The initial value. - * \param binary_op The associatve operator used to 'sum' values. - * \return The end of the output sequence. - * - * \tparam InputIterator is a model of Input Iterator - * and \c InputIterator's \c value_type is convertible to - * \c OutputIterator's \c value_type. 
- * \tparam OutputIterator is a model of Output Iterator - * and \c OutputIterator's \c value_type is convertible to - * both \c AssociativeOperator's \c first_argument_type and - * \c second_argument_type. - * \tparam T is convertible to \c OutputIterator's \c value_type. - * \tparam AssociativeOperator is a model of Binary Function - * and \c AssociativeOperator's \c result_type is - * convertible to \c OutputIterator's \c value_type. - * - * \pre \p first may equal \p result but the range [first, last) and the range [result, result + (last - first)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan - * - * \code - * #include - * #include - * - * int data[10] = {-5, 0, 2, -3, 2, 4, 0, -1, 2, 8}; - * - * thrust::maximum binary_op; - * - * thrust::exclusive_scan(data, data + 10, data, 1, binary_op); // in-place scan - * - * // data is now {1, 1, 1, 2, 2, 2, 4, 4, 4, 4 } - * \endcode - * - * \see http://www.sgi.com/tech/stl/partial_sum.html - */ -template - OutputIterator exclusive_scan(InputIterator first, - InputIterator last, - OutputIterator result, - T init, - AssociativeOperator binary_op); - - -/*! \addtogroup segmentedprefixsums Segmented Prefix Sums - * \ingroup prefixsums - * \{ - */ - - -/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix - * sum operation. The term 'inclusive' means that each result includes - * the corresponding input operand in the partial sum. The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate inclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p inclusive_scan_by_key assumes \c equal_to as the binary - * predicate used to compare adjacent keys. Specifically, consecutive iterators - * i and i+1 in the range [first1, last1) - * belong to the same segment if *i == *(i+1), and belong to - * different segments otherwise. - * - * This version of \p inclusive_scan_by_key assumes \c plus as the associative - * operator used to perform the prefix sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * \return The end of the output sequence. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator - * \tparam InputIterator2 is a model of Input Iterator - * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then - * binary_op(x,y) is defined. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. 
- * - * The following code snippet demonstrates how to use \p inclusive_scan_by_key using the \p thrust::host - * execution policy for parallelization: - * - * \code - * #include - * #include - * ... - * - * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * - * thrust::inclusive_scan_by_key(thrust::host, keys, keys + 10, data, data); // in-place scan - * - * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; - * \endcode - * - * \see inclusive_scan - * \see exclusive_scan_by_key - * - */ -template -__host__ __device__ - OutputIterator inclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result); - - -/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix - * sum operation. The term 'inclusive' means that each result includes - * the corresponding input operand in the partial sum. The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate inclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p inclusive_scan_by_key assumes \c equal_to as the binary - * predicate used to compare adjacent keys. Specifically, consecutive iterators - * i and i+1 in the range [first1, last1) - * belong to the same segment if *i == *(i+1), and belong to - * different segments otherwise. - * - * This version of \p inclusive_scan_by_key assumes \c plus as the associative - * operator used to perform the prefix sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * \return The end of the output sequence. - * - * \tparam InputIterator1 is a model of Input Iterator - * \tparam InputIterator2 is a model of Input Iterator - * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then - * binary_op(x,y) is defined. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p inclusive_scan_by_key - * - * \code - * #include - * - * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * - * thrust::inclusive_scan_by_key(keys, keys + 10, data, data); // in-place scan - * - * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; - * \endcode - * - * \see inclusive_scan - * \see exclusive_scan_by_key - * - */ -template - OutputIterator inclusive_scan_by_key(InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result); - - -/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix - * sum operation. The term 'inclusive' means that each result includes - * the corresponding input operand in the partial sum. 
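A minimal Python sketch of the segmented (by-key) behaviour described here (illustrative helper, not the Thrust API): the running sum restarts whenever adjacent keys differ, and the data mirrors the documentation's example.

def inclusive_scan_by_key(keys, vals, op=lambda a, b: a + b):
    # restart the running value at the start of every run of equal adjacent keys
    out = []
    for i, v in enumerate(vals):
        if i > 0 and keys[i] == keys[i - 1]:
            out.append(op(out[-1], v))
        else:
            out.append(v)
    return out

keys = [0, 0, 0, 1, 1, 2, 3, 3, 3, 3]
vals = [1] * 10
assert inclusive_scan_by_key(keys, vals) == [1, 2, 3, 1, 2, 1, 1, 2, 3, 4]
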
The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate inclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p inclusive_scan_by_key uses the binary predicate - * \c pred to compare adjacent keys. Specifically, consecutive iterators - * i and i+1 in the range [first1, last1) - * belong to the same segment if binary_pred(*i, *(i+1)) is true, and belong to - * different segments otherwise. - * - * This version of \p inclusive_scan_by_key assumes \c plus as the associative - * operator used to perform the prefix sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * \param binary_pred The binary predicate used to determine equality of keys. - * \return The end of the output sequence. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator - * \tparam InputIterator2 is a model of Input Iterator - * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then - * binary_op(x,y) is defined. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p inclusive_scan_by_key using the \p thrust::host - * execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * ... - * - * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * - * thrust::equal_to binary_pred; - * - * thrust::inclusive_scan_by_key(thrust::host, keys, keys + 10, data, data, binary_pred); // in-place scan - * - * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; - * \endcode - * - * \see inclusive_scan - * \see exclusive_scan_by_key - * - */ -template -__host__ __device__ - OutputIterator inclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result, - BinaryPredicate binary_pred); - - -/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix - * sum operation. The term 'inclusive' means that each result includes - * the corresponding input operand in the partial sum. The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate inclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p inclusive_scan_by_key uses the binary predicate - * \c pred to compare adjacent keys. 
Specifically, consecutive iterators - * i and i+1 in the range [first1, last1) - * belong to the same segment if binary_pred(*i, *(i+1)) is true, and belong to - * different segments otherwise. - * - * This version of \p inclusive_scan_by_key assumes \c plus as the associative - * operator used to perform the prefix sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * \param binary_pred The binary predicate used to determine equality of keys. - * \return The end of the output sequence. - * - * \tparam InputIterator1 is a model of Input Iterator - * \tparam InputIterator2 is a model of Input Iterator - * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then - * binary_op(x,y) is defined. - * \tparam BinaryPredicate is a model of Binary Predicate. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p inclusive_scan_by_key - * - * \code - * #include - * #include - * - * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * - * thrust::equal_to binary_pred; - * - * thrust::inclusive_scan_by_key(keys, keys + 10, data, data, binary_pred); // in-place scan - * - * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; - * \endcode - * - * \see inclusive_scan - * \see exclusive_scan_by_key - * - */ -template - OutputIterator inclusive_scan_by_key(InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result, - BinaryPredicate binary_pred); - - -/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix - * sum operation. The term 'inclusive' means that each result includes - * the corresponding input operand in the partial sum. The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate inclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p inclusive_scan_by_key uses the binary predicate - * \c pred to compare adjacent keys. Specifically, consecutive iterators - * i and i+1 in the range [first1, last1) - * belong to the same segment if binary_pred(*i, *(i+1)) is true, and belong to - * different segments otherwise. - * - * This version of \p inclusive_scan_by_key uses the associative operator - * \c binary_op to perform the prefix sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. 
- * \param binary_pred The binary predicate used to determine equality of keys. - * \param binary_op The associatve operator used to 'sum' values. - * \return The end of the output sequence. - * - * \tparam DerivedPolicy The name of the derived execution policy. - * \tparam InputIterator1 is a model of Input Iterator - * \tparam InputIterator2 is a model of Input Iterator - * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then - * binary_op(x,y) is defined. - * \tparam BinaryPredicate is a model of Binary Predicate. - * \tparam AssociativeOperator is a model of Binary Function - * and \c AssociativeOperator's \c result_type is - * convertible to \c OutputIterator's \c value_type. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p inclusive_scan_by_key using the \p thrust::host - * execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * ... - * - * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * - * thrust::equal_to binary_pred; - * thrust::plus binary_op; - * - * thrust::inclusive_scan_by_key(thrust::host, keys, keys + 10, data, data, binary_pred, binary_op); // in-place scan - * - * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; - * \endcode - * - * \see inclusive_scan - * \see exclusive_scan_by_key - * - */ -template -__host__ __device__ - OutputIterator inclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result, - BinaryPredicate binary_pred, - AssociativeOperator binary_op); - - -/*! \p inclusive_scan_by_key computes an inclusive key-value or 'segmented' prefix - * sum operation. The term 'inclusive' means that each result includes - * the corresponding input operand in the partial sum. The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate inclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p inclusive_scan_by_key uses the binary predicate - * \c pred to compare adjacent keys. Specifically, consecutive iterators - * i and i+1 in the range [first1, last1) - * belong to the same segment if binary_pred(*i, *(i+1)) is true, and belong to - * different segments otherwise. - * - * This version of \p inclusive_scan_by_key uses the associative operator - * \c binary_op to perform the prefix sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * \param binary_pred The binary predicate used to determine equality of keys. - * \param binary_op The associatve operator used to 'sum' values. - * \return The end of the output sequence. 
- * - * \tparam InputIterator1 is a model of Input Iterator - * \tparam InputIterator2 is a model of Input Iterator - * and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type. - * \tparam OutputIterator is a model of Output Iterator, - * and if \c x and \c y are objects of \c OutputIterator's \c value_type, then - * binary_op(x,y) is defined. - * \tparam BinaryPredicate is a model of Binary Predicate. - * \tparam AssociativeOperator is a model of Binary Function - * and \c AssociativeOperator's \c result_type is - * convertible to \c OutputIterator's \c value_type. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p inclusive_scan_by_key - * - * \code - * #include - * #include - * - * int data[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * - * thrust::equal_to binary_pred; - * thrust::plus binary_op; - * - * thrust::inclusive_scan_by_key(keys, keys + 10, data, data, binary_pred, binary_op); // in-place scan - * - * // data is now {1, 2, 3, 1, 2, 1, 1, 2, 3, 4}; - * \endcode - * - * \see inclusive_scan - * \see exclusive_scan_by_key - * - */ -template - OutputIterator inclusive_scan_by_key(InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result, - BinaryPredicate binary_pred, - AssociativeOperator binary_op); - - -/*! \p exclusive_scan_by_key computes an exclusive segmented prefix - * - * This version of \p exclusive_scan_by_key uses the value \c 0 to - * initialize the exclusive scan operation. - * - * This version of \p exclusive_scan_by_key assumes \c plus as the associative - * operator used to perform the prefix sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * This version of \p exclusive_scan_by_key assumes \c equal_to as the binary - * predicate used to compare adjacent keys. Specifically, consecutive iterators - * i and i+1 in the range [first1, last1 - * belong to the same segment if *i == *(i+1), and belong to - * different segments otherwise. - * - * Refer to the most general form of \p exclusive_scan_by_key for additional details. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan_by_key using the - * \p thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * ... 
- * - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * - * thrust::exclusive_scan_by_key(thrust::host, key, key + 10, vals, vals); // in-place scan - * - * // vals is now {0, 1, 2, 0, 1, 0, 0, 1, 2, 3}; - * \endcode - * - * \see exclusive_scan - * - */ -template -__host__ __device__ - OutputIterator exclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result); - - -/*! \p exclusive_scan_by_key computes an exclusive segmented prefix - * - * This version of \p exclusive_scan_by_key uses the value \c 0 to - * initialize the exclusive scan operation. - * - * This version of \p exclusive_scan_by_key assumes \c plus as the associative - * operator used to perform the prefix sum. When the input and output sequences - * are the same, the scan is performed in-place. - * - * This version of \p exclusive_scan_by_key assumes \c equal_to as the binary - * predicate used to compare adjacent keys. Specifically, consecutive iterators - * i and i+1 in the range [first1, last1 - * belong to the same segment if *i == *(i+1), and belong to - * different segments otherwise. - * - * Refer to the most general form of \p exclusive_scan_by_key for additional details. - * - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan_by_key. - * - * \code - * #include - * - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * - * thrust::exclusive_scan_by_key(key, key + 10, vals, vals); // in-place scan - * - * // vals is now {0, 1, 2, 0, 1, 0, 0, 1, 2, 3}; - * \endcode - * - * \see exclusive_scan - * - */ -template - OutputIterator exclusive_scan_by_key(InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result); - - -/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix - * sum operation. The term 'exclusive' means that each result does not include - * the corresponding input operand in the partial sum. The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate exclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p exclusive_scan_by_key uses the value \c init to - * initialize the exclusive scan operation. - * - * The algorithm's execution is parallelized as determined by \p exec. - * - * \param exec The execution policy to use for parallelization. - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * \param init The initial of the exclusive sum value. - * \return The end of the output sequence. 
- * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan_by_key using the \p - * thrust::host execution policy for parallelization: - * - * \code - * #include - * #include - * #include - * ... - * - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * - * int init = 5; - * - * thrust::exclusive_scan_by_key(thrust::host, key, key + 10, vals, vals, init); // in-place scan - * - * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8}; - * \endcode - * - * \see exclusive_scan - * \see inclusive_scan_by_key - * - */ -template -__host__ __device__ - OutputIterator exclusive_scan_by_key(const thrust::detail::execution_policy_base &exec, - InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result, - T init); - - -/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix - * sum operation. The term 'exclusive' means that each result does not include - * the corresponding input operand in the partial sum. The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate exclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p exclusive_scan_by_key uses the value \c init to - * initialize the exclusive scan operation. - * - * \param first1 The beginning of the key sequence. - * \param last1 The end of the key sequence. - * \param first2 The beginning of the input value sequence. - * \param result The beginning of the output value sequence. - * \param init The initial of the exclusive sum value. - * \return The end of the output sequence. - * - * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise. - * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1) and range [result, result + (last1 - first1)) shall not overlap otherwise. - * - * The following code snippet demonstrates how to use \p exclusive_scan_by_key - * - * \code - * #include - * #include - * - * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3}; - * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1}; - * - * int init = 5; - * - * thrust::exclusive_scan_by_key(key, key + 10, vals, vals, init); // in-place scan - * - * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8}; - * \endcode - * - * \see exclusive_scan - * \see inclusive_scan_by_key - * - */ -template - OutputIterator exclusive_scan_by_key(InputIterator1 first1, - InputIterator1 last1, - InputIterator2 first2, - OutputIterator result, - T init); - - -/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix - * sum operation. The term 'exclusive' means that each result does not include - * the corresponding input operand in the partial sum. The term 'segmented' - * means that the partial sums are broken into distinct segments. In other - * words, within each segment a separate exclusive scan operation is computed. - * Refer to the code sample below for example usage. - * - * This version of \p exclusive_scan_by_key uses the value \c init to - * initialize the exclusive scan operation. 
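A minimal Python sketch of exclusive_scan_by_key's behaviour with an initial value (illustrative helper, not the Thrust API): each run of equal adjacent keys restarts at init and every output excludes its own input, matching the documentation's examples.

def exclusive_scan_by_key(keys, vals, init=0, op=lambda a, b: a + b):
    out = []
    carry = init
    for i, v in enumerate(vals):
        if i == 0 or keys[i] != keys[i - 1]:
            carry = init              # a new segment restarts at the initial value
        out.append(carry)             # exclusive: the current element is not included
        carry = op(carry, v)
    return out

keys = [0, 0, 0, 1, 1, 2, 3, 3, 3, 3]
vals = [1] * 10
assert exclusive_scan_by_key(keys, vals, init=5) == [5, 6, 7, 5, 6, 5, 5, 6, 7, 8]
assert exclusive_scan_by_key(keys, vals) == [0, 1, 2, 0, 1, 0, 0, 1, 2, 3]  # default init of 0
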
- *
- * This version of \p exclusive_scan_by_key uses the binary predicate \c binary_pred
- * to compare adjacent keys. Specifically, consecutive iterators i and
- * i+1 in the range [first1, last1) belong to the same segment if
- * binary_pred(*i, *(i+1)) is true, and belong to different segments otherwise.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first1 The beginning of the key sequence.
- * \param last1 The end of the key sequence.
- * \param first2 The beginning of the input value sequence.
- * \param result The beginning of the output value sequence.
- * \param init The initial value of the exclusive sum.
- * \param binary_pred The binary predicate used to determine equality of keys.
- * \return The end of the output sequence.
- *
- * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise.
- * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1)) and the range [result, result + (last1 - first1)) shall not overlap otherwise.
- *
- * The following code snippet demonstrates how to use \p exclusive_scan_by_key using the
- * \p thrust::host execution policy for parallelization:
- *
- * \code
- * #include <thrust/scan.h>
- * #include <thrust/functional.h>
- * #include <thrust/execution_policy.h>
- * ...
- *
- * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3};
- * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
- *
- * int init = 5;
- *
- * thrust::equal_to<int> binary_pred;
- *
- * thrust::exclusive_scan_by_key(thrust::host, keys, keys + 10, vals, vals, init, binary_pred); // in-place scan
- *
- * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8};
- * \endcode
- *
- * \see exclusive_scan
- * \see inclusive_scan_by_key
- *
- */
-template<typename DerivedPolicy,
-         typename InputIterator1,
-         typename InputIterator2,
-         typename OutputIterator,
-         typename T,
-         typename BinaryPredicate>
-__host__ __device__
-  OutputIterator exclusive_scan_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                                       InputIterator1 first1,
-                                       InputIterator1 last1,
-                                       InputIterator2 first2,
-                                       OutputIterator result,
-                                       T init,
-                                       BinaryPredicate binary_pred);
-
-
-/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix
- * sum operation. The term 'exclusive' means that each result does not include
- * the corresponding input operand in the partial sum. The term 'segmented'
- * means that the partial sums are broken into distinct segments. In other
- * words, within each segment a separate exclusive scan operation is computed.
- * Refer to the code sample below for example usage.
- *
- * This version of \p exclusive_scan_by_key uses the value \c init to
- * initialize the exclusive scan operation.
- *
- * This version of \p exclusive_scan_by_key uses the binary predicate \c binary_pred
- * to compare adjacent keys. Specifically, consecutive iterators i and
- * i+1 in the range [first1, last1) belong to the same segment if
- * binary_pred(*i, *(i+1)) is true, and belong to different segments otherwise.
- *
- * \param first1 The beginning of the key sequence.
- * \param last1 The end of the key sequence.
- * \param first2 The beginning of the input value sequence.
- * \param result The beginning of the output value sequence.
- * \param init The initial value of the exclusive sum.
- * \param binary_pred The binary predicate used to determine equality of keys.
- * \return The end of the output sequence.
- *
- * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise.
- * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1)) and the range [result, result + (last1 - first1)) shall not overlap otherwise.
- *
- * The following code snippet demonstrates how to use \p exclusive_scan_by_key
- *
- * \code
- * #include <thrust/scan.h>
- * #include <thrust/functional.h>
- *
- * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3};
- * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
- *
- * int init = 5;
- *
- * thrust::equal_to<int> binary_pred;
- *
- * thrust::exclusive_scan_by_key(keys, keys + 10, vals, vals, init, binary_pred); // in-place scan
- *
- * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8};
- * \endcode
- *
- * \see exclusive_scan
- * \see inclusive_scan_by_key
- *
- */
-template<typename InputIterator1,
-         typename InputIterator2,
-         typename OutputIterator,
-         typename T,
-         typename BinaryPredicate>
-  OutputIterator exclusive_scan_by_key(InputIterator1 first1,
-                                       InputIterator1 last1,
-                                       InputIterator2 first2,
-                                       OutputIterator result,
-                                       T init,
-                                       BinaryPredicate binary_pred);
-
-
-/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix
- * sum operation. The term 'exclusive' means that each result does not include
- * the corresponding input operand in the partial sum. The term 'segmented'
- * means that the partial sums are broken into distinct segments. In other
- * words, within each segment a separate exclusive scan operation is computed.
- * Refer to the code sample below for example usage.
- *
- * This version of \p exclusive_scan_by_key uses the value \c init to
- * initialize the exclusive scan operation.
- *
- * This version of \p exclusive_scan_by_key uses the binary predicate \c binary_pred
- * to compare adjacent keys. Specifically, consecutive iterators i and
- * i+1 in the range [first1, last1) belong to the same segment if
- * binary_pred(*i, *(i+1)) is true, and belong to different segments otherwise.
- *
- * This version of \p exclusive_scan_by_key uses the associative operator
- * \c binary_op to perform the prefix sum. When the input and output sequences
- * are the same, the scan is performed in-place.
- *
- * The algorithm's execution is parallelized as determined by \p exec.
- *
- * \param exec The execution policy to use for parallelization.
- * \param first1 The beginning of the key sequence.
- * \param last1 The end of the key sequence.
- * \param first2 The beginning of the input value sequence.
- * \param result The beginning of the output value sequence.
- * \param init The initial value of the exclusive sum.
- * \param binary_pred The binary predicate used to determine equality of keys.
- * \param binary_op The associative operator used to 'sum' values.
- * \return The end of the output sequence.
- *
- * \tparam DerivedPolicy The name of the derived execution policy.
- * \tparam InputIterator1 is a model of Input Iterator
- * \tparam InputIterator2 is a model of Input Iterator
- *         and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type.
- * \tparam OutputIterator is a model of Output Iterator,
- *         and if \c x and \c y are objects of \c OutputIterator's \c value_type, then
- *         binary_op(x,y) is defined.
- * \tparam T is convertible to \c OutputIterator's \c value_type.
- * \tparam BinaryPredicate is a model of Binary Predicate.
- * \tparam AssociativeOperator is a model of Binary Function
- *         and \c AssociativeOperator's \c result_type is convertible to \c OutputIterator's \c value_type.
- *
- * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise.
- * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1)) and the range [result, result + (last1 - first1)) shall not overlap otherwise.
- *
- * The following code snippet demonstrates how to use \p exclusive_scan_by_key using the
- * \p thrust::host execution policy for parallelization:
- *
- * \code
- * #include <thrust/scan.h>
- * #include <thrust/functional.h>
- * #include <thrust/execution_policy.h>
- * ...
- *
- * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3};
- * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
- *
- * int init = 5;
- *
- * thrust::equal_to<int> binary_pred;
- * thrust::plus<int> binary_op;
- *
- * thrust::exclusive_scan_by_key(thrust::host, keys, keys + 10, vals, vals, init, binary_pred, binary_op); // in-place scan
- *
- * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8};
- * \endcode
- *
- * \see exclusive_scan
- * \see inclusive_scan_by_key
- *
- */
-template<typename DerivedPolicy,
-         typename InputIterator1,
-         typename InputIterator2,
-         typename OutputIterator,
-         typename T,
-         typename BinaryPredicate,
-         typename AssociativeOperator>
-__host__ __device__
-  OutputIterator exclusive_scan_by_key(const thrust::detail::execution_policy_base<DerivedPolicy> &exec,
-                                       InputIterator1 first1,
-                                       InputIterator1 last1,
-                                       InputIterator2 first2,
-                                       OutputIterator result,
-                                       T init,
-                                       BinaryPredicate binary_pred,
-                                       AssociativeOperator binary_op);
-
-
-/*! \p exclusive_scan_by_key computes an exclusive key-value or 'segmented' prefix
- * sum operation. The term 'exclusive' means that each result does not include
- * the corresponding input operand in the partial sum. The term 'segmented'
- * means that the partial sums are broken into distinct segments. In other
- * words, within each segment a separate exclusive scan operation is computed.
- * Refer to the code sample below for example usage.
- *
- * This version of \p exclusive_scan_by_key uses the value \c init to
- * initialize the exclusive scan operation.
- *
- * This version of \p exclusive_scan_by_key uses the binary predicate \c binary_pred
- * to compare adjacent keys. Specifically, consecutive iterators i and
- * i+1 in the range [first1, last1) belong to the same segment if
- * binary_pred(*i, *(i+1)) is true, and belong to different segments otherwise.
- *
- * This version of \p exclusive_scan_by_key uses the associative operator
- * \c binary_op to perform the prefix sum. When the input and output sequences
- * are the same, the scan is performed in-place.
- *
- * \param first1 The beginning of the key sequence.
- * \param last1 The end of the key sequence.
- * \param first2 The beginning of the input value sequence.
- * \param result The beginning of the output value sequence.
- * \param init The initial value of the exclusive sum.
- * \param binary_pred The binary predicate used to determine equality of keys.
- * \param binary_op The associative operator used to 'sum' values.
- * \return The end of the output sequence.
- *
- * \tparam InputIterator1 is a model of Input Iterator
- * \tparam InputIterator2 is a model of Input Iterator
- *         and \c InputIterator2's \c value_type is convertible to \c OutputIterator's \c value_type.
- * \tparam OutputIterator is a model of Output Iterator,
- *         and if \c x and \c y are objects of \c OutputIterator's \c value_type, then
- *         binary_op(x,y) is defined.
- * \tparam T is convertible to \c OutputIterator's \c value_type.
- * \tparam BinaryPredicate is a model of Binary Predicate.
- * \tparam AssociativeOperator is a model of Binary Function
- *         and \c AssociativeOperator's \c result_type is convertible to \c OutputIterator's \c value_type.
- *
- * \pre \p first1 may equal \p result but the range [first1, last1) and the range [result, result + (last1 - first1)) shall not overlap otherwise.
- * \pre \p first2 may equal \p result but the range [first2, first2 + (last1 - first1)) and the range [result, result + (last1 - first1)) shall not overlap otherwise.
- *
- * The following code snippet demonstrates how to use \p exclusive_scan_by_key
- *
- * \code
- * #include <thrust/scan.h>
- * #include <thrust/functional.h>
- *
- * int keys[10] = {0, 0, 0, 1, 1, 2, 3, 3, 3, 3};
- * int vals[10] = {1, 1, 1, 1, 1, 1, 1, 1, 1, 1};
- *
- * int init = 5;
- *
- * thrust::equal_to<int> binary_pred;
- * thrust::plus<int> binary_op;
- *
- * thrust::exclusive_scan_by_key(keys, keys + 10, vals, vals, init, binary_pred, binary_op); // in-place scan
- *
- * // vals is now {5, 6, 7, 5, 6, 5, 5, 6, 7, 8};
- * \endcode
- *
- * \see exclusive_scan
- * \see inclusive_scan_by_key
- *
- */
-template<typename InputIterator1,
-         typename InputIterator2,
-         typename OutputIterator,
-         typename T,
-         typename BinaryPredicate,
-         typename AssociativeOperator>
-  OutputIterator exclusive_scan_by_key(InputIterator1 first1,
-                                       InputIterator1 last1,
-                                       InputIterator2 first2,
-                                       OutputIterator result,
-                                       T init,
-                                       BinaryPredicate binary_pred,
-                                       AssociativeOperator binary_op);
-
-
-/*! \} // end segmentedprefixsums
- */
-
-
-/*! \} // end prefix sums
- */
-
-
-} // end namespace thrust
-
-#include <thrust/detail/scan.inl>
-
diff --git a/spaces/marioboy/neil-breen/toolbox/ui.py b/spaces/marioboy/neil-breen/toolbox/ui.py
deleted file mode 100644
index d56b5740e276751f954aae1ca17e5ed485b48937..0000000000000000000000000000000000000000
--- a/spaces/marioboy/neil-breen/toolbox/ui.py
+++ /dev/null
@@ -1,611 +0,0 @@
-import matplotlib.pyplot as plt
-from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
-from matplotlib.figure import Figure
-from PyQt5.QtCore import Qt, QStringListModel
-from PyQt5.QtWidgets import *
-from encoder.inference import plot_embedding_as_heatmap
-from toolbox.utterance import Utterance
-from pathlib import Path
-from typing import List, Set
-import sounddevice as sd
-import soundfile as sf
-import numpy as np
-# from sklearn.manifold import TSNE   # You can try with TSNE if you like, I prefer UMAP
-from time import sleep
-import umap
-import sys
-from warnings import filterwarnings, warn
-filterwarnings("ignore")
-
-
-colormap = np.array([
-    [0, 127, 70],
-    [255, 0, 0],
-    [255, 217, 38],
-    [0, 135, 255],
-    [165, 0, 165],
-    [255, 167, 255],
-    [97, 142, 151],
-    [0, 255, 255],
-    [255, 96, 38],
-    [142, 76, 0],
-    [33, 0, 127],
-    [0, 0, 0],
-    [183, 183, 183],
-    [76, 255, 0],
-], dtype=float) / 255
-
-default_text = \
-    "Welcome to the toolbox! To begin, load an utterance from your datasets or record one " \
-    "yourself.\nOnce its embedding has been created, you can synthesize any text written here.\n" \
-    "The synthesizer expects to generate " \
-    "outputs that are somewhere between 5 and 12 seconds.\nTo mark breaks, write a new line. " \
-    "Each line will be treated separately.\nThen, they are joined together to make the final " \
-    "spectrogram. Use the vocoder to generate audio.\nThe vocoder generates almost in constant " \
-    "time, so it will be more time efficient for longer inputs like this one.\nOn the left you " \
-    "have the embedding projections. Load or record more utterances to see them.\nIf you have " \
-    "at least 2 or 3 utterances from the same speaker, a cluster should form.\nSynthesized " \
-    "utterances are of the same color as the speaker whose voice was used, but they're " \
-    "represented with a cross."
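The default_text above walks the user through the toolbox flow: embed an utterance, project the embeddings with UMAP on the left, then synthesize and vocode. As a rough, self-contained sketch of just the projection step (assuming umap-learn and NumPy are installed; the embeddings here are random placeholders standing in for real 256-dimensional speaker embeddings), the following mirrors the n_neighbors heuristic that draw_umap_projections below applies:

# Sketch only: random vectors in place of real speaker embeddings.
import numpy as np
import umap

rng = np.random.default_rng(0)
embeds = rng.normal(size=(12, 256))               # 12 fake utterance embeddings
n_neighbors = int(np.ceil(np.sqrt(len(embeds))))  # same heuristic as draw_umap_projections
reducer = umap.UMAP(n_neighbors=n_neighbors, metric="cosine")
projections = reducer.fit_transform(embeds)       # one 2-D point per utterance
print(projections.shape)                          # (12, 2), ready to scatter-plot

With enough utterances from one speaker, those 2-D points form the cluster the message describes.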
- - -class UI(QDialog): - min_umap_points = 4 - max_log_lines = 5 - max_saved_utterances = 20 - - def draw_utterance(self, utterance: Utterance, which): - self.draw_spec(utterance.spec, which) - self.draw_embed(utterance.embed, utterance.name, which) - - def draw_embed(self, embed, name, which): - embed_ax, _ = self.current_ax if which == "current" else self.gen_ax - embed_ax.figure.suptitle("" if embed is None else name) - - ## Embedding - # Clear the plot - if len(embed_ax.images) > 0: - embed_ax.images[0].colorbar.remove() - embed_ax.clear() - - # Draw the embed - if embed is not None: - plot_embedding_as_heatmap(embed, embed_ax) - embed_ax.set_title("embedding") - embed_ax.set_aspect("equal", "datalim") - embed_ax.set_xticks([]) - embed_ax.set_yticks([]) - embed_ax.figure.canvas.draw() - - def draw_spec(self, spec, which): - _, spec_ax = self.current_ax if which == "current" else self.gen_ax - - ## Spectrogram - # Draw the spectrogram - spec_ax.clear() - if spec is not None: - im = spec_ax.imshow(spec, aspect="auto", interpolation="none") - # spec_ax.figure.colorbar(mappable=im, shrink=0.65, orientation="horizontal", - # spec_ax=spec_ax) - spec_ax.set_title("mel spectrogram") - - spec_ax.set_xticks([]) - spec_ax.set_yticks([]) - spec_ax.figure.canvas.draw() - if which != "current": - self.vocode_button.setDisabled(spec is None) - - def draw_umap_projections(self, utterances: Set[Utterance]): - self.umap_ax.clear() - - speakers = np.unique([u.speaker_name for u in utterances]) - colors = {speaker_name: colormap[i] for i, speaker_name in enumerate(speakers)} - embeds = [u.embed for u in utterances] - - # Display a message if there aren't enough points - if len(utterances) < self.min_umap_points: - self.umap_ax.text(.5, .5, "Add %d more points to\ngenerate the projections" % - (self.min_umap_points - len(utterances)), - horizontalalignment='center', fontsize=15) - self.umap_ax.set_title("") - - # Compute the projections - else: - if not self.umap_hot: - self.log( - "Drawing UMAP projections for the first time, this will take a few seconds.") - self.umap_hot = True - - reducer = umap.UMAP(int(np.ceil(np.sqrt(len(embeds)))), metric="cosine") - # reducer = TSNE() - projections = reducer.fit_transform(embeds) - - speakers_done = set() - for projection, utterance in zip(projections, utterances): - color = colors[utterance.speaker_name] - mark = "x" if "_gen_" in utterance.name else "o" - label = None if utterance.speaker_name in speakers_done else utterance.speaker_name - speakers_done.add(utterance.speaker_name) - self.umap_ax.scatter(projection[0], projection[1], c=[color], marker=mark, - label=label) - # self.umap_ax.set_title("UMAP projections") - self.umap_ax.legend(prop={'size': 10}) - - # Draw the plot - self.umap_ax.set_aspect("equal", "datalim") - self.umap_ax.set_xticks([]) - self.umap_ax.set_yticks([]) - self.umap_ax.figure.canvas.draw() - - def save_audio_file(self, wav, sample_rate): - dialog = QFileDialog() - dialog.setDefaultSuffix(".wav") - fpath, _ = dialog.getSaveFileName( - parent=self, - caption="Select a path to save the audio file", - filter="Audio Files (*.flac *.wav)" - ) - if fpath: - #Default format is wav - if Path(fpath).suffix == "": - fpath += ".wav" - sf.write(fpath, wav, sample_rate) - - def setup_audio_devices(self, sample_rate): - input_devices = [] - output_devices = [] - for device in sd.query_devices(): - # Check if valid input - try: - sd.check_input_settings(device=device["name"], samplerate=sample_rate) - input_devices.append(device["name"]) - except: - 
pass - - # Check if valid output - try: - sd.check_output_settings(device=device["name"], samplerate=sample_rate) - output_devices.append(device["name"]) - except Exception as e: - # Log a warning only if the device is not an input - if not device["name"] in input_devices: - warn("Unsupported output device %s for the sample rate: %d \nError: %s" % (device["name"], sample_rate, str(e))) - - if len(input_devices) == 0: - self.log("No audio input device detected. Recording may not work.") - self.audio_in_device = None - else: - self.audio_in_device = input_devices[0] - - if len(output_devices) == 0: - self.log("No supported output audio devices were found! Audio output may not work.") - self.audio_out_devices_cb.addItems(["None"]) - self.audio_out_devices_cb.setDisabled(True) - else: - self.audio_out_devices_cb.clear() - self.audio_out_devices_cb.addItems(output_devices) - self.audio_out_devices_cb.currentTextChanged.connect(self.set_audio_device) - - self.set_audio_device() - - def set_audio_device(self): - - output_device = self.audio_out_devices_cb.currentText() - if output_device == "None": - output_device = None - - # If None, sounddevice queries portaudio - sd.default.device = (self.audio_in_device, output_device) - - def play(self, wav, sample_rate): - try: - sd.stop() - sd.play(wav, sample_rate) - except Exception as e: - print(e) - self.log("Error in audio playback. Try selecting a different audio output device.") - self.log("Your device must be connected before you start the toolbox.") - - def stop(self): - sd.stop() - - def record_one(self, sample_rate, duration): - self.record_button.setText("Recording...") - self.record_button.setDisabled(True) - - self.log("Recording %d seconds of audio" % duration) - sd.stop() - try: - wav = sd.rec(duration * sample_rate, sample_rate, 1) - except Exception as e: - print(e) - self.log("Could not record anything. Is your recording device enabled?") - self.log("Your device must be connected before you start the toolbox.") - return None - - for i in np.arange(0, duration, 0.1): - self.set_loading(i, duration) - sleep(0.1) - self.set_loading(duration, duration) - sd.wait() - - self.log("Done recording.") - self.record_button.setText("Record") - self.record_button.setDisabled(False) - - return wav.squeeze() - - @property - def current_dataset_name(self): - return self.dataset_box.currentText() - - @property - def current_speaker_name(self): - return self.speaker_box.currentText() - - @property - def current_utterance_name(self): - return self.utterance_box.currentText() - - def browse_file(self): - fpath = QFileDialog().getOpenFileName( - parent=self, - caption="Select an audio file", - filter="Audio Files (*.mp3 *.flac *.wav *.m4a)" - ) - return Path(fpath[0]) if fpath[0] != "" else "" - - @staticmethod - def repopulate_box(box, items, random=False): - """ - Resets a box and adds a list of items. 
Pass a list of (item, data) pairs instead to join - data to the items - """ - box.blockSignals(True) - box.clear() - for item in items: - item = list(item) if isinstance(item, tuple) else [item] - box.addItem(str(item[0]), *item[1:]) - if len(items) > 0: - box.setCurrentIndex(np.random.randint(len(items)) if random else 0) - box.setDisabled(len(items) == 0) - box.blockSignals(False) - - def populate_browser(self, datasets_root: Path, recognized_datasets: List, level: int, - random=True): - # Select a random dataset - if level <= 0: - if datasets_root is not None: - datasets = [datasets_root.joinpath(d) for d in recognized_datasets] - datasets = [d.relative_to(datasets_root) for d in datasets if d.exists()] - self.browser_load_button.setDisabled(len(datasets) == 0) - if datasets_root is None or len(datasets) == 0: - msg = "Warning: you d" + ("id not pass a root directory for datasets as argument" \ - if datasets_root is None else "o not have any of the recognized datasets" \ - " in %s" % datasets_root) - self.log(msg) - msg += ".\nThe recognized datasets are:\n\t%s\nFeel free to add your own. You " \ - "can still use the toolbox by recording samples yourself." % \ - ("\n\t".join(recognized_datasets)) - print(msg, file=sys.stderr) - - self.random_utterance_button.setDisabled(True) - self.random_speaker_button.setDisabled(True) - self.random_dataset_button.setDisabled(True) - self.utterance_box.setDisabled(True) - self.speaker_box.setDisabled(True) - self.dataset_box.setDisabled(True) - self.browser_load_button.setDisabled(True) - self.auto_next_checkbox.setDisabled(True) - return - self.repopulate_box(self.dataset_box, datasets, random) - - # Select a random speaker - if level <= 1: - speakers_root = datasets_root.joinpath(self.current_dataset_name) - speaker_names = [d.stem for d in speakers_root.glob("*") if d.is_dir()] - self.repopulate_box(self.speaker_box, speaker_names, random) - - # Select a random utterance - if level <= 2: - utterances_root = datasets_root.joinpath( - self.current_dataset_name, - self.current_speaker_name - ) - utterances = [] - for extension in ['mp3', 'flac', 'wav', 'm4a']: - utterances.extend(Path(utterances_root).glob("**/*.%s" % extension)) - utterances = [fpath.relative_to(utterances_root) for fpath in utterances] - self.repopulate_box(self.utterance_box, utterances, random) - - def browser_select_next(self): - index = (self.utterance_box.currentIndex() + 1) % len(self.utterance_box) - self.utterance_box.setCurrentIndex(index) - - @property - def current_encoder_fpath(self): - return self.encoder_box.itemData(self.encoder_box.currentIndex()) - - @property - def current_synthesizer_fpath(self): - return self.synthesizer_box.itemData(self.synthesizer_box.currentIndex()) - - @property - def current_vocoder_fpath(self): - return self.vocoder_box.itemData(self.vocoder_box.currentIndex()) - - def populate_models(self, encoder_models_dir: Path, synthesizer_models_dir: Path, - vocoder_models_dir: Path): - # Encoder - encoder_fpaths = list(encoder_models_dir.glob("*.pt")) - if len(encoder_fpaths) == 0: - raise Exception("No encoder models found in %s" % encoder_models_dir) - self.repopulate_box(self.encoder_box, [(f.stem, f) for f in encoder_fpaths]) - - # Synthesizer - synthesizer_fpaths = list(synthesizer_models_dir.glob("**/*.pt")) - if len(synthesizer_fpaths) == 0: - raise Exception("No synthesizer models found in %s" % synthesizer_models_dir) - self.repopulate_box(self.synthesizer_box, [(f.stem, f) for f in synthesizer_fpaths]) - - # Vocoder - vocoder_fpaths = 
list(vocoder_models_dir.glob("**/*.pt")) - vocoder_items = [(f.stem, f) for f in vocoder_fpaths] + [("Griffin-Lim", None)] - self.repopulate_box(self.vocoder_box, vocoder_items) - - @property - def selected_utterance(self): - return self.utterance_history.itemData(self.utterance_history.currentIndex()) - - def register_utterance(self, utterance: Utterance): - self.utterance_history.blockSignals(True) - self.utterance_history.insertItem(0, utterance.name, utterance) - self.utterance_history.setCurrentIndex(0) - self.utterance_history.blockSignals(False) - - if len(self.utterance_history) > self.max_saved_utterances: - self.utterance_history.removeItem(self.max_saved_utterances) - - self.play_button.setDisabled(False) - self.generate_button.setDisabled(False) - self.synthesize_button.setDisabled(False) - - def log(self, line, mode="newline"): - if mode == "newline": - self.logs.append(line) - if len(self.logs) > self.max_log_lines: - del self.logs[0] - elif mode == "append": - self.logs[-1] += line - elif mode == "overwrite": - self.logs[-1] = line - log_text = '\n'.join(self.logs) - - self.log_window.setText(log_text) - self.app.processEvents() - - def set_loading(self, value, maximum=1): - self.loading_bar.setValue(value * 100) - self.loading_bar.setMaximum(maximum * 100) - self.loading_bar.setTextVisible(value != 0) - self.app.processEvents() - - def populate_gen_options(self, seed, trim_silences): - if seed is not None: - self.random_seed_checkbox.setChecked(True) - self.seed_textbox.setText(str(seed)) - self.seed_textbox.setEnabled(True) - else: - self.random_seed_checkbox.setChecked(False) - self.seed_textbox.setText(str(0)) - self.seed_textbox.setEnabled(False) - - if not trim_silences: - self.trim_silences_checkbox.setChecked(False) - self.trim_silences_checkbox.setDisabled(True) - - def update_seed_textbox(self): - if self.random_seed_checkbox.isChecked(): - self.seed_textbox.setEnabled(True) - else: - self.seed_textbox.setEnabled(False) - - def reset_interface(self): - self.draw_embed(None, None, "current") - self.draw_embed(None, None, "generated") - self.draw_spec(None, "current") - self.draw_spec(None, "generated") - self.draw_umap_projections(set()) - self.set_loading(0) - self.play_button.setDisabled(True) - self.generate_button.setDisabled(True) - self.synthesize_button.setDisabled(True) - self.vocode_button.setDisabled(True) - self.replay_wav_button.setDisabled(True) - self.export_wav_button.setDisabled(True) - [self.log("") for _ in range(self.max_log_lines)] - - def __init__(self): - ## Initialize the application - self.app = QApplication(sys.argv) - super().__init__(None) - self.setWindowTitle("SV2TTS toolbox") - - - ## Main layouts - # Root - root_layout = QGridLayout() - self.setLayout(root_layout) - - # Browser - browser_layout = QGridLayout() - root_layout.addLayout(browser_layout, 0, 0, 1, 2) - - # Generation - gen_layout = QVBoxLayout() - root_layout.addLayout(gen_layout, 0, 2, 1, 2) - - # Projections - self.projections_layout = QVBoxLayout() - root_layout.addLayout(self.projections_layout, 1, 0, 1, 1) - - # Visualizations - vis_layout = QVBoxLayout() - root_layout.addLayout(vis_layout, 1, 1, 1, 3) - - - ## Projections - # UMap - fig, self.umap_ax = plt.subplots(figsize=(3, 3), facecolor="#F0F0F0") - fig.subplots_adjust(left=0.02, bottom=0.02, right=0.98, top=0.98) - self.projections_layout.addWidget(FigureCanvas(fig)) - self.umap_hot = False - self.clear_button = QPushButton("Clear") - self.projections_layout.addWidget(self.clear_button) - - - ## Browser - # 
Dataset, speaker and utterance selection - i = 0 - self.dataset_box = QComboBox() - browser_layout.addWidget(QLabel("Dataset"), i, 0) - browser_layout.addWidget(self.dataset_box, i + 1, 0) - self.speaker_box = QComboBox() - browser_layout.addWidget(QLabel("Speaker"), i, 1) - browser_layout.addWidget(self.speaker_box, i + 1, 1) - self.utterance_box = QComboBox() - browser_layout.addWidget(QLabel("Utterance"), i, 2) - browser_layout.addWidget(self.utterance_box, i + 1, 2) - self.browser_load_button = QPushButton("Load") - browser_layout.addWidget(self.browser_load_button, i + 1, 3) - i += 2 - - # Random buttons - self.random_dataset_button = QPushButton("Random") - browser_layout.addWidget(self.random_dataset_button, i, 0) - self.random_speaker_button = QPushButton("Random") - browser_layout.addWidget(self.random_speaker_button, i, 1) - self.random_utterance_button = QPushButton("Random") - browser_layout.addWidget(self.random_utterance_button, i, 2) - self.auto_next_checkbox = QCheckBox("Auto select next") - self.auto_next_checkbox.setChecked(True) - browser_layout.addWidget(self.auto_next_checkbox, i, 3) - i += 1 - - # Utterance box - browser_layout.addWidget(QLabel("Use embedding from:"), i, 0) - self.utterance_history = QComboBox() - browser_layout.addWidget(self.utterance_history, i, 1, 1, 3) - i += 1 - - # Random & next utterance buttons - self.browser_browse_button = QPushButton("Browse") - browser_layout.addWidget(self.browser_browse_button, i, 0) - self.record_button = QPushButton("Record") - browser_layout.addWidget(self.record_button, i, 1) - self.play_button = QPushButton("Play") - browser_layout.addWidget(self.play_button, i, 2) - self.stop_button = QPushButton("Stop") - browser_layout.addWidget(self.stop_button, i, 3) - i += 1 - - - # Model and audio output selection - self.encoder_box = QComboBox() - browser_layout.addWidget(QLabel("Encoder"), i, 0) - browser_layout.addWidget(self.encoder_box, i + 1, 0) - self.synthesizer_box = QComboBox() - browser_layout.addWidget(QLabel("Synthesizer"), i, 1) - browser_layout.addWidget(self.synthesizer_box, i + 1, 1) - self.vocoder_box = QComboBox() - browser_layout.addWidget(QLabel("Vocoder"), i, 2) - browser_layout.addWidget(self.vocoder_box, i + 1, 2) - - self.audio_out_devices_cb=QComboBox() - browser_layout.addWidget(QLabel("Audio Output"), i, 3) - browser_layout.addWidget(self.audio_out_devices_cb, i + 1, 3) - i += 2 - - #Replay & Save Audio - browser_layout.addWidget(QLabel("Toolbox Output:"), i, 0) - self.waves_cb = QComboBox() - self.waves_cb_model = QStringListModel() - self.waves_cb.setModel(self.waves_cb_model) - self.waves_cb.setToolTip("Select one of the last generated waves in this section for replaying or exporting") - browser_layout.addWidget(self.waves_cb, i, 1) - self.replay_wav_button = QPushButton("Replay") - self.replay_wav_button.setToolTip("Replay last generated vocoder") - browser_layout.addWidget(self.replay_wav_button, i, 2) - self.export_wav_button = QPushButton("Export") - self.export_wav_button.setToolTip("Save last generated vocoder audio in filesystem as a wav file") - browser_layout.addWidget(self.export_wav_button, i, 3) - i += 1 - - - ## Embed & spectrograms - vis_layout.addStretch() - - gridspec_kw = {"width_ratios": [1, 4]} - fig, self.current_ax = plt.subplots(1, 2, figsize=(10, 2.25), facecolor="#F0F0F0", - gridspec_kw=gridspec_kw) - fig.subplots_adjust(left=0, bottom=0.1, right=1, top=0.8) - vis_layout.addWidget(FigureCanvas(fig)) - - fig, self.gen_ax = plt.subplots(1, 2, figsize=(10, 2.25), 
facecolor="#F0F0F0", - gridspec_kw=gridspec_kw) - fig.subplots_adjust(left=0, bottom=0.1, right=1, top=0.8) - vis_layout.addWidget(FigureCanvas(fig)) - - for ax in self.current_ax.tolist() + self.gen_ax.tolist(): - ax.set_facecolor("#F0F0F0") - for side in ["top", "right", "bottom", "left"]: - ax.spines[side].set_visible(False) - - - ## Generation - self.text_prompt = QPlainTextEdit(default_text) - gen_layout.addWidget(self.text_prompt, stretch=1) - - self.generate_button = QPushButton("Synthesize and vocode") - gen_layout.addWidget(self.generate_button) - - layout = QHBoxLayout() - self.synthesize_button = QPushButton("Synthesize only") - layout.addWidget(self.synthesize_button) - self.vocode_button = QPushButton("Vocode only") - layout.addWidget(self.vocode_button) - gen_layout.addLayout(layout) - - layout_seed = QGridLayout() - self.random_seed_checkbox = QCheckBox("Random seed:") - self.random_seed_checkbox.setToolTip("When checked, makes the synthesizer and vocoder deterministic.") - layout_seed.addWidget(self.random_seed_checkbox, 0, 0) - self.seed_textbox = QLineEdit() - self.seed_textbox.setMaximumWidth(80) - layout_seed.addWidget(self.seed_textbox, 0, 1) - self.trim_silences_checkbox = QCheckBox("Enhance vocoder output") - self.trim_silences_checkbox.setToolTip("When checked, trims excess silence in vocoder output." - " This feature requires `webrtcvad` to be installed.") - layout_seed.addWidget(self.trim_silences_checkbox, 0, 2, 1, 2) - gen_layout.addLayout(layout_seed) - - self.loading_bar = QProgressBar() - gen_layout.addWidget(self.loading_bar) - - self.log_window = QLabel() - self.log_window.setAlignment(Qt.AlignBottom | Qt.AlignLeft) - gen_layout.addWidget(self.log_window) - self.logs = [] - gen_layout.addStretch() - - - ## Set the size of the window and of the elements - max_size = QDesktopWidget().availableGeometry(self).size() * 0.8 - self.resize(max_size) - - ## Finalize the display - self.reset_interface() - self.show() - - def start(self): - self.app.exec_() diff --git a/spaces/matthoffner/wizardcoder-ggml/README.md b/spaces/matthoffner/wizardcoder-ggml/README.md deleted file mode 100644 index 4000954d0f9f88e47e3b8925888bca4ccce0f507..0000000000000000000000000000000000000000 --- a/spaces/matthoffner/wizardcoder-ggml/README.md +++ /dev/null @@ -1,20 +0,0 @@ ---- -title: wizardcoder -emoji: 🪄⚡️ -sdk: docker -app_port: 8000 ---- - -# wizardCoder-ggml - -## FastAPI Docs - -## ggml -## ctransformers - -### Updates - -* Refactored /v1/chat/completions to match OpenAI spec -* Added /v1/chat/completions -* [Start using ctransformers](https://github.com/marella/ctransformers) -* [Added starcoder example](https://github.com/ggerganov/ggml/tree/master/examples/starcoder) \ No newline at end of file diff --git a/spaces/meraih/English-Japanese-Anime-TTS/text/symbols.py b/spaces/meraih/English-Japanese-Anime-TTS/text/symbols.py deleted file mode 100644 index 053a7105f7ce95aa51614f6995399fa2172b3eb2..0000000000000000000000000000000000000000 --- a/spaces/meraih/English-Japanese-Anime-TTS/text/symbols.py +++ /dev/null @@ -1,76 +0,0 @@ -''' -Defines the set of symbols used in text input to the model. 
-''' - -# japanese_cleaners -_pad = '_' -_punctuation = ',.!?-' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧ↓↑ ' - - -'''# japanese_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijkmnoprstuvwyzʃʧʦ↓↑ ' -''' - - -'''# korean_cleaners -_pad = '_' -_punctuation = ',.!?…~' -_letters = 'ㄱㄴㄷㄹㅁㅂㅅㅇㅈㅊㅋㅌㅍㅎㄲㄸㅃㅆㅉㅏㅓㅗㅜㅡㅣㅐㅔ ' -''' - -'''# chinese_cleaners -_pad = '_' -_punctuation = ',。!?—…' -_letters = 'ㄅㄆㄇㄈㄉㄊㄋㄌㄍㄎㄏㄐㄑㄒㄓㄔㄕㄖㄗㄘㄙㄚㄛㄜㄝㄞㄟㄠㄡㄢㄣㄤㄥㄦㄧㄨㄩˉˊˇˋ˙ ' -''' - -'''# zh_ja_mixture_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'AEINOQUabdefghijklmnoprstuvwyzʃʧʦɯɹəɥ⁼ʰ`→↓↑ ' -''' - -'''# sanskrit_cleaners -_pad = '_' -_punctuation = '।' -_letters = 'ँंःअआइईउऊऋएऐओऔकखगघङचछजझञटठडढणतथदधनपफबभमयरलळवशषसहऽािीुूृॄेैोौ्ॠॢ ' -''' - -'''# cjks_cleaners -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzʃʧʥʦɯɹəɥçɸɾβŋɦː⁼ʰ`^#*=→↓↑ ' -''' - -'''# thai_cleaners -_pad = '_' -_punctuation = '.!? ' -_letters = 'กขฃคฆงจฉชซฌญฎฏฐฑฒณดตถทธนบปผฝพฟภมยรฤลวศษสหฬอฮฯะัาำิีึืุูเแโใไๅๆ็่้๊๋์' -''' - -'''# cjke_cleaners2 -_pad = '_' -_punctuation = ',.!?-~…' -_letters = 'NQabdefghijklmnopstuvwxyzɑæʃʑçɯɪɔɛɹðəɫɥɸʊɾʒθβŋɦ⁼ʰ`^#*=ˈˌ→↓↑ ' -''' - -'''# shanghainese_cleaners -_pad = '_' -_punctuation = ',.!?…' -_letters = 'abdfghiklmnopstuvyzøŋȵɑɔɕəɤɦɪɿʑʔʰ̩̃ᴀᴇ15678 ' -''' - -'''# chinese_dialect_cleaners -_pad = '_' -_punctuation = ',.!?~…─' -_letters = '#Nabdefghijklmnoprstuvwxyzæçøŋœȵɐɑɒɓɔɕɗɘəɚɛɜɣɤɦɪɭɯɵɷɸɻɾɿʂʅʊʋʌʏʑʔʦʮʰʷˀː˥˦˧˨˩̥̩̃̚ᴀᴇ↑↓∅ⱼ ' -''' - -# Export all symbols: -symbols = [_pad] + list(_punctuation) + list(_letters) - -# Special symbol ids -SPACE_ID = symbols.index(" ") diff --git a/spaces/merve/data-leak/server-side/fill-in-the-blank/py/model_bert_large_export.py b/spaces/merve/data-leak/server-side/fill-in-the-blank/py/model_bert_large_export.py deleted file mode 100644 index 4619908ff52a7f4a76c6ed7a66907cbdd3a7c819..0000000000000000000000000000000000000000 --- a/spaces/merve/data-leak/server-side/fill-in-the-blank/py/model_bert_large_export.py +++ /dev/null @@ -1,19 +0,0 @@ -from transformers import (BertForMaskedLM, BertTokenizer) - -modelpath = 'bert-large-uncased-whole-word-masking' -model = BertForMaskedLM.from_pretrained(modelpath) - -model.save_pretrained('./bert-large-uncased-whole-word-masking') - - - - -# from transformers import (BertForMaskedLM, BertTokenizer) - -# modelpath = 'bert-large-uncased' -# model = BertForMaskedLM.from_pretrained(modelpath) - -# model.save_pretrained('./bert-large-uncased') - - - diff --git a/spaces/merve/fill-in-the-blank/public/measuring-diversity/columns-height.js b/spaces/merve/fill-in-the-blank/public/measuring-diversity/columns-height.js deleted file mode 100644 index 3933c17b4bb8abe209b3573bb436c53c47543b1b..0000000000000000000000000000000000000000 --- a/spaces/merve/fill-in-the-blank/public/measuring-diversity/columns-height.js +++ /dev/null @@ -1,177 +0,0 @@ -window.initColumns = function(id, metrics, measures){ - var c = d3.conventions({ - sel: d3.select(id).html('').st({width: 775, margin: '0px auto', left: 27}), - margin: {left: 260, top: 40}, - height: 600, - }) - - var sets = d3.range(numRows).map(i => { - var shapes = columnShapes[i] - shapes = _.sortBy(shapes, d => d.shape) - shapes = _.sortBy(shapes, d => d.size) - shapes = _.sortBy(shapes, d => d.color) - shapes = _.sortBy(shapes, d => d.color == 'green' ? 
0 : 1) - - - shapes.nG = d3.sum(shapes, d => d.color == 'green') - shapes.nB = d3.sum(shapes, d => d.color == 'blue') - shapes.nO = d3.sum(shapes, d => d.color == 'orange') - shapes.nR = d3.sum(shapes, d => d.color == 'red') - - shapes.forEach((d, i) => { - d.i = i - d.sizeVal = d.sizeVal < 1 ? .6 : 1 - }) - shapes.i = i - return shapes - }) - - var colW = 200 - var colWpad = 50 - var colH = 20 - var colHpad = 10 - var offsetW = -20 - - var colSel = c.svg.appendMany('g', measures) - .translate((d, i) => [.5 + i*(colW + colWpad) + offsetW, .5]) - - colSel.append('text').text(d => d.ranking_display_text) - .at({y: -20, textAnchor: 'middle', x: colW/2, fontWeight: 600, }) - - var rowSel = colSel.appendMany('g.row', sets) - .translate(d => d.i*(colH + colHpad), 1) - - var colMean = colSel.filter((d, i) => i === 0) - var colMin = colSel.filter((d, i) => i === 1) - var scoreLabelsMean = colMean.selectAll('.row').append('text') - .at({x: -5, y: 15, textAnchor: 'end'}) - .st({fontSize: '13px', opacity: .7}) - var scoreLabelsMin = colMin.selectAll('.row').append('text') - .at({x: 222, y: 15, textAnchor: 'end'}) - .st({fontSize: '13px', opacity: .7}) - - colSel.each(function(d, i){ - d.rowSel = d3.select(this).selectAll('.row') - - c.svg.append('marker') - .attr('id', 'arrow') - .attr('viewBox', '-10 -10 20 20') - .attr('markerWidth', 20) - .attr('markerHeight', 20) - .attr('orient', 'auto') - .append('path') - .attr('d', 'M-6.75,-6.75 L 0,0 L -6.75,6.75') - .at({fill: '#000'}) - - - if (i){ - var pathstr = ['M', 160, -25, 'C', 215, -25, 215, -25, 215, -5].join(' ') - } else{ - var pathstr = ['M', 35, -25, 'C', -20, -25, -20, -25, -20, -5].join(' ') - } - d3.select(this).append('path') - .at({stroke: '#000', fill: 'none', d: pathstr, markerEnd: 'url(#arrow)', strokeWidth: .6}) - }) - - - var s = colH - var p = 2 - - var l0Sel = c.svg.appendMany('path.set', sets).classed('set1', true) - .translate(d => [colW + offsetW, s/2 + .5]) - - drawRow(rowSel) - function drawRow(rowSel){ - rowSel.append('rect.set.no-stroke') - .at({x: -p, y: -p, width: colW + p*2, height: colH + p*2, fill: '#fff'}).classed('set1', true) - - rowSel.appendMany('g', d => d) - .translate(d => [d.i*s + s/2, s/2]) - .each(function(d){ - - var sOffset = 12 - var classNames = [d.shape, d.size, d.color, 'rank-item'].join(' ') - var shapeSel = d3.select(this).append('rect') - .at({ - x: -s/2, - y: -s/2 + (d.size == 'small' ? sOffset/2 : 0) - .5, - width: s - .5, - height: s - (d.size == 'small' ? 
sOffset : 0), - fill: d.fill, - class: classNames - }) - - if (d.shape == 'triangle'){ - var shapeSel = d3.select(this).append('circle') - .at({r: 2, fill: '#fff', stroke: '#000', strokeWidth: .5, class: classNames}) - } - }) - - } - - var setSel = c.svg.selectAll('.set1') - .on('mouseover', selectSet) - - sets.selected = sets[0] - function selectSet(set){ - sets.selected = set - sets.forEach(d => d.selected = d == set) - setSel - .classed('selected', d => d.selected) - .filter(d => d.selected) - .lower() - - rowSel.classed('selected', d => d.selected) - - sliders.render() - } - - - var sliders = makeSliders(metrics, sets, c, selectSet, drawRow, () => { - sets.forEach(shapes => { - shapes.score = metrics.map(m => { - var v = d3.sum(shapes, (d, i) => shapes[i][m.field] == m.key) - return Math.abs(m.target - v/shapes.length) - }) - }) - - measures.forEach(m => { - sets.forEach(shapes => { - shapes[m.str] = m.fn(shapes.score) - }) - _.sortBy(sets, d => d[m.str] + d.i/10000000)//.reverse() - .forEach((d, i) => d['i' + m.str] = i) - - m.rowSel.translate(d => d['i' + m.str]*(colH + colHpad), 1) - }) - - var p = 0 - l0Sel.at({d: d => [ - 'M', p, d['iUtilitarian']*(colH + colHpad), - 'L', colWpad - p, d['iEgalitarian']*(colH + colHpad), - ].join(' ')}) - - - scoreLabelsMean.text(d => { - return d3.format('.2f')(d['Utilitarian'])// + '%' - }) - scoreLabelsMin.text(d => { - return measures[1].ppFn(d['score']).replace('%', '')// + '%' - }) - }) - - sliders.render() - selectSet(_.sortBy(sets, d => d.iEgalitarian)[0]) -} -window.initColumns('#columns-height', metrics1, measures) -window.initColumns('#columns-height-disagree', metrics2, measures2) - -// Only highlight green items in the second ranking chart. -d3.select('#columns-height-disagree').selectAll('.rank-item').at({opacity: .3}) -d3.select('#columns-height-disagree').selectAll('.green').at({opacity: 1}) - -// Only highlight the green slider in the second ranking chart. -d3.select('#columns-height-disagree').selectAll('.slider').at({opacity: d => { - return d.key !== 'green' ? 
0.35: 1 -}}) - diff --git a/spaces/merve/uncertainty-calibration/public/private-and-fair/accuracy-v-privacy-class.js b/spaces/merve/uncertainty-calibration/public/private-and-fair/accuracy-v-privacy-class.js deleted file mode 100644 index 39daddb629006c967bfa8c3a6c1d43fc9887bc1b..0000000000000000000000000000000000000000 --- a/spaces/merve/uncertainty-calibration/public/private-and-fair/accuracy-v-privacy-class.js +++ /dev/null @@ -1,285 +0,0 @@ -var state = { - dataset_size: 15000, - threshold: .8, - label: 8 -} - -var sel = d3.select('.accuracy-v-privacy-class').html('') - .at({role: 'graphics-document', 'aria-label': `Line chart showing that high accuracy models can still perform poorly on some digit classes.`}) - -async function loadData(){ - var rawData = await util.getFile(`cns-cache/grid_${state.dataset_size}trainpoints_test_labels.csv`) - - rawData.forEach(d => { - delete d[''] - d.i = +d.i - d.label = +d.label - }) - - var aVal2Meta = {} - var metadata = await util.getFile('cns-cache/model_grid_test_accuracy.json') - metadata - .filter(d => d.dataset_size == state.dataset_size) - .forEach(d => aVal2Meta['aVal_' + d.aVal] = d) - - var allCols = d3.keys(rawData[0]) - .filter(d => d.includes('aVal')) - .map(key => { - var {epsilon, aVal} = aVal2Meta[key] - return {key, epsilon, aVal} - }) - - var byDigit = d3.nestBy(rawData, d => d.label) - byDigit.forEach(d => { - d.label = +d.key - }) - byDigit.forEach(digitClass => { - digitClass.cols = allCols.map(({key, epsilon}, colIndex) => { - return { - key, - colIndex, - epsilon, - digitClass, - label: digitClass.label, - accuracy: d3.mean(digitClass, d => d[key] > state.threshold) - } - }) - }) - - var data = _.flatten(byDigit.map(d => d.cols)) - .filter(d => util.epsilonExtent[1] <= d.epsilon && d.epsilon <= util.epsilonExtent[0]) - var byLabel = d3.nestBy(data, d => d.label) - byLabel.forEach((d, i) => { - d.label = d.key - }) - - return {data, byLabel} -} - - -async function initChart(){ - var {data, byLabel} = await loadData() - - var c = d3.conventions({ - sel: sel.append('div'), - height: 400, - margin: {bottom: 75, top: 5}, - layers: 'ds', - }) - - c.x = d3.scaleLog().domain(util.epsilonExtent).range(c.x.range()) - c.xAxis = d3.axisBottom(c.x).tickFormat(d => { - var rv = d + '' - if (rv.split('').filter(d => d !=0 && d != '.')[0] == 1) return rv - }) - - c.yAxis.tickFormat(d => d3.format('.0%')(d))//.ticks(8) - d3.drawAxis(c) - util.addAxisLabel(c, 'Higher Privacy →', '') - util.ggPlotBg(c, false) - c.layers[0].append('div') - .st({fontSize: 12, color: '#555', width: 120*2, textAlign: 'center', lineHeight: '1.3em', verticalAlign: 'top'}) - .translate([c.width/2 - 120, c.height + 45]) - .html('in ε') - - var line = d3.line().x(d => c.x(d.epsilon)).y(d => c.y(d.accuracy)) - - var lineSel = c.svg.append('g').appendMany('path.accuracy-line', byLabel) - .at({ - d: line, - fill: 'none', - stroke: '#000', - // opacity: 0, - }) - .on('mousemove', setActiveLabel) - - var circleSel = c.svg.append('g') - .appendMany('g.accuracy-circle', data) - .translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - .on('mousemove', setActiveLabel) - // .call(d3.attachTooltip) - - circleSel.append('circle') - .at({r: 7, stroke: '#fff'}) - - circleSel.append('text') - .text(d => d.label) - .at({textAnchor: 'middle', fontSize: 10, fill: '#fff', dy: '.33em'}) - - setActiveLabel(state) - function setActiveLabel({label}){ - lineSel - .classed('active', 0) - .filter(d => d.label == label) - .classed('active', 1) - .raise() - - circleSel - .classed('active', 0) - 
.filter(d => d.label == label) - .classed('active', 1) - .raise() - - state.label = label - } - - - async function updateDatasetSize(){ - var newData = await loadData() - data = newData.data - byLabel = newData.byLabel - - lineSel.data(byLabel) - .transition() - .at({d: line}) - - circleSel.data(data) - .transition() - .translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - - c.svg.select('text.annotation').remove() - } - - function updateThreshold(){ - data.forEach(d => { - d.accuracy = d3.mean(d.digitClass, e => e[d.key] > state.threshold) - }) - - lineSel.at({d: line}) - circleSel.translate(d => [c.x(d.epsilon), c.y(d.accuracy)]) - - c.svg.select('.y .axis-label').text(`Test Points With More Than ${d3.format('.2%')(state.threshold)} Confidence In Label`) - - c.svg.select('text.annotation').remove() - } - updateThreshold() - - return {c, updateDatasetSize, updateThreshold} -} - - -async function init(){ - sel.append('div.chart-title').text('High accuracy models can still perform poorly on some digit classes') - - var chart = await initChart() - - var buttonRowSel = sel.append('div.button-row') - .st({height: 50}) - - var buttonSel = buttonRowSel.append('div') - .st({width: 500}) - .append('span.chart-title').text('Training points') - .parent() - .append('div').st({display: 'inline-block', width: 300, marginLeft: 10}) - .append('div.digit-button-container.dataset_size') - .appendMany('div.button', [2000, 3750, 7500, 15000, 30000, 60000]) - .text(d3.format(',')) - .classed('active', d => d == state.dataset_size) - .on('click', d => { - buttonSel.classed('active', e => e == d) - state.dataset_size = d - chart.updateDatasetSize() - }) - - buttonRowSel.append('div.conf-slider') - .append('span.chart-title').text('Confidence threshold') - .parent() - .append('input.slider-native') - .at({ - type: 'range', - min: .0001, - max: .9999, - step: .0001, - value: state.threshold, - }) - .on('input', function(){ - state.threshold = this.value - chart.updateThreshold() - }) - - - function addSliders(){ - var width = 140 - var height = 30 - var color = '#000' - - var sliders = [ - {key: 'threshold', label: 'Confidence threshold', r: [.0001, .9999]}, - ] - sliders.forEach(d => { - d.value = state[d.key] - d.xScale = d3.scaleLinear().range([0, width]).domain(d.r).clamp(1) - }) - - d3.select('.conf-slider .slider-container').remove() - d3.select('.slider-native').remove() - - var svgSel = d3.select('.conf-slider').parent() - // .st({marginTop: 5, marginBottom: 5}) - .appendMany('div.slider-container', sliders) - .append('svg').at({width, height}) - .append('g').translate([10, 25]) - - var sliderSel = svgSel - .on('click', function(d){ - d.value = d.xScale.invert(d3.mouse(this)[0]) - renderSliders(d) - }) - .classed('slider', true) - .st({cursor: 'pointer'}) - - var textSel = sliderSel.append('text.annotation') - .at({y: -15, fontWeight: 300, textAnchor: 'middle', x: 180/2}) - - sliderSel.append('rect') - .at({width, height, y: -height/2, fill: 'rgba(0,0,0,0)'}) - - sliderSel.append('path').at({ - d: `M 0 -.5 H ${width}`, - stroke: color, - strokeWidth: 1 - }) - - var leftPathSel = sliderSel.append('path').at({ - d: `M 0 -.5 H ${width}`, - stroke: color, - strokeWidth: 3 - }) - - var drag = d3.drag() - .on('drag', function(d){ - var x = d3.mouse(this)[0] - d.value = d.xScale.invert(x) - - renderSliders(d) - }) - - var circleSel = sliderSel.append('circle').call(drag) - .at({r: 7, stroke: '#000'}) - - function renderSliders(d){ - if (d) state[d.key] = d.value - - circleSel.at({cx: d => d.xScale(d.value)}) - 
leftPathSel.at({d: d => `M 0 -.5 H ${d.xScale(d.value)}`}) - textSel - .at({x: d => d.xScale(d.value)}) - .text(d => d3.format('.2%')(d.value)) - chart.updateThreshold() - } - renderSliders() - } - addSliders() - - - chart.c.svg.append('text.annotation') - .translate([505, 212]) - .tspans(d3.wordwrap(`8s are correctly predicted with high confidence much more rarely than other digits`, 25), 12) - .at({textAnchor: 'end'}) - -} -init() - - - - diff --git a/spaces/mikkoar/marco/tests/kblob.ts b/spaces/mikkoar/marco/tests/kblob.ts deleted file mode 100644 index 9e15b41c1c94a690beb61b23cdb42fc78767ccd2..0000000000000000000000000000000000000000 --- a/spaces/mikkoar/marco/tests/kblob.ts +++ /dev/null @@ -1,27 +0,0 @@ -import FormData from 'form-data' - -import { fetch } from '@/lib/isomorphic' - -const formData = new FormData() - -const knowledgeRequest = {"imageInfo":{"url":"https://www.baidu.com/img/PCfb_5bf082d29588c07f842ccde3f97243ea.png"},"knowledgeRequest":{"invokedSkills":["ImageById"],"subscriptionId":"Bing.Chat.Multimodal","invokedSkillsRequestData":{"enableFaceBlur":true},"convoData":{"convoid":"51D|BingProdUnAuthenticatedUsers|E3DCA904FF236C67C3450163BCEC64CFF3F618CC8A4AFD75FD518F5ED0ADA080","convotone":"Creative"}}} - -formData.append('knowledgeRequest', JSON.stringify(knowledgeRequest)) - - -fetch('https://bing.vcanbb.top/images/kblob', - { - method: 'POST', - body: formData.getBuffer(), - headers: { - "sec-ch-ua": "\"Not/A)Brand\";v=\"99\", \"Google Chrome\";v=\"115\", \"Chromium\";v=\"115\"", - "sec-ch-ua-mobile": "?0", - "sec-ch-ua-platform": "\"Windows\"", - "Referer": "https://bing.vcanbb.top/web/index.html", - "Referrer-Policy": "origin-when-cross-origin", - ...formData.getHeaders() - } - - } -).then(res => res.text()) -.then(res => console.log('res', res)) diff --git a/spaces/mila-quebec/SAI/src/cfg.py b/spaces/mila-quebec/SAI/src/cfg.py deleted file mode 100644 index 5228f9e6ea881b68330617999f6a898075db34c5..0000000000000000000000000000000000000000 --- a/spaces/mila-quebec/SAI/src/cfg.py +++ /dev/null @@ -1,205 +0,0 @@ -import logging -import os -from pathlib import Path - -import openai - -from buster.busterbot import Buster, BusterConfig -from buster.completers import ChatGPTCompleter, DocumentAnswerer -from buster.formatters.documents import DocumentsFormatterJSON -from buster.formatters.prompts import PromptFormatter -from buster.retriever import Retriever, ServiceRetriever -from buster.tokenizers import GPTTokenizer -from buster.validators import QuestionAnswerValidator, Validator -from src.app_utils import get_logging_db_name, init_db - -logger = logging.getLogger(__name__) -logging.basicConfig(level=logging.INFO) - -# Note: The app will not launch if the environment variables aren't set. This is intentional. -# Set OpenAI Configurations -openai.api_key = os.environ["OPENAI_API_KEY"] -openai.organization = os.environ["OPENAI_ORGANIZATION"] - -# Pinecone Configurations -PINECONE_API_KEY = os.environ["PINECONE_API_KEY"] -PINECONE_ENV = "asia-southeast1-gcp" -PINECONE_INDEX = "oecd" -PINECONE_NAMESPACE = "data-2023-11-02" - -# MongoDB Configurations -MONGO_URI = os.environ["MONGO_URI"] - -# Instance Configurations -INSTANCE_NAME = os.environ["INSTANCE_NAME"] # e.g., huggingface, heroku -INSTANCE_TYPE = os.environ["INSTANCE_TYPE"] # e.g. 
["dev", "prod", "local"] - -# MongoDB Databases -MONGO_DATABASE_LOGGING = get_logging_db_name(INSTANCE_TYPE) # Where all interactions will be stored -MONGO_DATABASE_DATA = "data-2023-11-02" # Where documents are stored - -# Check that data chunks are aligned on Mongo and Pinecone -if MONGO_DATABASE_DATA != PINECONE_NAMESPACE: - logger.warning( - f"""The collection is different on pinecone and Mongo, is this expected? - - {MONGO_DATABASE_DATA=} - {PINECONE_NAMESPACE=} - """ - ) - -# MongoDB Collections -# Naming convention: Collection name followed by purpose. -MONGO_COLLECTION_FEEDBACK = "feedback" # Feedback form -MONGO_COLLECTION_INTERACTION = "interaction" # User interaction -MONGO_COLLECTION_FLAGGED = "flagged" # Flagged interactions - -# Make the connections to the databases -mongo_db = init_db(mongo_uri=MONGO_URI, db_name=MONGO_DATABASE_LOGGING) - - -# Set relative path to data dir -current_dir = Path(__file__).resolve().parent -data_dir = current_dir.parent / "data" # ../data - -app_name = "SAI ️💬" - -# sample questions -example_questions = [ - "Are there any AI policies related to AI adoption in the public sector in the UK?", - "How is Canada evaluating the success of its AI strategy?", - "Has the EU proposed specific legislation on AI?", -] - - -disclaimer = f""" -**Use the feedback form on the right to help us improve** 👉 - -**Always verify the integrity of {app_name} responses using the sources provided below** 👇 -""" - -buster_cfg = BusterConfig( - validator_cfg={ - "unknown_response_templates": [ - "I cannot answer this question based on the information I have available", - "The information I have access to does not address the question", - ], - "unknown_threshold": 0.84, - "embedding_model": "text-embedding-ada-002", - "use_reranking": True, - "invalid_question_response": """Thank you for your question! Unfortunately, I haven't been able to find the information you're looking for. Your question might be: - * Outside the scope of AI policy documents - * Too recent (i.e. draft policies) or about the future - * Building on my previous answer (I have no memory of previous conversations) - * Vague (i.e not affiliated with a specific country) - * Asking the model to perform its own assessment of the policies (i.e. What is the best/worst AI policy) - You can always try rewording your question and ask again! - """, - "check_question_prompt": """You are a chatbot answering questions on behalf of the OECD specifically on AI policies. -Your first job is to determine whether or not a question is valid, and should be answered. -For a question to be considered valid, it must be related to AI and policies. -More general questions are not considered valid, even if you might know the response. -A user will submit a question. Respond 'true' if it is valid, respond 'false' if it is invalid. -Do not judge the tone of the question. As long as it is relevant to the topic, respond 'true'. - -For example: -Q: What policies did countries like Canada put in place with respect to artificial intelligence? -true - -Q: What policies are put in place to ensure the wellbeing of agriculture? 
-false - -Q: -""", - "completion_kwargs": { - "model": "gpt-3.5-turbo-0613", - "stream": False, - "temperature": 0, - }, - }, - retriever_cfg={ - "pinecone_api_key": PINECONE_API_KEY, - "pinecone_env": PINECONE_ENV, - "pinecone_index": PINECONE_INDEX, - "pinecone_namespace": PINECONE_NAMESPACE, - "mongo_uri": MONGO_URI, - "mongo_db_name": MONGO_DATABASE_DATA, - "top_k": 3, - "thresh": 0.7, - "max_tokens": 3000, - "embedding_model": "text-embedding-ada-002", - }, - documents_answerer_cfg={ - "no_documents_message": "No documents are available for this question.", - }, - completion_cfg={ - "completion_kwargs": { - "model": "gpt-3.5-turbo-0613", - "stream": True, - "temperature": 0, - }, - }, - tokenizer_cfg={ - "model_name": "gpt-3.5-turbo-0613", - }, - documents_formatter_cfg={ - "max_tokens": 3500, - "columns": ["content", "source", "title"], - }, - prompt_formatter_cfg={ - "max_tokens": 4000, - "text_before_docs": ( - "You are a chatbot assistant answering questions about artificial intelligence (AI) policies and laws. " - "You represent the OECD AI Policy Observatory. " - "You can only respond to a question if the content necessary to answer the question is contained in the information provided to you. " - "The information will be provided in a json format. " - "If the answer is found in the information provided, summarize it in a helpful way to the user. " - "If it isn't, simply reply that you cannot answer the question. " - "Do not refer to the documents directly, but use the information provided within it to answer questions. " - "Always cite which document you pulled information from. " - "Do not say 'according to the documentation' or related phrases. " - "Do not mention the documents directly, but use the information available within them to answer the question. " - "You are forbidden from using the expressions 'according to the documentation' and 'the provided documents'. " - "Here is the information available to you in a json table:\n" - ), - "text_after_docs": ( - "REMEMBER:\n" - "You are a chatbot assistant answering questions about artificial intelligence (AI) policies and laws. " - "You represent the OECD AI Policy Observatory. " - "Here are the rules you must follow:\n" - "1) You must only respond with information contained in the documents above. Say you do not know if the information is not provided.\n" - "2) Make sure to format your answers in Markdown format, including code block and snippets.\n" - "3) Do not reference any links, urls or hyperlinks in your answers.\n" - "4) Do not mention the documentation directly, but use the information provided within it to answer questions.\n" - "5) You are forbidden from using the expressions 'according to the documentation' and 'the provided documents'.\n" - "6) If you do not know the answer to a question, or if it is completely irrelevant to the library usage, simply reply with:\n" - "'I'm sorry, but I am an AI language model trained to assist with questions related to AI policies and laws. I cannot answer that question as it is not relevant to AI policies and laws. Is there anything else I can assist you with?'\n" - "For example:\n" - "Q: What is the meaning of life for a qa bot?\n" - "A: I'm sorry, but I am an AI language model trained to assist with questions related to AI policies and laws. I cannot answer that question as it is not relevant to AI policies and laws. Is there anything else I can assist you with?\n" - "7) Always cite which document you pulled information from. Do this directly in the text. 
You can refer directly to the title in-line with your answer. Make it clear when information came directly from a source. " - "8) If the information available to you does not directly address the question, simply state that you do not have the information required to answer. Do not summarize what is available to you. " - "For example, say: 'I cannot answer this question based on the information I have available.'\n" - "9) Keep a neutral tone, and put things into context.\n" - "For example:\n" - "Q: What do African countries say about data privacy?\n" - "A: There are currently 28 countries in Africa that have personal data protection legislation. However, limited resources, a lack of clear leadership in the region and localized approaches with regard to data-driven technology run the risk of creating an unfavorable environment for data privacy regulation from a business and data rights standpoint. For example, without policies for data sharing across countries, multinational data companies may choose to move their foreign direct investment to more favorable destinations. African countries also grapple with issues of higher priority, which stunts progress in the field of data privacy. According to one regional policy expert, “a government that is still battling [to set up a] school feeding programme in 2019 is not going to be the one to prioritise data and data protection policies with respect to AI.”\n" - "Now answer the following question:\n" - ), - }, -) - - -def setup_buster(buster_cfg): - retriever: Retriever = ServiceRetriever(**buster_cfg.retriever_cfg) - tokenizer = GPTTokenizer(**buster_cfg.tokenizer_cfg) - document_answerer: DocumentAnswerer = DocumentAnswerer( - completer=ChatGPTCompleter(**buster_cfg.completion_cfg), - documents_formatter=DocumentsFormatterJSON(tokenizer=tokenizer, **buster_cfg.documents_formatter_cfg), - prompt_formatter=PromptFormatter(tokenizer=tokenizer, **buster_cfg.prompt_formatter_cfg), - **buster_cfg.documents_answerer_cfg, - ) - validator: Validator = QuestionAnswerValidator(**buster_cfg.validator_cfg) - - buster: Buster = Buster(retriever=retriever, document_answerer=document_answerer, validator=validator) - return buster diff --git a/spaces/mindwrapped/gpt2-lotr-fellowship/README.md b/spaces/mindwrapped/gpt2-lotr-fellowship/README.md deleted file mode 100644 index 5518f61e8b0a9f4cc61a1ce8fe2ef9a3770899f4..0000000000000000000000000000000000000000 --- a/spaces/mindwrapped/gpt2-lotr-fellowship/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Gpt2 Lotr Fellowship -emoji: 📚 -colorFrom: purple -colorTo: pink -sdk: gradio -sdk_version: 3.0.18 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/summarize/+server.ts b/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/summarize/+server.ts deleted file mode 100644 index 18c599c09473ebabb6bbeb3adda0205b5bc9bd31..0000000000000000000000000000000000000000 --- a/spaces/mithril-security/blind_chat/src/routes/conversation/[id]/summarize/+server.ts +++ /dev/null @@ -1,56 +0,0 @@ -import { buildPrompt } from "$lib/buildPrompt"; -import { authCondition } from "$lib/server/auth"; -import { collections } from "$lib/server/database"; -import { generateFromDefaultEndpoint } from "$lib/server/generateFromDefaultEndpoint"; -import { defaultModel } from "$lib/server/models"; -import { error } from "@sveltejs/kit"; - -export async 
function POST({ params, locals }) { - /*const convId = new ObjectId(params.id); - - const conversation = await collections.conversations.findOne({ - _id: convId, - ...authCondition(locals), - }); - - if (!conversation) { - throw error(404, "Conversation not found"); - } - - const firstMessage = conversation.messages.find((m) => m.from === "user"); - - const userPrompt = - `Please summarize the following message as a single sentence of less than 5 words:\n` + - firstMessage?.content; - - const prompt = await buildPrompt({ - messages: [{ from: "user", content: userPrompt }], - model: defaultModel, - }); - const generated_text = await generateFromDefaultEndpoint(prompt); - - if (generated_text) { - await collections.conversations.updateOne( - { - _id: convId, - ...authCondition(locals), - }, - { - $set: { title: generated_text }, - } - ); - } - - return new Response( - JSON.stringify( - generated_text - ? { - title: generated_text, - } - : {} - ), - { headers: { "Content-Type": "application/json" } } - );*/ - - return new Response(JSON.stringify({}), { headers: { "Content-Type": "application/json" } }); -} diff --git a/spaces/momegas/megas-bot/app.py b/spaces/momegas/megas-bot/app.py deleted file mode 100644 index 6085cfd5edeb774220075a51f63ed2cb8c23202b..0000000000000000000000000000000000000000 --- a/spaces/momegas/megas-bot/app.py +++ /dev/null @@ -1,30 +0,0 @@ -from megabots import bot, memory, create_interface - -prompt = """ -You are an AI assistant that is called Μegabot and answers everything in Greek. -You never leave, half finished sentences. -You give lengthy and concise answers depending on the matter. -You try to give complete answers depending on your knowledge or the context below. -You also like to make jokes in your answers - -Context: -{context} - -Conversation History: -{history} -Human: {question} -Μegabot: -""" - -qnabot = bot( - "qna-over-docs", - prompt=prompt, - memory=memory("conversation-buffer-window", k=5), - verbose=True, -) - -qnabot.save_index("index.pkl") - - -iface = create_interface(qnabot) -iface.launch() diff --git a/spaces/mrneuralnet/P-PD/global_classifier.py b/spaces/mrneuralnet/P-PD/global_classifier.py deleted file mode 100644 index b634772d9d51d076a2a43fa5a4da54c104fdcea0..0000000000000000000000000000000000000000 --- a/spaces/mrneuralnet/P-PD/global_classifier.py +++ /dev/null @@ -1,67 +0,0 @@ -import argparse -import os -import sys -import torch -from PIL import Image -import torchvision.transforms as transforms - -from networks.drn_seg import DRNSub -from utils.tools import * -from utils.visualize import * - - -def load_classifier(model_path, gpu_id): - if torch.cuda.is_available() and gpu_id != -1: - device = 'cuda:{}'.format(gpu_id) - else: - device = 'cpu' - model = DRNSub(1) - state_dict = torch.load(model_path, map_location='cpu') - model.load_state_dict(state_dict['model']) - model.to(device) - model.device = device - model.eval() - return model - - -tf = transforms.Compose([transforms.ToTensor(), - transforms.Normalize(mean=[0.485, 0.456, 0.406], - std=[0.229, 0.224, 0.225])]) -def classify_fake(model, img_path, no_crop=False, - model_file='utils/dlib_face_detector/mmod_human_face_detector.dat'): - # Data preprocessing - im_w, im_h = Image.open(img_path).size - if no_crop: - face = Image.open(img_path).convert('RGB') - else: - faces = face_detection(img_path, verbose=False, model_file=model_file) - if len(faces) == 0: - print("no face detected by dlib, exiting") - sys.exit() - face, box = faces[0] - face = resize_shorter_side(face, 400)[0] - 
face_tens = tf(face).to(model.device) - - # Prediction - with torch.no_grad(): - prob = model(face_tens.unsqueeze(0))[0].sigmoid().cpu().item() - return prob - - -if __name__ == '__main__': - parser = argparse.ArgumentParser() - parser.add_argument( - "--input_path", required=True, help="the model input") - parser.add_argument( - "--model_path", required=True, help="path to the drn model") - parser.add_argument( - "--gpu_id", default='0', help="the id of the gpu to run model on") - parser.add_argument( - "--no_crop", - action="store_true", - help="do not use a face detector, instead run on the full input image") - args = parser.parse_args() - - model = load_classifier(args.model_path, args.gpu_id) - prob = classify_fake(model, args.input_path, args.no_crop) - print("Probibility being modified by Photoshop FAL: {:.2f}%".format(prob*100)) diff --git a/spaces/mshukor/UnIVAL/fairseq/examples/truncated_bptt/transformer_xl_model.py b/spaces/mshukor/UnIVAL/fairseq/examples/truncated_bptt/transformer_xl_model.py deleted file mode 100644 index a6c8b25a07276c2ee30c0aa5f0e4b0a2837ed5ca..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/examples/truncated_bptt/transformer_xl_model.py +++ /dev/null @@ -1,155 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import logging -from dataclasses import dataclass, field -from typing import Dict, List, Optional - -import torch -from fairseq.dataclass import FairseqDataclass -from fairseq.models import ( - FairseqIncrementalDecoder, - FairseqLanguageModel, - register_model, -) -from fairseq.modules.checkpoint_activations import checkpoint_wrapper -from omegaconf import II - - -logger = logging.getLogger(__name__) - - -@dataclass -class TransformerXLConfig(FairseqDataclass): - # defaults come from the original Transformer-XL code - cutoffs: List[int] = field(default_factory=lambda: [20000, 40000, 200000]) - d_model: int = 500 - n_head: int = 10 - d_head: int = 50 - d_inner: int = 1000 - div_val: int = 1 - n_layer: int = 12 - mem_len: int = 0 - clamp_len: int = -1 - same_length: bool = False - dropout: float = 0.0 - dropatt: float = 0.0 - checkpoint_activations: bool = False - offload_activations: bool = False - max_target_positions: int = II("task.max_target_positions") - - -@register_model("transformer_xl", dataclass=TransformerXLConfig) -class TransformerXLLanguageModel(FairseqLanguageModel): - @classmethod - def build_model(cls, cfg: TransformerXLConfig, task): - return cls(TransformerXLDecoder(cfg, task)) - - -class TransformerXLDecoder(FairseqIncrementalDecoder): - def __init__(self, cfg, task): - try: - from transformers.models.transfo_xl import ( - TransfoXLConfig, - TransfoXLLMHeadModel, - ) - except ImportError: - from transformers.configuration_transfo_xl import TransfoXLConfig - from transformers.modeling_transfo_xl import TransfoXLLMHeadModel - - super().__init__(task.target_dictionary) - self.cfg = cfg - - # remove any cutoffs larger than the vocab size - cutoffs = [ - cutoff for cutoff in cfg.cutoffs if cutoff < len(task.target_dictionary) - ] - - config = TransfoXLConfig( - vocab_size=len(task.target_dictionary), - cutoffs=cutoffs, - d_model=cfg.d_model, - d_embed=cfg.d_model, - n_head=cfg.n_head, - d_head=cfg.d_head, - d_inner=cfg.d_inner, - div_val=cfg.div_val, - n_layer=cfg.n_layer, - mem_len=cfg.mem_len, - clamp_len=cfg.clamp_len, - same_length=cfg.same_length, - 
dropout=cfg.dropout, - dropatt=cfg.dropatt, - ) - logger.info(config) - self.model = TransfoXLLMHeadModel(config) - - # Workaround a bug in huggingface's ``ProjectedAdaptiveLogSoftmax`` - # which adds ``None`` values to an ``nn.ParameterList``, which is not - # supported in PyTorch. Instead we can replace this with an - # ``nn.ModuleList``, which does support ``None`` values. - try: - if all(p is None for p in self.model.crit.out_projs._parameters.values()): - self.model.crit.out_projs = torch.nn.ModuleList( - [None] * len(self.model.crit.out_projs._parameters) - ) - except Exception: - pass - - if cfg.checkpoint_activations or cfg.offload_activations: - for i in range(len(self.model.transformer.layers)): - self.model.transformer.layers[i] = checkpoint_wrapper( - self.model.transformer.layers[i], - offload_to_cpu=cfg.offload_activations, - ) - # TODO: may save mem to wrap(layer.pos_ff.CoreNet[3]) - - self._mems = None - - def forward( - self, - src_tokens, - src_lengths=None, # unused - incremental_state: Optional[Dict[str, List[torch.Tensor]]] = None, - encoder_out=None, - ): - if incremental_state is not None: # used during inference - mems = self.get_incremental_state(incremental_state, "mems") - src_tokens = src_tokens[:, -1:] # only keep the most recent token - else: - mems = self._mems - - output = self.model( - input_ids=src_tokens, - mems=mems, - return_dict=False, - ) - - if len(output) >= 2: - if incremental_state is not None: - self.set_incremental_state(incremental_state, "mems", output[1]) - else: - self._mems = output[1] - - return (output[0],) - - def max_positions(self): - return self.cfg.max_target_positions - - def reorder_incremental_state( - self, - incremental_state: Dict[str, Dict[str, Optional[torch.Tensor]]], - new_order: torch.Tensor, - ): - """Reorder incremental state. - - This will be called when the order of the input has changed from the - previous time step. A typical use case is beam search, where the input - order changes between time steps based on the selection of beams. - """ - mems = self.get_incremental_state(incremental_state, "mems") - if mems is not None: - new_mems = [mems_i.index_select(1, new_order) for mems_i in mems] - self.set_incremental_state(incremental_state, "mems", new_mems) diff --git a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/layer_norm.py b/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/layer_norm.py deleted file mode 100644 index 234609d9e213a650e0032aaa0ca0462a818bfead..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/fairseq/fairseq/modules/layer_norm.py +++ /dev/null @@ -1,50 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. 
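# Editor's note (added comment, not part of the original source): the LayerNorm() factory
# defined below returns apex's FusedLayerNorm when apex is installed, CUDA is available,
# and the module is not being exported or TorchScript-scripted; otherwise it falls back to
# torch.nn.LayerNorm. Fp32LayerNorm runs the normalization in float32 and casts the result
# back to the input dtype, which is typically done for numerical stability under mixed
# precision.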
- -import torch -import torch.nn as nn -import torch.nn.functional as F - - -try: - from apex.normalization import FusedLayerNorm as _FusedLayerNorm - - has_fused_layernorm = True - - class FusedLayerNorm(_FusedLayerNorm): - @torch.jit.unused - def forward(self, x): - if not x.is_cuda: - return super().forward(x) - else: - with torch.cuda.device(x.device): - return super().forward(x) - - -except ImportError: - has_fused_layernorm = False - - -def LayerNorm(normalized_shape, eps=1e-5, elementwise_affine=True, export=False): - if torch.jit.is_scripting(): - export = True - if not export and torch.cuda.is_available() and has_fused_layernorm: - return FusedLayerNorm(normalized_shape, eps, elementwise_affine) - return torch.nn.LayerNorm(normalized_shape, eps, elementwise_affine) - - -class Fp32LayerNorm(nn.LayerNorm): - def __init__(self, *args, **kwargs): - super().__init__(*args, **kwargs) - - def forward(self, input): - output = F.layer_norm( - input.float(), - self.normalized_shape, - self.weight.float() if self.weight is not None else None, - self.bias.float() if self.bias is not None else None, - self.eps, - ) - return output.type_as(input) diff --git a/spaces/mshukor/UnIVAL/preprocess/.ipynb_checkpoints/create_tsv_files-checkpoint.py b/spaces/mshukor/UnIVAL/preprocess/.ipynb_checkpoints/create_tsv_files-checkpoint.py deleted file mode 100644 index 7f3cb08ffdc59e31c8f61abde78420f16a68539a..0000000000000000000000000000000000000000 --- a/spaces/mshukor/UnIVAL/preprocess/.ipynb_checkpoints/create_tsv_files-checkpoint.py +++ /dev/null @@ -1,133 +0,0 @@ -from utils import get_tsv_data_from_jsons, create_imagenet_txt_files -import csv -from io import StringIO -from tqdm import tqdm - - -# with image conversion -# datasets = ['/data/mshukor/data/our_albef_data/json_pretrain/vg_albef.json', -# '/data/mshukor/data/our_albef_data/json_pretrain/sbu.json', -# ] - -# output_paths = ['/data/mshukor/data/ofa/pretrain_ours/vg_albef.tsv', -# '/data/mshukor/data/ofa/pretrain_ours/sbu.tsv', -# ] - -# task_types = ['caption', -# 'caption'] - -# start_id = 566747 -# for data, task_type, output_path in zip(datasets, task_types, output_paths): - - - -# tsvs = get_tsv_data_from_jsons([data], start_id, [task_type]) - -# start_id = tsvs[-1][0] + 1 - -# print("save tsv to:", output_path) - -# with open(output_path, 'w', newline='') as f_output: -# csv_output = csv.writer(f_output, delimiter='\t') -# for t in tqdm(tsvs): -# csv_output.writerow(t) - -######################################################## -# without image conversion - -# datasets = ['/data/mshukor/data/our_albef_data/json_pretrain/coco_karp.json', -# '/data/mshukor/data/our_albef_data/json_pretrain/vg_albef.json', -# '/data/mshukor/data/our_albef_data/json_pretrain/sbu.json', -# '/data/mshukor/data/our_albef_data/json_pretrain/cc3m.json'] - -# start_id = 0 -# task_types = ['caption', -# 'caption', -# 'caption', -# 'caption'] - -# tsvs = get_tsv_data_from_jsons(datasets, start_id, task_types, convert_images=False) - -# output_path = '/data/mshukor/data/ofa/pretrain_ours/vision_language_4m.tsv' - -# with open(output_path, 'w', newline='') as f_output: -# csv_output = csv.writer(f_output, delimiter='\t') - -# for t in tqdm(tsvs): -# csv_output.writerow(t) - - - -######################################################## - - -# datasets = [ -# '/data/mshukor/data/our_albef_data/json_pretrain/coco_karp.json', -# '/data/mshukor/data/our_albef_data/json_pretrain/vg_albef.json', -# '/data/mshukor/data/our_albef_data/json_pretrain/sbu.json', -# 
'/data/mshukor/data/our_albef_data/json_pretrain/cc3m.json', - -# ['/data/mshukor/data/refcoco/refcoco+/refs(unc).p', '/data/mshukor/data/refcoco/refcoco+/instances.json'], - -# '/data/mshukor/data/our_albef_data/data/vqa_train.json', -# ] - -# start_id = 0 -# task_types = ['caption', -# 'caption', -# 'caption', -# 'caption', -# 'visual_grounding', -# 'qa',] - -# tsvs = get_tsv_data_from_jsons(datasets, start_id, task_types, convert_images=False) - - -# output_path = '/data/mshukor/data/ofa/pretrain_ours/vision_language_mini.tsv' - -# with open(output_path, 'w', newline='') as f_output: -# csv_output = csv.writer(f_output, delimiter='\t') - -# for t in tqdm(tsvs): -# csv_output.writerow(t) - - - -#### imagenet - -path_data = '/data/mshukor/data/imagenet/val' -output_path = '/data/mshukor/data/ofa/pretrain_ours/imagenet_val.txt' - - -create_imagenet_txt_files(path_data, output_path) - - - -####### object detection - - - - -from preprocess.utils import get_tsv_data_from_jsons - -datasets = [ - ['coco', '/data/mshukor/data/coco/annotations/instances_train2014.json'], - ['vg', '/data/mshukor/data/visual_genome/annotations/objects.json', '/data/mshukor/data/visual_genome/images'], -] - -start_id = 0 -task_types = ['detection', - 'detection',] - - - -tsvs = get_tsv_data_from_jsons(datasets, start_id, task_types, convert_images=False) - - -output_path = '/data/mshukor/data/ofa/pretrain_ours/detection_mini.tsv' - -with open(output_path, 'w', newline='') as f_output: - csv_output = csv.writer(f_output, delimiter='\t') - - for t in tqdm(tsvs): - csv_output.writerow(t) \ No newline at end of file diff --git a/spaces/mthsk/sovits-100orangejuice/cluster/train_cluster.py b/spaces/mthsk/sovits-100orangejuice/cluster/train_cluster.py deleted file mode 100644 index 4ac025d400414226e66849407f477ae786c3d5d3..0000000000000000000000000000000000000000 --- a/spaces/mthsk/sovits-100orangejuice/cluster/train_cluster.py +++ /dev/null @@ -1,89 +0,0 @@ -import os -from glob import glob -from pathlib import Path -import torch -import logging -import argparse -import torch -import numpy as np -from sklearn.cluster import KMeans, MiniBatchKMeans -import tqdm -logging.basicConfig(level=logging.INFO) -logger = logging.getLogger(__name__) -import time -import random - -def train_cluster(in_dir, n_clusters, use_minibatch=True, verbose=False): - - logger.info(f"Loading features from {in_dir}") - features = [] - nums = 0 - for path in tqdm.tqdm(in_dir.glob("*.soft.pt")): - features.append(torch.load(path).squeeze(0).numpy().T) - # print(features[-1].shape) - features = np.concatenate(features, axis=0) - print(nums, features.nbytes/ 1024**2, "MB , shape:",features.shape, features.dtype) - features = features.astype(np.float32) - logger.info(f"Clustering features of shape: {features.shape}") - t = time.time() - if use_minibatch: - kmeans = MiniBatchKMeans(n_clusters=n_clusters,verbose=verbose, batch_size=4096, max_iter=80).fit(features) - else: - kmeans = KMeans(n_clusters=n_clusters,verbose=verbose).fit(features) - print(time.time()-t, "s") - - x = { - "n_features_in_": kmeans.n_features_in_, - "_n_threads": kmeans._n_threads, - "cluster_centers_": kmeans.cluster_centers_, - } - print("end") - - return x - - -if __name__ == "__main__": - - parser = argparse.ArgumentParser() - parser.add_argument('--dataset', type=Path, default="./dataset/44k", - help='path of training data directory') - parser.add_argument('--output', type=Path, default="logs/44k", - help='path of model output directory') - - args = parser.parse_args() - - 
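    # Editor's note (added comment, not in the original file): the block below fits one
    # KMeans model per speaker sub-directory of `--dataset` (MiniBatchKMeans by default),
    # clustering the "*.soft.pt" content features, then collects every speaker's cluster
    # centers into a single dict keyed by speaker name and saves it as
    # `kmeans_10000.pt` under `--output`.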
checkpoint_dir = args.output - dataset = args.dataset - n_clusters = 10000 - - ckpt = {} - for spk in os.listdir(dataset): - if os.path.isdir(dataset/spk): - print(f"train kmeans for {spk}...") - in_dir = dataset/spk - x = train_cluster(in_dir, n_clusters, verbose=False) - ckpt[spk] = x - - checkpoint_path = checkpoint_dir / f"kmeans_{n_clusters}.pt" - checkpoint_path.parent.mkdir(exist_ok=True, parents=True) - torch.save( - ckpt, - checkpoint_path, - ) - - - # import cluster - # for spk in tqdm.tqdm(os.listdir("dataset")): - # if os.path.isdir(f"dataset/{spk}"): - # print(f"start kmeans inference for {spk}...") - # for feature_path in tqdm.tqdm(glob(f"dataset/{spk}/*.discrete.npy", recursive=True)): - # mel_path = feature_path.replace(".discrete.npy",".mel.npy") - # mel_spectrogram = np.load(mel_path) - # feature_len = mel_spectrogram.shape[-1] - # c = np.load(feature_path) - # c = utils.tools.repeat_expand_2d(torch.FloatTensor(c), feature_len).numpy() - # feature = c.T - # feature_class = cluster.get_cluster_result(feature, spk) - # np.save(feature_path.replace(".discrete.npy", ".discrete_class.npy"), feature_class) - - diff --git a/spaces/mthsk/sovits-models-misc/data_utils.py b/spaces/mthsk/sovits-models-misc/data_utils.py deleted file mode 100644 index 7c76fd1c3a45b8304d916161718c7763874f3e35..0000000000000000000000000000000000000000 --- a/spaces/mthsk/sovits-models-misc/data_utils.py +++ /dev/null @@ -1,155 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import modules.commons as commons -import utils -from modules.mel_processing import spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text - -# import h5py - - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths, hparams, all_in_mem: bool = False): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.spec_len = hparams.train.max_speclen - self.spk_map = hparams.spk - - random.seed(1234) - random.shuffle(self.audiopaths) - - self.all_in_mem = all_in_mem - if self.all_in_mem: - self.cache = [self.get_audio(p[0]) for p in self.audiopaths] - - def get_audio(self, filename): - filename = filename.replace("\\", "/") - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - - # Ideally, all data generated after Mar 25 should have .spec.pt - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split("/")[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - f0 = np.load(filename + ".f0.npy") - f0, uv = utils.interpolate_f0(f0) - f0 = torch.FloatTensor(f0) - uv = torch.FloatTensor(uv) - - c = torch.load(filename+ ".soft.pt") - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[0]) - - - lmin = min(c.size(-1), spec.size(-1)) - assert abs(c.size(-1) - spec.size(-1)) < 3, (c.size(-1), spec.size(-1), f0.shape, filename) - assert abs(audio_norm.shape[1]-lmin * self.hop_length) < 3 * self.hop_length - spec, c, f0, uv = spec[:, :lmin], c[:, :lmin], f0[:lmin], uv[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - - return c, f0, spec, audio_norm, spk, uv - - def random_slice(self, c, f0, spec, audio_norm, spk, uv): - # if spec.shape[1] < 30: - # print("skip too short audio:", filename) - # return None - if spec.shape[1] > 800: - start = random.randint(0, spec.shape[1]-800) - end = start + 790 - spec, c, f0, uv = spec[:, start:end], c[:, start:end], f0[start:end], uv[start:end] - audio_norm = audio_norm[:, start * self.hop_length : end * self.hop_length] - - return c, f0, spec, audio_norm, spk, uv - - def __getitem__(self, index): - if self.all_in_mem: - return self.random_slice(*self.cache[index]) - else: - return self.random_slice(*self.get_audio(self.audiopaths[index][0])) - - def __len__(self): - return len(self.audiopaths) - - -class TextAudioCollate: - - def __call__(self, batch): - batch = [b for b in batch if b is not None] - - input_lengths, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].shape[1] for x in batch]), - dim=0, descending=True) - - max_c_len = max([x[0].size(1) for x in batch]) - max_wav_len = max([x[3].size(1) for x in batch]) - - lengths = torch.LongTensor(len(batch)) - - c_padded = torch.FloatTensor(len(batch), batch[0][0].shape[0], max_c_len) - f0_padded = torch.FloatTensor(len(batch), max_c_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][2].shape[0], max_c_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - spkids = torch.LongTensor(len(batch), 1) - uv_padded = 
torch.FloatTensor(len(batch), max_c_len) - - c_padded.zero_() - spec_padded.zero_() - f0_padded.zero_() - wav_padded.zero_() - uv_padded.zero_() - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - c = row[0] - c_padded[i, :, :c.size(1)] = c - lengths[i] = c.size(1) - - f0 = row[1] - f0_padded[i, :f0.size(0)] = f0 - - spec = row[2] - spec_padded[i, :, :spec.size(1)] = spec - - wav = row[3] - wav_padded[i, :, :wav.size(1)] = wav - - spkids[i, 0] = row[4] - - uv = row[5] - uv_padded[i, :uv.size(0)] = uv - - return c_padded, f0_padded, spec_padded, wav_padded, spkids, lengths, uv_padded diff --git a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/zoom/zoom.js b/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/zoom/zoom.js deleted file mode 100644 index b52804d66ce86c7e8d7c4630a6bc8f132fd0e86e..0000000000000000000000000000000000000000 --- a/spaces/muellerzr/accelerate-presentation/Accelerate_files/libs/revealjs/plugin/zoom/zoom.js +++ /dev/null @@ -1,4 +0,0 @@ -!function(e,t){"object"==typeof exports&&"undefined"!=typeof module?module.exports=t():"function"==typeof define&&define.amd?define(t):(e="undefined"!=typeof globalThis?globalThis:e||self).RevealZoom=t()}(this,(function(){"use strict"; -/*! - * reveal.js Zoom plugin - */var e={id:"zoom",init:function(e){e.getRevealElement().addEventListener("mousedown",(function(o){var n=/Linux/.test(window.navigator.platform)?"ctrl":"alt",i=(e.getConfig().zoomKey?e.getConfig().zoomKey:n)+"Key",d=e.getConfig().zoomLevel?e.getConfig().zoomLevel:2;o[i]&&!e.isOverview()&&(o.preventDefault(),t.to({x:o.clientX,y:o.clientY,scale:d,pan:!1}))}))},destroy:function(){t.reset()}},t=function(){var e=1,o=0,n=0,i=-1,d=-1,l="transform"in document.body.style;function s(t,o){var n=r();if(t.width=t.width||1,t.height=t.height||1,t.x-=(window.innerWidth-t.width*o)/2,t.y-=(window.innerHeight-t.height*o)/2,l)if(1===o)document.body.style.transform="";else{var i=n.x+"px "+n.y+"px",d="translate("+-t.x+"px,"+-t.y+"px) scale("+o+")";document.body.style.transformOrigin=i,document.body.style.transform=d}else 1===o?(document.body.style.position="",document.body.style.left="",document.body.style.top="",document.body.style.width="",document.body.style.height="",document.body.style.zoom=""):(document.body.style.position="relative",document.body.style.left=-(n.x+t.x)/o+"px",document.body.style.top=-(n.y+t.y)/o+"px",document.body.style.width=100*o+"%",document.body.style.height=100*o+"%",document.body.style.zoom=o);e=o,document.documentElement.classList&&(1!==e?document.documentElement.classList.add("zoomed"):document.documentElement.classList.remove("zoomed"))}function c(){var t=.12*window.innerWidth,i=.12*window.innerHeight,d=r();nwindow.innerHeight-i&&window.scroll(d.x,d.y+(1-(window.innerHeight-n)/i)*(14/e)),owindow.innerWidth-t&&window.scroll(d.x+(1-(window.innerWidth-o)/t)*(14/e),d.y)}function r(){return{x:void 0!==window.scrollX?window.scrollX:window.pageXOffset,y:void 0!==window.scrollY?window.scrollY:window.pageYOffset}}return l&&(document.body.style.transition="transform 0.8s ease"),document.addEventListener("keyup",(function(o){1!==e&&27===o.keyCode&&t.out()})),document.addEventListener("mousemove",(function(t){1!==e&&(o=t.clientX,n=t.clientY)})),{to:function(o){if(1!==e)t.out();else{if(o.x=o.x||0,o.y=o.y||0,o.element){var n=o.element.getBoundingClientRect();o.x=n.left-20,o.y=n.top-20,o.width=n.width+40,o.height=n.height+40}void 0!==o.width&&void 
0!==o.height&&(o.scale=Math.max(Math.min(window.innerWidth/o.width,window.innerHeight/o.height),1)),o.scale>1&&(o.x*=o.scale,o.y*=o.scale,s(o,o.scale),!1!==o.pan&&(i=setTimeout((function(){d=setInterval(c,1e3/60)}),800)))}},out:function(){clearTimeout(i),clearInterval(d),s({x:0,y:0},1),e=1},magnify:function(e){this.to(e)},reset:function(){this.out()},zoomLevel:function(){return e}}}();return function(){return e}})); diff --git a/spaces/multimodalart/coca-captioning/README.md b/spaces/multimodalart/coca-captioning/README.md deleted file mode 100644 index 26d44973e08d4eae7b249ee7af5d7e35c3f97e3e..0000000000000000000000000000000000000000 --- a/spaces/multimodalart/coca-captioning/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Coca Captioning -emoji: ⚡ -colorFrom: green -colorTo: pink -sdk: gradio -sdk_version: 3.17.0 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/nakas/MusicGenDemucs/audiocraft/utils/utils.py b/spaces/nakas/MusicGenDemucs/audiocraft/utils/utils.py deleted file mode 100644 index 86e1448d065fa182ca69aae00d2f2a7eea55d8a4..0000000000000000000000000000000000000000 --- a/spaces/nakas/MusicGenDemucs/audiocraft/utils/utils.py +++ /dev/null @@ -1,234 +0,0 @@ -# Copyright (c) Meta Platforms, Inc. and affiliates. -# All rights reserved. -# -# This source code is licensed under the license found in the -# LICENSE file in the root directory of this source tree. - -from concurrent.futures import ProcessPoolExecutor -from functools import wraps -import hashlib -import logging -import typing as tp - -import flashy -import flashy.distrib -import omegaconf -import torch -from torch.nn.utils.rnn import pad_sequence - - -logger = logging.getLogger(__name__) - - -def dict_from_config(cfg: omegaconf.DictConfig) -> dict: - """Convenience function to map an omegaconf configuration to a dictionary. - - Args: - cfg (omegaconf.DictConfig): Original configuration to map to dict. - Returns: - dict: Config as dictionary object. - """ - dct = omegaconf.OmegaConf.to_container(cfg, resolve=True) - assert isinstance(dct, dict) - return dct - - -def random_subset(dataset, max_samples: int, seed: int = 42) -> torch.utils.data.Subset: - if max_samples >= len(dataset): - return dataset - - generator = torch.Generator().manual_seed(seed) - perm = torch.randperm(len(dataset), generator=generator) - return torch.utils.data.Subset(dataset, perm[:max_samples].tolist()) - - -def get_loader(dataset, num_samples: tp.Optional[int], batch_size: int, - num_workers: int, seed: int, **kwargs) -> torch.utils.data.DataLoader: - """Convenience function to load dataset into a dataloader with optional subset sampling. - - Args: - dataset: Dataset to load. - num_samples (Optional[int]): Number of samples to limit subset size. - batch_size (int): Batch size. - num_workers (int): Number of workers for data loading. - seed (int): Random seed. 
- """ - if num_samples is not None: - dataset = random_subset(dataset, num_samples, seed) - - dataloader = flashy.distrib.loader( - dataset, - batch_size=batch_size, - num_workers=num_workers, - **kwargs - ) - return dataloader - - -def get_dataset_from_loader(dataloader): - dataset = dataloader.dataset - if isinstance(dataset, torch.utils.data.Subset): - return dataset.dataset - else: - return dataset - - -def multinomial(input: torch.Tensor, num_samples: int, replacement=False, *, generator=None): - """torch.multinomial with arbitrary number of dimensions, and number of candidates on the last dimension. - - Args: - input (torch.Tensor): The input tensor containing probabilities. - num_samples (int): Number of samples to draw. - replacement (bool): Whether to draw with replacement or not. - Keywords args: - generator (torch.Generator): A pseudorandom number generator for sampling. - Returns: - torch.Tensor: Last dimension contains num_samples indices - sampled from the multinomial probability distribution - located in the last dimension of tensor input. - """ - input_ = input.reshape(-1, input.shape[-1]) - output_ = torch.multinomial(input_, num_samples=num_samples, replacement=replacement, generator=generator) - output = output_.reshape(*list(input.shape[:-1]), -1) - return output - - -def sample_top_k(probs: torch.Tensor, k: int) -> torch.Tensor: - """Sample next token from top K values along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - k (int): The k in “top-k”. - Returns: - torch.Tensor: Sampled tokens. - """ - top_k_value, _ = torch.topk(probs, k, dim=-1) - min_value_top_k = top_k_value[..., [-1]] - probs *= (probs >= min_value_top_k).float() - probs.div_(probs.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs, num_samples=1) - return next_token - - -def sample_top_p(probs: torch.Tensor, p: float) -> torch.Tensor: - """Sample next token from top P probabilities along the last dimension of the input probs tensor. - - Args: - probs (torch.Tensor): Input probabilities with token candidates on the last dimension. - p (int): The p in “top-p”. - Returns: - torch.Tensor: Sampled tokens. - """ - probs_sort, probs_idx = torch.sort(probs, dim=-1, descending=True) - probs_sum = torch.cumsum(probs_sort, dim=-1) - mask = probs_sum - probs_sort > p - probs_sort *= (~mask).float() - probs_sort.div_(probs_sort.sum(dim=-1, keepdim=True)) - next_token = multinomial(probs_sort, num_samples=1) - next_token = torch.gather(probs_idx, -1, next_token) - return next_token - - -class DummyPoolExecutor: - """Dummy pool executor to use when we actually have only 1 worker. - (e.g. instead of ProcessPoolExecutor). 
- """ - class DummyResult: - def __init__(self, func, *args, **kwargs): - self.func = func - self.args = args - self.kwargs = kwargs - - def result(self): - return self.func(*self.args, **self.kwargs) - - def __init__(self, workers, mp_context=None): - pass - - def submit(self, func, *args, **kwargs): - return DummyPoolExecutor.DummyResult(func, *args, **kwargs) - - def __enter__(self): - return self - - def __exit__(self, exc_type, exc_value, exc_tb): - return - - -def get_pool_executor(num_workers: int, mp_context=None): - return ProcessPoolExecutor(num_workers, mp_context) if num_workers > 1 else DummyPoolExecutor(1) - - -def length_to_mask(lengths: torch.Tensor, max_len: tp.Optional[int] = None) -> torch.Tensor: - """Utility function to convert a tensor of sequence lengths to a mask (useful when working on padded sequences). - For example: [3, 5] => [[1, 1, 1, 0, 0], [1, 1, 1, 1, 1]] - - Args: - lengths (torch.Tensor): tensor with lengths - max_len (int): can set the max length manually. Defaults to None. - Returns: - torch.Tensor: mask with 0s where there is pad tokens else 1s - """ - assert len(lengths.shape) == 1, "Length shape should be 1 dimensional." - final_length = lengths.max().item() if not max_len else max_len - final_length = max(final_length, 1) # if all seqs are of len zero we don't want a zero-size tensor - return torch.arange(final_length)[None, :].to(lengths.device) < lengths[:, None] - - -def hash_trick(word: str, vocab_size: int) -> int: - """Hash trick to pair each word with an index - - Args: - word (str): word we wish to convert to an index - vocab_size (int): size of the vocabulary - Returns: - int: index of the word in the embedding LUT - """ - hash = int(hashlib.sha256(word.encode("utf-8")).hexdigest(), 16) - return hash % vocab_size - - -def with_rank_rng(base_seed: int = 1234): - """Decorator for a function so that the function will use a Random Number Generator - whose state depend on the GPU rank. The original RNG state is restored upon returning. - - Args: - base_seed (int): Random seed. - """ - def _decorator(fun: tp.Callable): - @wraps(fun) - def _decorated(*args, **kwargs): - state = torch.get_rng_state() - seed = base_seed ^ flashy.distrib.rank() - torch.manual_seed(seed) - logger.debug('Rank dependent seed set to %d', seed) - try: - return fun(*args, **kwargs) - finally: - torch.set_rng_state(state) - logger.debug('RNG state restored.') - return _decorated - return _decorator - - -def collate(tensors: tp.List[torch.Tensor], dim: int = 0) -> tp.Tuple[torch.Tensor, torch.Tensor]: - """Get a list of tensors and collate them to a single tensor. according to the following logic: - - `dim` specifies the time dimension which will be stacked and padded. - - The output will contain 1 new dimension (dimension index 0) which will be the size of - of the original list. - - Args: - tensors (tp.List[torch.Tensor]): List of tensors to collate. - dim (int): Dimension which will be stacked and padded. - Returns: - tp.Tuple[torch.Tensor, torch.Tensor]: - torch.Tensor: Stacked and padded tensor. The output will contain 1 new dimension - (dimension index 0) which will be the size of the original list. - torch.Tensor: Tensor containing length of original tensor sizes (without padding). 
- """ - tensors = [x.transpose(0, dim) for x in tensors] - lens = torch.LongTensor([len(x) for x in tensors]) - padded_tensors = pad_sequence(tensors) - padded_tensors = padded_tensors.transpose(0, 1) - padded_tensors = padded_tensors.transpose(1, dim + 1) - return padded_tensors, lens diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mocha-Pro-2019-602-Plugin-For-OFX-Crack-Mac-Osx-BETTER.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mocha-Pro-2019-602-Plugin-For-OFX-Crack-Mac-Osx-BETTER.md deleted file mode 100644 index a72176a8816155fa884fbb5286fd6595dd481f41..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Mocha-Pro-2019-602-Plugin-For-OFX-Crack-Mac-Osx-BETTER.md +++ /dev/null @@ -1,85 +0,0 @@ -## Mocha Pro 2019 6.0.2 plugin for OFX Crack Mac Osx - - - -**Download ->->->-> [https://jinyurl.com/2tx27t](https://jinyurl.com/2tx27t)** - - - - Here is a possible title and article with HTML formatting for the keyword "Mocha Pro 2019 6.0.2 plugin for OFX Crack Mac Osx": - -# Mocha Pro 2019 6.0.2: A Powerful Planar Tracking and VFX Tool for Mac - - - -Mocha Pro 2019 6.0.2 is the latest version of the award-winning planar tracking and visual effects software from Boris FX. Mocha Pro 2019 6.0.2 plugin for OFX is compatible with popular Mac editing and compositing applications such as Adobe After Effects, Adobe Premiere Pro, Avid Media Composer, Blackmagic Fusion, Foundry Nuke, and more. - - - -Mocha Pro 2019 6.0.2 offers many new features and improvements that make it easier and faster to create stunning visual effects and motion graphics. Some of the highlights include: - - - -- GPU-accelerated object removal: Mocha Pro 2019 6.0.2 can remove unwanted objects, wires, rigs, logos, and more from your footage with blazing speed and high quality. - -- PowerMesh: Mocha Pro 2019 6.0.2 introduces a new mesh tracking feature that allows you to track and warp complex surfaces such as clothing, skin, or fabric. - -- Insert Module with PowerMesh: Mocha Pro 2019 6.0.2 also enhances the Insert Module with PowerMesh support, allowing you to composite warped inserts on distorted surfaces with motion blur and blend modes. - -- Planar tracking enhancements: Mocha Pro 2019 6.0.2 improves the planar tracking algorithm with new RGB channel tracking, multi-link layers, and link mesh tracking to existing planar layers. - -- Lens Module update: Mocha Pro 2019 6.0.2 updates the Lens Module with a new spline-based calibration method that can handle more complex lens distortions. - -- Apple M1 support: Mocha Pro 2019 6.0.2 is optimized for the new Apple silicon hardware, delivering faster video import and export. - - - -If you are looking for a powerful and versatile planar tracking and VFX tool for your Mac, you should definitely check out Mocha Pro 2019 6.0.2 plugin for OFX. - - - -However, before you download Mocha Pro 2019 6.0.2 plugin for OFX crack Mac Osx, you should be aware of the risks and consequences of using pirated software. - - - -## The Dangers of Using Mocha Pro 2019 6.0.2 Plugin for OFX Crack Mac Osx - - - -Using Mocha Pro 2019 6.0.2 plugin for OFX crack Mac Osx may seem like a tempting option to save money and get access to all the features of the software without paying for a license. - - - -However, using cracked software is illegal, unethical, and risky. - - - -Here are some of the dangers of using Mocha Pro 2019 6.0.2 plugin for OFX crack Mac Osx: - - - -- You may violate the intellectual property rights of Boris FX and face legal action or fines. 
- -- You may expose your Mac to malware, viruses, spyware, or ransomware that can compromise your data, privacy, and security. - -- You may experience poor performance, crashes, errors, bugs, or compatibility issues that can ruin your work or projects. - -- You may miss out on updates, patches, support, and customer service from Boris FX that can help you resolve issues or improve your workflow. - -- You may damage your reputation or credibility as a professional or artist by using stolen software. - - - -Therefore, instead of using Mocha Pro 2019 6.0.2 plugin for OFX crack Mac Osx, you should consider buying a legitimate license from Boris FX or using their free trial version to test the software before purchasing it. - - - -### How to Buy or Try Mocha Pro 2019 6.0.2 Plugin for OFX Legally - - - -If you want to use Mocha Pro 2019 6.0.2 plugin for OFX legally and safely on your Mac, you have two options: - - - -1. Buy dfd1c89656 \ No newline at end of file diff --git a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Panchathanthiram Tamil Movie Subtitles Download 16l.md b/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Panchathanthiram Tamil Movie Subtitles Download 16l.md deleted file mode 100644 index 81790c9a404e8fdba95e1063409477d8a5683bcf..0000000000000000000000000000000000000000 --- a/spaces/netiMophi/DreamlikeArt-Diffusion-1.0/Panchathanthiram Tamil Movie Subtitles Download 16l.md +++ /dev/null @@ -1,21 +0,0 @@ -
          -

          How to Download Subtitles for Panchathanthiram Tamil Movie

          -

          Panchathanthiram is a 2002 Tamil comedy film directed by K.S. Ravikumar and starring Kamal Haasan, Simran, Ramya Krishnan and others. The film is about five friends who get into trouble after one of them cheats on his wife with a prostitute. The film was a blockbuster hit and received positive reviews from critics and audiences.

          -

          If you want to watch Panchathanthiram with English subtitles, you can download them from Subscene, a website that provides subtitles for various movies and TV shows. Here are the steps to download subtitles for Panchathanthiram:

          -

          Panchathanthiram Tamil Movie Subtitles Download 16l


          Download File ===== https://urlcod.com/2uIau7



          -
            -
          1. Go to Subscene and search for Panchathanthiram in the search box.
          2. Select the English subtitle file that matches your movie version and click on the download button.
          3. Extract the subtitle file from the zip folder and rename it to match your movie file name.
          4. Place the subtitle file in the same folder as your movie file and play the movie with a media player that supports subtitles.
          -

          Enjoy watching Panchathanthiram with English subtitles!

          - -

          Panchathanthiram is not only a hilarious comedy but also a tribute to the classic Tamil literature of the same name. The film is loosely based on the five stories of the Panchatantra, an ancient Indian collection of animal fables. The film references the stories of The Lion and the Rabbit, The Monkey and the Crocodile, The Crows and the Snake, The Tortoise and the Geese, and The Blue Jackal. The film also pays homage to other Tamil films and actors, such as Sivaji Ganesan, MGR, Rajinikanth and Kamal Haasan himself.

          -

          Panchathanthiram is a must-watch for Tamil cinema lovers who enjoy comedy, drama and romance. The film has a stellar cast of actors who deliver memorable performances and dialogues. The film also has a catchy soundtrack composed by Deva, with songs like "Vai Raja Vai", "Kadhal Piriyamal" and "Hi Sonna Pothum". The film is available on various online platforms such as YouTube, Amazon Prime Video and Hotstar.

          - -

          Panchathanthiram is also a film that explores the themes of friendship, loyalty, trust and forgiveness. The five friends, Ram (Kamal Haasan), Mythili (Simran), Seenu (Jayaram), Raghu (Ramesh Aravind) and Krishna (Sriman) are inseparable since childhood. They support each other through thick and thin, even when they face marital problems, financial troubles and legal issues. They also have a lot of fun together, going on trips, playing pranks and cracking jokes. The film shows how their friendship is tested when Ram gets involved with a prostitute named Maggi (Ramya Krishnan) and gets accused of her murder. The film also shows how Ram tries to save his marriage with Mythili, who suspects him of infidelity.

          -

          -

          Panchathanthiram is a film that will make you laugh, cry and think. It has plenty of twists and turns that keep you hooked till the end, along with emotional moments that touch your heart. Its message is that no matter what mistakes you make in life, you can redeem yourself with the help of true friends and sincere love, and it teaches you to value your relationships rather than take them for granted.

          7196e7f11a
          -
          -
          \ No newline at end of file diff --git a/spaces/neural-ti/NeTI/scripts/inference.py b/spaces/neural-ti/NeTI/scripts/inference.py deleted file mode 100644 index bbc33c869559aa10f1b63a3352dcaa91dc8ea774..0000000000000000000000000000000000000000 --- a/spaces/neural-ti/NeTI/scripts/inference.py +++ /dev/null @@ -1,170 +0,0 @@ -import sys -from dataclasses import dataclass, field -from pathlib import Path -from typing import Optional, List, Tuple, Union - -import numpy as np -import pyrallis -import torch -from PIL import Image -from diffusers import DPMSolverMultistepScheduler, StableDiffusionPipeline -from transformers import CLIPTokenizer - -sys.path.append("") -sys.path.append("../src") - -import constants -from models.neti_clip_text_encoder import NeTICLIPTextModel -from models.neti_mapper import NeTIMapper -from prompt_manager import PromptManager -from sd_pipeline_call import sd_pipeline_call -from models.xti_attention_processor import XTIAttenProc -from checkpoint_handler import CheckpointHandler -from utils import vis_utils - - -@dataclass -class InferenceConfig: - # Specifies which checkpoint iteration we want to load - iteration: Optional[int] = None - # The input directory containing the saved models and embeddings - input_dir: Optional[Path] = None - # Where the save the inference results to - inference_dir: Optional[Path] = None - # Specific path to the mapper you want to load, overrides `input_dir` - mapper_checkpoint_path: Optional[Path] = None - # Specific path to the embeddings you want to load, overrides `input_dir` - learned_embeds_path: Optional[Path] = None - # List of prompts to run inference on - prompts: Optional[List[str]] = None - # Text file containing a prompts to run inference on (one prompt per line), overrides `prompts` - prompts_file_path: Optional[Path] = None - # List of random seeds to run on - seeds: List[int] = field(default_factory=lambda: [42]) - # If you want to run with dropout at inference time, this specifies the truncation indices for applying dropout. - # None indicates that no dropout will be performed. If a list of indices is provided, will run all indices. - truncation_idxs: Optional[Union[int, List[int]]] = None - # Whether to run with torch.float16 or torch.float32 - torch_dtype: str = "fp16" - - def __post_init__(self): - assert bool(self.prompts) != bool(self.prompts_file_path), \ - "You must provide either prompts or prompts_file_path, but not both!" 
- self._set_prompts() - self._set_input_paths() - self.inference_dir.mkdir(exist_ok=True, parents=True) - if type(self.truncation_idxs) == int: - self.truncation_idxs = [self.truncation_idxs] - self.torch_dtype = torch.float16 if self.torch_dtype == "fp16" else torch.float32 - - def _set_input_paths(self): - if self.inference_dir is None: - assert self.input_dir is not None, "You must pass an input_dir if you do not specify inference_dir" - self.inference_dir = self.input_dir / f"inference_{self.iteration}" - if self.mapper_checkpoint_path is None: - assert self.input_dir is not None, "You must pass an input_dir if you do not specify mapper_checkpoint_path" - self.mapper_checkpoint_path = self.input_dir / f"mapper-steps-{self.iteration}.pt" - if self.learned_embeds_path is None: - assert self.input_dir is not None, "You must pass an input_dir if you do not specify learned_embeds_path" - self.learned_embeds_path = self.input_dir / f"learned_embeds-steps-{self.iteration}.bin" - - def _set_prompts(self): - if self.prompts_file_path is not None: - assert self.prompts_file_path.exists(), f"Prompts file {self.prompts_file_path} does not exist!" - self.prompts = self.prompts_file_path.read_text().splitlines() - - -@pyrallis.wrap() -def main(infer_cfg: InferenceConfig): - train_cfg, mapper = CheckpointHandler.load_mapper(infer_cfg.mapper_checkpoint_path) - pipeline, placeholder_token, placeholder_token_id = load_stable_diffusion_model( - pretrained_model_name_or_path=train_cfg.model.pretrained_model_name_or_path, - mapper=mapper, - learned_embeds_path=infer_cfg.learned_embeds_path, - torch_dtype=infer_cfg.torch_dtype - ) - prompt_manager = PromptManager(tokenizer=pipeline.tokenizer, - text_encoder=pipeline.text_encoder, - timesteps=pipeline.scheduler.timesteps, - unet_layers=constants.UNET_LAYERS, - placeholder_token=placeholder_token, - placeholder_token_id=placeholder_token_id, - torch_dtype=infer_cfg.torch_dtype) - for prompt in infer_cfg.prompts: - output_path = infer_cfg.inference_dir / prompt.format(placeholder_token) - output_path.mkdir(exist_ok=True, parents=True) - for truncation_idx in infer_cfg.truncation_idxs: - print(f"Running with truncation index: {truncation_idx}") - prompt_image = run_inference(prompt=prompt, - pipeline=pipeline, - prompt_manager=prompt_manager, - seeds=infer_cfg.seeds, - output_path=output_path, - num_images_per_prompt=1, - truncation_idx=truncation_idx) - if truncation_idx is not None: - save_name = f"{prompt.format(placeholder_token)}_truncation_{truncation_idx}.png" - else: - save_name = f"{prompt.format(placeholder_token)}.png" - prompt_image.save(infer_cfg.inference_dir / save_name) - - -def run_inference(prompt: str, - pipeline: StableDiffusionPipeline, - prompt_manager: PromptManager, - seeds: List[int], - output_path: Optional[Path] = None, - num_images_per_prompt: int = 1, - truncation_idx: Optional[int] = None) -> Image.Image: - with torch.autocast("cuda"): - with torch.no_grad(): - prompt_embeds = prompt_manager.embed_prompt(prompt, - num_images_per_prompt=num_images_per_prompt, - truncation_idx=truncation_idx) - joined_images = [] - for seed in seeds: - generator = torch.Generator(device='cuda').manual_seed(seed) - images = sd_pipeline_call(pipeline, - prompt_embeds=prompt_embeds, - generator=generator, - num_images_per_prompt=num_images_per_prompt).images - seed_image = Image.fromarray(np.concatenate(images, axis=1)).convert("RGB") - if output_path is not None: - save_name = f'{seed}_truncation_{truncation_idx}.png' if truncation_idx is not None else 
f'{seed}.png' - seed_image.save(output_path / save_name) - joined_images.append(seed_image) - joined_image = vis_utils.get_image_grid(joined_images) - return joined_image - - -def load_stable_diffusion_model(pretrained_model_name_or_path: str, - learned_embeds_path: Path, - mapper: Optional[NeTIMapper] = None, - num_denoising_steps: int = 50, - torch_dtype: torch.dtype = torch.float16) -> Tuple[StableDiffusionPipeline, str, int]: - tokenizer = CLIPTokenizer.from_pretrained( - pretrained_model_name_or_path, subfolder="tokenizer") - text_encoder = NeTICLIPTextModel.from_pretrained( - pretrained_model_name_or_path, subfolder="text_encoder", torch_dtype=torch_dtype, - ) - if mapper is not None: - text_encoder.text_model.embeddings.set_mapper(mapper) - placeholder_token, placeholder_token_id = CheckpointHandler.load_learned_embed_in_clip( - learned_embeds_path=learned_embeds_path, - text_encoder=text_encoder, - tokenizer=tokenizer - ) - pipeline = StableDiffusionPipeline.from_pretrained( - pretrained_model_name_or_path, - torch_dtype=torch_dtype, - text_encoder=text_encoder, - tokenizer=tokenizer - ).to("cuda") - pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config) - pipeline.scheduler.set_timesteps(num_denoising_steps, device=pipeline.device) - pipeline.unet.set_attn_processor(XTIAttenProc()) - return pipeline, placeholder_token, placeholder_token_id - - -if __name__ == '__main__': - main() diff --git a/spaces/ngoctuanai/stable-diffusion/README.md b/spaces/ngoctuanai/stable-diffusion/README.md deleted file mode 100644 index 58ad5f9c2ce86c6b7589308e25860e986cfd45e0..0000000000000000000000000000000000000000 --- a/spaces/ngoctuanai/stable-diffusion/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Stable Diffusion -emoji: ⚡ -colorFrom: yellow -colorTo: gray -sdk: gradio -sdk_version: 3.42.0 -app_file: app.py -pinned: false -disable_embedding: true -license: mit ---- \ No newline at end of file diff --git a/spaces/nickil/weakly-supervised-parsing/README.md b/spaces/nickil/weakly-supervised-parsing/README.md deleted file mode 100644 index e0fd65c0419b1b89f3011e170ec48299f1d247a9..0000000000000000000000000000000000000000 --- a/spaces/nickil/weakly-supervised-parsing/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: Weakly Supervised Parsing -emoji: 👀 -colorFrom: purple -colorTo: green -sdk: gradio -sdk_version: 2.9.1 -app_file: app.py -pinned: false -license: mit ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces#reference diff --git a/spaces/niew/vits-uma-genshin-honka/mel_processing.py b/spaces/niew/vits-uma-genshin-honka/mel_processing.py deleted file mode 100644 index 3e252e76320522a8a4195a60665168f22769aec2..0000000000000000000000000000000000000000 --- a/spaces/niew/vits-uma-genshin-honka/mel_processing.py +++ /dev/null @@ -1,101 +0,0 @@ -import torch -import torch.utils.data -from librosa.filters import mel as librosa_mel_fn - -MAX_WAV_VALUE = 32768.0 - - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - """ - PARAMS - ------ - C: compression factor - """ - return torch.log(torch.clamp(x, min=clip_val) * C) - - -def dynamic_range_decompression_torch(x, C=1): - """ - PARAMS - ------ - C: compression factor used to compress - """ - return torch.exp(x) / C - - -def spectral_normalize_torch(magnitudes): - output = dynamic_range_compression_torch(magnitudes) - return output - - -def spectral_de_normalize_torch(magnitudes): - output = dynamic_range_decompression_torch(magnitudes) - return output - - 
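# Editor's note: the helper below is an added illustration, not part of the original
# module. It is a minimal sketch showing that the dynamic-range helpers defined above are
# inverses of each other for inputs above the clamp threshold (clip_val=1e-5); the
# function name is the editor's own.
def _example_dynamic_range_roundtrip():
    # Fake magnitude spectrogram: strictly positive and above clip_val.
    spec = torch.rand(2, 80, 100) + 1e-3
    compressed = dynamic_range_compression_torch(spec)         # log(clamp(x) * C)
    recovered = dynamic_range_decompression_torch(compressed)  # exp(x) / C
    assert torch.allclose(spec, recovered, atol=1e-6)
    return recovered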
-mel_basis = {} -hann_window = {} - - -def spectrogram_torch(y, n_fft, sampling_rate, hop_size, win_size, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - return spec - - -def spec_to_mel_torch(spec, n_fft, num_mels, sampling_rate, fmin, fmax): - global mel_basis - dtype_device = str(spec.dtype) + '_' + str(spec.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=spec.dtype, device=spec.device) - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - return spec - - -def mel_spectrogram_torch(y, n_fft, num_mels, sampling_rate, hop_size, win_size, fmin, fmax, center=False): - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - global mel_basis, hann_window - dtype_device = str(y.dtype) + '_' + str(y.device) - fmax_dtype_device = str(fmax) + '_' + dtype_device - wnsize_dtype_device = str(win_size) + '_' + dtype_device - if fmax_dtype_device not in mel_basis: - mel = librosa_mel_fn(sampling_rate, n_fft, num_mels, fmin, fmax) - mel_basis[fmax_dtype_device] = torch.from_numpy(mel).to(dtype=y.dtype, device=y.device) - if wnsize_dtype_device not in hann_window: - hann_window[wnsize_dtype_device] = torch.hann_window(win_size).to(dtype=y.dtype, device=y.device) - - y = torch.nn.functional.pad(y.unsqueeze(1), (int((n_fft-hop_size)/2), int((n_fft-hop_size)/2)), mode='reflect') - y = y.squeeze(1) - - spec = torch.stft(y, n_fft, hop_length=hop_size, win_length=win_size, window=hann_window[wnsize_dtype_device], - center=center, pad_mode='reflect', normalized=False, onesided=True) - - spec = torch.sqrt(spec.pow(2).sum(-1) + 1e-6) - - spec = torch.matmul(mel_basis[fmax_dtype_device], spec) - spec = spectral_normalize_torch(spec) - - return spec diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py b/spaces/nikitaPDL2023/assignment4/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py deleted file mode 100644 index 72c6b7a5c8939970bd0e1e4a3c1155695943b19a..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_regnety_4gf_dds_fpn_1x.py +++ /dev/null @@ -1,35 +0,0 @@ -from ..common.optim import SGD as optimizer -from ..common.coco_schedule import lr_multiplier_1x as lr_multiplier -from ..common.data.coco import dataloader -from ..common.models.mask_rcnn_fpn import model -from ..common.train import train - -from detectron2.config import LazyCall as L -from detectron2.modeling.backbone 
import RegNet -from detectron2.modeling.backbone.regnet import SimpleStem, ResBottleneckBlock - - -# Replace default ResNet with RegNetY-4GF from the DDS paper. Config source: -# https://github.com/facebookresearch/pycls/blob/2c152a6e5d913e898cca4f0a758f41e6b976714d/configs/dds_baselines/regnety/RegNetY-4.0GF_dds_8gpu.yaml#L4-L10 # noqa -model.backbone.bottom_up = L(RegNet)( - stem_class=SimpleStem, - stem_width=32, - block_class=ResBottleneckBlock, - depth=22, - w_a=31.41, - w_0=96, - w_m=2.24, - group_width=64, - se_ratio=0.25, - freeze_at=2, - norm="FrozenBN", - out_features=["s1", "s2", "s3", "s4"], -) -model.pixel_std = [57.375, 57.120, 58.395] - -optimizer.weight_decay = 5e-5 -train.init_checkpoint = ( - "https://dl.fbaipublicfiles.com/pycls/dds_baselines/160906838/RegNetY-4.0GF_dds_8gpu.pyth" -) -# RegNets benefit from enabling cudnn benchmark mode -train.cudnn_benchmark = True diff --git a/spaces/nikitaPDL2023/assignment4/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_l_in21k_50ep.py b/spaces/nikitaPDL2023/assignment4/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_l_in21k_50ep.py deleted file mode 100644 index 9e22e3b28777003776774f61273c04bbb2abea1e..0000000000000000000000000000000000000000 --- a/spaces/nikitaPDL2023/assignment4/detectron2/projects/ViTDet/configs/LVIS/cascade_mask_rcnn_swin_l_in21k_50ep.py +++ /dev/null @@ -1,12 +0,0 @@ -from .cascade_mask_rcnn_swin_b_in21k_50ep import ( - dataloader, - lr_multiplier, - model, - train, - optimizer, -) - -model.backbone.bottom_up.embed_dim = 192 -model.backbone.bottom_up.num_heads = [6, 12, 24, 48] - -train.init_checkpoint = "detectron2://ImageNetPretrained/swin/swin_large_patch4_window7_224_22k.pth" diff --git a/spaces/nirali/microsoft-trocr-large-handwritten/app.py b/spaces/nirali/microsoft-trocr-large-handwritten/app.py deleted file mode 100644 index ae1c7d3135af63d7865bfe6c6a093d14382347fb..0000000000000000000000000000000000000000 --- a/spaces/nirali/microsoft-trocr-large-handwritten/app.py +++ /dev/null @@ -1,3 +0,0 @@ -import gradio as gr - -gr.Interface.load("models/microsoft/trocr-large-handwritten").launch() \ No newline at end of file diff --git a/spaces/nyh/newbing/Dockerfile b/spaces/nyh/newbing/Dockerfile deleted file mode 100644 index 3698c7cb7938e025afc53b18a571ae2961fbdffe..0000000000000000000000000000000000000000 --- a/spaces/nyh/newbing/Dockerfile +++ /dev/null @@ -1,34 +0,0 @@ -# Build Stage -# 使用 golang:alpine 作为构建阶段的基础镜像 -FROM golang:alpine AS builder - -# 添加 git,以便之后能从GitHub克隆项目 -RUN apk --no-cache add git - -# 从 GitHub 克隆 go-proxy-bingai 项目到 /workspace/app 目录下 -RUN git clone https://github.com/Harry-zklcdc/go-proxy-bingai.git /workspace/app - -# 设置工作目录为之前克隆的项目目录 -WORKDIR /workspace/app - -# 编译 go 项目。-ldflags="-s -w" 是为了减少编译后的二进制大小 -RUN go build -ldflags="-s -w" -tags netgo -trimpath -o go-proxy-bingai main.go - -# Runtime Stage -# 使用轻量级的 alpine 镜像作为运行时的基础镜像 -FROM alpine - -# 设置工作目录 -WORKDIR /workspace/app - -# 从构建阶段复制编译后的二进制文件到运行时镜像中 -COPY --from=builder /workspace/app/go-proxy-bingai . 
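# Editor's note (added comment): the remaining Chinese comments in this Dockerfile describe,
# in order, setting an environment variable whose value is just a random placeholder string,
# exposing port 8080, and the command executed when the container starts.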
- -# 设置环境变量,此处为随机字符 -ENV Go_Proxy_BingAI_USER_TOKEN_1="kJs8hD92ncMzLaoQWYtX5rG6bE3fZ4iO" - -# 暴露8080端口 -EXPOSE 8080 - -# 容器启动时运行的命令 -CMD ["/workspace/app/go-proxy-bingai"] \ No newline at end of file diff --git a/spaces/ogawa0071/cyberagent-open-calm-small/README.md b/spaces/ogawa0071/cyberagent-open-calm-small/README.md deleted file mode 100644 index cf980c6a6fc288daea50fedf354c83090d013d3d..0000000000000000000000000000000000000000 --- a/spaces/ogawa0071/cyberagent-open-calm-small/README.md +++ /dev/null @@ -1,13 +0,0 @@ ---- -title: CyberAgent OpenCALM-Small -emoji: ✍️ -colorFrom: green -colorTo: green -sdk: gradio -sdk_version: 3.29.0 -app_file: app.py -pinned: false -license: cc-by-sa-4.0 ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/controlnet.py b/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/controlnet.py deleted file mode 100644 index db05b0689cff5fafc5c8d4b846dff3e1018ad15f..0000000000000000000000000000000000000000 --- a/spaces/pablodawson/ldm3d-inpainting/diffuserslocal/src/diffusers/models/controlnet.py +++ /dev/null @@ -1,837 +0,0 @@ -# Copyright 2023 The HuggingFace Team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. -from dataclasses import dataclass -from typing import Any, Dict, List, Optional, Tuple, Union - -import torch -from torch import nn -from torch.nn import functional as F - -from ..configuration_utils import ConfigMixin, register_to_config -from ..loaders import FromOriginalControlnetMixin -from ..utils import BaseOutput, logging -from .attention_processor import ( - ADDED_KV_ATTENTION_PROCESSORS, - CROSS_ATTENTION_PROCESSORS, - AttentionProcessor, - AttnAddedKVProcessor, - AttnProcessor, -) -from .embeddings import TextImageProjection, TextImageTimeEmbedding, TextTimeEmbedding, TimestepEmbedding, Timesteps -from .modeling_utils import ModelMixin -from .unet_2d_blocks import ( - CrossAttnDownBlock2D, - DownBlock2D, - UNetMidBlock2DCrossAttn, - get_down_block, -) -from .unet_2d_condition import UNet2DConditionModel - - -logger = logging.get_logger(__name__) # pylint: disable=invalid-name - - -@dataclass -class ControlNetOutput(BaseOutput): - """ - The output of [`ControlNetModel`]. - - Args: - down_block_res_samples (`tuple[torch.Tensor]`): - A tuple of downsample activations at different resolutions for each downsampling block. Each tensor should - be of shape `(batch_size, channel * resolution, height //resolution, width // resolution)`. Output can be - used to condition the original UNet's downsampling activations. - mid_down_block_re_sample (`torch.Tensor`): - The activation of the midde block (the lowest sample resolution). Each tensor should be of shape - `(batch_size, channel * lowest_resolution, height // lowest_resolution, width // lowest_resolution)`. - Output can be used to condition the original UNet's middle block activation. 
- """ - - down_block_res_samples: Tuple[torch.Tensor] - mid_block_res_sample: torch.Tensor - - -class ControlNetConditioningEmbedding(nn.Module): - """ - Quoting from https://arxiv.org/abs/2302.05543: "Stable Diffusion uses a pre-processing method similar to VQ-GAN - [11] to convert the entire dataset of 512 × 512 images into smaller 64 × 64 “latent images” for stabilized - training. This requires ControlNets to convert image-based conditions to 64 × 64 feature space to match the - convolution size. We use a tiny network E(·) of four convolution layers with 4 × 4 kernels and 2 × 2 strides - (activated by ReLU, channels are 16, 32, 64, 128, initialized with Gaussian weights, trained jointly with the full - model) to encode image-space conditions ... into feature maps ..." - """ - - def __init__( - self, - conditioning_embedding_channels: int, - conditioning_channels: int = 3, - block_out_channels: Tuple[int] = (16, 32, 96, 256), - ): - super().__init__() - - self.conv_in = nn.Conv2d(conditioning_channels, block_out_channels[0], kernel_size=3, padding=1) - - self.blocks = nn.ModuleList([]) - - for i in range(len(block_out_channels) - 1): - channel_in = block_out_channels[i] - channel_out = block_out_channels[i + 1] - self.blocks.append(nn.Conv2d(channel_in, channel_in, kernel_size=3, padding=1)) - self.blocks.append(nn.Conv2d(channel_in, channel_out, kernel_size=3, padding=1, stride=2)) - - self.conv_out = zero_module( - nn.Conv2d(block_out_channels[-1], conditioning_embedding_channels, kernel_size=3, padding=1) - ) - - def forward(self, conditioning): - embedding = self.conv_in(conditioning) - embedding = F.silu(embedding) - - for block in self.blocks: - embedding = block(embedding) - embedding = F.silu(embedding) - - embedding = self.conv_out(embedding) - - return embedding - - -class ControlNetModel(ModelMixin, ConfigMixin, FromOriginalControlnetMixin): - """ - A ControlNet model. - - Args: - in_channels (`int`, defaults to 4): - The number of channels in the input sample. - flip_sin_to_cos (`bool`, defaults to `True`): - Whether to flip the sin to cos in the time embedding. - freq_shift (`int`, defaults to 0): - The frequency shift to apply to the time embedding. - down_block_types (`tuple[str]`, defaults to `("CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "CrossAttnDownBlock2D", "DownBlock2D")`): - The tuple of downsample blocks to use. - only_cross_attention (`Union[bool, Tuple[bool]]`, defaults to `False`): - block_out_channels (`tuple[int]`, defaults to `(320, 640, 1280, 1280)`): - The tuple of output channels for each block. - layers_per_block (`int`, defaults to 2): - The number of layers per block. - downsample_padding (`int`, defaults to 1): - The padding to use for the downsampling convolution. - mid_block_scale_factor (`float`, defaults to 1): - The scale factor to use for the mid block. - act_fn (`str`, defaults to "silu"): - The activation function to use. - norm_num_groups (`int`, *optional*, defaults to 32): - The number of groups to use for the normalization. If None, normalization and activation layers is skipped - in post-processing. - norm_eps (`float`, defaults to 1e-5): - The epsilon to use for the normalization. - cross_attention_dim (`int`, defaults to 1280): - The dimension of the cross attention features. - transformer_layers_per_block (`int` or `Tuple[int]`, *optional*, defaults to 1): - The number of transformer blocks of type [`~models.attention.BasicTransformerBlock`]. 
Only relevant for - [`~models.unet_2d_blocks.CrossAttnDownBlock2D`], [`~models.unet_2d_blocks.CrossAttnUpBlock2D`], - [`~models.unet_2d_blocks.UNetMidBlock2DCrossAttn`]. - encoder_hid_dim (`int`, *optional*, defaults to None): - If `encoder_hid_dim_type` is defined, `encoder_hidden_states` will be projected from `encoder_hid_dim` - dimension to `cross_attention_dim`. - encoder_hid_dim_type (`str`, *optional*, defaults to `None`): - If given, the `encoder_hidden_states` and potentially other embeddings are down-projected to text - embeddings of dimension `cross_attention` according to `encoder_hid_dim_type`. - attention_head_dim (`Union[int, Tuple[int]]`, defaults to 8): - The dimension of the attention heads. - use_linear_projection (`bool`, defaults to `False`): - class_embed_type (`str`, *optional*, defaults to `None`): - The type of class embedding to use which is ultimately summed with the time embeddings. Choose from None, - `"timestep"`, `"identity"`, `"projection"`, or `"simple_projection"`. - addition_embed_type (`str`, *optional*, defaults to `None`): - Configures an optional embedding which will be summed with the time embeddings. Choose from `None` or - "text". "text" will use the `TextTimeEmbedding` layer. - num_class_embeds (`int`, *optional*, defaults to 0): - Input dimension of the learnable embedding matrix to be projected to `time_embed_dim`, when performing - class conditioning with `class_embed_type` equal to `None`. - upcast_attention (`bool`, defaults to `False`): - resnet_time_scale_shift (`str`, defaults to `"default"`): - Time scale shift config for ResNet blocks (see `ResnetBlock2D`). Choose from `default` or `scale_shift`. - projection_class_embeddings_input_dim (`int`, *optional*, defaults to `None`): - The dimension of the `class_labels` input when `class_embed_type="projection"`. Required when - `class_embed_type="projection"`. - controlnet_conditioning_channel_order (`str`, defaults to `"rgb"`): - The channel order of conditional image. Will convert to `rgb` if it's `bgr`. - conditioning_embedding_out_channels (`tuple[int]`, *optional*, defaults to `(16, 32, 96, 256)`): - The tuple of output channel for each block in the `conditioning_embedding` layer. 
- global_pool_conditions (`bool`, defaults to `False`): - """ - - _supports_gradient_checkpointing = True - - @register_to_config - def __init__( - self, - in_channels: int = 4, - conditioning_channels: int = 3, - flip_sin_to_cos: bool = True, - freq_shift: int = 0, - down_block_types: Tuple[str] = ( - "CrossAttnDownBlock2D", - "CrossAttnDownBlock2D", - "CrossAttnDownBlock2D", - "DownBlock2D", - ), - only_cross_attention: Union[bool, Tuple[bool]] = False, - block_out_channels: Tuple[int] = (320, 640, 1280, 1280), - layers_per_block: int = 2, - downsample_padding: int = 1, - mid_block_scale_factor: float = 1, - act_fn: str = "silu", - norm_num_groups: Optional[int] = 32, - norm_eps: float = 1e-5, - cross_attention_dim: int = 1280, - transformer_layers_per_block: Union[int, Tuple[int]] = 1, - encoder_hid_dim: Optional[int] = None, - encoder_hid_dim_type: Optional[str] = None, - attention_head_dim: Union[int, Tuple[int]] = 8, - num_attention_heads: Optional[Union[int, Tuple[int]]] = None, - use_linear_projection: bool = False, - class_embed_type: Optional[str] = None, - addition_embed_type: Optional[str] = None, - addition_time_embed_dim: Optional[int] = None, - num_class_embeds: Optional[int] = None, - upcast_attention: bool = False, - resnet_time_scale_shift: str = "default", - projection_class_embeddings_input_dim: Optional[int] = None, - controlnet_conditioning_channel_order: str = "rgb", - conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256), - global_pool_conditions: bool = False, - addition_embed_type_num_heads=64, - ): - super().__init__() - - # If `num_attention_heads` is not defined (which is the case for most models) - # it will default to `attention_head_dim`. This looks weird upon first reading it and it is. - # The reason for this behavior is to correct for incorrectly named variables that were introduced - # when this library was created. The incorrect naming was only discovered much later in https://github.com/huggingface/diffusers/issues/2011#issuecomment-1547958131 - # Changing `attention_head_dim` to `num_attention_heads` for 40,000+ configurations is too backwards breaking - # which is why we correct for the naming here. - num_attention_heads = num_attention_heads or attention_head_dim - - # Check inputs - if len(block_out_channels) != len(down_block_types): - raise ValueError( - f"Must provide the same number of `block_out_channels` as `down_block_types`. `block_out_channels`: {block_out_channels}. `down_block_types`: {down_block_types}." - ) - - if not isinstance(only_cross_attention, bool) and len(only_cross_attention) != len(down_block_types): - raise ValueError( - f"Must provide the same number of `only_cross_attention` as `down_block_types`. `only_cross_attention`: {only_cross_attention}. `down_block_types`: {down_block_types}." - ) - - if not isinstance(num_attention_heads, int) and len(num_attention_heads) != len(down_block_types): - raise ValueError( - f"Must provide the same number of `num_attention_heads` as `down_block_types`. `num_attention_heads`: {num_attention_heads}. `down_block_types`: {down_block_types}." 
- ) - - if isinstance(transformer_layers_per_block, int): - transformer_layers_per_block = [transformer_layers_per_block] * len(down_block_types) - - # input - conv_in_kernel = 3 - conv_in_padding = (conv_in_kernel - 1) // 2 - self.conv_in = nn.Conv2d( - in_channels, block_out_channels[0], kernel_size=conv_in_kernel, padding=conv_in_padding - ) - - # time - time_embed_dim = block_out_channels[0] * 4 - self.time_proj = Timesteps(block_out_channels[0], flip_sin_to_cos, freq_shift) - timestep_input_dim = block_out_channels[0] - self.time_embedding = TimestepEmbedding( - timestep_input_dim, - time_embed_dim, - act_fn=act_fn, - ) - - if encoder_hid_dim_type is None and encoder_hid_dim is not None: - encoder_hid_dim_type = "text_proj" - self.register_to_config(encoder_hid_dim_type=encoder_hid_dim_type) - logger.info("encoder_hid_dim_type defaults to 'text_proj' as `encoder_hid_dim` is defined.") - - if encoder_hid_dim is None and encoder_hid_dim_type is not None: - raise ValueError( - f"`encoder_hid_dim` has to be defined when `encoder_hid_dim_type` is set to {encoder_hid_dim_type}." - ) - - if encoder_hid_dim_type == "text_proj": - self.encoder_hid_proj = nn.Linear(encoder_hid_dim, cross_attention_dim) - elif encoder_hid_dim_type == "text_image_proj": - # image_embed_dim DOESN'T have to be `cross_attention_dim`. To not clutter the __init__ too much - # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use - # case when `addition_embed_type == "text_image_proj"` (Kadinsky 2.1)` - self.encoder_hid_proj = TextImageProjection( - text_embed_dim=encoder_hid_dim, - image_embed_dim=cross_attention_dim, - cross_attention_dim=cross_attention_dim, - ) - - elif encoder_hid_dim_type is not None: - raise ValueError( - f"encoder_hid_dim_type: {encoder_hid_dim_type} must be None, 'text_proj' or 'text_image_proj'." - ) - else: - self.encoder_hid_proj = None - - # class embedding - if class_embed_type is None and num_class_embeds is not None: - self.class_embedding = nn.Embedding(num_class_embeds, time_embed_dim) - elif class_embed_type == "timestep": - self.class_embedding = TimestepEmbedding(timestep_input_dim, time_embed_dim) - elif class_embed_type == "identity": - self.class_embedding = nn.Identity(time_embed_dim, time_embed_dim) - elif class_embed_type == "projection": - if projection_class_embeddings_input_dim is None: - raise ValueError( - "`class_embed_type`: 'projection' requires `projection_class_embeddings_input_dim` be set" - ) - # The projection `class_embed_type` is the same as the timestep `class_embed_type` except - # 1. the `class_labels` inputs are not first converted to sinusoidal embeddings - # 2. it projects from an arbitrary input dimension. - # - # Note that `TimestepEmbedding` is quite general, being mainly linear layers and activations. - # When used for embedding actual timesteps, the timesteps are first converted to sinusoidal embeddings. - # As a result, `TimestepEmbedding` can be passed arbitrary vectors. 
- self.class_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) - else: - self.class_embedding = None - - if addition_embed_type == "text": - if encoder_hid_dim is not None: - text_time_embedding_from_dim = encoder_hid_dim - else: - text_time_embedding_from_dim = cross_attention_dim - - self.add_embedding = TextTimeEmbedding( - text_time_embedding_from_dim, time_embed_dim, num_heads=addition_embed_type_num_heads - ) - elif addition_embed_type == "text_image": - # text_embed_dim and image_embed_dim DON'T have to be `cross_attention_dim`. To not clutter the __init__ too much - # they are set to `cross_attention_dim` here as this is exactly the required dimension for the currently only use - # case when `addition_embed_type == "text_image"` (Kadinsky 2.1)` - self.add_embedding = TextImageTimeEmbedding( - text_embed_dim=cross_attention_dim, image_embed_dim=cross_attention_dim, time_embed_dim=time_embed_dim - ) - elif addition_embed_type == "text_time": - self.add_time_proj = Timesteps(addition_time_embed_dim, flip_sin_to_cos, freq_shift) - self.add_embedding = TimestepEmbedding(projection_class_embeddings_input_dim, time_embed_dim) - - elif addition_embed_type is not None: - raise ValueError(f"addition_embed_type: {addition_embed_type} must be None, 'text' or 'text_image'.") - - # control net conditioning embedding - self.controlnet_cond_embedding = ControlNetConditioningEmbedding( - conditioning_embedding_channels=block_out_channels[0], - block_out_channels=conditioning_embedding_out_channels, - conditioning_channels=conditioning_channels, - ) - - self.down_blocks = nn.ModuleList([]) - self.controlnet_down_blocks = nn.ModuleList([]) - - if isinstance(only_cross_attention, bool): - only_cross_attention = [only_cross_attention] * len(down_block_types) - - if isinstance(attention_head_dim, int): - attention_head_dim = (attention_head_dim,) * len(down_block_types) - - if isinstance(num_attention_heads, int): - num_attention_heads = (num_attention_heads,) * len(down_block_types) - - # down - output_channel = block_out_channels[0] - - controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) - controlnet_block = zero_module(controlnet_block) - self.controlnet_down_blocks.append(controlnet_block) - - for i, down_block_type in enumerate(down_block_types): - input_channel = output_channel - output_channel = block_out_channels[i] - is_final_block = i == len(block_out_channels) - 1 - - down_block = get_down_block( - down_block_type, - num_layers=layers_per_block, - transformer_layers_per_block=transformer_layers_per_block[i], - in_channels=input_channel, - out_channels=output_channel, - temb_channels=time_embed_dim, - add_downsample=not is_final_block, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - resnet_groups=norm_num_groups, - cross_attention_dim=cross_attention_dim, - num_attention_heads=num_attention_heads[i], - attention_head_dim=attention_head_dim[i] if attention_head_dim[i] is not None else output_channel, - downsample_padding=downsample_padding, - use_linear_projection=use_linear_projection, - only_cross_attention=only_cross_attention[i], - upcast_attention=upcast_attention, - resnet_time_scale_shift=resnet_time_scale_shift, - ) - self.down_blocks.append(down_block) - - for _ in range(layers_per_block): - controlnet_block = nn.Conv2d(output_channel, output_channel, kernel_size=1) - controlnet_block = zero_module(controlnet_block) - self.controlnet_down_blocks.append(controlnet_block) - - if not is_final_block: - controlnet_block = 
nn.Conv2d(output_channel, output_channel, kernel_size=1) - controlnet_block = zero_module(controlnet_block) - self.controlnet_down_blocks.append(controlnet_block) - - # mid - mid_block_channel = block_out_channels[-1] - - controlnet_block = nn.Conv2d(mid_block_channel, mid_block_channel, kernel_size=1) - controlnet_block = zero_module(controlnet_block) - self.controlnet_mid_block = controlnet_block - - self.mid_block = UNetMidBlock2DCrossAttn( - transformer_layers_per_block=transformer_layers_per_block[-1], - in_channels=mid_block_channel, - temb_channels=time_embed_dim, - resnet_eps=norm_eps, - resnet_act_fn=act_fn, - output_scale_factor=mid_block_scale_factor, - resnet_time_scale_shift=resnet_time_scale_shift, - cross_attention_dim=cross_attention_dim, - num_attention_heads=num_attention_heads[-1], - resnet_groups=norm_num_groups, - use_linear_projection=use_linear_projection, - upcast_attention=upcast_attention, - ) - - @classmethod - def from_unet( - cls, - unet: UNet2DConditionModel, - controlnet_conditioning_channel_order: str = "rgb", - conditioning_embedding_out_channels: Optional[Tuple[int]] = (16, 32, 96, 256), - load_weights_from_unet: bool = True, - ): - r""" - Instantiate a [`ControlNetModel`] from [`UNet2DConditionModel`]. - - Parameters: - unet (`UNet2DConditionModel`): - The UNet model weights to copy to the [`ControlNetModel`]. All configuration options are also copied - where applicable. - """ - transformer_layers_per_block = ( - unet.config.transformer_layers_per_block if "transformer_layers_per_block" in unet.config else 1 - ) - encoder_hid_dim = unet.config.encoder_hid_dim if "encoder_hid_dim" in unet.config else None - encoder_hid_dim_type = unet.config.encoder_hid_dim_type if "encoder_hid_dim_type" in unet.config else None - addition_embed_type = unet.config.addition_embed_type if "addition_embed_type" in unet.config else None - addition_time_embed_dim = ( - unet.config.addition_time_embed_dim if "addition_time_embed_dim" in unet.config else None - ) - - controlnet = cls( - encoder_hid_dim=encoder_hid_dim, - encoder_hid_dim_type=encoder_hid_dim_type, - addition_embed_type=addition_embed_type, - addition_time_embed_dim=addition_time_embed_dim, - transformer_layers_per_block=transformer_layers_per_block, - in_channels=unet.config.in_channels, - flip_sin_to_cos=unet.config.flip_sin_to_cos, - freq_shift=unet.config.freq_shift, - down_block_types=unet.config.down_block_types, - only_cross_attention=unet.config.only_cross_attention, - block_out_channels=unet.config.block_out_channels, - layers_per_block=unet.config.layers_per_block, - downsample_padding=unet.config.downsample_padding, - mid_block_scale_factor=unet.config.mid_block_scale_factor, - act_fn=unet.config.act_fn, - norm_num_groups=unet.config.norm_num_groups, - norm_eps=unet.config.norm_eps, - cross_attention_dim=unet.config.cross_attention_dim, - attention_head_dim=unet.config.attention_head_dim, - num_attention_heads=unet.config.num_attention_heads, - use_linear_projection=unet.config.use_linear_projection, - class_embed_type=unet.config.class_embed_type, - num_class_embeds=unet.config.num_class_embeds, - upcast_attention=unet.config.upcast_attention, - resnet_time_scale_shift=unet.config.resnet_time_scale_shift, - projection_class_embeddings_input_dim=unet.config.projection_class_embeddings_input_dim, - controlnet_conditioning_channel_order=controlnet_conditioning_channel_order, - conditioning_embedding_out_channels=conditioning_embedding_out_channels, - ) - - if load_weights_from_unet: - 
controlnet.conv_in.load_state_dict(unet.conv_in.state_dict()) - controlnet.time_proj.load_state_dict(unet.time_proj.state_dict()) - controlnet.time_embedding.load_state_dict(unet.time_embedding.state_dict()) - - if controlnet.class_embedding: - controlnet.class_embedding.load_state_dict(unet.class_embedding.state_dict()) - - controlnet.down_blocks.load_state_dict(unet.down_blocks.state_dict()) - controlnet.mid_block.load_state_dict(unet.mid_block.state_dict()) - - return controlnet - - @property - # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.attn_processors - def attn_processors(self) -> Dict[str, AttentionProcessor]: - r""" - Returns: - `dict` of attention processors: A dictionary containing all attention processors used in the model with - indexed by its weight name. - """ - # set recursively - processors = {} - - def fn_recursive_add_processors(name: str, module: torch.nn.Module, processors: Dict[str, AttentionProcessor]): - if hasattr(module, "get_processor"): - processors[f"{name}.processor"] = module.get_processor(return_deprecated_lora=True) - - for sub_name, child in module.named_children(): - fn_recursive_add_processors(f"{name}.{sub_name}", child, processors) - - return processors - - for name, module in self.named_children(): - fn_recursive_add_processors(name, module, processors) - - return processors - - # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attn_processor - def set_attn_processor(self, processor: Union[AttentionProcessor, Dict[str, AttentionProcessor]]): - r""" - Sets the attention processor to use to compute attention. - - Parameters: - processor (`dict` of `AttentionProcessor` or only `AttentionProcessor`): - The instantiated processor class or a dictionary of processor classes that will be set as the processor - for **all** `Attention` layers. - - If `processor` is a dict, the key needs to define the path to the corresponding cross attention - processor. This is strongly recommended when setting trainable attention processors. - - """ - count = len(self.attn_processors.keys()) - - if isinstance(processor, dict) and len(processor) != count: - raise ValueError( - f"A dict of processors was passed, but the number of processors {len(processor)} does not match the" - f" number of attention layers: {count}. Please make sure to pass {count} processor classes." - ) - - def fn_recursive_attn_processor(name: str, module: torch.nn.Module, processor): - if hasattr(module, "set_processor"): - if not isinstance(processor, dict): - module.set_processor(processor) - else: - module.set_processor(processor.pop(f"{name}.processor")) - - for sub_name, child in module.named_children(): - fn_recursive_attn_processor(f"{name}.{sub_name}", child, processor) - - for name, module in self.named_children(): - fn_recursive_attn_processor(name, module, processor) - - # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_default_attn_processor - def set_default_attn_processor(self): - """ - Disables custom attention processors and sets the default attention implementation. 
- """ - if all(proc.__class__ in ADDED_KV_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): - processor = AttnAddedKVProcessor() - elif all(proc.__class__ in CROSS_ATTENTION_PROCESSORS for proc in self.attn_processors.values()): - processor = AttnProcessor() - else: - raise ValueError( - f"Cannot call `set_default_attn_processor` when attention processors are of type {next(iter(self.attn_processors.values()))}" - ) - - self.set_attn_processor(processor) - - # Copied from diffusers.models.unet_2d_condition.UNet2DConditionModel.set_attention_slice - def set_attention_slice(self, slice_size): - r""" - Enable sliced attention computation. - - When this option is enabled, the attention module splits the input tensor in slices to compute attention in - several steps. This is useful for saving some memory in exchange for a small decrease in speed. - - Args: - slice_size (`str` or `int` or `list(int)`, *optional*, defaults to `"auto"`): - When `"auto"`, input to the attention heads is halved, so attention is computed in two steps. If - `"max"`, maximum amount of memory is saved by running only one slice at a time. If a number is - provided, uses as many slices as `attention_head_dim // slice_size`. In this case, `attention_head_dim` - must be a multiple of `slice_size`. - """ - sliceable_head_dims = [] - - def fn_recursive_retrieve_sliceable_dims(module: torch.nn.Module): - if hasattr(module, "set_attention_slice"): - sliceable_head_dims.append(module.sliceable_head_dim) - - for child in module.children(): - fn_recursive_retrieve_sliceable_dims(child) - - # retrieve number of attention layers - for module in self.children(): - fn_recursive_retrieve_sliceable_dims(module) - - num_sliceable_layers = len(sliceable_head_dims) - - if slice_size == "auto": - # half the attention head size is usually a good trade-off between - # speed and memory - slice_size = [dim // 2 for dim in sliceable_head_dims] - elif slice_size == "max": - # make smallest slice possible - slice_size = num_sliceable_layers * [1] - - slice_size = num_sliceable_layers * [slice_size] if not isinstance(slice_size, list) else slice_size - - if len(slice_size) != len(sliceable_head_dims): - raise ValueError( - f"You have provided {len(slice_size)}, but {self.config} has {len(sliceable_head_dims)} different" - f" attention layers. Make sure to match `len(slice_size)` to be {len(sliceable_head_dims)}." - ) - - for i in range(len(slice_size)): - size = slice_size[i] - dim = sliceable_head_dims[i] - if size is not None and size > dim: - raise ValueError(f"size {size} has to be smaller or equal to {dim}.") - - # Recursively walk through all the children. 
- # Any children which exposes the set_attention_slice method - # gets the message - def fn_recursive_set_attention_slice(module: torch.nn.Module, slice_size: List[int]): - if hasattr(module, "set_attention_slice"): - module.set_attention_slice(slice_size.pop()) - - for child in module.children(): - fn_recursive_set_attention_slice(child, slice_size) - - reversed_slice_size = list(reversed(slice_size)) - for module in self.children(): - fn_recursive_set_attention_slice(module, reversed_slice_size) - - def _set_gradient_checkpointing(self, module, value=False): - if isinstance(module, (CrossAttnDownBlock2D, DownBlock2D)): - module.gradient_checkpointing = value - - def forward( - self, - sample: torch.FloatTensor, - timestep: Union[torch.Tensor, float, int], - encoder_hidden_states: torch.Tensor, - controlnet_cond: torch.FloatTensor, - conditioning_scale: float = 1.0, - class_labels: Optional[torch.Tensor] = None, - timestep_cond: Optional[torch.Tensor] = None, - attention_mask: Optional[torch.Tensor] = None, - added_cond_kwargs: Optional[Dict[str, torch.Tensor]] = None, - cross_attention_kwargs: Optional[Dict[str, Any]] = None, - guess_mode: bool = False, - return_dict: bool = True, - ) -> Union[ControlNetOutput, Tuple]: - """ - The [`ControlNetModel`] forward method. - - Args: - sample (`torch.FloatTensor`): - The noisy input tensor. - timestep (`Union[torch.Tensor, float, int]`): - The number of timesteps to denoise an input. - encoder_hidden_states (`torch.Tensor`): - The encoder hidden states. - controlnet_cond (`torch.FloatTensor`): - The conditional input tensor of shape `(batch_size, sequence_length, hidden_size)`. - conditioning_scale (`float`, defaults to `1.0`): - The scale factor for ControlNet outputs. - class_labels (`torch.Tensor`, *optional*, defaults to `None`): - Optional class labels for conditioning. Their embeddings will be summed with the timestep embeddings. - timestep_cond (`torch.Tensor`, *optional*, defaults to `None`): - attention_mask (`torch.Tensor`, *optional*, defaults to `None`): - added_cond_kwargs (`dict`): - Additional conditions for the Stable Diffusion XL UNet. - cross_attention_kwargs (`dict[str]`, *optional*, defaults to `None`): - A kwargs dictionary that if specified is passed along to the `AttnProcessor`. - guess_mode (`bool`, defaults to `False`): - In this mode, the ControlNet encoder tries its best to recognize the input content of the input even if - you remove all prompts. A `guidance_scale` between 3.0 and 5.0 is recommended. - return_dict (`bool`, defaults to `True`): - Whether or not to return a [`~models.controlnet.ControlNetOutput`] instead of a plain tuple. - - Returns: - [`~models.controlnet.ControlNetOutput`] **or** `tuple`: - If `return_dict` is `True`, a [`~models.controlnet.ControlNetOutput`] is returned, otherwise a tuple is - returned where the first element is the sample tensor. - """ - # check channel order - channel_order = self.config.controlnet_conditioning_channel_order - - if channel_order == "rgb": - # in rgb order by default - ... - elif channel_order == "bgr": - controlnet_cond = torch.flip(controlnet_cond, dims=[1]) - else: - raise ValueError(f"unknown `controlnet_conditioning_channel_order`: {channel_order}") - - # prepare attention_mask - if attention_mask is not None: - attention_mask = (1 - attention_mask.to(sample.dtype)) * -10000.0 - attention_mask = attention_mask.unsqueeze(1) - - # 1. time - timesteps = timestep - if not torch.is_tensor(timesteps): - # TODO: this requires sync between CPU and GPU. 
So try to pass timesteps as tensors if you can - # This would be a good case for the `match` statement (Python 3.10+) - is_mps = sample.device.type == "mps" - if isinstance(timestep, float): - dtype = torch.float32 if is_mps else torch.float64 - else: - dtype = torch.int32 if is_mps else torch.int64 - timesteps = torch.tensor([timesteps], dtype=dtype, device=sample.device) - elif len(timesteps.shape) == 0: - timesteps = timesteps[None].to(sample.device) - - # broadcast to batch dimension in a way that's compatible with ONNX/Core ML - timesteps = timesteps.expand(sample.shape[0]) - - t_emb = self.time_proj(timesteps) - - # timesteps does not contain any weights and will always return f32 tensors - # but time_embedding might actually be running in fp16. so we need to cast here. - # there might be better ways to encapsulate this. - t_emb = t_emb.to(dtype=sample.dtype) - - emb = self.time_embedding(t_emb, timestep_cond) - aug_emb = None - - if self.class_embedding is not None: - if class_labels is None: - raise ValueError("class_labels should be provided when num_class_embeds > 0") - - if self.config.class_embed_type == "timestep": - class_labels = self.time_proj(class_labels) - - class_emb = self.class_embedding(class_labels).to(dtype=self.dtype) - emb = emb + class_emb - - if self.config.addition_embed_type is not None: - if self.config.addition_embed_type == "text": - aug_emb = self.add_embedding(encoder_hidden_states) - - elif self.config.addition_embed_type == "text_time": - if "text_embeds" not in added_cond_kwargs: - raise ValueError( - f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `text_embeds` to be passed in `added_cond_kwargs`" - ) - text_embeds = added_cond_kwargs.get("text_embeds") - if "time_ids" not in added_cond_kwargs: - raise ValueError( - f"{self.__class__} has the config param `addition_embed_type` set to 'text_time' which requires the keyword argument `time_ids` to be passed in `added_cond_kwargs`" - ) - time_ids = added_cond_kwargs.get("time_ids") - time_embeds = self.add_time_proj(time_ids.flatten()) - time_embeds = time_embeds.reshape((text_embeds.shape[0], -1)) - - add_embeds = torch.concat([text_embeds, time_embeds], dim=-1) - add_embeds = add_embeds.to(emb.dtype) - aug_emb = self.add_embedding(add_embeds) - - emb = emb + aug_emb if aug_emb is not None else emb - - # 2. pre-process - sample = self.conv_in(sample) - - controlnet_cond = self.controlnet_cond_embedding(controlnet_cond) - sample = sample + controlnet_cond - - # 3. down - down_block_res_samples = (sample,) - for downsample_block in self.down_blocks: - if hasattr(downsample_block, "has_cross_attention") and downsample_block.has_cross_attention: - sample, res_samples = downsample_block( - hidden_states=sample, - temb=emb, - encoder_hidden_states=encoder_hidden_states, - attention_mask=attention_mask, - cross_attention_kwargs=cross_attention_kwargs, - ) - else: - sample, res_samples = downsample_block(hidden_states=sample, temb=emb) - - down_block_res_samples += res_samples - - # 4. mid - if self.mid_block is not None: - sample = self.mid_block( - sample, - emb, - encoder_hidden_states=encoder_hidden_states, - attention_mask=attention_mask, - cross_attention_kwargs=cross_attention_kwargs, - ) - - # 5. 
Control net blocks - - controlnet_down_block_res_samples = () - - for down_block_res_sample, controlnet_block in zip(down_block_res_samples, self.controlnet_down_blocks): - down_block_res_sample = controlnet_block(down_block_res_sample) - controlnet_down_block_res_samples = controlnet_down_block_res_samples + (down_block_res_sample,) - - down_block_res_samples = controlnet_down_block_res_samples - - mid_block_res_sample = self.controlnet_mid_block(sample) - - # 6. scaling - if guess_mode and not self.config.global_pool_conditions: - scales = torch.logspace(-1, 0, len(down_block_res_samples) + 1, device=sample.device) # 0.1 to 1.0 - - scales = scales * conditioning_scale - down_block_res_samples = [sample * scale for sample, scale in zip(down_block_res_samples, scales)] - mid_block_res_sample = mid_block_res_sample * scales[-1] # last one - else: - down_block_res_samples = [sample * conditioning_scale for sample in down_block_res_samples] - mid_block_res_sample = mid_block_res_sample * conditioning_scale - - if self.config.global_pool_conditions: - down_block_res_samples = [ - torch.mean(sample, dim=(2, 3), keepdim=True) for sample in down_block_res_samples - ] - mid_block_res_sample = torch.mean(mid_block_res_sample, dim=(2, 3), keepdim=True) - - if not return_dict: - return (down_block_res_samples, mid_block_res_sample) - - return ControlNetOutput( - down_block_res_samples=down_block_res_samples, mid_block_res_sample=mid_block_res_sample - ) - - -def zero_module(module): - for p in module.parameters(): - nn.init.zeros_(p) - return module diff --git a/spaces/parkyzh/bingo/src/components/ui/tooltip.tsx b/spaces/parkyzh/bingo/src/components/ui/tooltip.tsx deleted file mode 100644 index af1d48beb90dd5ae311796539843700871052cae..0000000000000000000000000000000000000000 --- a/spaces/parkyzh/bingo/src/components/ui/tooltip.tsx +++ /dev/null @@ -1,30 +0,0 @@ -'use client' - -import * as React from 'react' -import * as TooltipPrimitive from '@radix-ui/react-tooltip' - -import { cn } from '@/lib/utils' - -const TooltipProvider = TooltipPrimitive.Provider - -const Tooltip = TooltipPrimitive.Root - -const TooltipTrigger = TooltipPrimitive.Trigger - -const TooltipContent = React.forwardRef< - React.ElementRef, - React.ComponentPropsWithoutRef ->(({ className, sideOffset = 4, ...props }, ref) => ( - -)) -TooltipContent.displayName = TooltipPrimitive.Content.displayName - -export { Tooltip, TooltipTrigger, TooltipContent, TooltipProvider } diff --git a/spaces/pierrefdz/semantle/README.md b/spaces/pierrefdz/semantle/README.md deleted file mode 100644 index da0c6785adb03b0230788a5260ac7844aceb3a6d..0000000000000000000000000000000000000000 --- a/spaces/pierrefdz/semantle/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Semantle -emoji: 📜 -colorFrom: indigo -colorTo: red -sdk: gradio -app_file: app.py -pinned: false ---- - -Helper for the game Cémantix (https://cemantix.herokuapp.com/). Gives closest or furthest words to a given word. -Uses [fasttext](https://fasttext.cc/) embeddings. 
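The Space's application code is not part of this diff, so the following is only a rough sketch of the closest-words lookup the README above describes, assuming the official `fasttext` Python bindings and the public French common-crawl vectors (`cc.fr.300.bin`); whether the Space actually loads this exact model is an assumption.

```python
import fasttext
import fasttext.util

# Assumption: cc.fr.300.bin is a stand-in for whatever embedding file the Space loads.
fasttext.util.download_model('fr', if_exists='ignore')  # fetches and unpacks cc.fr.300.bin
model = fasttext.load_model('cc.fr.300.bin')

# Ten closest words to a query word, ranked by cosine similarity of the fasttext vectors.
for score, word in model.get_nearest_neighbors('lune', k=10):
    print(f'{word}\t{score:.3f}')
```

The bindings do not appear to expose a "furthest words" helper, so that direction would likely need a manual scan over the vocabulary keeping the lowest cosine similarities.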
diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/status.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/status.py deleted file mode 100644 index 09eff405ec194ee2884f203cb48c5df54ff0b9c7..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pip/_vendor/rich/status.py +++ /dev/null @@ -1,132 +0,0 @@ -from types import TracebackType -from typing import Optional, Type - -from .console import Console, RenderableType -from .jupyter import JupyterMixin -from .live import Live -from .spinner import Spinner -from .style import StyleType - - -class Status(JupyterMixin): - """Displays a status indicator with a 'spinner' animation. - - Args: - status (RenderableType): A status renderable (str or Text typically). - console (Console, optional): Console instance to use, or None for global console. Defaults to None. - spinner (str, optional): Name of spinner animation (see python -m rich.spinner). Defaults to "dots". - spinner_style (StyleType, optional): Style of spinner. Defaults to "status.spinner". - speed (float, optional): Speed factor for spinner animation. Defaults to 1.0. - refresh_per_second (float, optional): Number of refreshes per second. Defaults to 12.5. - """ - - def __init__( - self, - status: RenderableType, - *, - console: Optional[Console] = None, - spinner: str = "dots", - spinner_style: StyleType = "status.spinner", - speed: float = 1.0, - refresh_per_second: float = 12.5, - ): - self.status = status - self.spinner_style = spinner_style - self.speed = speed - self._spinner = Spinner(spinner, text=status, style=spinner_style, speed=speed) - self._live = Live( - self.renderable, - console=console, - refresh_per_second=refresh_per_second, - transient=True, - ) - - @property - def renderable(self) -> Spinner: - return self._spinner - - @property - def console(self) -> "Console": - """Get the Console used by the Status objects.""" - return self._live.console - - def update( - self, - status: Optional[RenderableType] = None, - *, - spinner: Optional[str] = None, - spinner_style: Optional[StyleType] = None, - speed: Optional[float] = None, - ) -> None: - """Update status. - - Args: - status (Optional[RenderableType], optional): New status renderable or None for no change. Defaults to None. - spinner (Optional[str], optional): New spinner or None for no change. Defaults to None. - spinner_style (Optional[StyleType], optional): New spinner style or None for no change. Defaults to None. - speed (Optional[float], optional): Speed factor for spinner animation or None for no change. Defaults to None. 
- """ - if status is not None: - self.status = status - if spinner_style is not None: - self.spinner_style = spinner_style - if speed is not None: - self.speed = speed - if spinner is not None: - self._spinner = Spinner( - spinner, text=self.status, style=self.spinner_style, speed=self.speed - ) - self._live.update(self.renderable, refresh=True) - else: - self._spinner.update( - text=self.status, style=self.spinner_style, speed=self.speed - ) - - def start(self) -> None: - """Start the status animation.""" - self._live.start() - - def stop(self) -> None: - """Stop the spinner animation.""" - self._live.stop() - - def __rich__(self) -> RenderableType: - return self.renderable - - def __enter__(self) -> "Status": - self.start() - return self - - def __exit__( - self, - exc_type: Optional[Type[BaseException]], - exc_val: Optional[BaseException], - exc_tb: Optional[TracebackType], - ) -> None: - self.stop() - - -if __name__ == "__main__": # pragma: no cover - - from time import sleep - - from .console import Console - - console = Console() - with console.status("[magenta]Covid detector booting up") as status: - sleep(3) - console.log("Importing advanced AI") - sleep(3) - console.log("Advanced Covid AI Ready") - sleep(3) - status.update(status="[bold blue] Scanning for Covid", spinner="earth") - sleep(3) - console.log("Found 10,000,000,000 copies of Covid32.exe") - sleep(3) - status.update( - status="[bold red]Moving Covid32.exe to Trash", - spinner="bouncingBall", - spinner_style="yellow", - ) - sleep(5) - console.print("[bold green]Covid deleted successfully") diff --git a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/zipp.py b/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/zipp.py deleted file mode 100644 index 26b723c1fd3e25740e0268b8c9b50905c58c3d4a..0000000000000000000000000000000000000000 --- a/spaces/pknez/face-swap-docker/mynewshinyroop/Lib/site-packages/pkg_resources/_vendor/zipp.py +++ /dev/null @@ -1,329 +0,0 @@ -import io -import posixpath -import zipfile -import itertools -import contextlib -import sys -import pathlib - -if sys.version_info < (3, 7): - from collections import OrderedDict -else: - OrderedDict = dict - - -__all__ = ['Path'] - - -def _parents(path): - """ - Given a path with elements separated by - posixpath.sep, generate all parents of that path. - - >>> list(_parents('b/d')) - ['b'] - >>> list(_parents('/b/d/')) - ['/b'] - >>> list(_parents('b/d/f/')) - ['b/d', 'b'] - >>> list(_parents('b')) - [] - >>> list(_parents('')) - [] - """ - return itertools.islice(_ancestry(path), 1, None) - - -def _ancestry(path): - """ - Given a path with elements separated by - posixpath.sep, generate all elements of that path - - >>> list(_ancestry('b/d')) - ['b/d', 'b'] - >>> list(_ancestry('/b/d/')) - ['/b/d', '/b'] - >>> list(_ancestry('b/d/f/')) - ['b/d/f', 'b/d', 'b'] - >>> list(_ancestry('b')) - ['b'] - >>> list(_ancestry('')) - [] - """ - path = path.rstrip(posixpath.sep) - while path and path != posixpath.sep: - yield path - path, tail = posixpath.split(path) - - -_dedupe = OrderedDict.fromkeys -"""Deduplicate an iterable in original order""" - - -def _difference(minuend, subtrahend): - """ - Return items in minuend not in subtrahend, retaining order - with O(1) lookup. - """ - return itertools.filterfalse(set(subtrahend).__contains__, minuend) - - -class CompleteDirs(zipfile.ZipFile): - """ - A ZipFile subclass that ensures that implied directories - are always included in the namelist. 
- """ - - @staticmethod - def _implied_dirs(names): - parents = itertools.chain.from_iterable(map(_parents, names)) - as_dirs = (p + posixpath.sep for p in parents) - return _dedupe(_difference(as_dirs, names)) - - def namelist(self): - names = super(CompleteDirs, self).namelist() - return names + list(self._implied_dirs(names)) - - def _name_set(self): - return set(self.namelist()) - - def resolve_dir(self, name): - """ - If the name represents a directory, return that name - as a directory (with the trailing slash). - """ - names = self._name_set() - dirname = name + '/' - dir_match = name not in names and dirname in names - return dirname if dir_match else name - - @classmethod - def make(cls, source): - """ - Given a source (filename or zipfile), return an - appropriate CompleteDirs subclass. - """ - if isinstance(source, CompleteDirs): - return source - - if not isinstance(source, zipfile.ZipFile): - return cls(_pathlib_compat(source)) - - # Only allow for FastLookup when supplied zipfile is read-only - if 'r' not in source.mode: - cls = CompleteDirs - - source.__class__ = cls - return source - - -class FastLookup(CompleteDirs): - """ - ZipFile subclass to ensure implicit - dirs exist and are resolved rapidly. - """ - - def namelist(self): - with contextlib.suppress(AttributeError): - return self.__names - self.__names = super(FastLookup, self).namelist() - return self.__names - - def _name_set(self): - with contextlib.suppress(AttributeError): - return self.__lookup - self.__lookup = super(FastLookup, self)._name_set() - return self.__lookup - - -def _pathlib_compat(path): - """ - For path-like objects, convert to a filename for compatibility - on Python 3.6.1 and earlier. - """ - try: - return path.__fspath__() - except AttributeError: - return str(path) - - -class Path: - """ - A pathlib-compatible interface for zip files. - - Consider a zip file with this structure:: - - . - ├── a.txt - └── b - ├── c.txt - └── d - └── e.txt - - >>> data = io.BytesIO() - >>> zf = zipfile.ZipFile(data, 'w') - >>> zf.writestr('a.txt', 'content of a') - >>> zf.writestr('b/c.txt', 'content of c') - >>> zf.writestr('b/d/e.txt', 'content of e') - >>> zf.filename = 'mem/abcde.zip' - - Path accepts the zipfile object itself or a filename - - >>> root = Path(zf) - - From there, several path operations are available. - - Directory iteration (including the zip file itself): - - >>> a, b = root.iterdir() - >>> a - Path('mem/abcde.zip', 'a.txt') - >>> b - Path('mem/abcde.zip', 'b/') - - name property: - - >>> b.name - 'b' - - join with divide operator: - - >>> c = b / 'c.txt' - >>> c - Path('mem/abcde.zip', 'b/c.txt') - >>> c.name - 'c.txt' - - Read text: - - >>> c.read_text() - 'content of c' - - existence: - - >>> c.exists() - True - >>> (b / 'missing.txt').exists() - False - - Coercion to string: - - >>> import os - >>> str(c).replace(os.sep, posixpath.sep) - 'mem/abcde.zip/b/c.txt' - - At the root, ``name``, ``filename``, and ``parent`` - resolve to the zipfile. Note these attributes are not - valid and will raise a ``ValueError`` if the zipfile - has no filename. - - >>> root.name - 'abcde.zip' - >>> str(root.filename).replace(os.sep, posixpath.sep) - 'mem/abcde.zip' - >>> str(root.parent) - 'mem' - """ - - __repr = "{self.__class__.__name__}({self.root.filename!r}, {self.at!r})" - - def __init__(self, root, at=""): - """ - Construct a Path from a ZipFile or filename. - - Note: When the source is an existing ZipFile object, - its type (__class__) will be mutated to a - specialized type. 
If the caller wishes to retain the - original type, the caller should either create a - separate ZipFile object or pass a filename. - """ - self.root = FastLookup.make(root) - self.at = at - - def open(self, mode='r', *args, pwd=None, **kwargs): - """ - Open this entry as text or binary following the semantics - of ``pathlib.Path.open()`` by passing arguments through - to io.TextIOWrapper(). - """ - if self.is_dir(): - raise IsADirectoryError(self) - zip_mode = mode[0] - if not self.exists() and zip_mode == 'r': - raise FileNotFoundError(self) - stream = self.root.open(self.at, zip_mode, pwd=pwd) - if 'b' in mode: - if args or kwargs: - raise ValueError("encoding args invalid for binary operation") - return stream - return io.TextIOWrapper(stream, *args, **kwargs) - - @property - def name(self): - return pathlib.Path(self.at).name or self.filename.name - - @property - def suffix(self): - return pathlib.Path(self.at).suffix or self.filename.suffix - - @property - def suffixes(self): - return pathlib.Path(self.at).suffixes or self.filename.suffixes - - @property - def stem(self): - return pathlib.Path(self.at).stem or self.filename.stem - - @property - def filename(self): - return pathlib.Path(self.root.filename).joinpath(self.at) - - def read_text(self, *args, **kwargs): - with self.open('r', *args, **kwargs) as strm: - return strm.read() - - def read_bytes(self): - with self.open('rb') as strm: - return strm.read() - - def _is_child(self, path): - return posixpath.dirname(path.at.rstrip("/")) == self.at.rstrip("/") - - def _next(self, at): - return self.__class__(self.root, at) - - def is_dir(self): - return not self.at or self.at.endswith("/") - - def is_file(self): - return self.exists() and not self.is_dir() - - def exists(self): - return self.at in self.root._name_set() - - def iterdir(self): - if not self.is_dir(): - raise ValueError("Can't listdir a file") - subs = map(self._next, self.root.namelist()) - return filter(self._is_child, subs) - - def __str__(self): - return posixpath.join(self.root.filename, self.at) - - def __repr__(self): - return self.__repr.format(self=self) - - def joinpath(self, *other): - next = posixpath.join(self.at, *map(_pathlib_compat, other)) - return self._next(self.root.resolve_dir(next)) - - __truediv__ = joinpath - - @property - def parent(self): - if not self.at: - return self.filename.parent - parent_at = posixpath.dirname(self.at.rstrip('/')) - if parent_at: - parent_at += '/' - return self._next(parent_at) diff --git a/spaces/prerna9811/Chord/portaudio/qa/loopback/src/test_audio_analyzer.c b/spaces/prerna9811/Chord/portaudio/qa/loopback/src/test_audio_analyzer.c deleted file mode 100644 index 82fa859f5ea2c3de89b1de3108be584b948d19c8..0000000000000000000000000000000000000000 --- a/spaces/prerna9811/Chord/portaudio/qa/loopback/src/test_audio_analyzer.c +++ /dev/null @@ -1,718 +0,0 @@ - -/* - * PortAudio Portable Real-Time Audio Library - * Latest Version at: http://www.portaudio.com - * - * Copyright (c) 1999-2010 Phil Burk and Ross Bencina - * - * Permission is hereby granted, free of charge, to any person obtaining - * a copy of this software and associated documentation files - * (the "Software"), to deal in the Software without restriction, - * including without limitation the rights to use, copy, modify, merge, - * publish, distribute, sublicense, and/or sell copies of the Software, - * and to permit persons to whom the Software is furnished to do so, - * subject to the following conditions: - * - * The above copyright notice and this permission 
notice shall be - * included in all copies or substantial portions of the Software. - * - * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, - * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF - * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. - * IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR - * ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF - * CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION - * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. - */ - -/* - * The text above constitutes the entire PortAudio license; however, - * the PortAudio community also makes the following non-binding requests: - * - * Any person wishing to distribute modifications to the Software is - * requested to send the modifications to the original developer so that - * they can be incorporated into the canonical version. It is also - * requested that these non-binding requests be included along with the - * license above. - */ - -#include -#include -#include -#include "qa_tools.h" -#include "audio_analyzer.h" -#include "test_audio_analyzer.h" -#include "write_wav.h" -#include "biquad_filter.h" - -#define FRAMES_PER_BLOCK (64) -#define PRINT_REPORTS 0 - -#define TEST_SAVED_WAVE (0) - -/*==========================================================================================*/ -/** - * Detect a single tone. - */ -static int TestSingleMonoTone( void ) -{ - int result = 0; - PaQaSineGenerator generator; - PaQaRecording recording; - float buffer[FRAMES_PER_BLOCK]; - double sampleRate = 44100.0; - int maxFrames = ((int)sampleRate) * 1; - int samplesPerFrame = 1; - int stride = 1; - int done = 0; - - double freq = 234.5; - double amp = 0.5; - - double mag1, mag2; - - // Setup a sine oscillator. - PaQa_SetupSineGenerator( &generator, freq, amp, sampleRate ); - - result = PaQa_InitializeRecording( &recording, maxFrames, (int) sampleRate ); - QA_ASSERT_EQUALS( "PaQa_InitializeRecording failed", 0, result ); - - done = 0; - while (!done) - { - PaQa_EraseBuffer( buffer, FRAMES_PER_BLOCK, samplesPerFrame ); - PaQa_MixSine( &generator, buffer, FRAMES_PER_BLOCK, stride ); - done = PaQa_WriteRecording( &recording, buffer, FRAMES_PER_BLOCK, samplesPerFrame ); - } - - mag1 = PaQa_CorrelateSine( &recording, freq, sampleRate, 0, recording.numFrames, NULL ); - QA_ASSERT_CLOSE( "exact frequency match", amp, mag1, 0.01 ); - - mag2 = PaQa_CorrelateSine( &recording, freq * 1.23, sampleRate, 0, recording.numFrames, NULL ); - QA_ASSERT_CLOSE( "wrong frequency", 0.0, mag2, 0.01 ); - - PaQa_TerminateRecording( &recording ); - return 0; - -error: - PaQa_TerminateRecording( &recording); - return 1; - -} - -/*==========================================================================================*/ -/** - * Mix multiple tones and then detect them. - */ - -static int TestMixedMonoTones( void ) -{ - int i; - int result = 0; -#define NUM_TONES (5) - PaQaSineGenerator generators[NUM_TONES]; - PaQaRecording recording; - float buffer[FRAMES_PER_BLOCK]; - double sampleRate = 44100.0; - int maxFrames = ((int)sampleRate) * 1; - int samplesPerFrame = 1; - - double baseFreq = 234.5; - double amp = 0.1; - - double mag2; - - int stride = samplesPerFrame; - int done = 0; - - // Setup a sine oscillator. - for( i=0; istartDelay; - - int stride = 1; - // Record some initial silence. - int done = PaQa_WriteSilence( recording, testTone->startDelay ); - - // Setup a sine oscillator. 
- PaQa_SetupSineGenerator( &generator, testTone->frequency, testTone->amplitude, testTone->sampleRate ); - - while (!done) - { - int framesThisLoop = BUFFER_SIZE; - - if( frameCounter == glitchPosition ) - { - if( framesToAdd > 0 ) - { - // Record some frozen data without advancing the sine generator. - done = PaQa_RecordFreeze( recording, framesToAdd ); - frameCounter += framesToAdd; - } - else if( framesToAdd < 0 ) - { - // Advance sine generator a few frames. - PaQa_MixSine( &generator, buffer, 0 - framesToAdd, stride ); - } - - } - else if( (frameCounter < glitchPosition) && ((frameCounter + framesThisLoop) > glitchPosition) ) - { - // Go right up to the glitchPosition. - framesThisLoop = glitchPosition - frameCounter; - } - - if( framesThisLoop > 0 ) - { - PaQa_EraseBuffer( buffer, framesThisLoop, testTone->samplesPerFrame ); - PaQa_MixSine( &generator, buffer, framesThisLoop, stride ); - done = PaQa_WriteRecording( recording, buffer, framesThisLoop, testTone->samplesPerFrame ); - } - frameCounter += framesThisLoop; - } -} - - -/*==========================================================================================*/ -/** - * Generate a clean recording. - */ - -static void MakeCleanRecording( PaQaRecording *recording, PaQaTestTone *testTone ) -{ - PaQaSineGenerator generator; -#define BUFFER_SIZE 512 - float buffer[BUFFER_SIZE]; - - int stride = 1; - // Record some initial silence. - int done = PaQa_WriteSilence( recording, testTone->startDelay ); - - // Setup a sine oscillator. - PaQa_SetupSineGenerator( &generator, testTone->frequency, testTone->amplitude, testTone->sampleRate ); - - // Generate recording with good phase. - while (!done) - { - PaQa_EraseBuffer( buffer, BUFFER_SIZE, testTone->samplesPerFrame ); - PaQa_MixSine( &generator, buffer, BUFFER_SIZE, stride ); - done = PaQa_WriteRecording( recording, buffer, BUFFER_SIZE, testTone->samplesPerFrame ); - } -} - -/*==========================================================================================*/ -/** - * Generate a recording with pop. - */ - -static void MakeRecordingWithPop( PaQaRecording *recording, PaQaTestTone *testTone, int popPosition, int popWidth, double popAmplitude ) -{ - int i; - - MakeCleanRecording( recording, testTone ); - - // Apply glitch to good recording. - if( (popPosition + popWidth) >= recording->numFrames ) - { - popWidth = (recording->numFrames - popPosition) - 1; - } - - for( i=0; ibuffer[i+popPosition]; - float bad = (good > 0.0) ? (good - popAmplitude) : (good + popAmplitude); - recording->buffer[i+popPosition] = bad; - } -} - -/*==========================================================================================*/ -/** - * Detect one phase error in a recording. 
- */ -static int TestDetectSinglePhaseError( double sampleRate, int cycleSize, int latencyFrames, int glitchPosition, int framesAdded ) -{ - int result = 0; - PaQaRecording recording; - PaQaTestTone testTone; - PaQaAnalysisResult analysisResult = { 0.0 }; - int framesDropped = 0; - int maxFrames = ((int)sampleRate) * 2; - - testTone.samplesPerFrame = 1; - testTone.sampleRate = sampleRate; - testTone.frequency = sampleRate / cycleSize; - testTone.amplitude = 0.5; - testTone.startDelay = latencyFrames; - - result = PaQa_InitializeRecording( &recording, maxFrames, (int) sampleRate ); - QA_ASSERT_EQUALS( "PaQa_InitializeRecording failed", 0, result ); - - MakeRecordingWithAddedFrames( &recording, &testTone, glitchPosition, framesAdded ); - - PaQa_AnalyseRecording( &recording, &testTone, &analysisResult ); - - if( framesAdded < 0 ) - { - framesDropped = -framesAdded; - framesAdded = 0; - } - -#if PRINT_REPORTS - printf("\n=== Dropped Frame Analysis ===================\n"); - printf(" expected actual\n"); - printf(" latency: %10.3f %10.3f\n", (double)latencyFrames, analysisResult.latency ); - printf(" num added frames: %10.3f %10.3f\n", (double)framesAdded, analysisResult.numAddedFrames ); - printf(" added frames at: %10.3f %10.3f\n", (double)glitchPosition, analysisResult.addedFramesPosition ); - printf(" num dropped frames: %10.3f %10.3f\n", (double)framesDropped, analysisResult.numDroppedFrames ); - printf(" dropped frames at: %10.3f %10.3f\n", (double)glitchPosition, analysisResult.droppedFramesPosition ); -#endif - - QA_ASSERT_CLOSE( "PaQa_AnalyseRecording latency", latencyFrames, analysisResult.latency, 0.5 ); - QA_ASSERT_CLOSE( "PaQa_AnalyseRecording framesAdded", framesAdded, analysisResult.numAddedFrames, 1.0 ); - QA_ASSERT_CLOSE( "PaQa_AnalyseRecording framesDropped", framesDropped, analysisResult.numDroppedFrames, 1.0 ); -// QA_ASSERT_CLOSE( "PaQa_AnalyseRecording glitchPosition", glitchPosition, analysisResult.glitchPosition, cycleSize ); - - PaQa_TerminateRecording( &recording ); - return 0; - -error: - PaQa_TerminateRecording( &recording); - return 1; -} - -/*==========================================================================================*/ -/** - * Test various dropped sample scenarios. - */ -static int TestDetectPhaseErrors( void ) -{ - int result; - - result = TestDetectSinglePhaseError( 44100, 200, 477, -1, 0 ); - if( result < 0 ) return result; -/* - result = TestDetectSinglePhaseError( 44100, 200, 77, -1, 0 ); - if( result < 0 ) return result; - - result = TestDetectSinglePhaseError( 44100, 200, 83, 3712, 9 ); - if( result < 0 ) return result; - - result = TestDetectSinglePhaseError( 44100, 280, 83, 3712, 27 ); - if( result < 0 ) return result; - - result = TestDetectSinglePhaseError( 44100, 200, 234, 3712, -9 ); - if( result < 0 ) return result; - - result = TestDetectSinglePhaseError( 44100, 200, 2091, 8923, -2 ); - if( result < 0 ) return result; - - result = TestDetectSinglePhaseError( 44100, 120, 1782, 5772, -18 ); - if( result < 0 ) return result; - - // Note that if the frequency is too high then it is hard to detect single dropped frames. - result = TestDetectSinglePhaseError( 44100, 200, 500, 4251, -1 ); - if( result < 0 ) return result; -*/ - return 0; -} - -/*==========================================================================================*/ -/** - * Detect one pop in a recording. 
- */ -static int TestDetectSinglePop( double sampleRate, int cycleSize, int latencyFrames, int popPosition, int popWidth, double popAmplitude ) -{ - int result = 0; - PaQaRecording recording; - PaQaTestTone testTone; - PaQaAnalysisResult analysisResult = { 0.0 }; - int maxFrames = ((int)sampleRate) * 2; - - testTone.samplesPerFrame = 1; - testTone.sampleRate = sampleRate; - testTone.frequency = sampleRate / cycleSize; - testTone.amplitude = 0.5; - testTone.startDelay = latencyFrames; - - result = PaQa_InitializeRecording( &recording, maxFrames, (int) sampleRate ); - QA_ASSERT_EQUALS( "PaQa_InitializeRecording failed", 0, result ); - - MakeRecordingWithPop( &recording, &testTone, popPosition, popWidth, popAmplitude ); - - PaQa_AnalyseRecording( &recording, &testTone, &analysisResult ); - -#if PRINT_REPORTS - printf("\n=== Pop Analysis ===================\n"); - printf(" expected actual\n"); - printf(" latency: %10.3f %10.3f\n", (double)latencyFrames, analysisResult.latency ); - printf(" popPosition: %10.3f %10.3f\n", (double)popPosition, analysisResult.popPosition ); - printf(" popAmplitude: %10.3f %10.3f\n", popAmplitude, analysisResult.popAmplitude ); - printf(" cycleSize: %6d\n", cycleSize ); - printf(" num added frames: %10.3f\n", analysisResult.numAddedFrames ); - printf(" added frames at: %10.3f\n", analysisResult.addedFramesPosition ); - printf(" num dropped frames: %10.3f\n", analysisResult.numDroppedFrames ); - printf(" dropped frames at: %10.3f\n", analysisResult.droppedFramesPosition ); -#endif - - QA_ASSERT_CLOSE( "PaQa_AnalyseRecording latency", latencyFrames, analysisResult.latency, 0.5 ); - QA_ASSERT_CLOSE( "PaQa_AnalyseRecording popPosition", popPosition, analysisResult.popPosition, 10 ); - if( popWidth > 0 ) - { - QA_ASSERT_CLOSE( "PaQa_AnalyseRecording popAmplitude", popAmplitude, analysisResult.popAmplitude, 0.1 * popAmplitude ); - } - - PaQa_TerminateRecording( &recording ); - return 0; - -error: - PaQa_SaveRecordingToWaveFile( &recording, "bad_recording.wav" ); - PaQa_TerminateRecording( &recording); - return 1; -} - -/*==========================================================================================*/ -/** - * Analyse recording with a DC offset. - */ -static int TestSingleInitialSpike( double sampleRate, int stepPosition, int cycleSize, int latencyFrames, double stepAmplitude ) -{ - int i; - int result = 0; - // Account for highpass filter offset. - int expectedLatency = latencyFrames + 1; - PaQaRecording recording; - - PaQaRecording hipassOutput = { 0 }; - BiquadFilter hipassFilter; - - PaQaTestTone testTone; - PaQaAnalysisResult analysisResult = { 0.0 }; - int maxFrames = ((int)sampleRate) * 2; - - testTone.samplesPerFrame = 1; - testTone.sampleRate = sampleRate; - testTone.frequency = sampleRate / cycleSize; - testTone.amplitude = -0.5; - testTone.startDelay = latencyFrames; - - result = PaQa_InitializeRecording( &recording, maxFrames, (int) sampleRate ); - QA_ASSERT_EQUALS( "PaQa_InitializeRecording failed", 0, result ); - - result = PaQa_InitializeRecording( &hipassOutput, maxFrames, (int) sampleRate ); - QA_ASSERT_EQUALS( "PaQa_InitializeRecording failed", 0, result ); - - MakeCleanRecording( &recording, &testTone ); - - // Apply DC step. - for( i=stepPosition; i
      -

      0a6ba089eb
      -
      -
      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Backuptrans Whatsapp Crack For Android [EXCLUSIVE].md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Backuptrans Whatsapp Crack For Android [EXCLUSIVE].md deleted file mode 100644 index bb110d9981e761793c652e62ac176cdcc38b21c7..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Backuptrans Whatsapp Crack For Android [EXCLUSIVE].md +++ /dev/null @@ -1,12 +0,0 @@ -

      backuptrans whatsapp crack for android


      Download Zip ✦✦✦ https://urlgoal.com/2uCKrD



      - -Backuptrans Android WhatsApp Transfer: transfer WhatsApp chat history from Android to a computer or to another Android with ease. 13.54 MB, Backuptrans Android. How to sync contacts on Android with Google. -How to transfer old contacts to a new Android phone or tablet. -2 methods: back up your contacts on your computer; back up your contacts on. -Transfer data from Android to Android and vice versa -How to transfer contacts from Android to Android: 3 ways -How to transfer contacts to Android: via mail, from a device or using a PC. -Transfer contacts from Android to Android - step-by-step instructions 8a78ff9644
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Blazevideo Hdtv Player 66 Pro Serial Number.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Blazevideo Hdtv Player 66 Pro Serial Number.md deleted file mode 100644 index 8098c61d538756dc8d2fc2d37bd9f42e363fc9fb..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Blazevideo Hdtv Player 66 Pro Serial Number.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Blazevideo Hdtv Player 66 Pro Serial Number


      Download Ziphttps://urlgoal.com/2uCJbg



      -
      - 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Businessmathfranksbudnick4theditionsolutionmanual.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Businessmathfranksbudnick4theditionsolutionmanual.md deleted file mode 100644 index ffb0da5dd1828ae932b51a05b4ae1974ebf0b5aa..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Businessmathfranksbudnick4theditionsolutionmanual.md +++ /dev/null @@ -1,51 +0,0 @@ - -

      How to Find the Solution Manual for Business Math by Frank S. Budnick (4th Edition)

      - -

      If you are looking for the solution manual for Business Math by Frank S. Budnick (4th Edition), you might have a hard time finding it online. This book is a popular textbook for students who want to learn the mathematical concepts and applications that are relevant for business, economics, and the social sciences. However, the solution manual is not widely available for free download or purchase.

      -

      businessmathfranksbudnick4theditionsolutionmanual


      DOWNLOADhttps://urlgoal.com/2uCMrh



      - -

      So, how can you get access to the solution manual for this book? Here are some possible ways:

      - -
        -
      • Ask your instructor or classmates. They might have a copy of the solution manual that they are willing to share with you. This is the easiest and most ethical way to get the answers you need.
      • -
      • Search for it on online platforms. There are some websites that offer solution manuals for various textbooks, either for free or for a fee. However, you should be careful about the quality and legality of these sources. Some of them might be inaccurate, incomplete, or infringing on the author's copyright.
      • -
      • Use a keyword research or search engine optimization (SEO) tool. SEO tools are software applications that help you optimize a website or its content for search engines, and they can also surface the keywords, phrases, and links related to your topic. For example, you can use a tool like Ahrefs to find out what people are searching for when they look for the solution manual for Business Math by Frank S. Budnick (4th Edition), then use those keywords and phrases to refine your own search query and find more relevant results.
      • -
      • Write your own solutions. If you can't find the solution manual anywhere, you might have to solve the problems yourself. This might be challenging, but it will also help you improve your understanding and skills in business math. You can use online resources like Khan Academy or Wolfram Alpha to learn more about the topics and concepts covered in the book.
      • -
      - -

      Hopefully, this article has given you some useful tips on how to find the solution manual for Business Math by Frank S. Budnick (4th Edition). Remember to always cite your sources and respect the author's rights when using any online material.

      - -

      What is Business Math and Why is it Important?

      - -

      Business math is a branch of mathematics that deals with the practical aspects of business, such as accounting, finance, marketing, production, and operations. It involves applying mathematical methods and techniques to solve problems and make decisions in various business situations.

      -

      - -

      Business math is important because it helps business professionals and managers to perform their tasks more efficiently and effectively. It also helps them to understand and analyze data, trends, and patterns that are relevant for their business goals and strategies. Business math can help them to:

      - -
        -
      • Calculate and compare costs, revenues, profits, and taxes;
      • -
      • Prepare and interpret financial statements and reports;
      • -
      • Create and evaluate budgets, forecasts, and plans;
      • -
      • Determine the optimal price, quantity, and mix of products or services;
      • -
      • Measure and improve the performance and quality of processes and systems;
      • -
      • Assess and manage risks and uncertainties;
      • -
      • Optimize the allocation and utilization of resources;
      • -
      • Negotiate and communicate effectively with stakeholders.
      • -
      - -
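
      To make the first of these concrete, here is a small break-even sketch in Python. The figures and variable names are purely hypothetical, invented to illustrate the kind of arithmetic involved; nothing here is taken from the book:

```python
# Hypothetical numbers, purely for illustration.
fixed_costs = 12000.0          # fixed costs per month, in dollars
price_per_unit = 25.0          # selling price per unit
variable_cost_per_unit = 17.0  # variable cost per unit

# Break-even point: the sales volume at which revenue exactly covers total cost.
contribution_margin = price_per_unit - variable_cost_per_unit
break_even_units = fixed_costs / contribution_margin
print(break_even_units)  # 1500.0 units per month
```

      Selling fewer than 1,500 units a month loses money; selling more earns a profit.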

      What are the Topics Covered in Business Math by Frank S. Budnick (4th Edition)?

      - -

      Business Math by Frank S. Budnick (4th Edition) is a comprehensive textbook that covers the essential topics and concepts of business math. It is divided into six parts:

      - -
        -
      1. Basic Mathematics: This part reviews the fundamental skills and techniques of arithmetic, algebra, geometry, and trigonometry that are necessary for business math.
      2. -
      3. Mathematics of Finance: This part covers the topics of interest, annuities, amortization, sinking funds, bonds, and present value analysis.
      4. -
      5. Linear Programming: This part introduces the concepts and methods of linear programming, a mathematical technique for optimizing a linear objective function subject to a set of linear constraints.
      6. -
      7. Statistics: This part covers the topics of descriptive statistics, probability, probability distributions, sampling, estimation, hypothesis testing, correlation, and regression.
      8. -
      9. Logic: This part covers the topics of logic, sets, relations, functions, matrices, and determinants.
      10. -
      11. Additional Topics: This part covers some additional topics that are relevant for business math, such as game theory, inventory models, queuing theory, simulation, Markov chains, and decision analysis.
      12. -
      - -

      The book also includes numerous examples, exercises, applications, and case studies that illustrate the practical relevance and usefulness of business math.
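
      As a flavor of the Mathematics of Finance material, here is a short, minimal present-value sketch. The helper function and the payment, rate, and term are all invented for illustration and are not taken from the textbook:

```python
# Hypothetical example: present value of an ordinary annuity.
def present_value_annuity(payment, rate_per_period, periods):
    """Discount a stream of equal end-of-period payments back to today."""
    return payment * (1 - (1 + rate_per_period) ** -periods) / rate_per_period

# e.g. $500 at the end of each month for 3 years, discounted at 6% per year
pv = present_value_annuity(payment=500, rate_per_period=0.06 / 12, periods=36)
print(round(pv, 2))  # roughly 16435.5
```

      The same discounting formula, rearranged, also underlies the amortization and sinking-fund calculations listed above.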

      d5da3c52bf
      -
      -
      \ No newline at end of file diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/CRACK PremiumSoft Navicat Premium V11.0.18 X86 X64 BEST.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/CRACK PremiumSoft Navicat Premium V11.0.18 X86 X64 BEST.md deleted file mode 100644 index e95e07df5932c4ad3fa55a207f265468961e0905..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/CRACK PremiumSoft Navicat Premium V11.0.18 X86 X64 BEST.md +++ /dev/null @@ -1,6 +0,0 @@ -

      CRACK PremiumSoft Navicat Premium v11.0.18 x86 x64


      Download File > https://urlgoal.com/2uCL34



      - -Listen to PremiumSoft Navicat Premium V11.0.18 X86 X64 Crack and 185 more episodes by HACK Magnifying Glass Pro V1.8 (Multilanguage) Key [RH], free! 4d29de3e1b
      -
      -
      -

      diff --git a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Chordastic 1.3.6.rar _TOP_.md b/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Chordastic 1.3.6.rar _TOP_.md deleted file mode 100644 index 61af59100672ea648cf0f45addcbf61f0f4512ec..0000000000000000000000000000000000000000 --- a/spaces/recenWmenso/ChatGPT-with-Voice-Cloning-for-All/datasets/Chordastic 1.3.6.rar _TOP_.md +++ /dev/null @@ -1,11 +0,0 @@ -

      Chordastic 1.3.6.rar


      DOWNLOADhttps://urlgoal.com/2uCM2r



      - -Nov 16, 2017 - Chordastic (HKLM-x32. ... (Version: 1.3.6 - Chordastic) CrashPlan (HKLM-x32\\. ... WinRAR archiver) (Version: 4.01.0 - win.rar GmbH) VLC Media Player -23 Mar 2019 - Download: Chordastic (x32-x64) 1.3.2. -Chordastic is a great alternative to the restrictive iTunes App Store. -With the help of Chordastic, you can -24 Oct 2017 - Download: Chordastic (x32-x64) 1.3.2. -With 8a78ff9644
      -
      -
      -

      diff --git a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/utils/inverted_residual.py b/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/utils/inverted_residual.py deleted file mode 100644 index 1f241ae3e433c4aba1496cf2038ae88e9ef395ef..0000000000000000000000000000000000000000 --- a/spaces/rockeycoss/Prompt-Segment-Anything-Demo/mmdet/models/utils/inverted_residual.py +++ /dev/null @@ -1,130 +0,0 @@ -# Copyright (c) OpenMMLab. All rights reserved. -import torch.nn as nn -import torch.utils.checkpoint as cp -from mmcv.cnn import ConvModule -from mmcv.cnn.bricks import DropPath -from mmcv.runner import BaseModule - -from .se_layer import SELayer - - -class InvertedResidual(BaseModule): - """Inverted Residual Block. - - Args: - in_channels (int): The input channels of this Module. - out_channels (int): The output channels of this Module. - mid_channels (int): The input channels of the depthwise convolution. - kernel_size (int): The kernel size of the depthwise convolution. - Default: 3. - stride (int): The stride of the depthwise convolution. Default: 1. - se_cfg (dict): Config dict for se layer. Default: None, which means no - se layer. - with_expand_conv (bool): Use expand conv or not. If set False, - mid_channels must be the same with in_channels. - Default: True. - conv_cfg (dict): Config dict for convolution layer. Default: None, - which means using conv2d. - norm_cfg (dict): Config dict for normalization layer. - Default: dict(type='BN'). - act_cfg (dict): Config dict for activation layer. - Default: dict(type='ReLU'). - drop_path_rate (float): stochastic depth rate. Defaults to 0. - with_cp (bool): Use checkpoint or not. Using checkpoint will save some - memory while slowing down the training speed. Default: False. - init_cfg (dict or list[dict], optional): Initialization config dict. - Default: None - - Returns: - Tensor: The output tensor. - """ - - def __init__(self, - in_channels, - out_channels, - mid_channels, - kernel_size=3, - stride=1, - se_cfg=None, - with_expand_conv=True, - conv_cfg=None, - norm_cfg=dict(type='BN'), - act_cfg=dict(type='ReLU'), - drop_path_rate=0., - with_cp=False, - init_cfg=None): - super(InvertedResidual, self).__init__(init_cfg) - self.with_res_shortcut = (stride == 1 and in_channels == out_channels) - assert stride in [1, 2], f'stride must in [1, 2]. ' \ - f'But received {stride}.' 
- self.with_cp = with_cp - self.drop_path = DropPath( - drop_path_rate) if drop_path_rate > 0 else nn.Identity() - self.with_se = se_cfg is not None - self.with_expand_conv = with_expand_conv - - if self.with_se: - assert isinstance(se_cfg, dict) - if not self.with_expand_conv: - assert mid_channels == in_channels - - if self.with_expand_conv: - self.expand_conv = ConvModule( - in_channels=in_channels, - out_channels=mid_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - self.depthwise_conv = ConvModule( - in_channels=mid_channels, - out_channels=mid_channels, - kernel_size=kernel_size, - stride=stride, - padding=kernel_size // 2, - groups=mid_channels, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=act_cfg) - - if self.with_se: - self.se = SELayer(**se_cfg) - - self.linear_conv = ConvModule( - in_channels=mid_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - padding=0, - conv_cfg=conv_cfg, - norm_cfg=norm_cfg, - act_cfg=None) - - def forward(self, x): - - def _inner_forward(x): - out = x - - if self.with_expand_conv: - out = self.expand_conv(out) - - out = self.depthwise_conv(out) - - if self.with_se: - out = self.se(out) - - out = self.linear_conv(out) - - if self.with_res_shortcut: - return x + self.drop_path(out) - else: - return out - - if self.with_cp and x.requires_grad: - out = cp.checkpoint(_inner_forward, x) - else: - out = _inner_forward(x) - - return out diff --git a/spaces/rorallitri/biomedical-language-models/logs/Babado Forte Erika Palomino Pdf 19 !!EXCLUSIVE!!.md b/spaces/rorallitri/biomedical-language-models/logs/Babado Forte Erika Palomino Pdf 19 !!EXCLUSIVE!!.md deleted file mode 100644 index 50b585b9f4bf05b213b0f87780c7aaf0a3ee9c7b..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Babado Forte Erika Palomino Pdf 19 !!EXCLUSIVE!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Babado Forte Erika Palomino Pdf 19


      Download File ►►►►► https://tinurll.com/2uzn0S



      -
      -... 0.000145151573518 PDF 0.000144914703060 create 0.000144785704884 includes ... 0.000050789811374 Resort 0.000050761618576 Fort 0.000050760578225 rent ... Conditional 0.000002254301598 Erika 0.000002254189369 predetermined ... parishes 0.000001577020648 nineteen 0.000001576965424 jose ... 1fdad05405
      -
      -
      -

      diff --git a/spaces/rorallitri/biomedical-language-models/logs/Farmacologia De Katzung 11 Edicion Pdf Descargar 85 Cmo Elegir y Usar los Frmacos en los Pacientes y Monitorear sus Efectos.md b/spaces/rorallitri/biomedical-language-models/logs/Farmacologia De Katzung 11 Edicion Pdf Descargar 85 Cmo Elegir y Usar los Frmacos en los Pacientes y Monitorear sus Efectos.md deleted file mode 100644 index e4be71ec529c4898bffda2cfda97c5f9e7e18a13..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Farmacologia De Katzung 11 Edicion Pdf Descargar 85 Cmo Elegir y Usar los Frmacos en los Pacientes y Monitorear sus Efectos.md +++ /dev/null @@ -1,6 +0,0 @@ -

      Farmacologia De Katzung 11 Edicion Pdf Descargar 85


      DOWNLOADhttps://tinurll.com/2uzmDf



      - - aaccfb2cb3
      -
      -
      -

      diff --git a/spaces/rorallitri/biomedical-language-models/logs/Fruit Ninja Frenzy Game Free Download For Android.md b/spaces/rorallitri/biomedical-language-models/logs/Fruit Ninja Frenzy Game Free Download For Android.md deleted file mode 100644 index 76f562692325cae7d299dda58d496b2a9ce7ef7c..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Fruit Ninja Frenzy Game Free Download For Android.md +++ /dev/null @@ -1,5 +0,0 @@ - -

      En garde! Fruit meets fairytales in the juiciest game this year - Fruit Ninja: Puss in Boots! The suave fruit-slashing swashbuckler, Puss in Boots, faces a challenge that would make Sensei proud. His search for the Magic Beans brings him to developer Halfbrick's legendary hit game Fruit Ninja. Prepare for a journey full of familiar fruit-slicing action as he encounters a huge variety of new and exciting challenges! Fruit Ninja: Puss in Boots features the all-new Bandito mode! Slice through a series of increasingly exciting challenges to become the greatest Fruit Ninja warrior ever! Each stage thrusts you into a never-seen-before fruit frenzy adventure: Massive fruit from the Giant's castle, precision and timing challenges, all-out fruit onslaughts with new obstacles. And for the first time EVER: Throw down against the most-requested addition in Fruit Ninja history - the tomato! Real Banditos must put their best blade forward because scoring is based on the number of fruit sliced, ninja reflexes and slicing efficiency. A true produce warrior can attain massive high scores and upload their best to Global Leaderboards to compare against friends and the best players online! Bring your blade to Desperado mode - an enhanced and re-mastered version of the Classic Fruit Ninja game that introduced 70 million players to the world's biggest dojo! But this time, you will face even more fruit, unique waves and Puss in Boots' elusive Magic Beans from the DreamWorks Animation feature film! Finally, bask in the fruit-stained glory of Puss in Boots' own exclusive ninja Stash, featuring a whole range of unique customizable content, including new backgrounds and blades with elements from both the film and Fruit Ninja. Stay juicy, amigos! ****LIKE FRUIT NINJA ON FACEBOOK! HALFBRICK ON TWITTER!**** App Version - 1.0.4 (13)

      -

      Fruit Ninja Frenzy Game Free Download For Android


      Download Zip ——— https://tinurll.com/2uznZs



      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/rorallitri/biomedical-language-models/logs/Hero movie in tamil hd 1080p Yuvan Shankar Rajas musical extravaganza.md b/spaces/rorallitri/biomedical-language-models/logs/Hero movie in tamil hd 1080p Yuvan Shankar Rajas musical extravaganza.md deleted file mode 100644 index da1c1b0738eeb1b458335fe1b76769f44fea2d51..0000000000000000000000000000000000000000 --- a/spaces/rorallitri/biomedical-language-models/logs/Hero movie in tamil hd 1080p Yuvan Shankar Rajas musical extravaganza.md +++ /dev/null @@ -1,9 +0,0 @@ -
      -

      An Action Hero 2022 Hindi Movie Download HD Quality 1080p: Friends, the An Action Hero 2022 movie was recently released worldwide on 2 December 2022, but since its release some movie torrent websites have leaked the An Action Hero 2022 Hindi movie.

      -

      Hero movie in tamil hd 1080p


      Download File ->>->>->> https://tinurll.com/2uzlWT



      -

      Download An Action Hero Movie: after the release of the movie, Filmyzilla leaked An Action Hero Movie in different video qualities such as 320p, 480p, 720p and 1080p, due to which the box office collection of An Action Hero Movie has also been affected. Filmyzilla is a well-known movie torrent website which leaks movies in violation of government rules.

      -

      Filmymeet is a popular website for leaking Hollywood, Bollywood, South, web series, TV shows, and other-language dubbed movies for free, so here we can see the impact of downloading movies from the torrent website. There are many options on these sites, like An Action Hero Movie Download Filmymeet HD print, 720p 300MB, 480p, 1080p, and 4K.

      -

      Mp4moviez is a popular website for leaking Hollywood, Bollywood, South, web series, TV shows, and other-language dubbed movies for free, so here we can see the impact of downloading movies from the torrent website. There are many options on these sites, like HD print, An Action Hero Movie Download In Hindi mp4moviez 720p 300MB, 480p, 1080p, and 4K.

      -

      aaccfb2cb3
      -
      -
      \ No newline at end of file diff --git a/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/filtered_lrelu.py b/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/filtered_lrelu.py deleted file mode 100644 index 5d8865f6900df9bae8e716b4dbaa22c09475815b..0000000000000000000000000000000000000000 --- a/spaces/rossellison/kpop-face-generator/stylegan3-fun/torch_utils/ops/filtered_lrelu.py +++ /dev/null @@ -1,279 +0,0 @@ -# Copyright (c) 2021, NVIDIA CORPORATION & AFFILIATES. All rights reserved. -# -# NVIDIA CORPORATION and its licensors retain all intellectual property -# and proprietary rights in and to this software, related documentation -# and any modifications thereto. Any use, reproduction, disclosure or -# distribution of this software and related documentation without an express -# license agreement from NVIDIA CORPORATION is strictly prohibited. - -import os -import numpy as np -import torch -import warnings - -from .. import custom_ops -from .. import misc -from . import upfirdn2d -from . import bias_act - -#---------------------------------------------------------------------------- - -_plugin = None - -def _init(): - global _plugin - # Bob Burrough's PR (#45) so that the plugins work in Windows: https://github.com/NVlabs/stylegan3/pull/45 - extras = {} - if os.name == 'nt': - extras['extra_cflags'] = ['/std:c++17'] - if _plugin is None: - _plugin = custom_ops.get_plugin( - module_name='filtered_lrelu_plugin', - sources=['filtered_lrelu.cpp', 'filtered_lrelu_wr.cu', 'filtered_lrelu_rd.cu', 'filtered_lrelu_ns.cu'], - headers=['filtered_lrelu.h', 'filtered_lrelu.cu'], - source_dir=os.path.dirname(__file__), - extra_cuda_cflags=['--use_fast_math', '--allow-unsupported-compiler'], - **extras, - ) - return True - -def _get_filter_size(f): - if f is None: - return 1, 1 - assert isinstance(f, torch.Tensor) - assert 1 <= f.ndim <= 2 - return f.shape[-1], f.shape[0] # width, height - -def _parse_padding(padding): - if isinstance(padding, int): - padding = [padding, padding] - assert isinstance(padding, (list, tuple)) - assert all(isinstance(x, (int, np.integer)) for x in padding) - padding = [int(x) for x in padding] - if len(padding) == 2: - px, py = padding - padding = [px, px, py, py] - px0, px1, py0, py1 = padding - return px0, px1, py0, py1 - -#---------------------------------------------------------------------------- - -def filtered_lrelu(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False, impl='cuda'): - r"""Filtered leaky ReLU for a batch of 2D images. - - Performs the following sequence of operations for each channel: - - 1. Add channel-specific bias if provided (`b`). - - 2. Upsample the image by inserting N-1 zeros after each pixel (`up`). - - 3. Pad the image with the specified number of zeros on each side (`padding`). - Negative padding corresponds to cropping the image. - - 4. Convolve the image with the specified upsampling FIR filter (`fu`), shrinking it - so that the footprint of all output pixels lies within the input image. - - 5. Multiply each value by the provided gain factor (`gain`). - - 6. Apply leaky ReLU activation function to each value. - - 7. Clamp each value between -clamp and +clamp, if `clamp` parameter is provided. - - 8. Convolve the image with the specified downsampling FIR filter (`fd`), shrinking - it so that the footprint of all output pixels lies within the input image. - - 9. Downsample the image by keeping every Nth pixel (`down`). 
- - The fused op is considerably more efficient than performing the same calculation - using standard PyTorch ops. It supports gradients of arbitrary order. - - Args: - x: Float32/float16/float64 input tensor of the shape - `[batch_size, num_channels, in_height, in_width]`. - fu: Float32 upsampling FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - fd: Float32 downsampling FIR filter of the shape - `[filter_height, filter_width]` (non-separable), - `[filter_taps]` (separable), or - `None` (identity). - b: Bias vector, or `None` to disable. Must be a 1D tensor of the same type - as `x`. The length of vector must must match the channel dimension of `x`. - up: Integer upsampling factor (default: 1). - down: Integer downsampling factor. (default: 1). - padding: Padding with respect to the upsampled image. Can be a single number - or a list/tuple `[x, y]` or `[x_before, x_after, y_before, y_after]` - (default: 0). - gain: Overall scaling factor for signal magnitude (default: sqrt(2)). - slope: Slope on the negative side of leaky ReLU (default: 0.2). - clamp: Maximum magnitude for leaky ReLU output (default: None). - flip_filter: False = convolution, True = correlation (default: False). - impl: Implementation to use. Can be `'ref'` or `'cuda'` (default: `'cuda'`). - - Returns: - Tensor of the shape `[batch_size, num_channels, out_height, out_width]`. - """ - assert isinstance(x, torch.Tensor) - assert impl in ['ref', 'cuda'] - if impl == 'cuda' and x.device.type == 'cuda' and _init(): - return _filtered_lrelu_cuda(up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter).apply(x, fu, fd, b, None, 0, 0) - return _filtered_lrelu_ref(x, fu=fu, fd=fd, b=b, up=up, down=down, padding=padding, gain=gain, slope=slope, clamp=clamp, flip_filter=flip_filter) - -#---------------------------------------------------------------------------- - -@misc.profiled_function -def _filtered_lrelu_ref(x, fu=None, fd=None, b=None, up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False): - """Slow and memory-inefficient reference implementation of `filtered_lrelu()` using - existing `upfirdn2n()` and `bias_act()` ops. - """ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - fu_w, fu_h = _get_filter_size(fu) - fd_w, fd_h = _get_filter_size(fd) - if b is not None: - assert isinstance(b, torch.Tensor) and b.dtype == x.dtype - misc.assert_shape(b, [x.shape[1]]) - assert isinstance(up, int) and up >= 1 - assert isinstance(down, int) and down >= 1 - px0, px1, py0, py1 = _parse_padding(padding) - assert gain == float(gain) and gain > 0 - assert slope == float(slope) and slope >= 0 - assert clamp is None or (clamp == float(clamp) and clamp >= 0) - - # Calculate output size. - batch_size, channels, in_h, in_w = x.shape - in_dtype = x.dtype - out_w = (in_w * up + (px0 + px1) - (fu_w - 1) - (fd_w - 1) + (down - 1)) // down - out_h = (in_h * up + (py0 + py1) - (fu_h - 1) - (fd_h - 1) + (down - 1)) // down - - # Compute using existing ops. - x = bias_act.bias_act(x=x, b=b) # Apply bias. - x = upfirdn2d.upfirdn2d(x=x, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. - x = bias_act.bias_act(x=x, act='lrelu', alpha=slope, gain=gain, clamp=clamp) # Bias, leaky ReLU, clamp. - x = upfirdn2d.upfirdn2d(x=x, f=fd, down=down, flip_filter=flip_filter) # Downsample. - - # Check output shape & dtype. 
- misc.assert_shape(x, [batch_size, channels, out_h, out_w]) - assert x.dtype == in_dtype - return x - -#---------------------------------------------------------------------------- - -_filtered_lrelu_cuda_cache = dict() - -def _filtered_lrelu_cuda(up=1, down=1, padding=0, gain=np.sqrt(2), slope=0.2, clamp=None, flip_filter=False): - """Fast CUDA implementation of `filtered_lrelu()` using custom ops. - """ - assert isinstance(up, int) and up >= 1 - assert isinstance(down, int) and down >= 1 - px0, px1, py0, py1 = _parse_padding(padding) - assert gain == float(gain) and gain > 0 - gain = float(gain) - assert slope == float(slope) and slope >= 0 - slope = float(slope) - assert clamp is None or (clamp == float(clamp) and clamp >= 0) - clamp = float(clamp if clamp is not None else 'inf') - - # Lookup from cache. - key = (up, down, px0, px1, py0, py1, gain, slope, clamp, flip_filter) - if key in _filtered_lrelu_cuda_cache: - return _filtered_lrelu_cuda_cache[key] - - # Forward op. - class FilteredLReluCuda(torch.autograd.Function): - @staticmethod - def forward(ctx, x, fu, fd, b, si, sx, sy): # pylint: disable=arguments-differ - assert isinstance(x, torch.Tensor) and x.ndim == 4 - - # Replace empty up/downsample kernels with full 1x1 kernels (faster than separable). - if fu is None: - fu = torch.ones([1, 1], dtype=torch.float32, device=x.device) - if fd is None: - fd = torch.ones([1, 1], dtype=torch.float32, device=x.device) - assert 1 <= fu.ndim <= 2 - assert 1 <= fd.ndim <= 2 - - # Replace separable 1x1 kernels with full 1x1 kernels when scale factor is 1. - if up == 1 and fu.ndim == 1 and fu.shape[0] == 1: - fu = fu.square()[None] - if down == 1 and fd.ndim == 1 and fd.shape[0] == 1: - fd = fd.square()[None] - - # Missing sign input tensor. - if si is None: - si = torch.empty([0]) - - # Missing bias tensor. - if b is None: - b = torch.zeros([x.shape[1]], dtype=x.dtype, device=x.device) - - # Construct internal sign tensor only if gradients are needed. - write_signs = (si.numel() == 0) and (x.requires_grad or b.requires_grad) - - # Warn if input storage strides are not in decreasing order due to e.g. channels-last layout. - strides = [x.stride(i) for i in range(x.ndim) if x.size(i) > 1] - if any(a < b for a, b in zip(strides[:-1], strides[1:])): - warnings.warn("low-performance memory layout detected in filtered_lrelu input", RuntimeWarning) - - # Call C++/Cuda plugin if datatype is supported. - if x.dtype in [torch.float16, torch.float32]: - if torch.cuda.current_stream(x.device) != torch.cuda.default_stream(x.device): - warnings.warn("filtered_lrelu called with non-default cuda stream but concurrent execution is not supported", RuntimeWarning) - y, so, return_code = _plugin.filtered_lrelu(x, fu, fd, b, si, up, down, px0, px1, py0, py1, sx, sy, gain, slope, clamp, flip_filter, write_signs) - else: - return_code = -1 - - # No Cuda kernel found? Fall back to generic implementation. Still more memory efficient than the reference implementation because - # only the bit-packed sign tensor is retained for gradient computation. - if return_code < 0: - warnings.warn("filtered_lrelu called with parameters that have no optimized CUDA kernel, using generic fallback", RuntimeWarning) - - y = x.add(b.unsqueeze(-1).unsqueeze(-1)) # Add bias. - y = upfirdn2d.upfirdn2d(x=y, f=fu, up=up, padding=[px0, px1, py0, py1], gain=up**2, flip_filter=flip_filter) # Upsample. - so = _plugin.filtered_lrelu_act_(y, si, sx, sy, gain, slope, clamp, write_signs) # Activation function and sign handling. Modifies y in-place. 
- y = upfirdn2d.upfirdn2d(x=y, f=fd, down=down, flip_filter=flip_filter) # Downsample. - - # Prepare for gradient computation. - ctx.save_for_backward(fu, fd, (si if si.numel() else so)) - ctx.x_shape = x.shape - ctx.y_shape = y.shape - ctx.s_ofs = sx, sy - return y - - @staticmethod - def backward(ctx, dy): # pylint: disable=arguments-differ - fu, fd, si = ctx.saved_tensors - _, _, xh, xw = ctx.x_shape - _, _, yh, yw = ctx.y_shape - sx, sy = ctx.s_ofs - dx = None # 0 - dfu = None; assert not ctx.needs_input_grad[1] - dfd = None; assert not ctx.needs_input_grad[2] - db = None # 3 - dsi = None; assert not ctx.needs_input_grad[4] - dsx = None; assert not ctx.needs_input_grad[5] - dsy = None; assert not ctx.needs_input_grad[6] - - if ctx.needs_input_grad[0] or ctx.needs_input_grad[3]: - pp = [ - (fu.shape[-1] - 1) + (fd.shape[-1] - 1) - px0, - xw * up - yw * down + px0 - (up - 1), - (fu.shape[0] - 1) + (fd.shape[0] - 1) - py0, - xh * up - yh * down + py0 - (up - 1), - ] - gg = gain * (up ** 2) / (down ** 2) - ff = (not flip_filter) - sx = sx - (fu.shape[-1] - 1) + px0 - sy = sy - (fu.shape[0] - 1) + py0 - dx = _filtered_lrelu_cuda(up=down, down=up, padding=pp, gain=gg, slope=slope, clamp=None, flip_filter=ff).apply(dy, fd, fu, None, si, sx, sy) - - if ctx.needs_input_grad[3]: - db = dx.sum([0, 2, 3]) - - return dx, dfu, dfd, db, dsi, dsx, dsy - - # Add to cache. - _filtered_lrelu_cuda_cache[key] = FilteredLReluCuda - return FilteredLReluCuda - -#---------------------------------------------------------------------------- diff --git a/spaces/rstallman/Mayfair-Partner-Music/MODEL_CARD.md b/spaces/rstallman/Mayfair-Partner-Music/MODEL_CARD.md deleted file mode 100644 index 6c2c9f883969eb905e74ad3376966d156cc5ca00..0000000000000000000000000000000000000000 --- a/spaces/rstallman/Mayfair-Partner-Music/MODEL_CARD.md +++ /dev/null @@ -1,81 +0,0 @@ -# MusicGen Model Card - -## Model details - -**Organization developing the model:** The FAIR team of Meta AI. - -**Model date:** MusicGen was trained between April 2023 and May 2023. - -**Model version:** This is the version 1 of the model. - -**Model type:** MusicGen consists of an EnCodec model for audio tokenization, an auto-regressive language model based on the transformer architecture for music modeling. The model comes in different sizes: 300M, 1.5B and 3.3B parameters ; and two variants: a model trained for text-to-music generation task and a model trained for melody-guided music generation. - -**Paper or resources for more information:** More information can be found in the paper [Simple and Controllable Music Generation][arxiv]. - -**Citation details** See [our paper][arxiv] - -**License** Code is released under MIT, model weights are released under CC-BY-NC 4.0. - -**Where to send questions or comments about the model:** Questions and comments about MusicGen can be sent via the [Github repository](https://github.com/facebookresearch/audiocraft) of the project, or by opening an issue. 
- -## Intended use -**Primary intended use:** The primary use of MusicGen is research on AI-based music generation, including: - -- Research efforts, such as probing and better understanding the limitations of generative models to further improve the state of science -- Generation of music guided by text or melody to understand current abilities of generative AI models by machine learning amateurs - -**Primary intended users:** The primary intended users of the model are researchers in audio, machine learning and artificial intelligence, as well as amateur seeking to better understand those models. - -**Out-of-scope use cases** The model should not be used on downstream applications without further risk evaluation and mitigation. The model should not be used to intentionally create or disseminate music pieces that create hostile or alienating environments for people. This includes generating music that people would foreseeably find disturbing, distressing, or offensive; or content that propagates historical or current stereotypes. - -## Metrics - -**Models performance measures:** We used the following objective measure to evaluate the model on a standard music benchmark: - -- Frechet Audio Distance computed on features extracted from a pre-trained audio classifier (VGGish) -- Kullback-Leibler Divergence on label distributions extracted from a pre-trained audio classifier (PaSST) -- CLAP Score between audio embedding and text embedding extracted from a pre-trained CLAP model - -Additionally, we run qualitative studies with human participants, evaluating the performance of the model with the following axes: - -- Overall quality of the music samples; -- Text relevance to the provided text input; -- Adherence to the melody for melody-guided music generation. - -More details on performance measures and human studies can be found in the paper. - -**Decision thresholds:** Not applicable. - -## Evaluation datasets - -The model was evaluated on the [MusicCaps benchmark](https://www.kaggle.com/datasets/googleai/musiccaps) and on an in-domain held-out evaluation set, with no artist overlap with the training set. - -## Training datasets - -The model was trained on licensed data using the following sources: the [Meta Music Initiative Sound Collection](https://www.fb.com/sound), [Shutterstock music collection](https://www.shutterstock.com/music) and the [Pond5 music collection](https://www.pond5.com/). See the paper for more details about the training set and corresponding preprocessing. - -## Quantitative analysis - -More information can be found in the paper [Simple and Controllable Music Generation][arxiv], in the Experimental Setup section. - -## Limitations and biases - -**Data:** The data sources used to train the model are created by music professionals and covered by legal agreements with the right holders. The model is trained on 20K hours of data, we believe that scaling the model on larger datasets can further improve the performance of the model. - -**Mitigations:** Vocals have been removed from the data source using corresponding tags, and then using using a state-of-the-art music source separation method, namely using the open source [Hybrid Transformer for Music Source Separation](https://github.com/facebookresearch/demucs) (HT-Demucs). - -**Limitations:** - -- The model is not able to generate realistic vocals. -- The model has been trained with English descriptions and will not perform as well in other languages. 
-- The model does not perform equally well for all music styles and cultures. -- The model sometimes generates end of songs, collapsing to silence. -- It is sometimes difficult to assess what types of text descriptions provide the best generations. Prompt engineering may be required to obtain satisfying results. - -**Biases:** The source of data is potentially lacking diversity and all music cultures are not equally represented in the dataset. The model may not perform equally well on the wide variety of music genres that exists. The generated samples from the model will reflect the biases from the training data. Further work on this model should include methods for balanced and just representations of cultures, for example, by scaling the training data to be both diverse and inclusive. - -**Risks and harms:** Biases and limitations of the model may lead to generation of samples that may be considered as biased, inappropriate or offensive. We believe that providing the code to reproduce the research and train new models will allow to broaden the application to new and more representative data. - -**Use cases:** Users must be aware of the biases, limitations and risks of the model. MusicGen is a model developed for artificial intelligence research on controllable music generation. As such, it should not be used for downstream applications without further investigation and mitigation of risks. - -[arxiv]: https://arxiv.org/abs/2306.05284 diff --git a/spaces/rstallman/chatgpt4/app.py b/spaces/rstallman/chatgpt4/app.py deleted file mode 100644 index d028e2423feea0fbc0bf719c9057c30967e9590a..0000000000000000000000000000000000000000 --- a/spaces/rstallman/chatgpt4/app.py +++ /dev/null @@ -1,192 +0,0 @@ -import gradio as gr -import os -import time -import pandas as pd - - -from langchain.document_loaders import OnlinePDFLoader #for laoding the pdf -from langchain.embeddings import OpenAIEmbeddings # for creating embeddings -from langchain.vectorstores import Chroma # for the vectorization part -from langchain.chains import RetrievalQA # for conversing with chatGPT -from langchain.chat_models import ChatOpenAI # the LLM model we'll use (ChatGPT) -from langchain import PromptTemplate - -def load_pdf_and_generate_embeddings(pdf_doc, open_ai_key, relevant_pages): - if openai_key is not None: - os.environ['OPENAI_API_KEY'] = open_ai_key - #Load the pdf file - loader = OnlinePDFLoader(pdf_doc.name) - pages = loader.load_and_split() - - #Create an instance of OpenAIEmbeddings, which is responsible for generating embeddings for text - embeddings = OpenAIEmbeddings() - - pages_to_be_loaded =[] - - if relevant_pages: - page_numbers = relevant_pages.split(",") - if len(page_numbers) != 0: - for page_number in page_numbers: - if page_number.isdigit(): - pageIndex = int(page_number)-1 - if pageIndex >=0 and pageIndex -

      Chatbot for PDFs - GPT-4

      -

      Upload a .PDF, click the "Upload PDF and generate embeddings" button,
      - Wait for the Status to show Ready. You can chose to get answers to the pre-defined question set OR ask your own question
      - The app is built on GPT-4 and leverages PromptTemplate

      - -""" - -with gr.Blocks(css=css,theme=gr.themes.Monochrome()) as demo: - with gr.Column(elem_id="col-container"): - gr.HTML(title) - - with gr.Column(): - openai_key = gr.Textbox(label="Your GPT-4 OpenAI API key", type="password") - pdf_doc = gr.File(label="Load a pdf",file_types=['.pdf'],type='file') - relevant_pages = gr.Textbox(label="*Optional - List comma separated page numbers to load or leave this field blank to use the entire PDF") - - with gr.Row(): - status = gr.Textbox(label="Status", placeholder="", interactive=False) - load_pdf = gr.Button("Upload PDF and generate embeddings").style(full_width=False) - - with gr.Row(): - document_type = gr.Radio(['Deed of Trust', 'Transmittal Summary'], label="Select the Document Type") - answers = gr.Dataframe(label="Answers to Predefined Question set") - answers_for_predefined_question_set = gr.Button("Get gpt-4 answers to pre-defined question set").style(full_width=False) - - with gr.Row(): - input = gr.Textbox(label="Type in your question") - output = gr.Textbox(label="Answer") - submit_query = gr.Button("Submit your own question to gpt-4").style(full_width=False) - - - load_pdf.click(load_pdf_and_generate_embeddings, inputs=[pdf_doc, openai_key, relevant_pages], outputs=status) - - answers_for_predefined_question_set.click(answer_predefined_questions, document_type, answers) - - submit_query.click(answer_query,input,output) - - -demo.launch() - - - - diff --git a/spaces/runa91/bite_gradio/src/combined_model/__init__.py b/spaces/runa91/bite_gradio/src/combined_model/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/russellc/BLIP/models/__init__.py b/spaces/russellc/BLIP/models/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/lpips/dist_model.py b/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/lpips/dist_model.py deleted file mode 100644 index 4ff0aa4ca6e4b217954c167787eaac1ca1f8e304..0000000000000000000000000000000000000000 --- a/spaces/safi842/FashionGen/models/stylegan2/stylegan2-pytorch/lpips/dist_model.py +++ /dev/null @@ -1,284 +0,0 @@ - -from __future__ import absolute_import - -import sys -import numpy as np -import torch -from torch import nn -import os -from collections import OrderedDict -from torch.autograd import Variable -import itertools -from .base_model import BaseModel -from scipy.ndimage import zoom -import fractions -import functools -import skimage.transform -from tqdm import tqdm - -from IPython import embed - -from . 
import networks_basic as networks -import lpips as util - -class DistModel(BaseModel): - def name(self): - return self.model_name - - def initialize(self, model='net-lin', net='alex', colorspace='Lab', pnet_rand=False, pnet_tune=False, model_path=None, - use_gpu=True, printNet=False, spatial=False, - is_train=False, lr=.0001, beta1=0.5, version='0.1', gpu_ids=[0]): - ''' - INPUTS - model - ['net-lin'] for linearly calibrated network - ['net'] for off-the-shelf network - ['L2'] for L2 distance in Lab colorspace - ['SSIM'] for ssim in RGB colorspace - net - ['squeeze','alex','vgg'] - model_path - if None, will look in weights/[NET_NAME].pth - colorspace - ['Lab','RGB'] colorspace to use for L2 and SSIM - use_gpu - bool - whether or not to use a GPU - printNet - bool - whether or not to print network architecture out - spatial - bool - whether to output an array containing varying distances across spatial dimensions - spatial_shape - if given, output spatial shape. if None then spatial shape is determined automatically via spatial_factor (see below). - spatial_factor - if given, specifies upsampling factor relative to the largest spatial extent of a convolutional layer. if None then resized to size of input images. - spatial_order - spline order of filter for upsampling in spatial mode, by default 1 (bilinear). - is_train - bool - [True] for training mode - lr - float - initial learning rate - beta1 - float - initial momentum term for adam - version - 0.1 for latest, 0.0 was original (with a bug) - gpu_ids - int array - [0] by default, gpus to use - ''' - BaseModel.initialize(self, use_gpu=use_gpu, gpu_ids=gpu_ids) - - self.model = model - self.net = net - self.is_train = is_train - self.spatial = spatial - self.gpu_ids = gpu_ids - self.model_name = '%s [%s]'%(model,net) - - if(self.model == 'net-lin'): # pretrained net + linear layer - self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_tune=pnet_tune, pnet_type=net, - use_dropout=True, spatial=spatial, version=version, lpips=True) - kw = {} - if not use_gpu: - kw['map_location'] = 'cpu' - if(model_path is None): - import inspect - model_path = os.path.abspath(os.path.join(inspect.getfile(self.initialize), '..', 'weights/v%s/%s.pth'%(version,net))) - - if(not is_train): - print('Loading model from: %s'%model_path) - self.net.load_state_dict(torch.load(model_path, **kw), strict=False) - - elif(self.model=='net'): # pretrained network - self.net = networks.PNetLin(pnet_rand=pnet_rand, pnet_type=net, lpips=False) - elif(self.model in ['L2','l2']): - self.net = networks.L2(use_gpu=use_gpu,colorspace=colorspace) # not really a network, only for testing - self.model_name = 'L2' - elif(self.model in ['DSSIM','dssim','SSIM','ssim']): - self.net = networks.DSSIM(use_gpu=use_gpu,colorspace=colorspace) - self.model_name = 'SSIM' - else: - raise ValueError("Model [%s] not recognized." 
% self.model) - - self.parameters = list(self.net.parameters()) - - if self.is_train: # training mode - # extra network on top to go from distances (d0,d1) => predicted human judgment (h*) - self.rankLoss = networks.BCERankingLoss() - self.parameters += list(self.rankLoss.net.parameters()) - self.lr = lr - self.old_lr = lr - self.optimizer_net = torch.optim.Adam(self.parameters, lr=lr, betas=(beta1, 0.999)) - else: # test mode - self.net.eval() - - if(use_gpu): - self.net.to(gpu_ids[0]) - self.net = torch.nn.DataParallel(self.net, device_ids=gpu_ids) - if(self.is_train): - self.rankLoss = self.rankLoss.to(device=gpu_ids[0]) # just put this on GPU0 - - if(printNet): - print('---------- Networks initialized -------------') - networks.print_network(self.net) - print('-----------------------------------------------') - - def forward(self, in0, in1, retPerLayer=False): - ''' Function computes the distance between image patches in0 and in1 - INPUTS - in0, in1 - torch.Tensor object of shape Nx3xXxY - image patch scaled to [-1,1] - OUTPUT - computed distances between in0 and in1 - ''' - - return self.net.forward(in0, in1, retPerLayer=retPerLayer) - - # ***** TRAINING FUNCTIONS ***** - def optimize_parameters(self): - self.forward_train() - self.optimizer_net.zero_grad() - self.backward_train() - self.optimizer_net.step() - self.clamp_weights() - - def clamp_weights(self): - for module in self.net.modules(): - if(hasattr(module, 'weight') and module.kernel_size==(1,1)): - module.weight.data = torch.clamp(module.weight.data,min=0) - - def set_input(self, data): - self.input_ref = data['ref'] - self.input_p0 = data['p0'] - self.input_p1 = data['p1'] - self.input_judge = data['judge'] - - if(self.use_gpu): - self.input_ref = self.input_ref.to(device=self.gpu_ids[0]) - self.input_p0 = self.input_p0.to(device=self.gpu_ids[0]) - self.input_p1 = self.input_p1.to(device=self.gpu_ids[0]) - self.input_judge = self.input_judge.to(device=self.gpu_ids[0]) - - self.var_ref = Variable(self.input_ref,requires_grad=True) - self.var_p0 = Variable(self.input_p0,requires_grad=True) - self.var_p1 = Variable(self.input_p1,requires_grad=True) - - def forward_train(self): # run forward pass - # print(self.net.module.scaling_layer.shift) - # print(torch.norm(self.net.module.net.slice1[0].weight).item(), torch.norm(self.net.module.lin0.model[1].weight).item()) - - self.d0 = self.forward(self.var_ref, self.var_p0) - self.d1 = self.forward(self.var_ref, self.var_p1) - self.acc_r = self.compute_accuracy(self.d0,self.d1,self.input_judge) - - self.var_judge = Variable(1.*self.input_judge).view(self.d0.size()) - - self.loss_total = self.rankLoss.forward(self.d0, self.d1, self.var_judge*2.-1.) 
- - return self.loss_total - - def backward_train(self): - torch.mean(self.loss_total).backward() - - def compute_accuracy(self,d0,d1,judge): - ''' d0, d1 are Variables, judge is a Tensor ''' - d1_lt_d0 = (d1 %f' % (type,self.old_lr, lr)) - self.old_lr = lr - -def score_2afc_dataset(data_loader, func, name=''): - ''' Function computes Two Alternative Forced Choice (2AFC) score using - distance function 'func' in dataset 'data_loader' - INPUTS - data_loader - CustomDatasetDataLoader object - contains a TwoAFCDataset inside - func - callable distance function - calling d=func(in0,in1) should take 2 - pytorch tensors with shape Nx3xXxY, and return numpy array of length N - OUTPUTS - [0] - 2AFC score in [0,1], fraction of time func agrees with human evaluators - [1] - dictionary with following elements - d0s,d1s - N arrays containing distances between reference patch to perturbed patches - gts - N array in [0,1], preferred patch selected by human evaluators - (closer to "0" for left patch p0, "1" for right patch p1, - "0.6" means 60pct people preferred right patch, 40pct preferred left) - scores - N array in [0,1], corresponding to what percentage function agreed with humans - CONSTS - N - number of test triplets in data_loader - ''' - - d0s = [] - d1s = [] - gts = [] - - for data in tqdm(data_loader.load_data(), desc=name): - d0s+=func(data['ref'],data['p0']).data.cpu().numpy().flatten().tolist() - d1s+=func(data['ref'],data['p1']).data.cpu().numpy().flatten().tolist() - gts+=data['judge'].cpu().numpy().flatten().tolist() - - d0s = np.array(d0s) - d1s = np.array(d1s) - gts = np.array(gts) - scores = (d0shi patty,
      i have installed the driver for win7 64 bit. i have installed the driver for 64 bit win10. and i can see the modal dialog box for "device manager". but, still it fails to load at "minecraft windows 10" game. i have installed the driver for "intel hd graphics 620" driver. and my graphics card is "intel iris graphics 655". please help me.
      thanks,

      -

      hi patty,
      i am using windows 7 64 bit and minecraft windows 10. i installed the driver from the inf file i downloaded from your website. i installed the driver at the end of the guide at the bottom of the page. when i run minecraft, i get a black screen and nothing else. i have tried rebooting and shutting down the computer, but it did not help. i just keep getting a black screen. what should i do?

      -

      Sis Mirage 3 Opengl Driver


      Download Ziphttps://gohhs.com/2uEApw



      -

      You've got bigger problems than just graphic glitches then. Actually check on your graphics card vendor's website for any updated drivers; any auto checking doesn't mean a thing. Try an updated build from to see if that improves things. Reset to factory settings from the file menu or delete the .b25.blend file (may be hidden, do a file search).

      -

      1. Make sure your graphics card is up to date with the latest driver. You can check this by clicking on "Device Manager" from the Start menu (right-click, select Start menu, All Programs, Accessories, Device Manager). You should see an entry for your video card.

      -

      -

      4. On the next screen, you can select "Let me pick from a list of device drivers on my computer." If there are any new drivers available, you will see a list of them. Click on the "Search" button to let the computer search for the new drivers. When the computer finds the new drivers, you can select them and click "Install" to install them.

      899543212b
      -
      -
      \ No newline at end of file diff --git a/spaces/sczhou/ProPainter/web-demos/hugging_face/README.md b/spaces/sczhou/ProPainter/web-demos/hugging_face/README.md deleted file mode 100644 index f5f075add318978857dfc6ea6a57522e05eb2b7e..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/web-demos/hugging_face/README.md +++ /dev/null @@ -1,57 +0,0 @@ -## Get Started -1. Install ProPainter Dependencies -You can follow the [Dependencies and Installation](https://github.com/Luo-Yihang/ProPainter-pr/tree/dev_yihang#dependencies-and-installation) - -2. Install Demo Dependencies -```shell -cd web-demos/hugging_face - -# install python dependencies -pip3 install -r requirements.txt - -# Run the demo -python app.py -``` - -## Usage Guidance -* Step 1: Upload your video and click the `Get video info` button. -![Step 1](./assets/step1.png) - -* Step 2: - 1. *[Optional]* Specify the tracking period for the currently added mask by dragging the `Track start frame` or `Track end frame`. - 2. Click the image on the left to select the mask area. - 3. - Click `Add mask` if you are satisfied with the mask, or - - *[Optional]* Click `Clear clicks` if you want to reselect the mask area, or - - *[Optional]* Click `Remove mask` to remove all masks. - 4. *[Optional]* Go back to step 2.1 to add another mask. -![Step 2](./assets/step2.png) - -* Step 3: - 1. Click the `Tracking` button to track the masks for the whole video. - 2. *[Optional]* Select the ProPainter parameters if the `ProPainter Parameters` dropdown. - 2. Then click `Inpainting` to get the inpainting results. -![Step 3](./assets/step3.png) - -*You can always refer to the `Highlighted Text` box on the page for guidance on the next step!* - - -## Citation -If you find our repo useful for your research, please consider citing our paper: -```bibtex -@inproceedings{zhou2023propainter, - title={{ProPainter}: Improving Propagation and Transformer for Video Inpainting}, - author={Zhou, Shangchen and Li, Chongyi and Chan, Kelvin C.K and Loy, Chen Change}, - booktitle={Proceedings of IEEE International Conference on Computer Vision (ICCV)}, - year={2023} -} -``` - - -## License - -This project is licensed under NTU S-Lab License 1.0. Redistribution and use should follow this license. - - -## Acknowledgements - -The project harnesses the capabilities from [Track Anything](https://github.com/gaomingqi/Track-Anything), [Segment Anything](https://github.com/facebookresearch/segment-anything), [Cutie](https://github.com/hkchengrex/Cutie), and [E2FGVI](https://github.com/MCG-NKU/E2FGVI). Thanks for their awesome works. 
diff --git a/spaces/sczhou/ProPainter/web-demos/hugging_face/track_anything.py b/spaces/sczhou/ProPainter/web-demos/hugging_face/track_anything.py deleted file mode 100644 index deeb88e647bec7260db1a6de55aa86f32723b72e..0000000000000000000000000000000000000000 --- a/spaces/sczhou/ProPainter/web-demos/hugging_face/track_anything.py +++ /dev/null @@ -1,40 +0,0 @@ -import numpy as np -from tqdm import tqdm - -from tools.interact_tools import SamControler -from tracker.base_tracker import BaseTracker -from inpainter.base_inpainter import ProInpainter - - -class TrackingAnything(): - def __init__(self, sam_checkpoint, cutie_checkpoint, propainter_checkpoint, raft_checkpoint, flow_completion_checkpoint, args): - self.args = args - self.samcontroler = SamControler(sam_checkpoint, args.sam_model_type, args.device) - self.cutie = BaseTracker(cutie_checkpoint, device=args.device) - self.baseinpainter = ProInpainter(propainter_checkpoint, raft_checkpoint, flow_completion_checkpoint, args.device) - - def first_frame_click(self, image: np.ndarray, points:np.ndarray, labels: np.ndarray, multimask=True): - mask, logit, painted_image = self.samcontroler.first_frame_click(image, points, labels, multimask) - return mask, logit, painted_image - - def generator(self, images: list, template_mask:np.ndarray): - masks = [] - logits = [] - painted_images = [] - for i in tqdm(range(len(images)), desc="Tracking image"): - if i==0: - mask, logit, painted_image = self.cutie.track(images[i], template_mask) - masks.append(mask) - logits.append(logit) - painted_images.append(painted_image) - else: - mask, logit, painted_image = self.cutie.track(images[i]) - masks.append(mask) - logits.append(logit) - painted_images.append(painted_image) - return masks, logits, painted_images - - - - - \ No newline at end of file diff --git a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/layers_537238KB.py b/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/layers_537238KB.py deleted file mode 100644 index 78e539250075d7fed2f349d05e3317dfe2c96804..0000000000000000000000000000000000000000 --- a/spaces/shenfangqi/Retrieval-based-Voice-Conversion-WebUI/Retrieval-based-Voice-Conversion-WebUI/uvr5_pack/lib_v5/layers_537238KB.py +++ /dev/null @@ -1,126 +0,0 @@ -import torch -from torch import nn -import torch.nn.functional as F - -from uvr5_pack.lib_v5 import spec_utils - - -class Conv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(Conv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nout, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - bias=False, - ), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class SeperableConv2DBNActiv(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, dilation=1, activ=nn.ReLU): - super(SeperableConv2DBNActiv, self).__init__() - self.conv = nn.Sequential( - nn.Conv2d( - nin, - nin, - kernel_size=ksize, - stride=stride, - padding=pad, - dilation=dilation, - groups=nin, - bias=False, - ), - nn.Conv2d(nin, nout, kernel_size=1, bias=False), - nn.BatchNorm2d(nout), - activ(), - ) - - def __call__(self, x): - return self.conv(x) - - -class Encoder(nn.Module): - def __init__(self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.LeakyReLU): - super(Encoder, self).__init__() - self.conv1 = Conv2DBNActiv(nin, 
nout, ksize, 1, pad, activ=activ) - self.conv2 = Conv2DBNActiv(nout, nout, ksize, stride, pad, activ=activ) - - def __call__(self, x): - skip = self.conv1(x) - h = self.conv2(skip) - - return h, skip - - -class Decoder(nn.Module): - def __init__( - self, nin, nout, ksize=3, stride=1, pad=1, activ=nn.ReLU, dropout=False - ): - super(Decoder, self).__init__() - self.conv = Conv2DBNActiv(nin, nout, ksize, 1, pad, activ=activ) - self.dropout = nn.Dropout2d(0.1) if dropout else None - - def __call__(self, x, skip=None): - x = F.interpolate(x, scale_factor=2, mode="bilinear", align_corners=True) - if skip is not None: - skip = spec_utils.crop_center(skip, x) - x = torch.cat([x, skip], dim=1) - h = self.conv(x) - - if self.dropout is not None: - h = self.dropout(h) - - return h - - -class ASPPModule(nn.Module): - def __init__(self, nin, nout, dilations=(4, 8, 16, 32, 64), activ=nn.ReLU): - super(ASPPModule, self).__init__() - self.conv1 = nn.Sequential( - nn.AdaptiveAvgPool2d((1, None)), - Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ), - ) - self.conv2 = Conv2DBNActiv(nin, nin, 1, 1, 0, activ=activ) - self.conv3 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[0], dilations[0], activ=activ - ) - self.conv4 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[1], dilations[1], activ=activ - ) - self.conv5 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv6 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.conv7 = SeperableConv2DBNActiv( - nin, nin, 3, 1, dilations[2], dilations[2], activ=activ - ) - self.bottleneck = nn.Sequential( - Conv2DBNActiv(nin * 7, nout, 1, 1, 0, activ=activ), nn.Dropout2d(0.1) - ) - - def forward(self, x): - _, _, h, w = x.size() - feat1 = F.interpolate( - self.conv1(x), size=(h, w), mode="bilinear", align_corners=True - ) - feat2 = self.conv2(x) - feat3 = self.conv3(x) - feat4 = self.conv4(x) - feat5 = self.conv5(x) - feat6 = self.conv6(x) - feat7 = self.conv7(x) - out = torch.cat((feat1, feat2, feat3, feat4, feat5, feat6, feat7), dim=1) - bottle = self.bottleneck(out) - return bottle diff --git a/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/utils/__init__.py b/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/utils/__init__.py deleted file mode 100644 index 9020c2df23e2af280b7bb168b996ae9eaf312eb8..0000000000000000000000000000000000000000 --- a/spaces/shikunl/prismer/prismer/experts/segmentation/mask2former/utils/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. diff --git a/spaces/sidharthism/fashion-eye/netdissect/runningstats.py b/spaces/sidharthism/fashion-eye/netdissect/runningstats.py deleted file mode 100644 index fe4093e0318edeecf8aebc34771adbde5043e2d4..0000000000000000000000000000000000000000 --- a/spaces/sidharthism/fashion-eye/netdissect/runningstats.py +++ /dev/null @@ -1,773 +0,0 @@ -''' -Running statistics on the GPU using pytorch. - -RunningTopK maintains top-k statistics for a set of channels in parallel. -RunningQuantile maintains (sampled) quantile statistics for a set of channels. -''' - -import torch, math, numpy -from collections import defaultdict - -class RunningTopK: - ''' - A class to keep a running tally of the the top k values (and indexes) - of any number of torch feature components. Will work on the GPU if - the data is on the GPU. - - This version flattens all arrays to avoid crashes. 
- ''' - def __init__(self, k=100, state=None): - if state is not None: - self.set_state_dict(state) - return - self.k = k - self.count = 0 - # This version flattens all data internally to 2-d tensors, - # to avoid crashes with the current pytorch topk implementation. - # The data is puffed back out to arbitrary tensor shapes on ouput. - self.data_shape = None - self.top_data = None - self.top_index = None - self.next = 0 - self.linear_index = 0 - self.perm = None - - def add(self, data): - ''' - Adds a batch of data to be considered for the running top k. - The zeroth dimension enumerates the observations. All other - dimensions enumerate different features. - ''' - if self.top_data is None: - # Allocation: allocate a buffer of size 5*k, at least 10, for each. - self.data_shape = data.shape[1:] - feature_size = int(numpy.prod(self.data_shape)) - self.top_data = torch.zeros( - feature_size, max(10, self.k * 5), out=data.new()) - self.top_index = self.top_data.clone().long() - self.linear_index = 0 if len(data.shape) == 1 else torch.arange( - feature_size, out=self.top_index.new()).mul_( - self.top_data.shape[-1])[:,None] - size = data.shape[0] - sk = min(size, self.k) - if self.top_data.shape[-1] < self.next + sk: - # Compression: if full, keep topk only. - self.top_data[:,:self.k], self.top_index[:,:self.k] = ( - self.result(sorted=False, flat=True)) - self.next = self.k - free = self.top_data.shape[-1] - self.next - # Pick: copy the top sk of the next batch into the buffer. - # Currently strided topk is slow. So we clone after transpose. - # TODO: remove the clone() if it becomes faster. - cdata = data.contiguous().view(size, -1).t().clone() - td, ti = cdata.topk(sk, sorted=False) - self.top_data[:,self.next:self.next+sk] = td - self.top_index[:,self.next:self.next+sk] = (ti + self.count) - self.next += sk - self.count += size - - def result(self, sorted=True, flat=False): - ''' - Returns top k data items and indexes in each dimension, - with channels in the first dimension and k in the last dimension. - ''' - k = min(self.k, self.next) - # bti are top indexes relative to buffer array. - td, bti = self.top_data[:,:self.next].topk(k, sorted=sorted) - # we want to report top indexes globally, which is ti. - ti = self.top_index.view(-1)[ - (bti + self.linear_index).view(-1) - ].view(*bti.shape) - if flat: - return td, ti - else: - return (td.view(*(self.data_shape + (-1,))), - ti.view(*(self.data_shape + (-1,)))) - - def to_(self, device): - self.top_data = self.top_data.to(device) - self.top_index = self.top_index.to(device) - if isinstance(self.linear_index, torch.Tensor): - self.linear_index = self.linear_index.to(device) - - def state_dict(self): - return dict( - constructor=self.__module__ + '.' 
+ - self.__class__.__name__ + '()', - k=self.k, - count=self.count, - data_shape=tuple(self.data_shape), - top_data=self.top_data.cpu().numpy(), - top_index=self.top_index.cpu().numpy(), - next=self.next, - linear_index=(self.linear_index.cpu().numpy() - if isinstance(self.linear_index, torch.Tensor) - else self.linear_index), - perm=self.perm) - - def set_state_dict(self, dic): - self.k = dic['k'].item() - self.count = dic['count'].item() - self.data_shape = tuple(dic['data_shape']) - self.top_data = torch.from_numpy(dic['top_data']) - self.top_index = torch.from_numpy(dic['top_index']) - self.next = dic['next'].item() - self.linear_index = (torch.from_numpy(dic['linear_index']) - if len(dic['linear_index'].shape) > 0 - else dic['linear_index'].item()) - -class RunningQuantile: - """ - Streaming randomized quantile computation for torch. - - Add any amount of data repeatedly via add(data). At any time, - quantile estimates (or old-style percentiles) can be read out using - quantiles(q) or percentiles(p). - - Accuracy scales according to resolution: the default is to - set resolution to be accurate to better than 0.1%, - while limiting storage to about 50,000 samples. - - Good for computing quantiles of huge data without using much memory. - Works well on arbitrary data with probability near 1. - - Based on the optimal KLL quantile algorithm by Karnin, Lang, and Liberty - from FOCS 2016. http://ieee-focs.org/FOCS-2016-Papers/3933a071.pdf - """ - - def __init__(self, resolution=6 * 1024, buffersize=None, seed=None, - state=None): - if state is not None: - self.set_state_dict(state) - return - self.depth = None - self.dtype = None - self.device = None - self.resolution = resolution - # Default buffersize: 128 samples (and smaller than resolution). - if buffersize is None: - buffersize = min(128, (resolution + 7) // 8) - self.buffersize = buffersize - self.samplerate = 1.0 - self.data = None - self.firstfree = [0] - self.randbits = torch.ByteTensor(resolution) - self.currentbit = len(self.randbits) - 1 - self.extremes = None - self.size = 0 - - def _lazy_init(self, incoming): - self.depth = incoming.shape[1] - self.dtype = incoming.dtype - self.device = incoming.device - self.data = [torch.zeros(self.depth, self.resolution, - dtype=self.dtype, device=self.device)] - self.extremes = torch.zeros(self.depth, 2, - dtype=self.dtype, device=self.device) - self.extremes[:,0] = float('inf') - self.extremes[:,-1] = -float('inf') - - def to_(self, device): - """Switches internal storage to specified device.""" - if device != self.device: - old_data = self.data - old_extremes = self.extremes - self.data = [d.to(device) for d in self.data] - self.extremes = self.extremes.to(device) - self.device = self.extremes.device - del old_data - del old_extremes - - def add(self, incoming): - if self.depth is None: - self._lazy_init(incoming) - assert len(incoming.shape) == 2 - assert incoming.shape[1] == self.depth, (incoming.shape[1], self.depth) - self.size += incoming.shape[0] - # Convert to a flat torch array. - if self.samplerate >= 1.0: - self._add_every(incoming) - return - # If we are sampling, then subsample a large chunk at a time. 
- self._scan_extremes(incoming) - chunksize = int(math.ceil(self.buffersize / self.samplerate)) - for index in range(0, len(incoming), chunksize): - batch = incoming[index:index+chunksize] - sample = sample_portion(batch, self.samplerate) - if len(sample): - self._add_every(sample) - - def _add_every(self, incoming): - supplied = len(incoming) - index = 0 - while index < supplied: - ff = self.firstfree[0] - available = self.data[0].shape[1] - ff - if available == 0: - if not self._shift(): - # If we shifted by subsampling, then subsample. - incoming = incoming[index:] - if self.samplerate >= 0.5: - # First time sampling - the data source is very large. - self._scan_extremes(incoming) - incoming = sample_portion(incoming, self.samplerate) - index = 0 - supplied = len(incoming) - ff = self.firstfree[0] - available = self.data[0].shape[1] - ff - copycount = min(available, supplied - index) - self.data[0][:,ff:ff + copycount] = torch.t( - incoming[index:index + copycount,:]) - self.firstfree[0] += copycount - index += copycount - - def _shift(self): - index = 0 - # If remaining space at the current layer is less than half prev - # buffer size (rounding up), then we need to shift it up to ensure - # enough space for future shifting. - while self.data[index].shape[1] - self.firstfree[index] < ( - -(-self.data[index-1].shape[1] // 2) if index else 1): - if index + 1 >= len(self.data): - return self._expand() - data = self.data[index][:,0:self.firstfree[index]] - data = data.sort()[0] - if index == 0 and self.samplerate >= 1.0: - self._update_extremes(data[:,0], data[:,-1]) - offset = self._randbit() - position = self.firstfree[index + 1] - subset = data[:,offset::2] - self.data[index + 1][:,position:position + subset.shape[1]] = subset - self.firstfree[index] = 0 - self.firstfree[index + 1] += subset.shape[1] - index += 1 - return True - - def _scan_extremes(self, incoming): - # When sampling, we need to scan every item still to get extremes - self._update_extremes( - torch.min(incoming, dim=0)[0], - torch.max(incoming, dim=0)[0]) - - def _update_extremes(self, minr, maxr): - self.extremes[:,0] = torch.min( - torch.stack([self.extremes[:,0], minr]), dim=0)[0] - self.extremes[:,-1] = torch.max( - torch.stack([self.extremes[:,-1], maxr]), dim=0)[0] - - def _randbit(self): - self.currentbit += 1 - if self.currentbit >= len(self.randbits): - self.randbits.random_(to=2) - self.currentbit = 0 - return self.randbits[self.currentbit] - - def state_dict(self): - return dict( - constructor=self.__module__ + '.' 
+ - self.__class__.__name__ + '()', - resolution=self.resolution, - depth=self.depth, - buffersize=self.buffersize, - samplerate=self.samplerate, - data=[d.cpu().numpy()[:,:f].T - for d, f in zip(self.data, self.firstfree)], - sizes=[d.shape[1] for d in self.data], - extremes=self.extremes.cpu().numpy(), - size=self.size) - - def set_state_dict(self, dic): - self.resolution = int(dic['resolution']) - self.randbits = torch.ByteTensor(self.resolution) - self.currentbit = len(self.randbits) - 1 - self.depth = int(dic['depth']) - self.buffersize = int(dic['buffersize']) - self.samplerate = float(dic['samplerate']) - firstfree = [] - buffers = [] - for d, s in zip(dic['data'], dic['sizes']): - firstfree.append(d.shape[0]) - buf = numpy.zeros((d.shape[1], s), dtype=d.dtype) - buf[:,:d.shape[0]] = d.T - buffers.append(torch.from_numpy(buf)) - self.firstfree = firstfree - self.data = buffers - self.extremes = torch.from_numpy((dic['extremes'])) - self.size = int(dic['size']) - self.dtype = self.extremes.dtype - self.device = self.extremes.device - - def minmax(self): - if self.firstfree[0]: - self._scan_extremes(self.data[0][:,:self.firstfree[0]].t()) - return self.extremes.clone() - - def median(self): - return self.quantiles([0.5])[:,0] - - def mean(self): - return self.integrate(lambda x: x) / self.size - - def variance(self): - mean = self.mean()[:,None] - return self.integrate(lambda x: (x - mean).pow(2)) / (self.size - 1) - - def stdev(self): - return self.variance().sqrt() - - def _expand(self): - cap = self._next_capacity() - if cap > 0: - # First, make a new layer of the proper capacity. - self.data.insert(0, torch.zeros(self.depth, cap, - dtype=self.dtype, device=self.device)) - self.firstfree.insert(0, 0) - else: - # Unless we're so big we are just subsampling. - assert self.firstfree[0] == 0 - self.samplerate *= 0.5 - for index in range(1, len(self.data)): - # Scan for existing data that needs to be moved down a level. - amount = self.firstfree[index] - if amount == 0: - continue - position = self.firstfree[index-1] - # Move data down if it would leave enough empty space there - # This is the key invariant: enough empty space to fit half - # of the previous level's buffer size (rounding up) - if self.data[index-1].shape[1] - (amount + position) >= ( - -(-self.data[index-2].shape[1] // 2) if (index-1) else 1): - self.data[index-1][:,position:position + amount] = ( - self.data[index][:,:amount]) - self.firstfree[index-1] += amount - self.firstfree[index] = 0 - else: - # Scrunch the data if it would not. - data = self.data[index][:,:amount] - data = data.sort()[0] - if index == 1: - self._update_extremes(data[:,0], data[:,-1]) - offset = self._randbit() - scrunched = data[:,offset::2] - self.data[index][:,:scrunched.shape[1]] = scrunched - self.firstfree[index] = scrunched.shape[1] - return cap > 0 - - def _next_capacity(self): - cap = int(math.ceil(self.resolution * (0.67 ** len(self.data)))) - if cap < 2: - return 0 - # Round up to the nearest multiple of 8 for better GPU alignment. 
- cap = -8 * (-cap // 8) - return max(self.buffersize, cap) - - def _weighted_summary(self, sort=True): - if self.firstfree[0]: - self._scan_extremes(self.data[0][:,:self.firstfree[0]].t()) - size = sum(self.firstfree) + 2 - weights = torch.FloatTensor(size) # Floating point - summary = torch.zeros(self.depth, size, - dtype=self.dtype, device=self.device) - weights[0:2] = 0 - summary[:,0:2] = self.extremes - index = 2 - for level, ff in enumerate(self.firstfree): - if ff == 0: - continue - summary[:,index:index + ff] = self.data[level][:,:ff] - weights[index:index + ff] = 2.0 ** level - index += ff - assert index == summary.shape[1] - if sort: - summary, order = torch.sort(summary, dim=-1) - weights = weights[order.view(-1).cpu()].view(order.shape) - return (summary, weights) - - def quantiles(self, quantiles, old_style=False): - if self.size == 0: - return torch.full((self.depth, len(quantiles)), torch.nan) - summary, weights = self._weighted_summary() - cumweights = torch.cumsum(weights, dim=-1) - weights / 2 - if old_style: - # To be convenient with torch.percentile - cumweights -= cumweights[:,0:1].clone() - cumweights /= cumweights[:,-1:].clone() - else: - cumweights /= torch.sum(weights, dim=-1, keepdim=True) - result = torch.zeros(self.depth, len(quantiles), - dtype=self.dtype, device=self.device) - # numpy is needed for interpolation - if not hasattr(quantiles, 'cpu'): - quantiles = torch.Tensor(quantiles) - nq = quantiles.cpu().numpy() - ncw = cumweights.cpu().numpy() - nsm = summary.cpu().numpy() - for d in range(self.depth): - result[d] = torch.tensor(numpy.interp(nq, ncw[d], nsm[d]), - dtype=self.dtype, device=self.device) - return result - - def integrate(self, fun): - result = None - for level, ff in enumerate(self.firstfree): - if ff == 0: - continue - term = torch.sum( - fun(self.data[level][:,:ff]) * (2.0 ** level), - dim=-1) - if result is None: - result = term - else: - result += term - if result is not None: - result /= self.samplerate - return result - - def percentiles(self, percentiles): - return self.quantiles(percentiles, old_style=True) - - def readout(self, count=1001, old_style=True): - return self.quantiles( - torch.linspace(0.0, 1.0, count), old_style=old_style) - - def normalize(self, data): - ''' - Given input data as taken from the training distirbution, - normalizes every channel to reflect quantile values, - uniformly distributed, within [0, 1]. - ''' - assert self.size > 0 - assert data.shape[0] == self.depth - summary, weights = self._weighted_summary() - cumweights = torch.cumsum(weights, dim=-1) - weights / 2 - cumweights /= torch.sum(weights, dim=-1, keepdim=True) - result = torch.zeros_like(data).float() - # numpy is needed for interpolation - ndata = data.cpu().numpy().reshape((data.shape[0], -1)) - ncw = cumweights.cpu().numpy() - nsm = summary.cpu().numpy() - for d in range(self.depth): - normed = torch.tensor(numpy.interp(ndata[d], nsm[d], ncw[d]), - dtype=torch.float, device=data.device).clamp_(0.0, 1.0) - if len(data.shape) > 1: - normed = normed.view(*(data.shape[1:])) - result[d] = normed - return result - - -class RunningConditionalQuantile: - ''' - Equivalent to a map from conditions (any python hashable type) - to RunningQuantiles. The reason for the type is to allow limited - GPU memory to be exploited while counting quantile stats on many - different conditions, a few of which are common and which benefit - from GPU, but most of which are rare and would not all fit into - GPU RAM. 
- - To move a set of conditions to a device, use rcq.to_(device, conds). - Then in the future, move the tallied data to the device before - calling rcq.add, that is, rcq.add(cond, data.to(device)). - - To allow the caller to decide which conditions to allow to use GPU, - rcq.most_common_conditions(n) returns a list of the n most commonly - added conditions so far. - ''' - def __init__(self, resolution=6 * 1024, buffersize=None, seed=None, - state=None): - self.first_rq = None - self.call_stats = defaultdict(int) - self.running_quantiles = {} - if state is not None: - self.set_state_dict(state) - return - self.rq_args = dict(resolution=resolution, buffersize=buffersize, - seed=seed) - - def add(self, condition, incoming): - if condition not in self.running_quantiles: - self.running_quantiles[condition] = RunningQuantile(**self.rq_args) - if self.first_rq is None: - self.first_rq = self.running_quantiles[condition] - self.call_stats[condition] += 1 - rq = self.running_quantiles[condition] - # For performance reasons, the caller can move some conditions to - # the CPU if they are not among the most common conditions. - if rq.device is not None and (rq.device != incoming.device): - rq.to_(incoming.device) - self.running_quantiles[condition].add(incoming) - - def most_common_conditions(self, n): - return sorted(self.call_stats.keys(), - key=lambda c: -self.call_stats[c])[:n] - - def collected_add(self, conditions, incoming): - for c in conditions: - self.add(c, incoming) - - def conditional(self, c): - return self.running_quantiles[c] - - def collected_quantiles(self, conditions, quantiles, old_style=False): - result = torch.zeros( - size=(len(conditions), self.first_rq.depth, len(quantiles)), - dtype=self.first_rq.dtype, - device=self.first_rq.device) - for i, c in enumerate(conditions): - if c in self.running_quantiles: - result[i] = self.running_quantiles[c].quantiles( - quantiles, old_style) - return result - - def collected_normalize(self, conditions, values): - result = torch.zeros( - size=(len(conditions), values.shape[0], values.shape[1]), - dtype=torch.float, - device=self.first_rq.device) - for i, c in enumerate(conditions): - if c in self.running_quantiles: - result[i] = self.running_quantiles[c].normalize(values) - return result - - def to_(self, device, conditions=None): - if conditions is None: - conditions = self.running_quantiles.keys() - for cond in conditions: - if cond in self.running_quantiles: - self.running_quantiles[cond].to_(device) - - def state_dict(self): - conditions = sorted(self.running_quantiles.keys()) - result = dict( - constructor=self.__module__ + '.' + - self.__class__.__name__ + '()', - rq_args=self.rq_args, - conditions=conditions) - for i, c in enumerate(conditions): - result.update({ - '%d.%s' % (i, k): v - for k, v in self.running_quantiles[c].state_dict().items()}) - return result - - def set_state_dict(self, dic): - self.rq_args = dic['rq_args'].item() - conditions = list(dic['conditions']) - subdicts = defaultdict(dict) - for k, v in dic.items(): - if '.' 
in k: - p, s = k.split('.', 1) - subdicts[p][s] = v - self.running_quantiles = { - c: RunningQuantile(state=subdicts[str(i)]) - for i, c in enumerate(conditions)} - if conditions: - self.first_rq = self.running_quantiles[conditions[0]] - - # example usage: - # levels = rqc.conditional(()).quantiles(1 - fracs) - # denoms = 1 - rqc.collected_normalize(cats, levels) - # isects = 1 - rqc.collected_normalize(labels, levels) - # unions = fracs + denoms[cats] - isects - # iou = isects / unions - - - - -class RunningCrossCovariance: - ''' - Running computation. Use this when an off-diagonal block of the - covariance matrix is needed (e.g., when the whole covariance matrix - does not fit in the GPU). - - Chan-style numerically stable update of mean and full covariance matrix. - Chan, Golub. LeVeque. 1983. http://www.jstor.org/stable/2683386 - ''' - def __init__(self, state=None): - if state is not None: - self.set_state_dict(state) - return - self.count = 0 - self._mean = None - self.cmom2 = None - self.v_cmom2 = None - - def add(self, a, b): - if len(a.shape) == 1: - a = a[None, :] - b = b[None, :] - assert(a.shape[0] == b.shape[0]) - if len(a.shape) > 2: - a, b = [d.view(d.shape[0], d.shape[1], -1).permute(0, 2, 1 - ).contiguous().view(-1, d.shape[1]) for d in [a, b]] - batch_count = a.shape[0] - batch_mean = [d.sum(0) / batch_count for d in [a, b]] - centered = [d - bm for d, bm in zip([a, b], batch_mean)] - # If more than 10 billion operations, divide into batches. - sub_batch = -(-(10 << 30) // (a.shape[1] * b.shape[1])) - # Initial batch. - if self._mean is None: - self.count = batch_count - self._mean = batch_mean - self.v_cmom2 = [c.pow(2).sum(0) for c in centered] - self.cmom2 = a.new(a.shape[1], b.shape[1]).zero_() - progress_addbmm(self.cmom2, centered[0][:,:,None], - centered[1][:,None,:], sub_batch) - return - # Update a batch using Chan-style update for numerical stability. - oldcount = self.count - self.count += batch_count - new_frac = float(batch_count) / self.count - # Update the mean according to the batch deviation from the old mean. - delta = [bm.sub_(m).mul_(new_frac) - for bm, m in zip(batch_mean, self._mean)] - for m, d in zip(self._mean, delta): - m.add_(d) - # Update the cross-covariance using the batch deviation - progress_addbmm(self.cmom2, centered[0][:,:,None], - centered[1][:,None,:], sub_batch) - self.cmom2.addmm_(alpha=new_frac * oldcount, - mat1=delta[0][:,None], mat2=delta[1][None,:]) - # Update the variance using the batch deviation - for c, vc2, d in zip(centered, self.v_cmom2, delta): - vc2.add_(c.pow(2).sum(0)) - vc2.add_(d.pow_(2).mul_(new_frac * oldcount)) - - def mean(self): - return self._mean - - def variance(self): - return [vc2 / (self.count - 1) for vc2 in self.v_cmom2] - - def stdev(self): - return [v.sqrt() for v in self.variance()] - - def covariance(self): - return self.cmom2 / (self.count - 1) - - def correlation(self): - covariance = self.covariance() - rstdev = [s.reciprocal() for s in self.stdev()] - cor = rstdev[0][:,None] * covariance * rstdev[1][None,:] - # Remove NaNs - cor[torch.isnan(cor)] = 0 - return cor - - def to_(self, device): - self._mean = [m.to(device) for m in self._mean] - self.v_cmom2 = [vcs.to(device) for vcs in self.v_cmom2] - self.cmom2 = self.cmom2.to(device) - - def state_dict(self): - return dict( - constructor=self.__module__ + '.' 
+ - self.__class__.__name__ + '()', - count=self.count, - mean_a=self._mean[0].cpu().numpy(), - mean_b=self._mean[1].cpu().numpy(), - cmom2_a=self.v_cmom2[0].cpu().numpy(), - cmom2_b=self.v_cmom2[1].cpu().numpy(), - cmom2=self.cmom2.cpu().numpy()) - - def set_state_dict(self, dic): - self.count = dic['count'].item() - self._mean = [torch.from_numpy(dic[k]) for k in ['mean_a', 'mean_b']] - self.v_cmom2 = [torch.from_numpy(dic[k]) - for k in ['cmom2_a', 'cmom2_b']] - self.cmom2 = torch.from_numpy(dic['cmom2']) - -def progress_addbmm(accum, x, y, batch_size): - ''' - Break up very large adbmm operations into batches so progress can be seen. - ''' - from .progress import default_progress - if x.shape[0] <= batch_size: - return accum.addbmm_(x, y) - progress = default_progress(None) - for i in progress(range(0, x.shape[0], batch_size), desc='bmm'): - accum.addbmm_(x[i:i+batch_size], y[i:i+batch_size]) - return accum - - -def sample_portion(vec, p=0.5): - bits = torch.bernoulli(torch.zeros(vec.shape[0], dtype=torch.uint8, - device=vec.device), p) - return vec[bits] - -if __name__ == '__main__': - import warnings - warnings.filterwarnings("error") - import time - import argparse - parser = argparse.ArgumentParser( - description='Test things out') - parser.add_argument('--mode', default='cpu', help='cpu or cuda') - parser.add_argument('--test_size', type=int, default=1000000) - args = parser.parse_args() - - # An adverarial case: we keep finding more numbers in the middle - # as the stream goes on. - amount = args.test_size - quantiles = 1000 - data = numpy.arange(float(amount)) - data[1::2] = data[-1::-2] + (len(data) - 1) - data /= 2 - depth = 50 - test_cuda = torch.cuda.is_available() - alldata = data[:,None] + (numpy.arange(depth) * amount)[None, :] - actual_sum = torch.FloatTensor(numpy.sum(alldata * alldata, axis=0)) - amt = amount // depth - for r in range(depth): - numpy.random.shuffle(alldata[r*amt:r*amt+amt,r]) - if args.mode == 'cuda': - alldata = torch.cuda.FloatTensor(alldata) - dtype = torch.float - device = torch.device('cuda') - else: - alldata = torch.FloatTensor(alldata) - dtype = torch.float - device = None - starttime = time.time() - qc = RunningQuantile(resolution=6 * 1024) - qc.add(alldata) - # Test state dict - saved = qc.state_dict() - # numpy.savez('foo.npz', **saved) - # saved = numpy.load('foo.npz') - qc = RunningQuantile(state=saved) - assert not qc.device.type == 'cuda' - qc.add(alldata) - actual_sum *= 2 - ro = qc.readout(1001).cpu() - endtime = time.time() - gt = torch.linspace(0, amount, quantiles+1)[None,:] + ( - torch.arange(qc.depth, dtype=torch.float) * amount)[:,None] - maxreldev = torch.max(torch.abs(ro - gt) / amount) * quantiles - print("Maximum relative deviation among %d perentiles: %f" % ( - quantiles, maxreldev)) - minerr = torch.max(torch.abs(qc.minmax().cpu()[:,0] - - torch.arange(qc.depth, dtype=torch.float) * amount)) - maxerr = torch.max(torch.abs((qc.minmax().cpu()[:, -1] + 1) - - (torch.arange(qc.depth, dtype=torch.float) + 1) * amount)) - print("Minmax error %f, %f" % (minerr, maxerr)) - interr = torch.max(torch.abs(qc.integrate(lambda x: x * x).cpu() - - actual_sum) / actual_sum) - print("Integral error: %f" % interr) - medianerr = torch.max(torch.abs(qc.median() - - alldata.median(0)[0]) / alldata.median(0)[0]).cpu() - print("Median error: %f" % interr) - meanerr = torch.max( - torch.abs(qc.mean() - alldata.mean(0)) / alldata.mean(0)).cpu() - print("Mean error: %f" % meanerr) - varerr = torch.max( - torch.abs(qc.variance() - alldata.var(0)) / 
alldata.var(0)).cpu() - print("Variance error: %f" % varerr) - counterr = ((qc.integrate(lambda x: torch.ones(x.shape[-1]).cpu()) - - qc.size) / (0.0 + qc.size)).item() - print("Count error: %f" % counterr) - print("Time %f" % (endtime - starttime)) - # Algorithm is randomized, so some of these will fail with low probability. - assert maxreldev < 1.0 - assert minerr == 0.0 - assert maxerr == 0.0 - assert interr < 0.01 - assert abs(counterr) < 0.001 - print("OK") diff --git "a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/The.x.files.s06.season.6.720p.bluray.x265.hevc.tangoalpha /TOP\\\\.md" "b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/The.x.files.s06.season.6.720p.bluray.x265.hevc.tangoalpha /TOP\\\\.md" deleted file mode 100644 index 994b30b1cd5c0c209d9f59357bd5a7e263a1e011..0000000000000000000000000000000000000000 --- "a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/The.x.files.s06.season.6.720p.bluray.x265.hevc.tangoalpha /TOP\\\\.md" +++ /dev/null @@ -1,70 +0,0 @@ -## The.x.files.s06.season.6.720p.bluray.x265.hevc.tangoalpha - - - - - - ![The.x.files.s06.season.6.720p.bluray.x265.hevc.tangoalpha \/\/TOP\\\\](https://3.bp.blogspot.com/-8vxCD_decUQ/V3o628B3z4I/AAAAAAAAACQ/scQjZndd_M03U8uz9hBBmhJ0UeNyUkfsQCK4B/s1600/x_files_04.jpg) - - - - - -**Download File ->>> [https://kneedacexbrew.blogspot.com/?d=2txiFd](https://kneedacexbrew.blogspot.com/?d=2txiFd)** - - - - - - - - - - - - - -# The X-Files Season 6: A Sci-Fi Thriller with a Twist - - - -The X-Files is a popular TV series that follows the adventures of FBI agents Fox Mulder and Dana Scully as they investigate paranormal phenomena and conspiracy theories. The show combines elements of science fiction, horror, drama, and comedy, and has a loyal fan base around the world. - - - -The sixth season of The X-Files aired from 1998 to 1999 and consisted of 22 episodes. It was the first season to be filmed in Los Angeles, California, after the production moved from Vancouver, Canada. The season also marked a shift in tone and style, as the show became more humorous and self-referential, while still maintaining its suspense and mystery. - - - -The season begins with the aftermath of the movie The X-Files: Fight the Future, which revealed the existence of a global alien colonization plot. Mulder and Scully are reassigned to other cases by the FBI, but they continue to pursue the truth behind the X-Files. Along the way, they encounter various monsters, mutants, cults, psychics, and even vampires. They also face personal challenges, such as Mulder's illness, Scully's cancer remission, and their growing feelings for each other. - - - -The sixth season of The X-Files features some of the most memorable and acclaimed episodes of the series, such as "Triangle", "Dreamland", "How the Ghosts Stole Christmas", "The Unnatural", and "The Rain King". It also introduces new characters and villains, such as Agent Jeffrey Spender, Agent Diana Fowley, and the faceless alien rebels. - - - -If you are a fan of The X-Files or sci-fi in general, you will enjoy watching the sixth season in high quality with the 720p BluRay x265 HEVC TangoAlpha release. This release offers a superior video and audio quality compared to other formats, and has a smaller file size for easy downloading and streaming. You can find this release on various torrent sites or online platforms. 
- - - -The X-Files Season 6 is a thrilling and entertaining season that will keep you hooked from start to finish. Don't miss this chance to watch one of the best TV shows of all time in HD quality with the 720p BluRay x265 HEVC TangoAlpha release. - - - -One of the highlights of the sixth season of The X-Files is the two-part episode "Dreamland", which features a hilarious body swap between Mulder and a mysterious government agent named Morris Fletcher. The episode explores the consequences of living in someone else's life, and how it affects their relationships, careers, and personalities. The episode also showcases the comedic talents of David Duchovny and Michael McKean, who play each other's roles with great skill and humor. - - - -Another standout episode of the season is "The Unnatural", which was written and directed by David Duchovny himself. The episode tells the story of Arthur Dales, a former FBI agent who investigated a case involving an alien who disguised himself as a baseball player in the 1940s. The episode is a tribute to the sport of baseball and its history, as well as a touching exploration of friendship, racism, and love. The episode also features a cameo appearance by Jesse L. Martin as a young Arthur Dales. - - - -The season finale of The X-Files Season 6 is "Biogenesis", which sets up the events for the seventh season. The episode involves the discovery of an ancient artifact that contains alien writing and DNA. The artifact triggers a series of visions and hallucinations in Mulder, who becomes obsessed with finding its origin and meaning. Meanwhile, Scully tries to help Mulder and uncover the truth behind the artifact, while facing opposition from the Syndicate and their allies. - - 1b8d091108 - - - - - diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/CG ADEO 2017 Exam Paper PDF Download Chhattisgarh Sahayak Vikas Vistar Adhikari Model Papers.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/CG ADEO 2017 Exam Paper PDF Download Chhattisgarh Sahayak Vikas Vistar Adhikari Model Papers.md deleted file mode 100644 index e59299a37ab3effaa6456411e748589e6accca2f..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/CG ADEO 2017 Exam Paper PDF Download Chhattisgarh Sahayak Vikas Vistar Adhikari Model Papers.md +++ /dev/null @@ -1,174 +0,0 @@ -
      -

      ADEO Question Paper 2017 PDF Download: How to Prepare for the CG Vyapam ADEO Exam 2023

      -

      If you are aspiring to become an Assistant Development Extension Officer (ADEO) in Chhattisgarh, you must be aware of the CG Vyapam ADEO exam conducted by the Chhattisgarh Professional Examination Board (CGPEB). This exam is a golden opportunity for candidates who want to work in the rural development sector and contribute to the welfare of the state. In this article, we will tell you everything you need to know about the CG Vyapam ADEO exam 2023, including how to download the ADEO question paper 2017 PDF file and how to prepare for it.

      -

      Introduction

      -

      An Assistant Development Extension Officer (ADEO) is a state government employee who works under the Department of Panchayat and Rural Development in Chhattisgarh. The main duties and responsibilities of an ADEO are:

      -

      adeo question paper 2017 pdf download


      DOWNLOAD 🗸🗸🗸 https://ssurll.com/2uNV00



      -
        -
• To implement various schemes and programs related to rural development, such as MGNREGA, PMAY-G, NRLM, etc.
• To coordinate with different stakeholders, such as panchayats, NGOs, banks, etc., for effective delivery of services.
• To monitor and evaluate the progress and impact of rural development activities.
• To provide guidance and support to rural communities and beneficiaries.
• To report and document the achievements and challenges of rural development initiatives.
      -

      The CG Vyapam ADEO exam is a competitive examination conducted by the Chhattisgarh Professional Examination Board (CGPEB) every year to recruit eligible candidates for the post of ADEO. The exam is conducted in two stages: written test and interview. The written test consists of 150 objective-type questions carrying two marks each. The total duration of the written test is three hours. The questions are based on four subjects: computer knowledge, Hindi/English, mathematics, and general knowledge. The interview is conducted for those candidates who qualify in the written test. The final merit list is prepared based on the marks obtained in both stages.

      -

      The eligibility criteria for applying for the CG Vyapam ADEO exam 2023 are as follows:

      -
        -
• The candidate must be a citizen of India.
• The candidate must have passed the 10+2 or equivalent examination from a recognized board.
• The candidate must have a bachelor's degree or equivalent qualification from a recognized university.
• The candidate must have basic knowledge of computer applications.
• The candidate must be between 18 and 35 years of age as on January 1, 2023. Age relaxation is applicable as per government rules.
      -

      How to Download ADEO Question Paper 2017 PDF

      -

      One of the best ways to prepare for any competitive exam is to solve previous year question papers as they help you to understand the exam pattern, syllabus, difficulty level, and types of questions asked. They also help you to improve your speed, accuracy, and time management skills. Moreover, they help you to identify your strengths and weaknesses and work on them accordingly.

      -

      To download the ADEO question paper 2017 PDF file, you need to follow these simple steps:

      -
        -
1. Visit the official website of CG Vyapam at http://cgvyapam.choice.gov.in/.
2. On the homepage, click on the tab "Previous Year Question Papers" under the section "Important Information".
3. A new page will open with a list of various exams conducted by CG Vyapam. Scroll down and find the link for "ADEO 2017".
4. Click on the link and a PDF file will open in a new tab. You can view, download, or print the ADEO question paper 2017 from there (a scripted alternative is sketched just after this list).
      -
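If you prefer to script the download once you have copied the direct PDF link from that page, the short Python sketch below shows one way to save the file. It is only an illustration: the URL and output file name are placeholders rather than the official CG Vyapam links, and it assumes the requests package is installed.

```python
# Minimal sketch: save a previous-year paper once you have its direct PDF link.
# The URL below is a placeholder, not the real CG Vyapam download link.
import requests

PDF_URL = "https://example.com/adeo-question-paper-2017.pdf"  # hypothetical link

response = requests.get(PDF_URL, timeout=30)
response.raise_for_status()  # stop here if the server did not return the file

with open("adeo_question_paper_2017.pdf", "wb") as handle:
    handle.write(response.content)

print(f"Saved {len(response.content)} bytes to adeo_question_paper_2017.pdf")
```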

      The ADEO question paper 2017 PDF file has many benefits for your preparation. Some of them are:

      -
        -
• It gives you an idea of the actual exam scenario and helps you to overcome exam fear and anxiety.
• It helps you to revise the entire syllabus in a systematic and comprehensive manner.
• It helps you to assess your performance and check your answers with the help of the answer key provided at the end of the PDF file.
• It helps you to learn from your mistakes and avoid them in the future.
• It helps you to boost your confidence and motivation levels.
      -

      To use the ADEO question paper 2017 PDF file for practice and revision, you need to follow these tips:

      -

      adeo question paper 2017 pdf download with answer key
      -adeo question paper 2017 pdf download in hindi
      -adeo question paper 2017 pdf download cg vyapam
      -adeo question paper 2017 pdf download solved
      -adeo question paper 2017 pdf download free
      -adeo question paper 2017 pdf download previous year
      -adeo question paper 2017 pdf download model
      -adeo question paper 2017 pdf download sample
      -adeo question paper 2017 pdf download old
      -adeo question paper 2017 pdf download online
      -adeo question paper 2017 pdf download chhattisgarh
      -adeo question paper 2017 pdf download cgpeb
      -adeo question paper 2017 pdf download syllabus
      -adeo question paper 2017 pdf download exam pattern
      -adeo question paper 2017 pdf download preparation
      -adeo question paper 2017 pdf download practice
      -adeo question paper 2017 pdf download mock test
      -adeo question paper 2017 pdf download quiz
      -adeo question paper 2017 pdf download objective
      -adeo question paper 2017 pdf download multiple choice
      -adeo question paper 2017 pdf download general knowledge
      -adeo question paper 2017 pdf download hindi language
      -adeo question paper 2017 pdf download maths
      -adeo question paper 2017 pdf download reasoning
      -adeo question paper 2017 pdf download english
      -adeo question paper 2017 pdf download computer
      -adeo question paper 2017 pdf download current affairs
      -adeo question paper 2017 pdf download gk notes
      -adeo question paper 2017 pdf download youtube video
      -adeo question paper 2017 pdf download indian booklet
      -adeo question paper 2017 pdf download allgk.in website
      -adeo question paper 2017 pdf download high quality
      -adeo question paper 2017 pdf download latest update
      -adeo question paper 2017 pdf download notification
      -adeo question paper 2017 pdf download vacancy details
      -adeo question paper 2017 pdf download eligibility criteria
      -adeo question paper 2017 pdf download application form
      -adeo question paper 2017 pdf download admit card
      -adeo question paper 2017 pdf download result date
      -adeo question paper 2017 pdf download cut off marks
      -adeo question paper 2017 pdf download merit list
      -adeo question paper 2017 pdf download selection process
      -adeo question paper 2017 pdf download interview questions
      -adeo question paper 2017 pdf download tips and tricks
      -adeo question paper 2017 pdf download study material

      -
        -
• Try to solve the ADEO question paper 2017 in a simulated exam environment. Set a timer for three hours and attempt all the questions without any breaks or distractions.
• After solving the ADEO question paper 2017, check your answers with the help of the answer key and calculate your score. Analyze your performance and identify your strong and weak areas.
• Focus on improving your weak areas by revising the concepts and practicing more questions on them. Also, review your strong areas and keep them updated.
• Try to solve the ADEO question paper 2017 multiple times before the exam. This will help you to improve your speed, accuracy, and time management skills.
• Compare your scores with each attempt and track your progress. Aim to score higher than the previous attempt and reach the desired cut-off marks.
      -

      How to Prepare for the CG Vyapam ADEO Exam 2023

      -

      Besides solving the ADEO question paper 2017 PDF file, you also need to follow a proper study plan and strategy to prepare for the CG Vyapam ADEO exam 2023. Here are some of the best books and study materials that you can refer to for your preparation:

| Subject | Book/Study Material |
| --- | --- |
| Computer Knowledge | Objective Computer Awareness by Arihant Publications |
| Hindi/English | Lucent's General Hindi/English by Lucent Publications |
| Mathematics | Quantitative Aptitude by R.S. Aggarwal |
| General Knowledge | Lucent's General Knowledge by Lucent Publications |
| Rural Development | Rural Development in India by GPH Panel of Experts |
      -

      In addition to these books and study materials, you can also use online platforms, such as YouTube, Unacademy, Testbook, etc., to access video lectures, mock tests, quizzes, current affairs, etc., related to the CG Vyapam ADEO exam 2023.

      -

      To crack the CG Vyapam ADEO exam 2023, you also need to follow some tips and tricks that will help you to enhance your preparation and performance. Here are some of them:

      -
        -
• Make a realistic and flexible study schedule that covers all the topics of the syllabus. Stick to your study schedule and follow it diligently.
• Focus on clearing your concepts and fundamentals before moving on to advanced topics. Use simple and easy-to-understand language and examples to learn new concepts.
• Practice as many questions as possible from different sources, such as previous year papers, mock tests, sample papers, etc. This will help you to improve your problem-solving skills and accuracy.
• Revise the topics regularly and make short notes of important points, formulas, facts, etc. This will help you to retain the information for a longer time and recall it during the exam.
• Stay updated with the current affairs and general knowledge related to Chhattisgarh and India. Read newspapers, magazines, blogs, etc., and watch news channels, documentaries, etc., to enhance your awareness.
• Prepare for the interview stage by practicing your communication skills, body language, personality, etc. Also, prepare some common questions that are asked in the interview, such as your introduction, your motivation, your strengths and weaknesses, etc.
• Take care of your health and well-being by eating a balanced diet, drinking enough water, sleeping well, exercising regularly, meditating, etc. This will help you to stay fit, calm, and focused.
      -

      Conclusion

      -

      The CG Vyapam ADEO exam 2023 is a great opportunity for candidates who want to join the rural development sector in Chhattisgarh. To crack this exam, you need to prepare well and practice hard. One of the best ways to do that is to download the ADEO question paper 2017 PDF file and use it for your preparation. This will help you to understand the exam pattern, syllabus, difficulty level, and types of questions asked. It will also help you to improve your speed, accuracy, and time management skills. Moreover, it will help you to identify your strengths and weaknesses and work on them accordingly.

      -

      We hope that this article has provided you with useful information and guidance on how to download the ADEO question paper 2017 PDF file and how to prepare for the CG Vyapam ADEO exam 2023. If you have any queries or doubts, feel free to ask us in the comments section below. We wish you all the best for your exam!

      -

      FAQs

      -

      Here are some of the frequently asked questions and their answers related to the topic:

      -

      Q1. When will the CG Vyapam ADEO exam 2023 be conducted?

      -

      A1. The official notification for the CG Vyapam ADEO exam 2023 has not been released yet. However, based on the previous year trends, it is expected that the exam will be conducted in the month of October or November 2023.

      -

      Q2. How many vacancies are there for the post of ADEO in Chhattisgarh?

      -

      A2. The exact number of vacancies for the post of ADEO in Chhattisgarh will be announced by the CGPEB along with the official notification. However, based on the previous year trends, it is estimated that there will be around 200 vacancies for the post of ADEO in Chhattisgarh.

      -

      Q3. What is the salary of an ADEO in Chhattisgarh?

      -

      A3. The salary of an ADEO in Chhattisgarh is based on the pay scale of Rs. 5200-20200 with grade pay of Rs. 2400 per month. Apart from this, an ADEO also gets various allowances and benefits as per the government rules.

      -

      Q4. How can I apply for the CG Vyapam ADEO exam 2023?

      -

A4. You can apply for the CG Vyapam ADEO exam 2023 online through the official website of CG Vyapam at http://cgvyapam.choice.gov.in/. Register yourself with your personal details, educational qualifications, and other information, and upload your scanned photograph, signature, and thumb impression. Pay the application fee of Rs. 350 for the general category, Rs. 250 for the OBC category, or Rs. 200 for the SC/ST/PWD category, then submit the application form before the last date and take a printout of the confirmation page for future reference.

      -

      Q5. What are the cut-off marks for the CG Vyapam ADEO exam 2023?

      -

      A5. The cut-off marks for the CG Vyapam ADEO exam 2023 are the minimum marks that a candidate needs to score in order to qualify for the next stage of the selection process. The cut-off marks are decided by the CGPEB based on various factors, such as the number of candidates, the number of vacancies, the difficulty level of the exam, etc. The cut-off marks are different for different categories and subjects. The expected cut-off marks for the CG Vyapam ADEO exam 2023 are as follows:

| Category | Cut-off Marks (Out of 300) |
| --- | --- |
| General | 180-190 |
| OBC | 170-180 |
| SC | 160-170 |
| ST | 150-160 |
| PWD | 140-150 |

      -
      -
      \ No newline at end of file diff --git a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download LinkedIn Lite APK for Android - Fast and Easy.md b/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download LinkedIn Lite APK for Android - Fast and Easy.md deleted file mode 100644 index 0cc79ef5496d549be681b6285458a597ef661b69..0000000000000000000000000000000000000000 --- a/spaces/simple0urra/skops-model-card-creator-2a23515a-d54e-4804-b365-27ed6e938735/example/Download LinkedIn Lite APK for Android - Fast and Easy.md +++ /dev/null @@ -1,131 +0,0 @@ - -

      Download LinkedIn Lite APK: A Guide for Android Users

      -

      LinkedIn is one of the most popular social media platforms for professionals, job seekers, and employers. It allows you to create a profile, showcase your skills and achievements, connect with people in your industry, and find opportunities for career growth. However, if you have a low-end device or a slow internet connection, you might find the regular LinkedIn app too heavy and slow to use. That's why you should consider downloading LinkedIn Lite APK, a lighter and faster version of the app that works well on any Android device.

      -

      download linkedin lite apk


      Downloadhttps://ssurll.com/2uNSPK



      -

      What is LinkedIn Lite?

      -

      LinkedIn Lite is a simplified version of the LinkedIn app that consumes less data, battery, and storage space. It is designed to work smoothly on low-end devices and in areas with poor network connectivity. It offers the essential features of LinkedIn, such as creating and updating your profile, browsing and applying for jobs, sending and receiving messages, and building your network. It also has a dark mode option that reduces eye strain and saves more battery life.

      -

      Features of LinkedIn Lite

      -

      LinkedIn Lite has the following features:

      -
        -
• Create and edit your profile with your photo, headline, summary, education, experience, skills, and more.
• Browse and apply for jobs that match your qualifications and interests.
• Send and receive messages from your connections and recruiters.
• Follow companies, influencers, and hashtags that interest you.
• Share updates, articles, photos, and videos with your network.
• Get notifications for new messages, job alerts, invitations, and more.
      -

      Benefits of LinkedIn Lite

      -

      LinkedIn Lite has the following benefits:

• It consumes less data than the regular app, saving you money on your mobile plan.
• It loads faster and performs better on low-end devices and slow networks.
• It takes up less storage space on your device, leaving more room for other apps and files.
• It has a simple and user-friendly interface that is easy to navigate.
• It offers the same level of security and privacy as the regular app.

      How to download LinkedIn Lite APK?


      If you want to download LinkedIn Lite APK on your Android device, you need to follow these steps:


      Step 1: Enable unknown sources


Since LinkedIn Lite is not available on the Google Play Store, you need to enable unknown sources in your device settings. This allows you to install apps from sources other than the official store. On Android 8.0 and later, the setting is called Install unknown apps and is granted per app, so enable it for the browser or file manager you will install from; on older versions, do the following:

1. Go to Settings > Security > Unknown sources.
2. Toggle on the switch or check the box to enable unknown sources.
3. A warning message will pop up. Tap OK to confirm.

      Step 2: Find a reliable source


The next step is to find a reliable source that offers the latest version of LinkedIn Lite APK. You can search online for websites that provide APK files for various apps. However, be careful not to download from shady or untrustworthy sites that might contain malware or viruses. One of the reputable sources that we recommend is [APKCombo], which offers safe and fast downloads for thousands of Android apps.


      Step 3: Download and install the APK file


      The final step is to download and install the APK file on your device. To do this:

1. Open your browser and go to [APKCombo].
2. Type "LinkedIn Lite" in the search box and hit Enter.
3. Select the app from the list of results and click on the Download APK button.
4. Wait for the download to complete and then open the APK file from your notification bar or file manager.
5. Tap on Install and follow the instructions on the screen.
6. Once the installation is done, you can launch the app from your app drawer or home screen.
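
If you prefer to sideload the APK from a computer instead of installing it on the phone directly, the sketch below uses adb. It assumes USB debugging is enabled, the Android platform-tools are installed, and that the downloaded file is named linkedin-lite.apk (a placeholder; use whatever file name APKCombo actually saves).

```bash
# Confirm the phone is connected and authorized for USB debugging
adb devices

# Install the APK; -r replaces/updates the app if it is already installed
adb install -r linkedin-lite.apk
```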

      How to use LinkedIn Lite?


      Using LinkedIn Lite is very similar to using the regular app. You just need to:


      Sign in or create an account


      If you already have a LinkedIn account, you can sign in with your email and password. If you don't have an account, you can create one by tapping on Join now and filling out the required information. You can also sign in or sign up with your Google or Facebook account.


      Explore the app interface


      The app interface consists of four tabs at the bottom: Home, Jobs, Messages, and My Network. You can switch between them by tapping on them. The Home tab shows you the latest updates from your network and the topics you follow. The Jobs tab shows you the recommended jobs for you based on your profile and preferences. The Messages tab shows you your conversations with your connections and recruiters. The My Network tab shows you your connections, invitations, and suggestions.


      Search for jobs and network with professionals


      You can use the app to search for jobs and network with professionals in your industry. To search for jobs, you can use the search bar at the top of the Jobs tab or tap on the filter icon to refine your search by location, industry, experience level, and more. You can also save your searches and set up job alerts to get notified of new opportunities. To apply for a job, you can tap on the Apply button and fill out a short form or upload your resume. You can also send a message to the recruiter or follow the company for more updates.


To network with professionals, you can use the search bar at the top of the Home tab or tap on the magnifying glass icon to search for people, companies, groups, or hashtags. You can also use the My Network tab to see who you know and who you might want to know. To connect with someone, tap on the Connect button and send a personalized invitation or a default message. Once connected, you can message them by tapping on the Message button, and like, comment on, share, or follow their posts and updates.


      Conclusion


      Summary of the main points


      In this article, we have learned what LinkedIn Lite is, how to download it as an APK file, and how to use it on your Android device. We have seen that LinkedIn Lite is a lighter and faster version of the regular app that offers the essential features of LinkedIn without consuming too much data, battery, or storage space. We have also seen how to use it to create and update your profile, browse and apply for jobs, send and receive messages, and build your network.


      Call to action


      If you are looking for a simple and effective way to use LinkedIn on your Android device, you should definitely give LinkedIn Lite a try. It will help you stay connected with your professional network and find opportunities for career growth. To download LinkedIn Lite APK, just follow the steps we have outlined above or click on this link: [Download LinkedIn Lite APK]. You won't regret it!


      Frequently Asked Questions

1. Is LinkedIn Lite safe to download?

        Yes, LinkedIn Lite is safe to download as long as you download it from a reliable source like [APKCombo]. It has the same level of security and privacy as the regular app.

2. What are the differences between LinkedIn Lite and LinkedIn?

        The main differences between LinkedIn Lite and LinkedIn are:

  • LinkedIn Lite consumes less data, battery, and storage space than LinkedIn.
  • LinkedIn Lite loads faster and performs better on low-end devices and slow networks than LinkedIn.
  • LinkedIn Lite has a simpler, more user-friendly interface than LinkedIn.
  • LinkedIn Lite offers only the essential features of LinkedIn, without some of the advanced features like stories, live videos, learning courses, etc.
3. Can I use both LinkedIn Lite and LinkedIn on my device?

        Yes, you can use both LinkedIn Lite and LinkedIn on your device if you want to. They are separate apps that do not interfere with each other. However, you might not need to use both apps as LinkedIn Lite offers most of the features that you need. You can uninstall the regular app to save more space on your device.

4. How can I update LinkedIn Lite?

  You can update LinkedIn Lite by visiting the same source where you downloaded it and checking for a newer version. Because sideloaded apps are not managed by the Google Play Store, they do not update automatically, so you will need to download and install each new version yourself.
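
  If you want to check which version is currently installed before updating manually, a small adb sketch is shown below; the package id is an assumption and may differ on your device.

```bash
# List LinkedIn packages on the device to confirm the exact package id
adb shell pm list packages linkedin

# Print the installed version string (package id is assumed, not confirmed)
adb shell "dumpsys package com.linkedin.android.lite | grep versionName"
```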

5. How can I contact LinkedIn Lite support?

        You can contact LinkedIn Lite support by tapping on the Menu icon at the top left corner of the app and selecting Help Center. You can also visit this link: [LinkedIn Lite Help Center]. You can find answers to common questions, report a problem, or give feedback.

      \ No newline at end of file diff --git a/spaces/smajumdar/nemo_multilingual_language_id/app.py b/spaces/smajumdar/nemo_multilingual_language_id/app.py deleted file mode 100644 index 4e7824f870f560bb48e95c5b09d98b55f239dc0c..0000000000000000000000000000000000000000 --- a/spaces/smajumdar/nemo_multilingual_language_id/app.py +++ /dev/null @@ -1,641 +0,0 @@ -import os -import json -import shutil -import uuid -import tempfile -import subprocess -import re -import time -import traceback - -import gradio as gr -import pytube as pt - -import nemo.collections.asr as nemo_asr -import torch - -import speech_to_text_buffered_infer_ctc as buffered_ctc -import speech_to_text_buffered_infer_rnnt as buffered_rnnt -from nemo.utils import logging - -# Set NeMo cache dir as /tmp -from nemo import constants - -os.environ[constants.NEMO_ENV_CACHE_DIR] = "/tmp/nemo/" - - -SAMPLE_RATE = 16000 # Default sample rate for ASR -BUFFERED_INFERENCE_DURATION_THRESHOLD = 60.0 # 60 second and above will require chunked inference. -CHUNK_LEN_IN_SEC = 20.0 # Chunk size -BUFFER_LEN_IN_SEC = 30.0 # Total buffer size - -TITLE = "NeMo ASR Inference on Hugging Face" -DESCRIPTION = "Demo of all languages supported by NeMo ASR" -DEFAULT_EN_MODEL = "nvidia/stt_en_conformer_transducer_xlarge" -DEFAULT_BUFFERED_EN_MODEL = "nvidia/stt_en_conformer_transducer_large" - -# Pre-download and cache the model in disk space -logging.setLevel(logging.ERROR) -tmp_model = nemo_asr.models.ASRModel.from_pretrained(DEFAULT_BUFFERED_EN_MODEL, map_location='cpu') -del tmp_model -logging.setLevel(logging.INFO) - -MARKDOWN = f""" -# {TITLE} - -## {DESCRIPTION} -""" - -CSS = """ -p.big { - font-size: 20px; -} - -/* From https://huggingface.co/spaces/k2-fsa/automatic-speech-recognition/blob/main/app.py */ - -.result {display:flex;flex-direction:column} -.result_item {padding:15px;margin-bottom:8px;border-radius:15px;width:100%;font-size:20px;} -.result_item_success {background-color:mediumaquamarine;color:white;align-self:start} -.result_item_error {background-color:#ff7070;color:white;align-self:start} -""" - -ARTICLE = """ -

      -

      - NeMo ASR - | - Github Repo -

      -""" - -SUPPORTED_LANGUAGES = set([]) -SUPPORTED_MODEL_NAMES = set([]) - -# HF models, grouped by language identifier -hf_filter = nemo_asr.models.ASRModel.get_hf_model_filter() -hf_filter.task = "automatic-speech-recognition" - -hf_infos = nemo_asr.models.ASRModel.search_huggingface_models(model_filter=hf_filter) -for info in hf_infos: - print("Model ID:", info.modelId) - try: - lang_id = info.modelId.split("_")[1] # obtains lang id as str - except Exception: - print("WARNING: Skipping model id -", info) - continue - - SUPPORTED_LANGUAGES.add(lang_id) - SUPPORTED_MODEL_NAMES.add(info.modelId) - -SUPPORTED_MODEL_NAMES = sorted(list(SUPPORTED_MODEL_NAMES)) - -# DEBUG FILTER -# SUPPORTED_MODEL_NAMES = list(filter(lambda x: "en" in x and "conformer_transducer_large" in x, SUPPORTED_MODEL_NAMES)) - -model_dict = {} -for model_name in SUPPORTED_MODEL_NAMES: - try: - iface = gr.Interface.load(f'models/{model_name}') - model_dict[model_name] = iface - - # model_dict[model_name] = None - except: - pass - -if DEFAULT_EN_MODEL in model_dict: - # Preemptively load the default EN model - if model_dict[DEFAULT_EN_MODEL] is None: - model_dict[DEFAULT_EN_MODEL] = gr.Interface.load(f'models/{DEFAULT_EN_MODEL}') - -SUPPORTED_LANG_MODEL_DICT = {} -for lang in SUPPORTED_LANGUAGES: - for model_id in SUPPORTED_MODEL_NAMES: - if ("_" + lang + "_") in model_id: - # create new lang in dict - if lang not in SUPPORTED_LANG_MODEL_DICT: - SUPPORTED_LANG_MODEL_DICT[lang] = [model_id] - else: - SUPPORTED_LANG_MODEL_DICT[lang].append(model_id) - -# Sort model names -for lang in SUPPORTED_LANG_MODEL_DICT.keys(): - model_ids = SUPPORTED_LANG_MODEL_DICT[lang] - model_ids = sorted(model_ids) - SUPPORTED_LANG_MODEL_DICT[lang] = model_ids - - -def get_device(): - gpu_available = torch.cuda.is_available() - if gpu_available: - return torch.cuda.get_device_name() - else: - return "CPU" - - -def parse_duration(audio_file): - """ - FFMPEG to calculate durations. Libraries can do it too, but filetypes cause different libraries to behave differently. - """ - process = subprocess.Popen(['ffmpeg', '-i', audio_file], stdout=subprocess.PIPE, stderr=subprocess.STDOUT) - stdout, stderr = process.communicate() - matches = re.search( - r"Duration:\s{1}(?P\d+?):(?P\d+?):(?P\d+\.\d+?),", stdout.decode(), re.DOTALL - ).groupdict() - - duration = 0.0 - duration += float(matches['hours']) * 60.0 * 60.0 - duration += float(matches['minutes']) * 60.0 - duration += float(matches['seconds']) * 1.0 - return duration - - -def resolve_model_type(model_name: str) -> str: - """ - Map model name to a class type, without loading the model. Has some hardcoded assumptions in - semantics of model naming. - """ - # Loss specific maps - if 'hybrid' in model_name or 'hybrid_ctc' in model_name or 'hybrid_transducer' in model_name: - return 'hybrid' - elif 'transducer' in model_name or 'rnnt' in model_id: - return 'transducer' - elif 'ctc' in model_name: - return 'ctc' - - # Model specific maps - if 'jasper' in model_name: - return 'ctc' - elif 'quartznet' in model_name: - return 'ctc' - elif 'citrinet' in model_name: - return 'ctc' - elif 'contextnet' in model_name: - return 'transducer' - - return None - - -def resolve_model_stride(model_name) -> int: - """ - Model specific pre-calc of stride levels. - Dont laod model to get such info. 
- """ - if 'jasper' in model_name: - return 2 - if 'quartznet' in model_name: - return 2 - if 'conformer' in model_name: - return 4 - if 'squeezeformer' in model_name: - return 4 - if 'citrinet' in model_name: - return 8 - if 'contextnet' in model_name: - return 8 - - return -1 - - -def convert_audio(audio_filepath): - """ - Transcode all mp3 files to monochannel 16 kHz wav files. - """ - filedir = os.path.split(audio_filepath)[0] - filename, ext = os.path.splitext(audio_filepath) - - if ext == 'wav': - return audio_filepath - - out_filename = os.path.join(filedir, filename + '.wav') - - process = subprocess.Popen( - ['ffmpeg', '-y', '-i', audio_filepath, '-ac', '1', '-ar', str(SAMPLE_RATE), out_filename], - stdout=subprocess.PIPE, - stderr=subprocess.STDOUT, - close_fds=True, - ) - - stdout, stderr = process.communicate() - - if os.path.exists(out_filename): - return out_filename - else: - return None - - -def extract_result_from_manifest(filepath, model_name) -> (bool, str): - """ - Parse the written manifest which is result of the buffered inference process. - """ - data = [] - with open(filepath, 'r', encoding='utf-8') as f: - for line in f: - try: - line = json.loads(line) - data.append(line['pred_text']) - except Exception as e: - pass - - if len(data) > 0: - return True, data[0] - else: - return False, f"Could not perform inference on model with name : {model_name}" - - -def build_html_output(s: str, style: str = "result_item_success"): - return f""" -
      -
      - {s} -
      -
      - """ - - -def infer_audio(model_name: str, audio_file: str) -> str: - """ - Main method that switches from HF inference for small audio files to Buffered CTC/RNNT mode for long audio files. - - Args: - model_name: Str name of the model (potentially with / to denote HF models) - audio_file: Path to an audio file (mp3 or wav) - - Returns: - str which is the transcription if successful. - str which is HTML output of logs. - """ - # Parse the duration of the audio file - duration = parse_duration(audio_file) - - if duration > BUFFERED_INFERENCE_DURATION_THRESHOLD: # Longer than one minute; use buffered mode - # Process audio to be of wav type (possible youtube audio) - audio_file = convert_audio(audio_file) - - # If audio file transcoding failed, let user know - if audio_file is None: - return "Error:- Failed to convert audio file to wav." - - # Extract audio dir from resolved audio filepath - audio_dir = os.path.split(audio_file)[0] - - # Next calculate the stride of each model - model_stride = resolve_model_stride(model_name) - - if model_stride < 0: - return f"Error:- Failed to compute the model stride for model with name : {model_name}" - - # Process model type (CTC/RNNT/Hybrid) - model_type = resolve_model_type(model_name) - - if model_type is None: - - # Model type could not be infered. - # Try all feasible options - RESULT = None - - try: - ctc_config = buffered_ctc.TranscriptionConfig( - pretrained_name=model_name, - audio_dir=audio_dir, - output_filename="output.json", - audio_type="wav", - overwrite_transcripts=True, - model_stride=model_stride, - chunk_len_in_secs=20.0, - total_buffer_in_secs=30.0, - ) - - buffered_ctc.main(ctc_config) - result = extract_result_from_manifest('output.json', model_name) - if result[0]: - RESULT = result[1] - - except Exception as e: - pass - - try: - rnnt_config = buffered_rnnt.TranscriptionConfig( - pretrained_name=model_name, - audio_dir=audio_dir, - output_filename="output.json", - audio_type="wav", - overwrite_transcripts=True, - model_stride=model_stride, - chunk_len_in_secs=20.0, - total_buffer_in_secs=30.0, - ) - - buffered_rnnt.main(rnnt_config) - result = extract_result_from_manifest('output.json', model_name)[-1] - - if result[0]: - RESULT = result[1] - except Exception as e: - pass - - if RESULT is None: - return f"Error:- Could not parse model type; failed to perform inference with model {model_name}!" - - elif model_type == 'ctc': - - # CTC Buffered Inference - ctc_config = buffered_ctc.TranscriptionConfig( - pretrained_name=model_name, - audio_dir=audio_dir, - output_filename="output.json", - audio_type="wav", - overwrite_transcripts=True, - model_stride=model_stride, - chunk_len_in_secs=20.0, - total_buffer_in_secs=30.0, - ) - - buffered_ctc.main(ctc_config) - return extract_result_from_manifest('output.json', model_name)[-1] - - elif model_type == 'transducer': - - # RNNT Buffered Inference - rnnt_config = buffered_rnnt.TranscriptionConfig( - pretrained_name=model_name, - audio_dir=audio_dir, - output_filename="output.json", - audio_type="wav", - overwrite_transcripts=True, - model_stride=model_stride, - chunk_len_in_secs=20.0, - total_buffer_in_secs=30.0, - ) - - buffered_rnnt.main(rnnt_config) - return extract_result_from_manifest('output.json', model_name)[-1] - - else: - return f"Error:- Could not parse model type; failed to perform inference with model {model_name}!" 
- - else: - # Obtain Gradio Model function from cache of models - if model_name in model_dict: - model = model_dict[model_name] - - if model is None: - # Load the gradio interface - # try: - iface = gr.Interface.load(f'models/{model_name}') - print(iface) - # except: - # iface = None - - if iface is not None: - # Update model cache - model_dict[model_name] = iface - else: - model = None - - if model is not None: - # Use HF API for transcription - try: - transcriptions = model(audio_file) - return transcriptions - except Exception as e: - transcriptions = "" - error = "" - - error += ( - f"The model `{model_name}` is currently loading and cannot be used " - f"for transcription.
      " - f"Please try another model or wait a few minutes." - ) - - return error - - else: - error = ( - f"Error:- Could not find model {model_name} in list of available models : " - f"{list([k for k in model_dict.keys()])}" - ) - return error - - -def transcribe(microphone, audio_file, model_name): - - audio_data = None - warn_output = "" - if (microphone is not None) and (audio_file is not None): - warn_output = ( - "WARNING: You've uploaded an audio file and used the microphone. " - "The recorded file from the microphone will be used and the uploaded audio will be discarded.\n" - ) - audio_data = microphone - - elif (microphone is None) and (audio_file is None): - warn_output = "ERROR: You have to either use the microphone or upload an audio file" - - elif microphone is not None: - audio_data = microphone - else: - audio_data = audio_file - - if audio_data is not None: - audio_duration = parse_duration(audio_data) - else: - audio_duration = None - - time_diff = None - try: - with tempfile.TemporaryDirectory() as tempdir: - filename = os.path.split(audio_data)[-1] - new_audio_data = os.path.join(tempdir, filename) - shutil.copy2(audio_data, new_audio_data) - - if os.path.exists(audio_data): - os.remove(audio_data) - - audio_data = new_audio_data - - # Use HF API for transcription - start = time.time() - transcriptions = infer_audio(model_name, audio_data) - end = time.time() - time_diff = end - start - - except Exception as e: - transcriptions = "" - warn_output = warn_output - - if warn_output != "": - warn_output += "

      " - - warn_output += ( - f"The model `{model_name}` is currently loading and cannot be used " - f"for transcription.
      " - f"Please try another model or wait a few minutes." - ) - - # Built HTML output - if warn_output != "": - html_output = build_html_output(warn_output, style="result_item_error") - else: - if transcriptions.startswith("Error:-"): - html_output = build_html_output(transcriptions, style="result_item_error") - else: - output = f"Successfully transcribed on {get_device()} !
      " f"Transcription Time : {time_diff: 0.3f} s" - - if audio_duration > BUFFERED_INFERENCE_DURATION_THRESHOLD: - output += f"""

      - Note: Audio duration was {audio_duration: 0.3f} s, so model had to be downloaded, initialized, and then - buffered inference was used.
      - """ - - html_output = build_html_output(output) - - return transcriptions, html_output - - -def _return_yt_html_embed(yt_url): - """ Obtained from https://huggingface.co/spaces/whisper-event/whisper-demo """ - video_id = yt_url.split("?v=")[-1] - HTML_str = ( - f'
      ' - "
      " - ) - return HTML_str - - -def yt_transcribe(yt_url: str, model_name: str): - """ Modified from https://huggingface.co/spaces/whisper-event/whisper-demo """ - if yt_url == "": - text = "" - html_embed_str = "" - html_output = build_html_output(f""" - Error:- No YouTube URL was provide ! - """, style='result_item_error') - return text, html_embed_str, html_output - - yt = pt.YouTube(yt_url) - html_embed_str = _return_yt_html_embed(yt_url) - - with tempfile.TemporaryDirectory() as tempdir: - file_uuid = str(uuid.uuid4().hex) - file_uuid = f"{tempdir}/{file_uuid}.mp3" - - # Download YT Audio temporarily - download_time_start = time.time() - - stream = yt.streams.filter(only_audio=True)[0] - stream.download(filename=file_uuid) - - download_time_end = time.time() - - # Get audio duration - audio_duration = parse_duration(file_uuid) - - # Perform transcription - infer_time_start = time.time() - - text = infer_audio(model_name, file_uuid) - - infer_time_end = time.time() - - if text.startswith("Error:-"): - html_output = build_html_output(text, style='result_item_error') - else: - html_output = f""" - Successfully transcribed on {get_device()} !
      - Audio Download Time : {download_time_end - download_time_start: 0.3f} s
      - Transcription Time : {infer_time_end - infer_time_start: 0.3f} s
      - """ - - if audio_duration > BUFFERED_INFERENCE_DURATION_THRESHOLD: - html_output += f"""
      - Note: Audio duration was {audio_duration: 0.3f} s, so model had to be downloaded, initialized, and then - buffered inference was used.
      - """ - - html_output = build_html_output(html_output) - - return text, html_embed_str, html_output - - -def create_lang_selector_component(default_en_model=DEFAULT_EN_MODEL): - """ - Utility function to select a langauge from a dropdown menu, and simultanously update another dropdown - containing the corresponding model checkpoints for that language. - - Args: - default_en_model: str name of a default english model that should be the set default. - - Returns: - Gradio components for lang_selector (Dropdown menu) and models_in_lang (Dropdown menu) - """ - lang_selector = gr.components.Dropdown( - choices=sorted(list(SUPPORTED_LANGUAGES)), value="en", type="value", label="Languages", interactive=True, - ) - models_in_lang = gr.components.Dropdown( - choices=sorted(list(SUPPORTED_LANG_MODEL_DICT["en"])), - value=default_en_model, - label="Models", - interactive=True, - ) - - def update_models_with_lang(lang): - models_names = sorted(list(SUPPORTED_LANG_MODEL_DICT[lang])) - default = models_names[0] - - if lang == 'en': - default = default_en_model - return models_in_lang.update(choices=models_names, value=default) - - lang_selector.change(update_models_with_lang, inputs=[lang_selector], outputs=[models_in_lang]) - - return lang_selector, models_in_lang - - -""" -Define the GUI -""" -demo = gr.Blocks(title=TITLE, css=CSS) - -with demo: - header = gr.Markdown(MARKDOWN) - - with gr.Tab("Transcribe Audio"): - with gr.Row() as row: - file_upload = gr.components.Audio(source="upload", type='filepath', label='Upload File') - microphone = gr.components.Audio(source="microphone", type='filepath', label='Microphone') - - lang_selector, models_in_lang = create_lang_selector_component() - - run = gr.components.Button('Transcribe') - - transcript = gr.components.Label(label='Transcript') - audio_html_output = gr.components.HTML() - - run.click( - transcribe, inputs=[microphone, file_upload, models_in_lang], outputs=[transcript, audio_html_output] - ) - - with gr.Tab("Transcribe Youtube"): - yt_url = gr.components.Textbox( - lines=1, label="Youtube URL", placeholder="Paste the URL to a YouTube video here" - ) - - lang_selector_yt, models_in_lang_yt = create_lang_selector_component( - default_en_model=DEFAULT_BUFFERED_EN_MODEL - ) - - with gr.Row(): - run = gr.components.Button('Transcribe YouTube') - embedded_video = gr.components.HTML() - - transcript = gr.components.Label(label='Transcript') - yt_html_output = gr.components.HTML() - - run.click( - yt_transcribe, inputs=[yt_url, models_in_lang_yt], outputs=[transcript, embedded_video, yt_html_output] - ) - - gr.components.HTML(ARTICLE) - -demo.queue(concurrency_count=1) -demo.launch(enable_queue=True) diff --git a/spaces/spencer/socm/tasks.py b/spaces/spencer/socm/tasks.py deleted file mode 100644 index afe178b5b7f79a2d842eed88b749bee0748bf36d..0000000000000000000000000000000000000000 --- a/spaces/spencer/socm/tasks.py +++ /dev/null @@ -1,93 +0,0 @@ -import glob -from collections import namedtuple -from PIL import Image - -from embeddings import FaissIndex, VectorSearch - - -class Summary: - def __init__(self, video_dir, llm): - self.video_dir = video_dir - self.llm = llm - self.vs = VectorSearch() - - def flatten_list(self, s): - if s == []: - return s - if isinstance(s[0], list): - return self.flatten_list(s[0]) + self.flatten_list(s[1:]) - return s[:1] + self.flatten_list(s[1:]) - - def parse_history(self): - history = [] - with open(f"{self.video_dir}/history.txt") as f: - for line in f: - history.append(line.strip()) - - history_proc = [] - 
proc = lambda x: list(map(str.strip, x.strip().split(","))) - - Record = namedtuple("Record", "frame places objects activities".split(" ")) - for hist in history: - hist_list = hist.split(":") - flat = self.flatten_list([x.split(".") for x in hist_list]) - frame = flat[0] - - places = proc(flat[3]) - objects = proc(flat[5]) - activities = proc(flat[-1]) - history_proc.append(Record(*[frame, places, objects, activities])) - - return history_proc - - def create_prompts(self, history_proc): - split_idx = [i for i in range(len(history_proc)) if i % 5 == 0] + [ - len(history_proc) - ] - range_idx = [(split_idx[x - 1], split_idx[x]) for x in range(1, len(split_idx))] - prompts = [] - for r in range_idx: - prompts.append(self.vs.prompt_summary(history_proc[r[0] : r[1]])) - - return prompts - - def call_model(self, prompts): - results = [] - for prompt in prompts: - results.append(self.llm(prompt)[0]["generated_text"]) - - return zip(prompts, results) - - def generate_summaries(self): - history_proc = self.parse_history() - prompts = self.create_prompts(history_proc) - results = self.call_model(prompts) - return results - - -class VideoSearch: - def __init__(self, video_dir, vlm, llm=None): - self.video_dir = video_dir - self.fi = FaissIndex(faiss_index_location=f"{self.video_dir}/video.index") - self.vlm = vlm - self.llm = llm - - def find_nearest_frames(self, query): - test = self.vlm.get_text_emb(query) - D, I, frames = self.fi.search(test) - return D, frames - - def get_images(self, frames, k=5): - images = [] - for frame in frames[:k]: - loc = glob.glob(f"{self.video_dir}/*_{frame}.jpg")[0] - images.append(Image.open(loc)) - - return images - - def search_engine(self, query): - - D, frames = self.find_nearest_frames(query) - images = self.get_images(frames) - - return images diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/__init__.py b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/__init__.py deleted file mode 100644 index 44bb24ae614941f23fea29c56d60167650c39bcb..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/__init__.py +++ /dev/null @@ -1,9 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -try: - from fairseq.version import __version__ # noqa -except ImportError: - pass diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/ljspeech_example.md b/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/ljspeech_example.md deleted file mode 100644 index 90c524fac8ffdc1819ec9bb36928500320337603..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/examples/speech_synthesis/docs/ljspeech_example.md +++ /dev/null @@ -1,138 +0,0 @@ -[[Back]](..) - -# LJSpeech - -[LJSpeech](https://keithito.com/LJ-Speech-Dataset) is a public domain TTS -corpus with around 24 hours of English speech sampled at 22.05kHz. We provide examples for building -[Transformer](https://arxiv.org/abs/1809.08895) and [FastSpeech 2](https://arxiv.org/abs/2006.04558) -models on this dataset. 
- - -## Data preparation - -Download data, create splits and generate audio manifests with -```bash -python -m examples.speech_synthesis.preprocessing.get_ljspeech_audio_manifest \ - --output-data-root ${AUDIO_DATA_ROOT} \ - --output-manifest-root ${AUDIO_MANIFEST_ROOT} -``` - -Then, extract log-Mel spectrograms, generate feature manifest and create data configuration YAML with -```bash -python -m examples.speech_synthesis.preprocessing.get_feature_manifest \ - --audio-manifest-root ${AUDIO_MANIFEST_ROOT} \ - --output-root ${FEATURE_MANIFEST_ROOT} \ - --ipa-vocab --use-g2p -``` -where we use phoneme inputs (`--ipa-vocab --use-g2p`) as example. - -FastSpeech 2 additionally requires frame durations, pitch and energy as auxiliary training targets. -Add `--add-fastspeech-targets` to include these fields in the feature manifests. We get frame durations either from -phoneme-level force-alignment or frame-level pseudo-text unit sequence. They should be pre-computed and specified via: -- `--textgrid-zip ${TEXT_GRID_ZIP_PATH}` for a ZIP file, inside which there is one - [TextGrid](https://www.fon.hum.uva.nl/praat/manual/TextGrid.html) file per sample to provide force-alignment info. -- `--id-to-units-tsv ${ID_TO_UNIT_TSV}` for a TSV file, where there are 2 columns for sample ID and - space-delimited pseudo-text unit sequence, respectively. - -For your convenience, we provide pre-computed -[force-alignment](https://dl.fbaipublicfiles.com/fairseq/s2/ljspeech_mfa.zip) from -[Montreal Forced Aligner](https://github.com/MontrealCorpusTools/Montreal-Forced-Aligner) and -[pseudo-text units](s3://dl.fbaipublicfiles.com/fairseq/s2/ljspeech_hubert.tsv) from -[HuBERT](https://github.com/pytorch/fairseq/tree/main/examples/hubert). You can also generate them by yourself using -a different software or model. - - -## Training -#### Transformer -```bash -fairseq-train ${FEATURE_MANIFEST_ROOT} --save-dir ${SAVE_DIR} \ - --config-yaml config.yaml --train-subset train --valid-subset dev \ - --num-workers 4 --max-tokens 30000 --max-update 200000 \ - --task text_to_speech --criterion tacotron2 --arch tts_transformer \ - --clip-norm 5.0 --n-frames-per-step 4 --bce-pos-weight 5.0 \ - --dropout 0.1 --attention-dropout 0.1 --activation-dropout 0.1 \ - --encoder-normalize-before --decoder-normalize-before \ - --optimizer adam --lr 2e-3 --lr-scheduler inverse_sqrt --warmup-updates 4000 \ - --seed 1 --update-freq 8 --eval-inference --best-checkpoint-metric mcd_loss -``` -where `SAVE_DIR` is the checkpoint root path. We set `--update-freq 8` to simulate 8 GPUs with 1 GPU. You may want to -update it accordingly when using more than 1 GPU. 
- -#### FastSpeech2 -```bash -fairseq-train ${FEATURE_MANIFEST_ROOT} --save-dir ${SAVE_DIR} \ - --config-yaml config.yaml --train-subset train --valid-subset dev \ - --num-workers 4 --max-sentences 6 --max-update 200000 \ - --task text_to_speech --criterion fastspeech2 --arch fastspeech2 \ - --clip-norm 5.0 --n-frames-per-step 1 \ - --dropout 0.1 --attention-dropout 0.1 --activation-dropout 0.1 \ - --encoder-normalize-before --decoder-normalize-before \ - --optimizer adam --lr 5e-4 --lr-scheduler inverse_sqrt --warmup-updates 4000 \ - --seed 1 --update-freq 8 --eval-inference --best-checkpoint-metric mcd_loss -``` - - -## Inference -Average the last 5 checkpoints, generate the test split spectrogram and waveform using the default Griffin-Lim vocoder: -```bash -SPLIT=test -CHECKPOINT_NAME=avg_last_5 -CHECKPOINT_PATH=${SAVE_DIR}/checkpoint_${CHECKPOINT_NAME}.pt -python scripts/average_checkpoints.py --inputs ${SAVE_DIR} \ - --num-epoch-checkpoints 5 \ - --output ${CHECKPOINT_PATH} - -python -m examples.speech_synthesis.generate_waveform ${FEATURE_MANIFEST_ROOT} \ - --config-yaml config.yaml --gen-subset ${SPLIT} --task text_to_speech \ - --path ${CHECKPOINT_PATH} --max-tokens 50000 --spec-bwd-max-iter 32 \ - --dump-waveforms -``` -which dumps files (waveform, feature, attention plot, etc.) to `${SAVE_DIR}/generate-${CHECKPOINT_NAME}-${SPLIT}`. To -re-synthesize target waveforms for automatic evaluation, add `--dump-target`. - -## Automatic Evaluation -To start with, generate the manifest for synthetic speech, which will be taken as inputs by evaluation scripts. -```bash -python -m examples.speech_synthesis.evaluation.get_eval_manifest \ - --generation-root ${SAVE_DIR}/generate-${CHECKPOINT_NAME}-${SPLIT} \ - --audio-manifest ${AUDIO_MANIFEST_ROOT}/${SPLIT}.audio.tsv \ - --output-path ${EVAL_OUTPUT_ROOT}/eval.tsv \ - --vocoder griffin_lim --sample-rate 22050 --audio-format flac \ - --use-resynthesized-target -``` -Speech recognition (ASR) models usually operate at lower sample rates (e.g. 16kHz). For the WER/CER metric, -you may need to resample the audios accordingly --- add `--output-sample-rate 16000` for `generate_waveform.py` and -use `--sample-rate 16000` for `get_eval_manifest.py`. - - -#### WER/CER metric -We use wav2vec 2.0 ASR model as example. [Download](https://github.com/pytorch/fairseq/tree/main/examples/wav2vec) -the model checkpoint and dictionary, then compute WER/CER with -```bash -python -m examples.speech_synthesis.evaluation.eval_asr \ - --audio-header syn --text-header text --err-unit char --split ${SPLIT} \ - --w2v-ckpt ${WAV2VEC2_CHECKPOINT_PATH} --w2v-dict-dir ${WAV2VEC2_DICT_DIR} \ - --raw-manifest ${EVAL_OUTPUT_ROOT}/eval_16khz.tsv --asr-dir ${EVAL_OUTPUT_ROOT}/asr -``` - -#### MCD/MSD metric -```bash -python -m examples.speech_synthesis.evaluation.eval_sp \ - ${EVAL_OUTPUT_ROOT}/eval.tsv --mcd --msd -``` - -#### F0 metrics -```bash -python -m examples.speech_synthesis.evaluation.eval_f0 \ - ${EVAL_OUTPUT_ROOT}/eval.tsv --gpe --vde --ffe -``` - - -## Results - -| --arch | Params | Test MCD | Model | -|---|---|---|---| -| tts_transformer | 54M | 3.8 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2/ljspeech_transformer_phn.tar) | -| fastspeech2 | 41M | 3.8 | [Download](https://dl.fbaipublicfiles.com/fairseq/s2/ljspeech_fastspeech2_phn.tar) | - -[[Back]](..) 
diff --git a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/nat/levenshtein_utils.py b/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/nat/levenshtein_utils.py deleted file mode 100644 index 375a98c2e11354de085f0a7926f407bd1a6a2ad4..0000000000000000000000000000000000000000 --- a/spaces/sriramelango/Social_Classification_Public/fairseq/fairseq/models/nat/levenshtein_utils.py +++ /dev/null @@ -1,293 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -# -# This source code is licensed under the MIT license found in the -# LICENSE file in the root directory of this source tree. - -import torch -from fairseq.utils import new_arange - - -# -------------- Helper Functions --------------------------------------------------- # - - -def load_libnat(): - try: - from fairseq import libnat_cuda - - return libnat_cuda, True - - except ImportError as e: - print(str(e) + "... fall back to CPU version") - - try: - from fairseq import libnat - - return libnat, False - - except ImportError as e: - import sys - - sys.stderr.write( - "ERROR: missing libnat_cuda. run `python setup.py build_ext --inplace`\n" - ) - raise e - - -def _get_ins_targets(in_tokens, out_tokens, padding_idx, unk_idx): - libnat, use_cuda = load_libnat() - - def _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx): - in_masks = in_tokens.ne(padding_idx) - out_masks = out_tokens.ne(padding_idx) - mask_ins_targets, masked_tgt_masks = libnat.generate_insertion_labels( - out_tokens.int(), - libnat.levenshtein_distance( - in_tokens.int(), - out_tokens.int(), - in_masks.sum(1).int(), - out_masks.sum(1).int(), - ), - ) - masked_tgt_masks = masked_tgt_masks.bool() & out_masks - mask_ins_targets = mask_ins_targets.type_as(in_tokens)[ - :, 1 : in_masks.size(1) - ].masked_fill_(~in_masks[:, 1:], 0) - masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) - return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets - - def _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx): - in_seq_len, out_seq_len = in_tokens.size(1), out_tokens.size(1) - - in_tokens_list = [ - [t for t in s if t != padding_idx] for i, s in enumerate(in_tokens.tolist()) - ] - out_tokens_list = [ - [t for t in s if t != padding_idx] - for i, s in enumerate(out_tokens.tolist()) - ] - - full_labels = libnat.suggested_ed2_path( - in_tokens_list, out_tokens_list, padding_idx - ) - mask_inputs = [ - [len(c) if c[0] != padding_idx else 0 for c in a[:-1]] for a in full_labels - ] - - # generate labels - masked_tgt_masks = [] - for mask_input in mask_inputs: - mask_label = [] - for beam_size in mask_input[1:-1]: # HACK 1:-1 - mask_label += [0] + [1 for _ in range(beam_size)] - masked_tgt_masks.append( - mask_label + [0 for _ in range(out_seq_len - len(mask_label))] - ) - mask_ins_targets = [ - mask_input[1:-1] - + [0 for _ in range(in_seq_len - 1 - len(mask_input[1:-1]))] - for mask_input in mask_inputs - ] - - # transform to tensor - masked_tgt_masks = torch.tensor( - masked_tgt_masks, device=out_tokens.device - ).bool() - mask_ins_targets = torch.tensor(mask_ins_targets, device=in_tokens.device) - masked_tgt_tokens = out_tokens.masked_fill(masked_tgt_masks, unk_idx) - return masked_tgt_masks, masked_tgt_tokens, mask_ins_targets - - if use_cuda: - return _get_ins_targets_cuda(in_tokens, out_tokens, padding_idx, unk_idx) - return _get_ins_targets_cpu(in_tokens, out_tokens, padding_idx, unk_idx) - - -def _get_del_targets(in_tokens, out_tokens, padding_idx): - libnat, use_cuda = 
load_libnat() - - def _get_del_targets_cuda(in_tokens, out_tokens, padding_idx): - in_masks = in_tokens.ne(padding_idx) - out_masks = out_tokens.ne(padding_idx) - - word_del_targets = libnat.generate_deletion_labels( - in_tokens.int(), - libnat.levenshtein_distance( - in_tokens.int(), - out_tokens.int(), - in_masks.sum(1).int(), - out_masks.sum(1).int(), - ), - ) - word_del_targets = word_del_targets.type_as(in_tokens).masked_fill_( - ~in_masks, 0 - ) - return word_del_targets - - def _get_del_targets_cpu(in_tokens, out_tokens, padding_idx): - out_seq_len = out_tokens.size(1) - with torch.cuda.device_of(in_tokens): - in_tokens_list = [ - [t for t in s if t != padding_idx] - for i, s in enumerate(in_tokens.tolist()) - ] - out_tokens_list = [ - [t for t in s if t != padding_idx] - for i, s in enumerate(out_tokens.tolist()) - ] - - full_labels = libnat.suggested_ed2_path( - in_tokens_list, out_tokens_list, padding_idx - ) - word_del_targets = [b[-1] for b in full_labels] - word_del_targets = [ - labels + [0 for _ in range(out_seq_len - len(labels))] - for labels in word_del_targets - ] - - # transform to tensor - word_del_targets = torch.tensor(word_del_targets, device=out_tokens.device) - return word_del_targets - - if use_cuda: - return _get_del_targets_cuda(in_tokens, out_tokens, padding_idx) - return _get_del_targets_cpu(in_tokens, out_tokens, padding_idx) - - -def _apply_ins_masks( - in_tokens, in_scores, mask_ins_pred, padding_idx, unk_idx, eos_idx -): - - in_masks = in_tokens.ne(padding_idx) - in_lengths = in_masks.sum(1) - - # HACK: hacky way to shift all the paddings to eos first. - in_tokens.masked_fill_(~in_masks, eos_idx) - mask_ins_pred.masked_fill_(~in_masks[:, 1:], 0) - - out_lengths = in_lengths + mask_ins_pred.sum(1) - out_max_len = out_lengths.max() - out_masks = new_arange(out_lengths, out_max_len)[None, :] < out_lengths[:, None] - - reordering = (mask_ins_pred + in_masks[:, 1:].long()).cumsum(1) - out_tokens = ( - in_tokens.new_zeros(in_tokens.size(0), out_max_len) - .fill_(padding_idx) - .masked_fill_(out_masks, unk_idx) - ) - out_tokens[:, 0] = in_tokens[:, 0] - out_tokens.scatter_(1, reordering, in_tokens[:, 1:]) - - out_scores = None - if in_scores is not None: - in_scores.masked_fill_(~in_masks, 0) - out_scores = in_scores.new_zeros(*out_tokens.size()) - out_scores[:, 0] = in_scores[:, 0] - out_scores.scatter_(1, reordering, in_scores[:, 1:]) - - return out_tokens, out_scores - - -def _apply_ins_words(in_tokens, in_scores, word_ins_pred, word_ins_scores, unk_idx): - word_ins_masks = in_tokens.eq(unk_idx) - out_tokens = in_tokens.masked_scatter(word_ins_masks, word_ins_pred[word_ins_masks]) - - if in_scores is not None: - out_scores = in_scores.masked_scatter( - word_ins_masks, word_ins_scores[word_ins_masks] - ) - else: - out_scores = None - - return out_tokens, out_scores - - -def _apply_del_words( - in_tokens, in_scores, in_attn, word_del_pred, padding_idx, bos_idx, eos_idx -): - # apply deletion to a tensor - in_masks = in_tokens.ne(padding_idx) - bos_eos_masks = in_tokens.eq(bos_idx) | in_tokens.eq(eos_idx) - - max_len = in_tokens.size(1) - word_del_pred.masked_fill_(~in_masks, 1) - word_del_pred.masked_fill_(bos_eos_masks, 0) - - reordering = new_arange(in_tokens).masked_fill_(word_del_pred, max_len).sort(1)[1] - - out_tokens = in_tokens.masked_fill(word_del_pred, padding_idx).gather(1, reordering) - - out_scores = None - if in_scores is not None: - out_scores = in_scores.masked_fill(word_del_pred, 0).gather(1, reordering) - - out_attn = None - if in_attn is not 
None: - _mask = word_del_pred[:, :, None].expand_as(in_attn) - _reordering = reordering[:, :, None].expand_as(in_attn) - out_attn = in_attn.masked_fill(_mask, 0.0).gather(1, _reordering) - - return out_tokens, out_scores, out_attn - - -def _skip(x, mask): - """ - Getting sliced (dim=0) tensor by mask. Supporting tensor and list/dict of tensors. - """ - if isinstance(x, int): - return x - - if x is None: - return None - - if isinstance(x, torch.Tensor): - if x.size(0) == mask.size(0): - return x[mask] - elif x.size(1) == mask.size(0): - return x[:, mask] - - if isinstance(x, list): - return [_skip(x_i, mask) for x_i in x] - - if isinstance(x, dict): - return {k: _skip(v, mask) for k, v in x.items()} - - raise NotImplementedError - - -def _skip_encoder_out(encoder, encoder_out, mask): - if not mask.any(): - return encoder_out - else: - return encoder.reorder_encoder_out( - encoder_out, mask.nonzero(as_tuple=False).squeeze() - ) - - -def _fill(x, mask, y, padding_idx): - """ - Filling tensor x with y at masked positions (dim=0). - """ - if x is None: - return y - assert x.dim() == y.dim() and mask.size(0) == x.size(0) - assert x.dim() == 2 or (x.dim() == 3 and x.size(2) == y.size(2)) - n_selected = mask.sum() - assert n_selected == y.size(0) - - if n_selected == x.size(0): - return y - - if x.size(1) < y.size(1): - dims = [x.size(0), y.size(1) - x.size(1)] - if x.dim() == 3: - dims.append(x.size(2)) - x = torch.cat([x, x.new_zeros(*dims).fill_(padding_idx)], 1) - x[mask] = y - elif x.size(1) > y.size(1): - x[mask] = padding_idx - if x.dim() == 2: - x[mask, : y.size(1)] = y - else: - x[mask, : y.size(1), :] = y - else: - x[mask] = y - return x diff --git a/spaces/stomexserde/gpt4-ui/Examples/Bricsys BricsCad Platinum V13.1.11.41196 With Key [TorDigger] Free Download Fixed.md b/spaces/stomexserde/gpt4-ui/Examples/Bricsys BricsCad Platinum V13.1.11.41196 With Key [TorDigger] Free Download Fixed.md deleted file mode 100644 index 50b0964f00100fc60a03f9f7647b947d82f23c93..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Bricsys BricsCad Platinum V13.1.11.41196 With Key [TorDigger] Free Download Fixed.md +++ /dev/null @@ -1,157 +0,0 @@ - -

      Bricsys BricsCAD Platinum V13.1.11.41196 With Key [TorDigger] Free Download

      -

      If you are looking for a powerful and versatile CAD software for 2D and 3D design, you might want to check out Bricsys BricsCAD Platinum. This software is a single, native-DWG platform that offers five product levels, including Lite, Pro, BIM, Mechanical, and Ultimate. It is compatible and interoperable with various industries and applications, such as architecture, engineering, construction, manufacturing, surveying, product design, and more.

      -

      Bricsys BricsCad Platinum V13.1.11.41196 With Key [TorDigger] Free Download


      Download Ziphttps://urlgoal.com/2uIc4R



      -

      In this article, we will show you what are the features and benefits of Bricsys BricsCAD Platinum, how to download and install it for free using a torrent file from TorDigger, how to compare it with other CAD software, and answer some frequently asked questions about it.

      -

      What is Bricsys BricsCAD Platinum?

      -

      A powerful and versatile CAD software for 2D and 3D design

      -

      Bricsys BricsCAD Platinum is a CAD software that allows you to create, edit, view, and annotate any kind of 2D and 3D DWG file with greater ease, speed, and efficiency. It has a familiar user interface that is similar to AutoCAD, but with more features and tools that enhance your productivity and creativity.

      -

      A single, native-DWG platform with five product levels

      -

      Bricsys BricsCAD Platinum is a single, native-DWG platform that supports all DWG versions from R12 to R2020. It also offers five product levels that cater to different needs and budgets:

      -
• BricsCAD Lite: A high-performing, compatible and concise 2D drafting and detailing solution.
• BricsCAD Pro: A powerful, intuitive and interoperable 2D/3D CAD and modeling solution.
• BricsCAD BIM: An AI-driven BIM solution built on a compatible 2D and 3D design platform.
• BricsCAD Mechanical: A mechanical design and drafting solution combined with intelligent 2D, 3D and modeling tools.
• BricsCAD Ultimate: A comprehensive solution that includes all the features of the other product levels, plus advanced 3D modeling and rendering capabilities.
      You can choose the product level that suits your needs and budget, and upgrade anytime without losing your data or settings.

      -

      -

      A compatible and interoperable solution for various industries and applications

      -

      Bricsys BricsCAD Platinum is a compatible and interoperable solution that works seamlessly with other CAD software, such as AutoCAD, Solidworks, SketchUp, Revit, and more. It supports various file formats, such as DWG, DXF, DWF, PDF, STL, IFC, BCF, BMP, JPG, PNG, and more. It also integrates with various third-party applications and plugins that extend its functionality and usability for different industries and purposes, such as architecture, engineering, construction, manufacturing, surveying, product design, and more.

      -

      What are the features and benefits of Bricsys BricsCAD Platinum?

      -

      High-performance 2D drafting and detailing

      -

      Bricsys BricsCAD Platinum offers a high-performance 2D drafting and detailing solution that allows you to create accurate and professional drawings with ease. It has a rich set of tools and commands that help you draw, modify, annotate, dimension, hatch, block, layer, style, print, and export your 2D drawings. It also has a smart cursor and dynamic input that help you input coordinates and angles quickly and precisely. It also has a quad cursor and context menu that help you access the most relevant tools and commands based on your cursor position and selection.

      -

      Advanced 3D modeling and direct editing

      -

      Bricsys BricsCAD Platinum offers an advanced 3D modeling and direct editing solution that allows you to create complex and organic shapes with ease. It has a powerful solid modeling engine that supports ACIS-based solids, surfaces, meshes, regions, bodies, faces, edges, vertices, and more. It also has a direct editing feature that allows you to modify your 3D models without losing their design intent or history. You can push, pull, move, rotate, scale, extrude, fillet, chamfer, shell, slice, loft, sweep, revolve, boolean, heal, deform, and more with your 3D models.

      -

      Intelligent BIM and mechanical design toolsets

      -

      Bricsys BricsCAD Platinum offers intelligent BIM and mechanical design toolsets that allow you to create smart and parametric models for building and mechanical design. It has a BIM toolset that allows you to create building information models from scratch or from existing 2D or 3D drawings. It also has a mechanical toolset that allows you to create mechanical parts and assemblies from standard or custom components. Both toolsets use artificial intelligence (AI) to automate and optimize your design process. They also use constraints and parameters to control the relationships and dimensions of your models.

      -

      Flexible licensing and cloud collaboration options

      -

      Bricsys BricsCAD Platinum offers flexible licensing and cloud collaboration options that allow you to use the software in the way that suits you best. You can choose between perpetual or subscription licenses for single-user or network usage. You can also choose between standalone or cloud-based installation for online or offline access. You can also use the Bricsys 24/7 cloud platform to store, share, manage, and collaborate on your projects with your team and clients. You can also use the Bricsys cloud connection to sync your files and settings across different devices and platforms.

      -

      How to download and install Bricsys BricsCAD Platinum V13.1.11.41196 With Key [TorDigger] for free?

      -

      The steps to download the torrent file from TorDigger

      -

      If you want to download and install Bricsys BricsCAD Platinum V13.1.11.41196 With Key [TorDigger] for free, you will need to use a torrent client, such as uTorrent, BitTorrent, or qBittorrent. You will also need to find a reliable torrent site, such as The Pirate Bay, 1337x, or RARBG. Here are the steps to download the torrent file from TorDigger:

      -
1. Open your web browser and go to the torrent site of your choice.
2. Search for "Bricsys BricsCAD Platinum V13.1.11.41196 With Key [TorDigger]" in the search box.
3. Find the torrent file that matches the name and size of the software you want to download. Make sure it has a high number of seeders and leechers for faster and safer downloading.
4. Click on the torrent file and download it to your computer.
5. Open your torrent client and add the torrent file to start downloading the software.
      The steps to install the software using the key provided by TorDigger

      -

      Once you have downloaded the software, you will need to install it using the key provided by TorDigger. Here are the steps to install the software using the key:

      -
1. Open the folder where you downloaded the software and extract the zip file using a tool like WinRAR or 7-Zip.
2. Run the setup.exe file as administrator and follow the instructions on the screen.
3. When prompted, enter the key that is included in the readme.txt file or in a separate text file named "key".
4. Complete the installation process and launch the software.
      The steps to activate the software and enjoy its full features

      -

      After installing the software, you will need to activate it and enjoy its full features. Here are the steps to activate the software:

      -
1. Open the software and go to the Help menu.
2. Select About BricsCAD and click on Activate License.
3. Enter your name, email address, and company name (optional) and click on Next.
4. Select Online Activation and click on Next.
5. Enter the same key that you used for installation and click on Next.
6. Wait for the activation process to complete and click on Finish.
      Congratulations! You have successfully downloaded, installed, and activated Bricsys BricsCAD Platinum V13.1.11.41196 With Key [TorDigger] for free. You can now enjoy its full features and functionality for 2D and 3D design.


How to compare Bricsys BricsCAD Platinum with other CAD software?


      Bricsys BricsCAD Platinum is not the only CAD software available in the market. There are other popular and widely used CAD software, such as AutoCAD, Solidworks, SketchUp, Revit, and more. How does Bricsys BricsCAD Platinum compare with these other CAD software? What are the main differences, advantages, and disadvantages of each CAD software?


      A table showing the main differences between Bricsys BricsCAD Platinum and other popular CAD software


      To help you compare Bricsys BricsCAD Platinum with other CAD software, we have created a table that shows the main differences between them in terms of features, price, compatibility, and user reviews. Here is the table:

| CAD Software | Features | Price | Compatibility | User Reviews |
| --- | --- | --- | --- | --- |
| Bricsys BricsCAD Platinum | A single, native-DWG platform that offers five product levels for 2D and 3D design, BIM and mechanical design toolsets, direct editing and AI features, flexible licensing and cloud collaboration options. | $1,200 for perpetual license or $400 for annual subscription for Platinum level. Other product levels have lower prices. | Supports all DWG versions from R12 to R2020. Compatible and interoperable with various file formats and third-party applications. | 4.5 out of 5 stars on Capterra. Users praise its performance, versatility, affordability, and compatibility. |
| AutoCAD | A leading CAD software that offers 2D and 3D design, drafting, modeling, rendering, and documentation tools. Includes specialized toolsets for architecture, mechanical, electrical, civil 3D, map 3D, and more. | $1,690 for annual subscription or $4,195 for three-year subscription. No perpetual license option. | Supports all DWG versions from R12 to R2020. Compatible and interoperable with various file formats and third-party applications. | 4.6 out of 5 stars on Capterra. Users praise its functionality, reliability, industry standards, and support. |
| Solidworks | A professional CAD software that offers 3D design, simulation, visualization, manufacturing, and data management tools. Includes specialized solutions for product design, engineering, analysis, testing, and more. | $3,995 for perpetual license or $1,295 for annual subscription for Standard level. Other product levels have higher prices. | Supports various file formats such as DWG, DXF, STL, STEP, IGES, and more. Compatible and interoperable with various third-party applications. | 4.5 out of 5 stars on Capterra. Users praise its features, quality, accuracy, and innovation. |
| SketchUp | A simple and intuitive CAD software that offers 3D design, modeling, rendering, and animation tools. Includes specialized solutions for architecture, interior design, landscape design, and more. | $299 for annual subscription or $1,199 for perpetual license for Pro level. Other product levels have lower prices. | Supports various file formats such as DWG, DXF, STL, OBJ, 3DS, and more. Compatible and interoperable with various third-party applications. | 4.5 out of 5 stars on Capterra. Users praise its ease of use, flexibility, creativity, and fun. |
| Revit | A comprehensive CAD software that offers BIM and 3D design, modeling, analysis, documentation, and collaboration tools. Includes specialized solutions for architecture, structural engineering, MEP engineering, and more. | $2,425 for annual subscription or $6,550 for three-year subscription. No perpetual license option. | Supports various file formats such as DWG, DXF, DWF, IFC, RVT, and more. Compatible and interoperable with various third-party applications. | 4.5 out of 5 stars on Capterra. Users praise its features, functionality, integration, and collaboration. |
      -

      A summary of the advantages and disadvantages of each CAD software

      -

      To help you decide which CAD software is best for you, we have summarized the advantages and disadvantages of each CAD software based on the table above. Here is the summary:

      -
        -
• Bricsys BricsCAD Platinum: Its main advantages are versatility, affordability, compatibility, and performance. It offers a single platform that covers 2D and 3D design, BIM and mechanical design toolsets, direct editing and AI features, flexible licensing, and cloud collaboration, and it interoperates with a wide range of file formats and third-party applications. Its main disadvantages are its relatively low popularity and recognition compared with the larger CAD vendors, and occasional bugs and glitches that still need fixing.
• AutoCAD: Its main advantages are functionality, reliability, industry acceptance, and support. It provides 2D and 3D design, drafting, modeling, rendering, and documentation tools, includes specialized toolsets for many industries, and is backed by strong customer support and a large community. Its main disadvantages are its high price, subscription-only licensing, and complex user interface, and it can be prone to crashes and errors that disrupt your work.
• SolidWorks: Its main advantages are its feature set, quality, accuracy, and innovation. It provides 3D design, simulation, visualization, manufacturing, and data management tools, includes specialized solutions for product design, engineering, analysis, and testing, and is known for high-quality, accurate modeling and simulation. Its main disadvantages are its high price, costly licensing, and heavy hardware requirements; it is also difficult for beginners to learn and may have compatibility issues with other CAD software.
• SketchUp: Its main advantages are ease of use, flexibility, creativity, and fun. It offers simple, intuitive 3D design, modeling, rendering, and animation tools, includes specialized solutions for architecture, interior design, and landscape design, and its user-friendly interface lets you model almost anything you can imagine. Its main disadvantages are limited functionality, lower performance, and weaker support; it is not well suited to complex professional projects, and downloads from third-party sources can carry security risks.
• Revit: Its main advantages are its features, functionality, integration, and collaboration. It offers comprehensive BIM and 3D design, modeling, analysis, documentation, and collaboration tools, includes specialized solutions for architecture, structural engineering, and MEP engineering, and integrates well with other software and with project stakeholders. Its main disadvantages are its high price, subscription-only licensing, and steep learning curve; it is complex and demanding to use and can struggle with very large and complex models.
      -

      Conclusion

      -

In conclusion, Bricsys BricsCAD Platinum is a powerful and versatile CAD application built on a single, native-DWG platform that handles 2D and 3D design, BIM and mechanical design toolsets, direct editing and AI features, flexible licensing, and cloud collaboration. It is compatible and interoperable with various file formats and third-party applications, and it has its own advantages and disadvantages compared with other popular CAD software such as AutoCAD, SolidWorks, SketchUp, and Revit. You can download and install Bricsys BricsCAD Platinum V13.1.11.41196 With Key [TorDigger] for free using a torrent file from TorDigger.

      -

      If you are interested in trying Bricsys BricsCAD Platinum for free, you can follow the steps we have provided in this article. You can also visit the official website of Bricsys to learn more about the software and its features. You can also watch some tutorials and videos on YouTube to see how the software works and what you can do with it.

      -

      We hope you found this article helpful and informative. If you have any questions or feedback, please feel free to leave a comment below. Thank you for reading!

      -

      FAQs

      -

      What are the system requirements for Bricsys BricsCAD Platinum?

      -

      The system requirements for Bricsys BricsCAD Platinum are as follows:

      -
• Operating system: Windows 7 or higher, macOS 10.13 or higher, or Linux (Ubuntu 18.04 LTS or higher)
• Processor: Intel Core i5 or equivalent
• Memory: 8 GB RAM or more
• Graphics: OpenGL 4.0-compatible graphics card with 1 GB VRAM or more
• Storage: 1 GB of free disk space or more
• Internet connection: required for online activation and cloud services

(A small Python sketch for checking the RAM and disk minimums is included below.)
      -
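As a convenience (not part of the original article), here is a minimal Python sketch that checks a machine against the RAM, disk, and OS items above. It assumes the third-party `psutil` package is installed; the GPU/OpenGL requirement is skipped because it cannot be read reliably from the standard library.

```python
# Minimal requirements check (sketch): compares this machine against the
# minimums listed above. RAM and free-disk thresholds mirror the list;
# the OpenGL/GPU requirement is not checked here.
import platform
import shutil

import psutil  # third-party: pip install psutil

MIN_RAM_GB = 8
MIN_FREE_DISK_GB = 1

def check_requirements(install_path: str = ".") -> dict:
    ram_gb = psutil.virtual_memory().total / 1024**3
    free_gb = shutil.disk_usage(install_path).free / 1024**3
    return {
        "os": f"{platform.system()} {platform.release()}",
        "ram_gb": round(ram_gb, 1),
        "ram_ok": ram_gb >= MIN_RAM_GB,
        "free_disk_gb": round(free_gb, 1),
        "disk_ok": free_gb >= MIN_FREE_DISK_GB,
    }

if __name__ == "__main__":
    for key, value in check_requirements().items():
        print(f"{key}: {value}")
```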

      How to get support and updates for Bricsys BricsCAD Platinum?

      -

      You can get support and updates for Bricsys BricsCAD Platinum by visiting the support page of Bricsys. There you can find various resources, such as manuals, tutorials, forums, blogs, webinars, videos, FAQs, knowledge base, downloads, updates, bug reports, feature requests, feedback forms, contact details, and more.

      -

      How to uninstall Bricsys BricsCAD Platinum?

      -

      You can uninstall Bricsys BricsCAD Platinum by following these steps:

      -
        -
1. Close the software if it is running.
2. Go to the Control Panel on your computer and select Programs and Features.
3. Find Bricsys BricsCAD Platinum in the list of installed programs and click on Uninstall.
4. Follow the instructions on the screen to complete the uninstallation process.
5. Delete any remaining files or folders related to the software from your computer.

(A command-line alternative is sketched below.)
      -
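If you prefer the command line, the sketch below (an alternative not described in the original article) shells out to Windows' winget package manager to remove a program by its display name. It assumes winget is available and that the name matches the entry shown in Programs and Features.

```python
# Command-line uninstall sketch (Windows only, assumes winget is installed).
# The display name is passed to "winget uninstall --name"; adjust it to match
# the entry shown in Programs and Features.
import subprocess

def uninstall_by_name(display_name: str) -> int:
    cmd = ["winget", "uninstall", "--name", display_name]
    result = subprocess.run(cmd, check=False)
    return result.returncode

if __name__ == "__main__":
    exit_code = uninstall_by_name("BricsCAD")
    print(f"winget exited with code {exit_code}")
```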

      What are some alternatives to TorDigger for downloading free software?

      -

      Some alternatives to TorDigger for downloading free software are as follows:

      -
        -
      • Kickass Torrents: A popular torrent site that offers a large collection of free software, movies, music, games, books, and more. It has a user-friendly interface and a strong community of users and uploaders.
      • CrackWatch: A website that tracks the status of cracked games and software. It provides links to various sources where you can download free software, such as torrent sites, direct download sites, file hosting sites, and more. It also has a forum where you can discuss and request free software.
      • GetIntoPC: A website that offers free downloads of various software, such as operating systems, antivirus, office tools, graphics, multimedia, development, and more. It provides direct download links and detailed installation guides for each software.
      -

      Is it legal and safe to download free software from TorDigger?

      -

      The answer to this question depends on the type and source of the software you are downloading. Generally speaking, downloading free software from TorDigger is not legal and safe for the following reasons:

      -
        -
• It may violate the intellectual property rights of the software developers and publishers. Downloading software without their permission or a license may be considered piracy or theft, which can result in legal consequences and penalties.
• It may expose your computer and data to various risks and threats. Downloading software from TorDigger may involve using torrent clients, which can leave your computer vulnerable to malware, viruses, spyware, ransomware, and more. It may also involve using keys or cracks, which can compromise your system's security and performance.
• It may affect the quality and functionality of the software. Downloading from TorDigger does not guarantee that you will get the latest version, update, or patch, or the full features and functionality of the software, and it may cause errors, bugs, glitches, or crashes that affect your work.
      -

      Therefore, we do not recommend downloading free software from TorDigger or any other similar sources. We advise you to purchase the software from the official website or authorized resellers of Bricsys or other CAD software developers. This way, you can support their work, enjoy their features, get their support, and protect your computer and data.

      b2dd77e56b
      -
      -
      \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/HACK Wondershare Video Editor 4.5.0.10 Crack PORTABLE.md b/spaces/stomexserde/gpt4-ui/Examples/HACK Wondershare Video Editor 4.5.0.10 Crack PORTABLE.md deleted file mode 100644 index 7312d9f2ce6d2ccffb62f26b973c01be65da5cde..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/HACK Wondershare Video Editor 4.5.0.10 Crack PORTABLE.md +++ /dev/null @@ -1,27 +0,0 @@ - -

      HACK Wondershare Video Editor 4.5.0.10 Crack: How to Edit Videos Like a Pro

      -

      Wondershare Video Editor is a powerful and easy-to-use video editing software that lets you create stunning videos with titles, transitions, effects, and more. Whether you want to make a video for YouTube, Instagram, TikTok, or any other platform, Wondershare Video Editor can help you achieve your creative vision.

      -

      HACK Wondershare Video Editor 4.5.0.10 Crack


      Download Ziphttps://urlgoal.com/2uI8Ei



      -

      But what if you don't want to pay for the full version of Wondershare Video Editor? What if you want to hack it and get all the features for free? Well, that's what this article is about. We will show you how to download and install Wondershare Video Editor 4.5.0.10 Crack, a hacked version of the software that bypasses the activation process and gives you unlimited access to all the tools and resources.

      -

      What is Wondershare Video Editor 4.5.0.10 Crack?

      -

      Wondershare Video Editor 4.5.0.10 Crack is a modified version of Wondershare Video Editor 4.5.0.10, which is an older version of the software that was released in 2017[^2^]. The crack is a file that replaces the original executable file of the software and tricks it into thinking that it has been activated with a valid license key.

      -

      By using Wondershare Video Editor 4.5.0.10 Crack, you can enjoy all the features of the software without paying anything. You can edit videos in various formats, add music, voiceover, text, stickers, filters, animations, and more. You can also export your videos in different resolutions and formats, or upload them directly to YouTube and Vimeo.

      -

      How to Download and Install Wondershare Video Editor 4.5.0.10 Crack?

      -

      If you want to try Wondershare Video Editor 4.5.0.10 Crack, you need to follow these steps:

      -
        -
1. Download Wondershare Video Editor 4.5.0.10 Crack from a reliable source on the internet. You can find it on Google Drive[^1^] or other websites[^2^] [^3^]. Make sure you scan the file with an antivirus program before opening it.
2. Extract the zip file and run the setup file to install Wondershare Video Editor 4.5.0.10 on your computer.
3. Copy the crack file (Crack.exe) and paste it into the installation folder of Wondershare Video Editor 4.5.0.10 (usually C:\Program Files\Wondershare\Video Editor).
4. Run the crack file as administrator and click on the "Patch" button.
5. Wait for the patching process to finish and close the crack window.
6. Launch Wondershare Video Editor 4.5.0.10 and enjoy editing videos like a pro!
      -

      What are the Risks of Using Wondershare Video Editor 4.5.0.10 Crack?

      -

      While Wondershare Video Editor 4.5.0.10 Crack may seem like a tempting option for video editing enthusiasts who don't want to spend money on the software, it also comes with some risks and drawbacks that you should be aware of:

      -
        -
• Wondershare Video Editor 4.5.0.10 Crack is illegal and violates the terms of service of Wondershare Filmora (the official name of Wondershare Video Editor). By using it, you are infringing on the intellectual property rights of Wondershare and may face legal consequences.
• Wondershare Video Editor 4.5.0.10 Crack is outdated and lacks many features and improvements that have been added to the latest version of Wondershare Filmora[^4^] [^5^]. For example, you won't be able to use AI-based features like Smart Cutout, Audio Stretch, Audio Denoise, Auto Reframe, Silence Detection, etc.
• Wondershare Video Editor 4.5.0.10 Crack may contain viruses, malware, spyware, or other malicious code that can harm your computer or compromise your data.

        -

        81aa517590
        -
        -
        \ No newline at end of file diff --git a/spaces/stomexserde/gpt4-ui/Examples/Kaptaan Hd Download VERIFIED.md b/spaces/stomexserde/gpt4-ui/Examples/Kaptaan Hd Download VERIFIED.md deleted file mode 100644 index 47822a0d5ad7c2b349edc9ffa96fb4a3aa75be69..0000000000000000000000000000000000000000 --- a/spaces/stomexserde/gpt4-ui/Examples/Kaptaan Hd Download VERIFIED.md +++ /dev/null @@ -1,22 +0,0 @@ -
        -

        Kaptaan Hd Download: How to Watch the Latest Punjabi Movie Online

        -

        If you are a fan of Punjabi movies, you might be interested in watching Kaptaan, a comedy-drama film starring Gippy Grewal, Monica Gill, and Karishma Kotak. The movie was released in 2016 and received positive reviews from critics and audiences alike. Kaptaan tells the story of a lawyer who decides to pursue his passion for singing after losing a case. Along the way, he meets a journalist who helps him achieve his dream.

        -

        But how can you watch Kaptaan online in HD quality? There are many websites that claim to offer Kaptaan Hd Download, but not all of them are safe and legal. Some of them may contain viruses, malware, or pop-up ads that can harm your device or compromise your privacy. Others may have low-quality videos or broken links that can ruin your viewing experience.

        -

        Kaptaan Hd Download


        DOWNLOAD ✑ ✑ ✑ https://urlgoal.com/2uIaoH



        -

        That's why we have compiled a list of the best and most reliable sources to watch Kaptaan online in HD quality. These are:

        -
          -
        • Netflix: Netflix is one of the most popular and trusted streaming platforms in the world. It has a huge collection of movies and shows from different genres and languages. You can watch Kaptaan on Netflix with a subscription plan that starts from Rs. 199 per month. You can also download the movie on your device and watch it offline.
        • Prime Video: Prime Video is another great option to watch Kaptaan online in HD quality. It is a part of Amazon Prime, which offers many benefits such as free delivery, music streaming, and more. You can watch Kaptaan on Prime Video with a subscription plan that costs Rs. 129 per month or Rs. 999 per year. You can also download the movie on your device and watch it offline.
        • Hotstar: Hotstar is a leading streaming platform in India that offers movies, shows, sports, and news. You can watch Kaptaan on Hotstar with a subscription plan that costs Rs. 299 per month or Rs. 1499 per year. You can also download the movie on your device and watch it offline.
        -

These are some of the best and fully legal ways to watch Kaptaan online in HD quality. We hope you enjoy watching this hilarious and entertaining movie with your friends and family.

        - -

        But what if you don't have a subscription to any of these streaming platforms? Is there any other way to watch Kaptaan online in HD quality? Well, there are some other websites that offer Kaptaan Hd Download, but they are not recommended for various reasons. These are:

        -
          -
        • Filmywap: Filmywap is a notorious website that leaks pirated movies and shows online. It has a large collection of Bollywood, Hollywood, and regional movies, including Kaptaan. However, this website is illegal and unethical, as it violates the copyright laws and deprives the filmmakers of their rightful earnings. Moreover, this website is unsafe and risky, as it may expose your device to viruses, malware, or pop-up ads that can steal your personal information or damage your device.
        • Moviescounter: Moviescounter is another website that offers Kaptaan Hd Download, along with many other movies and shows. However, this website is also illegal and unethical, as it infringes the intellectual property rights of the creators and distributors. Furthermore, this website is unreliable and low-quality, as it may have poor video quality, distorted audio, or broken links that can spoil your viewing experience.
        • Worldfree4u: Worldfree4u is a website that provides Kaptaan Hd Download, as well as many other movies and shows. However, this website is also illegal and unethical, as it violates the law and harms the film industry. Additionally, this website is untrustworthy and dangerous, as it may contain harmful content such as adult material, violence, or hate speech that can offend or harm you or others.
        -

These are some of the websites that offer Kaptaan Hd Download, but they are not advisable for the reasons above. We strongly recommend that you avoid these websites and use only the legal and safe sources mentioned earlier. By doing so, you will not only enjoy watching Kaptaan online in HD quality but also support the hard work and talent of the filmmakers and actors.

        7b8c122e87
        -
        -
        \ No newline at end of file diff --git a/spaces/sub314xxl/MetaGPT/metagpt/utils/serialize.py b/spaces/sub314xxl/MetaGPT/metagpt/utils/serialize.py deleted file mode 100644 index ffafca8cdfb20620adeda9053bb8e4be781a7ab4..0000000000000000000000000000000000000000 --- a/spaces/sub314xxl/MetaGPT/metagpt/utils/serialize.py +++ /dev/null @@ -1,67 +0,0 @@ -#!/usr/bin/env python -# -*- coding: utf-8 -*- -# @Desc : the implement of serialization and deserialization - -import copy -import pickle -from typing import Dict, List, Tuple - -from metagpt.actions.action_output import ActionOutput -from metagpt.schema import Message - - -def actionoutout_schema_to_mapping(schema: Dict) -> Dict: - """ - directly traverse the `properties` in the first level. - schema structure likes - ``` - { - "title":"prd", - "type":"object", - "properties":{ - "Original Requirements":{ - "title":"Original Requirements", - "type":"string" - }, - }, - "required":[ - "Original Requirements", - ] - } - ``` - """ - mapping = dict() - for field, property in schema["properties"].items(): - if property["type"] == "string": - mapping[field] = (str, ...) - elif property["type"] == "array" and property["items"]["type"] == "string": - mapping[field] = (List[str], ...) - elif property["type"] == "array" and property["items"]["type"] == "array": - # here only consider the `Tuple[str, str]` situation - mapping[field] = (List[Tuple[str, str]], ...) - return mapping - - -def serialize_message(message: Message): - message_cp = copy.deepcopy(message) # avoid `instruct_content` value update by reference - ic = message_cp.instruct_content - if ic: - # model create by pydantic create_model like `pydantic.main.prd`, can't pickle.dump directly - schema = ic.schema() - mapping = actionoutout_schema_to_mapping(schema) - - message_cp.instruct_content = {"class": schema["title"], "mapping": mapping, "value": ic.dict()} - msg_ser = pickle.dumps(message_cp) - - return msg_ser - - -def deserialize_message(message_ser: str) -> Message: - message = pickle.loads(message_ser) - if message.instruct_content: - ic = message.instruct_content - ic_obj = ActionOutput.create_model_class(class_name=ic["class"], mapping=ic["mapping"]) - ic_new = ic_obj(**ic["value"]) - message.instruct_content = ic_new - - return message diff --git a/spaces/supertori/files/stable-diffusion-webui/modules/ui_common.py b/spaces/supertori/files/stable-diffusion-webui/modules/ui_common.py deleted file mode 100644 index dbd1b5ef524e28285a9cced5a757aee9ac81a272..0000000000000000000000000000000000000000 --- a/spaces/supertori/files/stable-diffusion-webui/modules/ui_common.py +++ /dev/null @@ -1,213 +0,0 @@ -import json -import html -import os -import platform -import sys - -import gradio as gr -import subprocess as sp - -from modules import call_queue, shared -from modules.generation_parameters_copypaste import image_from_url_text -import modules.images - -folder_symbol = '\U0001f4c2' # 📂 - - -def update_generation_info(generation_info, html_info, img_index): - try: - generation_info = json.loads(generation_info) - if img_index < 0 or img_index >= len(generation_info["infotexts"]): - return html_info, gr.update() - return plaintext_to_html(generation_info["infotexts"][img_index]), gr.update() - except Exception: - pass - # if the json parse or anything else fails, just return the old html_info - return html_info, gr.update() - - -def plaintext_to_html(text): - text = "

        " + "
        \n".join([f"{html.escape(x)}" for x in text.split('\n')]) + "

        " - return text - - -def save_files(js_data, images, do_make_zip, index): - import csv - filenames = [] - fullfns = [] - - #quick dictionary to class object conversion. Its necessary due apply_filename_pattern requiring it - class MyObject: - def __init__(self, d=None): - if d is not None: - for key, value in d.items(): - setattr(self, key, value) - - data = json.loads(js_data) - - p = MyObject(data) - path = shared.opts.outdir_save - save_to_dirs = shared.opts.use_save_to_dirs_for_ui - extension: str = shared.opts.samples_format - start_index = 0 - - if index > -1 and shared.opts.save_selected_only and (index >= data["index_of_first_image"]): # ensures we are looking at a specific non-grid picture, and we have save_selected_only - - images = [images[index]] - start_index = index - - os.makedirs(shared.opts.outdir_save, exist_ok=True) - - with open(os.path.join(shared.opts.outdir_save, "log.csv"), "a", encoding="utf8", newline='') as file: - at_start = file.tell() == 0 - writer = csv.writer(file) - if at_start: - writer.writerow(["prompt", "seed", "width", "height", "sampler", "cfgs", "steps", "filename", "negative_prompt"]) - - for image_index, filedata in enumerate(images, start_index): - image = image_from_url_text(filedata) - - is_grid = image_index < p.index_of_first_image - i = 0 if is_grid else (image_index - p.index_of_first_image) - - fullfn, txt_fullfn = modules.images.save_image(image, path, "", seed=p.all_seeds[i], prompt=p.all_prompts[i], extension=extension, info=p.infotexts[image_index], grid=is_grid, p=p, save_to_dirs=save_to_dirs) - - filename = os.path.relpath(fullfn, path) - filenames.append(filename) - fullfns.append(fullfn) - if txt_fullfn: - filenames.append(os.path.basename(txt_fullfn)) - fullfns.append(txt_fullfn) - - writer.writerow([data["prompt"], data["seed"], data["width"], data["height"], data["sampler_name"], data["cfg_scale"], data["steps"], filenames[0], data["negative_prompt"]]) - - # Make Zip - if do_make_zip: - zip_filepath = os.path.join(path, "images.zip") - - from zipfile import ZipFile - with ZipFile(zip_filepath, "w") as zip_file: - for i in range(len(fullfns)): - with open(fullfns[i], mode="rb") as f: - zip_file.writestr(filenames[i], f.read()) - fullfns.insert(0, zip_filepath) - - return gr.File.update(value=fullfns, visible=True), plaintext_to_html(f"Saved: {filenames[0]}") - - -def create_output_panel(tabname, outdir): - from modules import shared - import modules.generation_parameters_copypaste as parameters_copypaste - - def open_folder(f): - if not os.path.exists(f): - print(f'Folder "{f}" does not exist. After you create an image, the folder will be created.') - return - elif not os.path.isdir(f): - print(f""" -WARNING -An open_folder request was made with an argument that is not a folder. -This could be an error or a malicious attempt to run code on your computer. 
-Requested path was: {f} -""", file=sys.stderr) - return - - if not shared.cmd_opts.hide_ui_dir_config: - path = os.path.normpath(f) - if platform.system() == "Windows": - os.startfile(path) - elif platform.system() == "Darwin": - sp.Popen(["open", path]) - elif "microsoft-standard-WSL2" in platform.uname().release: - sp.Popen(["wsl-open", path]) - else: - sp.Popen(["xdg-open", path]) - - with gr.Column(variant='panel', elem_id=f"{tabname}_results"): - with gr.Group(elem_id=f"{tabname}_gallery_container"): - result_gallery = gr.Gallery(label='Output', show_label=False, elem_id=f"{tabname}_gallery").style(grid=4) - - generation_info = None - with gr.Column(): - with gr.Row(elem_id=f"image_buttons_{tabname}"): - open_folder_button = gr.Button(folder_symbol, elem_id="hidden_element" if shared.cmd_opts.hide_ui_dir_config else f'open_folder_{tabname}') - - if tabname != "extras": - save = gr.Button('Save', elem_id=f'save_{tabname}') - save_zip = gr.Button('Zip', elem_id=f'save_zip_{tabname}') - - buttons = parameters_copypaste.create_buttons(["img2img", "inpaint", "extras"]) - - open_folder_button.click( - fn=lambda: open_folder(shared.opts.outdir_samples or outdir), - inputs=[], - outputs=[], - ) - - if tabname != "extras": - with gr.Row(): - download_files = gr.File(None, file_count="multiple", interactive=False, show_label=False, visible=False, elem_id=f'download_files_{tabname}') - - with gr.Group(): - html_info = gr.HTML(elem_id=f'html_info_{tabname}') - html_log = gr.HTML(elem_id=f'html_log_{tabname}') - - generation_info = gr.Textbox(visible=False, elem_id=f'generation_info_{tabname}') - if tabname == 'txt2img' or tabname == 'img2img': - generation_info_button = gr.Button(visible=False, elem_id=f"{tabname}_generation_info_button") - generation_info_button.click( - fn=update_generation_info, - _js="function(x, y, z){ return [x, y, selected_gallery_index()] }", - inputs=[generation_info, html_info, html_info], - outputs=[html_info, html_info], - ) - - save.click( - fn=call_queue.wrap_gradio_call(save_files), - _js="(x, y, z, w) => [x, y, false, selected_gallery_index()]", - inputs=[ - generation_info, - result_gallery, - html_info, - html_info, - ], - outputs=[ - download_files, - html_log, - ], - show_progress=False, - ) - - save_zip.click( - fn=call_queue.wrap_gradio_call(save_files), - _js="(x, y, z, w) => [x, y, true, selected_gallery_index()]", - inputs=[ - generation_info, - result_gallery, - html_info, - html_info, - ], - outputs=[ - download_files, - html_log, - ] - ) - - else: - html_info_x = gr.HTML(elem_id=f'html_info_x_{tabname}') - html_info = gr.HTML(elem_id=f'html_info_{tabname}') - html_log = gr.HTML(elem_id=f'html_log_{tabname}') - - paste_field_names = [] - if tabname == "txt2img": - paste_field_names = modules.scripts.scripts_txt2img.paste_field_names - elif tabname == "img2img": - paste_field_names = modules.scripts.scripts_img2img.paste_field_names - - for paste_tabname, paste_button in buttons.items(): - parameters_copypaste.register_paste_params_button(parameters_copypaste.ParamBinding( - paste_button=paste_button, tabname=paste_tabname, source_tabname="txt2img" if tabname == "txt2img" else None, source_image_component=result_gallery, - paste_field_names=paste_field_names - )) - - return result_gallery, generation_info if tabname != "extras" else html_info_x, html_info, html_log diff --git a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Filter Forge Crack VERIFIED Keygen 212.md 
b/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Filter Forge Crack VERIFIED Keygen 212.md deleted file mode 100644 index f2a5f6502d096d67a7e67e2ae7fb9693aabb1710..0000000000000000000000000000000000000000 --- a/spaces/suppsumstagza/text-to-image-stable-diffusion-v1-5/scripts/Filter Forge Crack VERIFIED Keygen 212.md +++ /dev/null @@ -1,6 +0,0 @@ -

        filter forge crack keygen 212


        Download Filehttps://cinurl.com/2uEXKq



        -
        -
        -
        -

        diff --git a/spaces/szukevin/VISOR-GPT/train/scripts/convert_xlmroberta_from_huggingface_to_tencentpretrain.py b/spaces/szukevin/VISOR-GPT/train/scripts/convert_xlmroberta_from_huggingface_to_tencentpretrain.py deleted file mode 100644 index 8038440e2d05d92602bc1f08e3d18c62f1c64902..0000000000000000000000000000000000000000 --- a/spaces/szukevin/VISOR-GPT/train/scripts/convert_xlmroberta_from_huggingface_to_tencentpretrain.py +++ /dev/null @@ -1,79 +0,0 @@ -import argparse -import collections -import torch - - -parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter) -parser.add_argument("--input_model_path", type=str, default="models/input_model.bin", - help=".") -parser.add_argument("--output_model_path", type=str, default="models/output_model.bin", - help=".") -parser.add_argument("--layers_num", type=int, default=12, help=".") - -args = parser.parse_args() - -input_model = torch.load(args.input_model_path, map_location='cpu') - -output_model = collections.OrderedDict() -emb_size = \ - input_model["roberta.embeddings.word_embeddings.weight"].shape[1] - -output_model["embedding.word.embedding.weight"] = \ - input_model["roberta.embeddings.word_embeddings.weight"] -output_model["embedding.pos.embedding.weight"] = \ - torch.cat((input_model["roberta.embeddings.position_embeddings.weight"][2:], torch.zeros(2, emb_size)), 0) -output_model["embedding.seg.embedding.weight"] = \ - torch.cat((torch.Tensor(torch.zeros(2, emb_size)), input_model["roberta.embeddings.token_type_embeddings.weight"]), dim=0) -output_model["embedding.layer_norm.gamma"] = \ - input_model["roberta.embeddings.LayerNorm.weight"] -output_model["embedding.layer_norm.beta"] = \ - input_model["roberta.embeddings.LayerNorm.bias"] - -for i in range(args.layers_num): - output_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.0.weight"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.self.query.weight"] - output_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.0.bias"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.self.query.bias"] - output_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.1.weight"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.self.key.weight"] - output_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.1.bias"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.self.key.bias"] - output_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.2.weight"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.self.value.weight"] - output_model["encoder.transformer." + str(i) + ".self_attn.linear_layers.2.bias"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.self.value.bias"] - output_model["encoder.transformer." + str(i) + ".self_attn.final_linear.weight"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.output.dense.weight"] - output_model["encoder.transformer." + str(i) + ".self_attn.final_linear.bias"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.output.dense.bias"] - output_model["encoder.transformer." + str(i) + ".layer_norm_1.gamma"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.output.LayerNorm.weight"] - output_model["encoder.transformer." + str(i) + ".layer_norm_1.beta"] = \ - input_model["roberta.encoder.layer." + str(i) + ".attention.output.LayerNorm.bias"] - output_model["encoder.transformer." 
+ str(i) + ".feed_forward.linear_1.weight"] = \ - input_model["roberta.encoder.layer." + str(i) + ".intermediate.dense.weight"] - output_model["encoder.transformer." + str(i) + ".feed_forward.linear_1.bias"] = \ - input_model["roberta.encoder.layer." + str(i) + ".intermediate.dense.bias"] - output_model["encoder.transformer." + str(i) + ".feed_forward.linear_2.weight"] = \ - input_model["roberta.encoder.layer." + str(i) + ".output.dense.weight"] - output_model["encoder.transformer." + str(i) + ".feed_forward.linear_2.bias"] = \ - input_model["roberta.encoder.layer." + str(i) + ".output.dense.bias"] - output_model["encoder.transformer." + str(i) + ".layer_norm_2.gamma"] = \ - input_model["roberta.encoder.layer." + str(i) + ".output.LayerNorm.weight"] - output_model["encoder.transformer." + str(i) + ".layer_norm_2.beta"] = \ - input_model["roberta.encoder.layer." + str(i) + ".output.LayerNorm.bias"] - -output_model["target.mlm.linear_1.weight"] = \ - input_model["lm_head.dense.weight"] -output_model["target.mlm.linear_1.bias"] = \ - input_model["lm_head.dense.bias"] -output_model["target.mlm.layer_norm.gamma"] = \ - input_model["lm_head.layer_norm.weight"] -output_model["target.mlm.layer_norm.beta"] = \ - input_model["lm_head.layer_norm.bias"] -output_model["target.mlm.linear_2.weight"] = \ - input_model["lm_head.decoder.weight"] -output_model["target.mlm.linear_2.bias"] = \ - input_model["lm_head.bias"] - -torch.save(output_model, args.output_model_path) diff --git a/spaces/teach/README/README.md b/spaces/teach/README/README.md deleted file mode 100644 index ef2e37f4904715a67134a2873a2062b085e1208b..0000000000000000000000000000000000000000 --- a/spaces/teach/README/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: README -emoji: 🌍 -colorFrom: indigo -colorTo: green -sdk: static -pinned: false ---- -

        -We’ve assembled a toolkit that university instructors can use to easily prepare labs, homework, or classes. The content is designed in a self-contained way such that it can easily be incorporated into the existing curriculum. This content is free and uses widely known Open Source technologies (transformers, gradio, etc). -

        -

- 🤗 If you are a professor teaching undergraduate ML classes and you would like to teach how to build Machine Learning projects collaboratively, join this workshop page! Now that we have gathered more than 20 members, you can register for the workshop, which will be held on June 6 at 6 pm (CET)!

        diff --git a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/model/__init__.py b/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/model/__init__.py deleted file mode 100644 index 6d6f0775a0abb2c3e220343a4feb05c70c2c7779..0000000000000000000000000000000000000000 --- a/spaces/teowu/Q-Instruct-on-mPLUG-Owl-2/mplug_owl2/model/__init__.py +++ /dev/null @@ -1,2 +0,0 @@ -from .modeling_mplug_owl2 import MPLUGOwl2LlamaForCausalLM -from .configuration_mplug_owl2 import MPLUGOwl2Config \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/1979 Revolution Black Friday Download Pc Games 88.md b/spaces/terfces0erbo/CollegeProjectV2/1979 Revolution Black Friday Download Pc Games 88.md deleted file mode 100644 index 354dd56f265b27c9deed3ee5e8bbd4b5805db302..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/1979 Revolution Black Friday Download Pc Games 88.md +++ /dev/null @@ -1,9 +0,0 @@ -
        -

Sometime in the future, the world is in ruins and the human race is at its lowest. A self-proclaimed demi-god by the name of Apocalypse is the last man standing. Apocalypse plans to conquer the world and usher in a new era by killing all of humanity except four survivors, only to be stopped by a mysterious group of human survivors called "the four horsemen".

        -

Alone, the hero must save the world from dark forces. The world is full of secrets, and if you think you know everything there is to know about the mysterious world of heroes, heroes in training, villains, super villains, and the heroes in between, then you don't know anything at all.

        -

        1979 Revolution: Black Friday download pc games 88


        Downloadhttps://bytlly.com/2uGjYj



        -

The time vortex is a powerful force that allows the time masters to travel through space and time. The TARDIS, which they use to travel, is a living ship that is able to "speak" to the Doctor through various means, and it can be transported anywhere in space and time by the Doctor.

        -

A scrawny army reject at the outset of World War II, Steve Rogers became a powerful super hero and decorated veteran known as Captain America by the war's end. Denied entrance into the armed forces due to his health, Steve realized his only hope of fighting for liberty and justice was to volunteer for a risky military science experiment. He was injected with super-soldier serum and physically transformed into a powerful enemy of evil with legendary strength of body and spirit.

        -

Little did Wendy and Peter Pan know that as they journeyed around Neverland, they would be swept up into the greatest adventure of all time. From the ship that could go anywhere in time and space to the magical world of the Lost Boys, and the magical place called Neverland itself, the Wendy and Peter Pan adventure is one that will never be forgotten.

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/Circuitlogix Pro V7 04 Crack VERIFIEDed Rar.md b/spaces/terfces0erbo/CollegeProjectV2/Circuitlogix Pro V7 04 Crack VERIFIEDed Rar.md deleted file mode 100644 index 25339443a2cd76593f5d8dc6e348a6e530595230..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Circuitlogix Pro V7 04 Crack VERIFIEDed Rar.md +++ /dev/null @@ -1,6 +0,0 @@ -

        circuitlogix pro v7 04 cracked rar


        Download File 🆗 https://bytlly.com/2uGiyA



        -
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/Civil 3D 2018 (x64) Keygen Keygen.md b/spaces/terfces0erbo/CollegeProjectV2/Civil 3D 2018 (x64) Keygen Keygen.md deleted file mode 100644 index 47948a13bf0d9b4bd060b192bf9d13711cc70ab3..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Civil 3D 2018 (x64) Keygen Keygen.md +++ /dev/null @@ -1,6 +0,0 @@ -

        Civil 3D 2018 (x64) Keygen Keygen


        Download Filehttps://bytlly.com/2uGlYb



        -
        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/E Stim Mp3 Files Man.zip _BEST_.md b/spaces/terfces0erbo/CollegeProjectV2/E Stim Mp3 Files Man.zip _BEST_.md deleted file mode 100644 index 0d3256573612fa0ddeddead8450bd19efe4b7757..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/E Stim Mp3 Files Man.zip _BEST_.md +++ /dev/null @@ -1,8 +0,0 @@ - -

This stim file is long, intense, and unpredictable. Basically, a second recording is made of the same "hit" but with a slightly different time delay to create a dichotic version. This is very effective for stimulating one side of the brain while the other is being stimulated in the opposite direction, so it can bring about opposite responses. The shorter versions can be set to a faster stim rate and used for making shorter clips. It will also create an inverted version if you set the drop-out frequency to a low value.

        -
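To make the delay idea above concrete, here is a minimal sketch (not part of the original file set) that turns a 16-bit mono WAV into a stereo "dichotic" version by giving the right channel a copy of the audio delayed by a few milliseconds. The input file name and delay value are placeholders.

```python
# Dichotic-version sketch: assumes "hit.wav" is a 16-bit mono WAV file.
# Writes a stereo file whose right channel is the same audio delayed by
# DELAY_MS milliseconds relative to the left channel.
import wave
import numpy as np

DELAY_MS = 15

with wave.open("hit.wav", "rb") as src:
    params = src.getparams()
    assert params.nchannels == 1 and params.sampwidth == 2, "expects 16-bit mono"
    frames = np.frombuffer(src.readframes(params.nframes), dtype=np.int16)

delay = int(params.framerate * DELAY_MS / 1000)
left = np.concatenate([frames, np.zeros(delay, dtype=np.int16)])
right = np.concatenate([np.zeros(delay, dtype=np.int16), frames])
stereo = np.column_stack([left, right]).ravel()  # interleave L/R samples

with wave.open("hit_dichotic.wav", "wb") as dst:
    dst.setnchannels(2)
    dst.setsampwidth(2)
    dst.setframerate(params.framerate)
    dst.writeframes(stereo.astype(np.int16).tobytes())
```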

Space Mary's Story is a heart-wrenching story about a space shuttle astronaut who has been reassigned to a different space mission. It comes in two versions: a high-quality version with the original voice and a low-quality version with the new voice to fit into the ProTracker format. The result is a higher-quality version because the compression settings for the ProTracker version are altered to suit the voice.

        -

        E Stim mp3 files Man.zip


        Download Zip ===> https://bytlly.com/2uGkn7



        -

These two files will give you a richer, fuller sound, a smoother sound, more pitch changes, and a "spoken as word" sound effect, and with the adjusted settings on your computer you can experience how the filters work. The bottom one (version 2.0) includes the original spoken tracks along with the 2.0 version with the filters. If you are more comfortable with the original version, you can click the "original" link to download that version. Both files will sync when you double-click to play. Both files are in Apple ProTracker format, so you will need ProTracker 1.3 or above to play them.

        -

Audacity's ProTracker module is a free plugin that allows you to quickly generate and edit music files using the Audacity software on a Mac or PC. These music files can be shared quickly and easily among auditors. (You can also edit and improve them yourself.) The ProTracker format uses all of the flexible editing features of Audacity, as well as the automation of Ableton and the specialized editing capabilities of the Kontakt software. These flexible features allow you to make your own variation on the waves library, and only a handful of clicks are needed to generate and edit each track. ProTracker is supported on almost all major platforms, including Mac OS X, Windows, Linux, Android, and iOS devices. The only exception is Apple products, which do not support the ProTracker format.

        899543212b
        -
        -
        \ No newline at end of file diff --git a/spaces/terfces0erbo/CollegeProjectV2/EaseUS Data Recovery Wizard Technician 12.2.0 Keygen Utorrent [VERIFIED].md b/spaces/terfces0erbo/CollegeProjectV2/EaseUS Data Recovery Wizard Technician 12.2.0 Keygen Utorrent [VERIFIED].md deleted file mode 100644 index 383e839614f1345f1a5cf5031b28f05ff7b98a17..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/EaseUS Data Recovery Wizard Technician 12.2.0 Keygen Utorrent [VERIFIED].md +++ /dev/null @@ -1,6 +0,0 @@ -

        EaseUS Data Recovery Wizard Technician 12.2.0 Keygen utorrent


        Download Filehttps://bytlly.com/2uGj2p



        -
        -
        -

        diff --git a/spaces/terfces0erbo/CollegeProjectV2/Facerig Pro V1 957 Rar Zip !!BETTER!!.md b/spaces/terfces0erbo/CollegeProjectV2/Facerig Pro V1 957 Rar Zip !!BETTER!!.md deleted file mode 100644 index d1b98ed7af1e1beef69e1bb9fc1dcca893163d2c..0000000000000000000000000000000000000000 --- a/spaces/terfces0erbo/CollegeProjectV2/Facerig Pro V1 957 Rar Zip !!BETTER!!.md +++ /dev/null @@ -1,6 +0,0 @@ -

        facerig pro v1 957 rar zip


        DOWNLOAD ❤❤❤ https://bytlly.com/2uGkVW



        -
        - d5da3c52bf
        -
        -
        -

        diff --git a/spaces/theaster/RVC-New-Arknights/app-full.py b/spaces/theaster/RVC-New-Arknights/app-full.py deleted file mode 100644 index 925cc93a66262cd220df53b3ace4a4a3e975519a..0000000000000000000000000000000000000000 --- a/spaces/theaster/RVC-New-Arknights/app-full.py +++ /dev/null @@ -1,267 +0,0 @@ -import os -import glob -import json -import traceback -import logging -import gradio as gr -import numpy as np -import librosa -import torch -import asyncio -import edge_tts -import yt_dlp -import ffmpeg -import subprocess -import sys -import io -import wave -from datetime import datetime -from fairseq import checkpoint_utils -from infer_pack.models import SynthesizerTrnMs256NSFsid, SynthesizerTrnMs256NSFsid_nono -from vc_infer_pipeline import VC -from config import Config -config = Config() -logging.getLogger("numba").setLevel(logging.WARNING) -limitation = os.getenv("SYSTEM") == "spaces" # limit audio length in huggingface spaces - -def create_vc_fn(tgt_sr, net_g, vc, if_f0, file_index): - def vc_fn( - input_audio, - upload_audio, - upload_mode, - f0_up_key, - f0_method, - index_rate, - tts_mode, - tts_text, - tts_voice - ): - try: - if tts_mode: - if len(tts_text) > 100 and limitation: - return "Text is too long", None - if tts_text is None or tts_voice is None: - return "You need to enter text and select a voice", None - asyncio.run(edge_tts.Communicate(tts_text, "-".join(tts_voice.split('-')[:-1])).save("tts.mp3")) - audio, sr = librosa.load("tts.mp3", sr=16000, mono=True) - else: - if upload_mode: - if input_audio is None: - return "You need to upload an audio", None - sampling_rate, audio = upload_audio - duration = audio.shape[0] / sampling_rate - audio = (audio / np.iinfo(audio.dtype).max).astype(np.float32) - if len(audio.shape) > 1: - audio = librosa.to_mono(audio.transpose(1, 0)) - if sampling_rate != 16000: - audio = librosa.resample(audio, orig_sr=sampling_rate, target_sr=16000) - else: - audio, sr = librosa.load(input_audio, sr=16000, mono=True) - times = [0, 0, 0] - f0_up_key = int(f0_up_key) - audio_opt = vc.pipeline( - hubert_model, - net_g, - 0, - audio, - times, - f0_up_key, - f0_method, - file_index, - index_rate, - if_f0, - f0_file=None, - ) - print( - f"[{datetime.now().strftime('%Y-%m-%d %H:%M')}]: npy: {times[0]}, f0: {times[1]}s, infer: {times[2]}s" - ) - return "Success", (tgt_sr, audio_opt) - except: - info = traceback.format_exc() - print(info) - return info, (None, None) - return vc_fn - -def cut_vocal_and_inst(yt_url): - if yt_url != "": - if not os.path.exists("youtube_audio"): - os.mkdir("youtube_audio") - ydl_opts = { - 'format': 'bestaudio/best', - 'postprocessors': [{ - 'key': 'FFmpegExtractAudio', - 'preferredcodec': 'wav', - }], - "outtmpl": 'youtube_audio/audio', - } - with yt_dlp.YoutubeDL(ydl_opts) as ydl: - ydl.download([yt_url]) - yt_audio_path = "youtube_audio/audio.wav" - command = f"demucs --two-stems=vocals {yt_audio_path}" - result = subprocess.run(command.split(), stdout=subprocess.PIPE) - print(result.stdout.decode()) - return ("separated/htdemucs/audio/vocals.wav", "separated/htdemucs/audio/no_vocals.wav", yt_audio_path, "separated/htdemucs/audio/vocals.wav") - -def combine_vocal_and_inst(audio_data, audio_volume): - print(audio_data) - if not os.path.exists("result"): - os.mkdir("result") - vocal_path = "result/output.wav" - inst_path = "separated/htdemucs/audio/no_vocals.wav" - output_path = "result/combine.mp3" - with wave.open(vocal_path, "w") as wave_file: - wave_file.setnchannels(1) - wave_file.setsampwidth(2) - 
wave_file.setframerate(audio_data[0]) - wave_file.writeframes(audio_data[1].tobytes()) - command = f'ffmpeg -y -i {inst_path} -i {vocal_path} -filter_complex [1:a]volume={audio_volume}dB[v];[0:a][v]amix=inputs=2:duration=longest -b:a 320k -c:a libmp3lame {output_path}' - result = subprocess.run(command.split(), stdout=subprocess.PIPE) - return output_path - -def load_hubert(): - global hubert_model - models, _, _ = checkpoint_utils.load_model_ensemble_and_task( - ["hubert_base.pt"], - suffix="", - ) - hubert_model = models[0] - hubert_model = hubert_model.to(config.device) - if config.is_half: - hubert_model = hubert_model.half() - else: - hubert_model = hubert_model.float() - hubert_model.eval() - -def change_to_tts_mode(tts_mode, upload_mode): - if tts_mode: - return gr.Textbox.update(visible=False), gr.Audio.update(visible=False), gr.Checkbox.update(visible=False), gr.Textbox.update(visible=True), gr.Dropdown.update(visible=True) - else: - if upload_mode: - return gr.Textbox.update(visible=False), gr.Audio.update(visible=True), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False) - else: - return gr.Textbox.update(visible=True), gr.Audio.update(visible=False), gr.Checkbox.update(visible=True), gr.Textbox.update(visible=False), gr.Dropdown.update(visible=False) - -def change_to_upload_mode(upload_mode): - if upload_mode: - return gr.Textbox().update(visible=False), gr.Audio().update(visible=True) - else: - return gr.Textbox().update(visible=True), gr.Audio().update(visible=False) - -if __name__ == '__main__': - load_hubert() - models = [] - categories = [] - tts_voice_list = asyncio.get_event_loop().run_until_complete(edge_tts.list_voices()) - voices = [f"{v['ShortName']}-{v['Gender']}" for v in tts_voice_list] - with open("weights/folder_info.json", "r", encoding="utf-8") as f: - folder_info = json.load(f) - for name, info in folder_info.items(): - if not info['enable']: - continue - title = info['title'] - folder = info['folder_path'] - description = info['description'] - categories.append([title, folder, description]) - for (title, folder, description) in categories: - with open(f"weights/{folder}/model_info.json", "r", encoding="utf-8") as f: - models_info = json.load(f) - for name, info in models_info.items(): - if not info['enable']: - continue - title = info['title'] - author = info.get("author", None) - cover = f"weights/{folder}/{name}/{info['cover']}" - index = f"weights/{folder}/{name}/{info['feature_retrieval_library']}" - cpt = torch.load(f"weights/{folder}/{name}/{name}.pth", map_location="cpu") - tgt_sr = cpt["config"][-1] - cpt["config"][-3] = cpt["weight"]["emb_g.weight"].shape[0] # n_spk - if_f0 = cpt.get("f0", 1) - if if_f0 == 1: - net_g = SynthesizerTrnMs256NSFsid(*cpt["config"], is_half=config.is_half) - else: - net_g = SynthesizerTrnMs256NSFsid_nono(*cpt["config"]) - del net_g.enc_q - print(net_g.load_state_dict(cpt["weight"], strict=False)) - net_g.eval().to(config.device) - if config.is_half: - net_g = net_g.half() - else: - net_g = net_g.float() - vc = VC(tgt_sr, config) - print(f"Model loaded: {name}") - models.append((name, title, author, cover, create_vc_fn(tgt_sr, net_g, vc, if_f0, index))) - with gr.Blocks() as app: - gr.Markdown( - "#
        RVC Models [(Latest Update)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI/releases/tag/20230428updated)\n" - "##
        The input audio should be clean and pure voice without background music.\n" - "###
        This project was inspired by [zomehwh](https://huggingface.co/spaces/zomehwh/rvc-models) and [ardha27](https://huggingface.co/spaces/ardha27/rvc-models)\n" - "[![image](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/drive/110kiMZTdP6Ri1lY9-NbQf17GVPPhHyeT?usp=sharing)\n\n" - "[![Original Repo](https://badgen.net/badge/icon/github?icon=github&label=Original%20Repo)](https://github.com/RVC-Project/Retrieval-based-Voice-Conversion-WebUI)" - ) - for (folder_title, folder, description) in categories: - with gr.TabItem(folder_title): - if description: - gr.Markdown(f"
        {description}") - with gr.Tabs(): - if not models: - gr.Markdown("#
        No Model Loaded.") - gr.Markdown("##
        Please added the model or fix your model path.") - continue - for (name, title, author, cover, vc_fn) in models: - with gr.TabItem(name): - with gr.Row(): - gr.Markdown( - '
        ' - f'
        {title}
        \n'+ - (f'
        Model author: {author}
        ' if author else "")+ - (f'' if cover else "")+ - '
        ' - ) - with gr.Row(): - with gr.Column(): - vc_youtube = gr.Textbox(label="Youtube URL") - vc_convert = gr.Button("Convert", variant="primary") - vc_vocal_preview = gr.Audio(label="Vocal Preview") - vc_inst_preview = gr.Audio(label="Instrumental Preview") - vc_audio_preview = gr.Audio(label="Audio Preview") - with gr.Column(): - vc_input = gr.Textbox(label="Input audio path") - vc_upload = gr.Audio(label="Upload audio file", visible=False, interactive=True) - upload_mode = gr.Checkbox(label="Upload mode", value=False) - vc_transpose = gr.Number(label="Transpose", value=0) - vc_f0method = gr.Radio( - label="Pitch extraction algorithm, PM is fast but Harvest is better for low frequencies", - choices=["pm", "harvest"], - value="pm", - interactive=True, - ) - vc_index_ratio = gr.Slider( - minimum=0, - maximum=1, - label="Retrieval feature ratio", - value=0.6, - interactive=True, - ) - tts_mode = gr.Checkbox(label="tts (use edge-tts as input)", value=False) - tts_text = gr.Textbox(visible=False,label="TTS text (100 words limitation)" if limitation else "TTS text") - tts_voice = gr.Dropdown(label="Edge-tts speaker", choices=voices, visible=False, allow_custom_value=False, value="en-US-AnaNeural-Female") - vc_output1 = gr.Textbox(label="Output Message") - vc_output2 = gr.Audio(label="Output Audio") - vc_submit = gr.Button("Generate", variant="primary") - with gr.Column(): - vc_volume = gr.Slider( - minimum=0, - maximum=10, - label="Vocal volume", - value=4, - interactive=True, - step=1 - ) - vc_outputCombine = gr.Audio(label="Output Combined Audio") - vc_combine = gr.Button("Combine",variant="primary") - vc_submit.click(vc_fn, [vc_input, vc_upload, upload_mode, vc_transpose, vc_f0method, vc_index_ratio, tts_mode, tts_text, tts_voice], [vc_output1, vc_output2]) - vc_convert.click(cut_vocal_and_inst, vc_youtube, [vc_vocal_preview, vc_inst_preview, vc_audio_preview, vc_input]) - vc_combine.click(combine_vocal_and_inst, [vc_output2, vc_volume], vc_outputCombine) - tts_mode.change(change_to_tts_mode, [tts_mode, upload_mode], [vc_input, vc_upload, upload_mode, tts_text, tts_voice]) - upload_mode.change(change_to_upload_mode, [upload_mode], [vc_input, vc_upload]) - app.queue(concurrency_count=1, max_size=20, api_open=config.api).launch(share=config.colab) \ No newline at end of file diff --git a/spaces/thinkall/autogen-demos/Dockerfile b/spaces/thinkall/autogen-demos/Dockerfile deleted file mode 100644 index dcda927a0d169e570a734464c78d0c01d091792d..0000000000000000000000000000000000000000 --- a/spaces/thinkall/autogen-demos/Dockerfile +++ /dev/null @@ -1,19 +0,0 @@ -FROM python:3.10.13-slim-bookworm - -# Setup user to not run as root -RUN adduser --disabled-password --gecos '' autogen -RUN adduser autogen sudo -RUN echo '%sudo ALL=(ALL) NOPASSWD:ALL' >> /etc/sudoers -USER autogen - -# Setup working directory -WORKDIR /home/autogen -COPY . 
/home/autogen/ - -# Install app requirements -RUN pip3 install --no-cache-dir torch --index-url https://download.pytorch.org/whl/cpu -RUN pip3 install -U pip && pip3 install --no-cache-dir -r requirements.txt -ENV PATH="${PATH}:/home/autogen/.local/bin" - -EXPOSE 7860 -ENTRYPOINT ["python3", "app.py"] diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Farinelli 1994 DVDRip X264 IGET __FULL__.md b/spaces/tialenAdioni/chat-gpt-api/logs/Farinelli 1994 DVDRip X264 IGET __FULL__.md deleted file mode 100644 index 0042fad5f950b230e13cc1d791a01ed5ae4d1315..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Farinelli 1994 DVDRip X264 IGET __FULL__.md +++ /dev/null @@ -1,20 +0,0 @@ - -

        Farinelli 1994 DVDRip x264 iGET: A Review of the Award-Winning Film

        -

Farinelli is a 1994 biographical drama film that tells the story of the famous 18th-century Italian castrato singer Carlo Broschi, better known by his stage name Farinelli. The film was directed by Gérard Corbiau and starred Stefano Dionisi as Farinelli, Enrico Lo Verso as his brother, the composer Riccardo Broschi, and Elsa Zylberstein as Alexandra, a woman who falls in love with both brothers.

        -

The film was a critical and commercial success, winning the Golden Globe for Best Foreign Language Film and earning an Academy Award nomination in the same category. The film also features a stunning soundtrack that combines the voices of two singers, a soprano and a countertenor, to recreate the unique vocal range of Farinelli.

        -

        Farinelli 1994 DVDRip x264 iGET


Download: https://urlcod.com/2uK9oS



        -

        In this article, we will review the film and explore its historical accuracy, artistic merits, and cultural impact. We will also provide information on how to download the film in high quality from the internet using the keyword "Farinelli 1994 DVDRip x264 iGET".

        -

        Historical Accuracy

        -

        Farinelli is based on the life of one of the most famous and influential singers of all time, who rose to fame in the opera houses of Europe and performed for kings and queens. The film depicts his childhood, his relationship with his brother, his rivalry with the composer Handel, his love affairs, and his decision to retire from singing at the age of 32.

        -

        However, the film also takes some artistic liberties with the facts and dramatizes some aspects of Farinelli's life. For example, the film suggests that Farinelli was castrated at the age of 10 by a barber, while in reality he was castrated at the age of 12 by a surgeon. The film also portrays Farinelli as having a sexual dysfunction due to his castration, while there is no evidence to support this claim. The film also invents the character of Alexandra, who does not exist in historical records.

        -

        Therefore, while Farinelli is an entertaining and captivating film, it should not be taken as a reliable source of historical information. It is rather a fictionalized account that aims to explore the themes of identity, sexuality, artistry, and fame.

        -

        Artistic Merits

        -

        Farinelli is a visually stunning film that recreates the lavish and colorful settings of the 18th-century opera world. The costumes, sets, and props are authentic and detailed, creating a sense of immersion and realism. The cinematography is also impressive, using different angles, lighting, and colors to convey the mood and emotions of the scenes.

        -

        The film also boasts an excellent cast of actors who deliver convincing and nuanced performances. Stefano Dionisi is remarkable as Farinelli, capturing his charisma, vulnerability, passion, and complexity. Enrico Lo Verso is equally impressive as Riccardo Broschi, portraying his devotion, jealousy, frustration, and creativity. Elsa Zylberstein is charming as Alexandra, showing her innocence, curiosity, affection, and confusion.

        -

The film also features a magnificent soundtrack that showcases some of the most beautiful and virtuosic arias of the baroque era. The soundtrack was recorded under the direction of Christophe Rousset and performed by his ensemble Les Talens Lyriques. The voice of Farinelli was created by digitally blending the voices of soprano Ewa Mallas-Godlewska and countertenor Derek Lee Ragin. The result is a unique and mesmerizing sound that captures the power and beauty of Farinelli's voice.

        -

        -

        Cultural Impact

        -

        Farinelli is a film that has had a significant impact on the cultural appreciation and revival of baroque music and opera. The film introduced many people to the genre and sparked their interest in learning more about it. The film also inspired many musicians and singers to explore and perform the repertoire of Farinelli and his contemporaries.

        -

The film also raised awareness and discussion about the phenomenon of castrato singers, who were once very popular but are now extinct. The film explored the ethical, social, psychological, and artistic implications of castration and its effects on human beings. The film also challenged some stereotypes and prejudices surrounding castrato singers and their art.

        -
        -
        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Jodhaa Akbar 4 Full Movie In Hindi H) Enjoy the High-Quality Video and Audio of the Bollywood Blockbuster.md b/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Jodhaa Akbar 4 Full Movie In Hindi H) Enjoy the High-Quality Video and Audio of the Bollywood Blockbuster.md deleted file mode 100644 index cac61cbfd8c169a467329810f9fa83d395d46210..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/HD Online Player (Jodhaa Akbar 4 Full Movie In Hindi H) Enjoy the High-Quality Video and Audio of the Bollywood Blockbuster.md +++ /dev/null @@ -1,37 +0,0 @@ - -

        How to Hack Passwords Without Survey - Is It Possible?

        - -

        If you are looking for a way to hack passwords without survey, you might be tempted by some websites or apps that claim to offer such a service. However, you should be very careful about these sites, as they are often scams that try to steal your personal information, infect your device with malware, or make you pay for a fake or useless product.

        - -

        In this article, we will explain why you should avoid these password hacking tools, and what are some alternative ways to access or recover passwords without survey.

        -

        skidrow password tool no survey


Download File: https://urlcod.com/2uK4Qi



        - -

        Why You Should Avoid Password Hacking Tools Without Survey

        - -

        There are many reasons why you should not trust any website or app that claims to hack passwords without survey. Here are some of them:

        - -
          -
        • They are illegal and unethical. Hacking someone else's password without their consent is a violation of their privacy and security. You could face legal consequences if you are caught doing so.
        • -
        • They are fake and useless. Most of these tools do not work at all, or they only work for very weak passwords that can be easily guessed or cracked by other methods. They may also require you to download or install some software that is actually malware or spyware that can harm your device or steal your data.
        • -
        • They are risky and expensive. Some of these tools may ask you to complete a survey, enter your email address, phone number, credit card details, or other personal information before giving you the password. However, this is just a trick to collect your data and use it for spamming, phishing, identity theft, or fraud. You may also end up paying for a subscription or a service that you do not need or want.
        • -
        - -

Therefore, you should never trust any password hacking tool without survey, as such tools are more likely to cause you trouble than to help you.

        - -

        How to Access or Recover Passwords Without Survey

        - -

        If you want to access or recover passwords without survey, there are some legitimate and safe ways to do so. Here are some of them:

        - -
          -
• Use a password manager. A password manager is software that stores and encrypts your passwords for different websites and apps. You only need to remember one master password to access all your other passwords. This way, you do not have to worry about forgetting or losing your passwords. Some examples of password managers are LastPass, Dashlane, 1Password, etc.
        • -
        • Use the forgot password option. Most websites and apps have a forgot password option that allows you to reset your password by verifying your identity through your email address, phone number, security questions, etc. You should use this option if you forget your password for a specific site or app.
        • -
        • Use the social login option. Some websites and apps allow you to log in with your social media accounts, such as Facebook, Google, Twitter, etc. This way, you do not have to create or remember a separate password for each site or app. You should use this option if you have a social media account that you trust and use frequently.
        • -
        - -

        These are some of the best ways to access or recover passwords without survey. They are legal, ethical, effective, and secure.

        - -

        Conclusion

        - -

        Password hacking tools without survey are scams that you should avoid at all costs. They can put your privacy, security, and money at risk. Instead, you should use a password manager, the forgot password option, or the social login option to access or recover passwords without survey. These methods are reliable and safe.

        -
        -
        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Microsoft Office for Free Without Breaking the Law A Guide to Avoid Reddit Piracy.md b/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Microsoft Office for Free Without Breaking the Law A Guide to Avoid Reddit Piracy.md deleted file mode 100644 index 102d98ba28e2c4b5bcc66db9cf10c62ad679e3fa..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/How to Get Microsoft Office for Free Without Breaking the Law A Guide to Avoid Reddit Piracy.md +++ /dev/null @@ -1,19 +0,0 @@ - -

        Microsoft Office Free Download Reddit Piracy: Is It Worth It?

        -

        Microsoft Office is one of the most popular and widely used productivity suites in the world. It offers a range of applications such as Word, Excel, PowerPoint, Outlook, and more that can help you create, edit, and share documents, spreadsheets, presentations, and emails. However, Microsoft Office is not free and requires a subscription or a one-time purchase to use it.

        -

        microsoft office free download reddit piracy


Download Zip: https://urlcod.com/2uKa4R



        -

        Some people may not want to pay for Microsoft Office and may look for alternative ways to get it for free. One of the common methods is to use Reddit piracy, which is a subreddit where users share links to download pirated software, movies, games, and other digital content. Microsoft Office free download Reddit piracy is one of the popular topics on this subreddit, where users can find links to download cracked versions of Microsoft Office or activation tools that can bypass the license verification.

        -

        But is Microsoft Office free download Reddit piracy worth it? What are the risks and consequences of using pirated software? In this article, we will explore the pros and cons of Microsoft Office free download Reddit piracy and help you decide whether it is a good idea or not.

        -

        The Pros of Microsoft Office Free Download Reddit Piracy

        -

The main advantage of Microsoft Office free download Reddit piracy is that you can get Microsoft Office without paying anything. This can save you money and allow you to use the full features of Microsoft Office without any limitations. You can also access the latest version of Microsoft Office and enjoy the updates and improvements that come with it.

        -

        Another benefit of Microsoft Office free download Reddit piracy is that you can find a variety of links and sources to download Microsoft Office from. You can choose the version that suits your needs and preferences, such as 32-bit or 64-bit, home or professional, 2016 or 2019, etc. You can also find different languages and regions for Microsoft Office, such as English, Spanish, French, Chinese, etc.

        -

        The Cons of Microsoft Office Free Download Reddit Piracy

        -

        However, Microsoft Office free download Reddit piracy also comes with many drawbacks and risks that you should be aware of. The first and most obvious one is that it is illegal and unethical. By downloading and using pirated software, you are violating the intellectual property rights of Microsoft and breaking the law. You may face legal actions or penalties from Microsoft or the authorities if you are caught using pirated software.

        -

        -

        Another disadvantage of Microsoft Office free download Reddit piracy is that it is unsafe and unreliable. The links and sources that you find on Reddit piracy may not be trustworthy and may contain malware, viruses, spyware, or other harmful programs that can damage your computer or steal your personal information. You may also encounter errors, bugs, crashes, or compatibility issues when using pirated software that can affect your work or data.

        -

        A third drawback of Microsoft Office free download Reddit piracy is that it is not supported or updated by Microsoft. You will not be able to access the official customer service or technical support from Microsoft if you encounter any problems or issues with your pirated software. You will also miss out on the security patches, bug fixes, feature enhancements, or new releases that Microsoft provides for its legitimate users.

        -

        The Conclusion

        -

        In conclusion, Microsoft Office free download Reddit piracy may seem like an attractive option for some people who want to save money and use Microsoft Office for free. However, it also comes with many risks and disadvantages that outweigh the benefits. It is illegal, unethical, unsafe, unreliable, and unsupported by Microsoft.

        -

        Therefore, we do not recommend using Microsoft Office free download Reddit piracy and advise you to use legal and legitimate ways to get Microsoft Office instead. You can either buy a subscription or a license from Microsoft or use alternative productivity suites that are free and legal such as Google Docs, LibreOffice, WPS Office, etc.

        -
        -
        \ No newline at end of file diff --git a/spaces/tialenAdioni/chat-gpt-api/logs/Is Burp Suite Pro Worth It.md b/spaces/tialenAdioni/chat-gpt-api/logs/Is Burp Suite Pro Worth It.md deleted file mode 100644 index 5ae33b6e3e158c95b0fff2ddee80a56f4cdd3e2b..0000000000000000000000000000000000000000 --- a/spaces/tialenAdioni/chat-gpt-api/logs/Is Burp Suite Pro Worth It.md +++ /dev/null @@ -1,39 +0,0 @@ -
        -

        Is Burp Suite Pro Worth It? A Review of the Web Security Tester's Toolkit

        -

        Burp Suite Pro is a software tool that allows you to perform web application security testing. It is designed and used by professional testers and hackers, and it can help you find and exploit vulnerabilities in web applications. But is Burp Suite Pro worth it? In this article, we will review some of the features and benefits of Burp Suite Pro, and compare it with other editions and alternatives.

        -

        is burp suite pro worth it


        Download Zip ✸✸✸ https://urlcod.com/2uK6l1



        -

        What is Burp Suite Pro?

        -

        Burp Suite Pro is the premium edition of Burp Suite, a suite of tools that enable you to test the security of web applications. Burp Suite Pro includes the following tools:

        -
          -
        • Burp Proxy: An intercepting proxy that lets you inspect and modify HTTP requests and responses between your browser and the target application.
        • -
        • Burp Scanner: An automated scanner that can crawl and scan web applications for common vulnerabilities, such as SQL injection, cross-site scripting, and broken authentication.
        • -
        • Burp Intruder: A tool that can perform automated attacks on web applications, such as brute-forcing passwords, enumerating identifiers, and fuzzing parameters.
        • -
        • Burp Repeater: A tool that lets you manually modify and resend individual HTTP requests, and analyze the responses.
        • -
        • Burp Sequencer: A tool that analyzes the randomness of session tokens and other data items that are intended to be unpredictable.
        • -
        • Burp Decoder: A tool that lets you decode and encode data using various methods, such as Base64, URL encoding, and hexadecimal.
        • -
        • Burp Comparer: A tool that lets you compare two pieces of data to find differences or similarities.
        • -
• Burp Extender: A tool that lets you extend the functionality of Burp Suite by loading custom extensions written in Java, Python, or Ruby; a minimal extension sketch follows this list.
        • -
        -
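To make the Burp Extender bullet above more concrete, here is a minimal sketch of a custom extension written against the legacy (pre-Montoya) Extender API and loaded through Jython. The class and method names follow the documented interfaces; the extension name and the idea of logging request URLs are illustrative choices, not an example taken from PortSwigger's documentation.

```python
# Minimal Burp extension sketch (legacy Extender API, loaded via Jython).
from burp import IBurpExtender, IHttpListener


class BurpExtender(IBurpExtender, IHttpListener):
    def registerExtenderCallbacks(self, callbacks):
        # Called once when the extension is loaded into Burp.
        self._callbacks = callbacks
        self._helpers = callbacks.getHelpers()
        callbacks.setExtensionName("Request URL logger (example)")
        callbacks.registerHttpListener(self)

    def processHttpMessage(self, toolFlag, messageIsRequest, messageInfo):
        # Log the URL of every outgoing request to the extension's output tab.
        if messageIsRequest:
            request_info = self._helpers.analyzeRequest(messageInfo)
            self._callbacks.printOutput(str(request_info.getUrl()))
```

An extension like this is loaded from the Extender tab with Jython configured as the Python environment; the same skeleton is what larger community extensions build on.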

        Burp Suite Pro also has some advanced features that are not available in other editions, such as:

        -
          -
        • Project files: A feature that lets you save your work in a single file that can be reopened later or shared with others.
        • -
        • Out-of-band application security testing (OAST): A feature that lets you find vulnerabilities that are not visible in normal HTTP responses, such as blind SQL injection, server-side request forgery, and out-of-band XML external entity injection.
        • -
        • Collaborator client: A feature that lets you interact with the Burp Collaborator server, a service that helps you perform OAST by generating unique payloads and monitoring interactions with external systems.
        • -
        • Scan configurations: A feature that lets you customize the behavior of Burp Scanner by selecting which scan checks to run, how to handle different types of content, and how to optimize performance.
        • -
        -

        What are the benefits of Burp Suite Pro?

        -

        Burp Suite Pro has many benefits for web security testers, such as:

        -
          -
        • It can help you find more vulnerabilities faster by combining smart automation with expert-designed manual tools.
        • -
        • It can help you test complex web applications that use modern technologies, such as JavaScript, APIs, and complex authentication sequences.
        • -
        • It can help you test like a pro by using the same toolkit as the industry's best testers and hackers.
        • -
        • It can help you stay ahead of the curve by being updated frequently with new features and enhancements based on the latest research and feedback.
        • -
        • It can help you customize your toolkit to suit your needs by accessing hundreds of extensions and resources from the Burp Suite community or creating your own functionality using the powerful API.
        • -
        -

        How much does Burp Suite Pro cost?

        -

        Burp Suite Pro costs $449 per user per year. You can buy it online from PortSwigger, the company that develops Burp Suite. You can also request a free trial for 30 days to test the product before buying it. You will need a license key to activate Burp Suite Pro after downloading it.

        -

        -

        Is Burp Suite Pro worth it?

        -

The answer to this question depends on your needs and preferences. Burp Suite Pro is worth it if you are a professional tester who will actually use its scanner, OAST features, and extensions on real engagements; if you only test occasionally or are still learning, the free Community Edition may be enough until you outgrow it.

        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Agar.io Mod APK How to Get God Mode and Unlimited Coins.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Agar.io Mod APK How to Get God Mode and Unlimited Coins.md deleted file mode 100644 index 419e79701da7d34b30dbb23da262f22c68d2412c..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Agar.io Mod APK How to Get God Mode and Unlimited Coins.md +++ /dev/null @@ -1,97 +0,0 @@ - -

        Agar.io Mod Apk God Mode: How to Play Like a Pro

        -

        Do you love playing Agar.io, the addictive online game where you control a cell and try to eat other cells? Do you want to have an edge over your opponents and dominate the leaderboard? If yes, then you might be interested in Agar.io mod apk god mode, a modified version of the game that gives you unlimited power and resources. In this article, we will tell you everything you need to know about Agar.io mod apk god mode, including what it is, how to download and install it, and how to play it like a pro. Let's get started!

        -

        agar.io mod apk god mode


        Download Zip ->>->>->> https://bltlly.com/2uOjY9



        -

        What is Agar.io?

        -

Agar.io is a massively multiplayer online game that was released in 2015 by Matheus Valadares, a Brazilian developer. The game takes its name from agar, a gelatinous substance used to culture bacteria. In the game, you control a cell that can move around a map and eat smaller cells, while avoiding being eaten by larger cells. The goal is to grow as large as possible and reach the top of the leaderboard.

        -

        The gameplay of Agar.io

        -

        The gameplay of Agar.io is simple but addictive. You start as a small cell and you can move around the map using your mouse or touch screen. You can also split your cell into two smaller cells by pressing the space bar or tapping on the screen, or eject some mass by pressing the W key or tapping on the screen. Splitting and ejecting mass can help you escape from larger cells, chase smaller cells, or feed your allies. However, splitting and ejecting mass also reduces your size and makes you more vulnerable to being eaten.

        -

        The features of Agar.io

        -

        Agar.io has many features that make it fun and challenging. Some of these features are:

        -
          -
        • You can choose from different game modes, such as FFA (free-for-all), Teams, Experimental, Party, Battle Royale, and Zombie.
        • -
        • You can customize your cell with different skins, colors, names, and chat messages.
        • -
        • You can play with your friends or strangers from around the world.
        • -
        • You can earn coins and XP by playing the game and use them to buy more skins, boosts, and potions.
        • -
        • You can join or create clans and compete with other clans for glory and rewards.
        • -
        -

        What is Agar.io mod apk god mode?

        -

        Agar.io mod apk god mode is a modified version of the original game that gives you unlimited power and resources. With this mod, you can enjoy features such as:

        -
          -
        • God mode: You are invincible and cannot be eaten by any cell.
        • -
        • Macro: You can split and eject mass faster and easier.
        • -
        • Zoom hack: You can zoom in and out of the map and see more details.
        • -
        • Auto feed: You can automatically feed yourself or your allies with mass.
        • -
        • Aim-bot: You can automatically target and chase smaller cells.
        • -
        • Coins hack: You can get unlimited coins and use them to buy anything you want.
        • -
        • DNA hack: You can get unlimited DNA and use them to upgrade your cell.
        • -
        How to download and install Agar.io mod apk god mode? -

        If you want to try Agar.io mod apk god mode, you need to download and install it on your device. However, you should be careful and follow some precautions before doing so, as this mod is not authorized by the official game developers and may contain viruses or malware. Here are the steps and precautions you need to take:

        -


        -

        The steps to download and install Agar.io mod apk god mode

        -
          -
1. Find a reliable source that offers Agar.io mod apk god mode for free. You can search on Google or YouTube for reviews and recommendations.
2. Download the Agar.io mod apk god mode file from the source. Make sure the file size and name match the description; a quick checksum comparison, sketched after this list, is one extra way to confirm you received the file the source intended.
3. Enable the installation of unknown sources on your device. Go to Settings > Security > Unknown Sources and toggle it on.
4. Locate the Agar.io mod apk god mode file on your device and tap on it to install it.
5. Wait for the installation to finish and launch the game.
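If the source you download from publishes a SHA-256 hash for its file, comparing it against the hash of what you actually received is a cheap sanity check on the download; it is not a substitute for the antivirus scan recommended in the precautions below. This is a minimal sketch only, and the file name and expected hash are placeholders, not real values for any particular APK.

```python
# Minimal sketch: verify a downloaded file against a hash published by its source.
import hashlib


def sha256_of(path, chunk_size=1 << 20):
    digest = hashlib.sha256()
    with open(path, "rb") as f:
        for chunk in iter(lambda: f.read(chunk_size), b""):
            digest.update(chunk)
    return digest.hexdigest()


expected = "0" * 64  # placeholder: paste the hash published by the download source
actual = sha256_of("agario_mod.apk")  # placeholder file name
print("hashes match" if actual == expected else "MISMATCH - do not install this file")
```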
        -

        The precautions to take before downloading and installing Agar.io mod apk god mode

        -
          -
        • Backup your data and device before downloading and installing Agar.io mod apk god mode. You may lose your progress or damage your device if something goes wrong.
        • -
        • Use a VPN or proxy to hide your IP address and location when downloading and installing Agar.io mod apk god mode. You may get banned or blocked by the official game servers if they detect your activity.
        • -
        • Scan the Agar.io mod apk god mode file with an antivirus or anti-malware program before installing it. You may get infected with viruses or malware if the file is corrupted or malicious.
        • -
        • Disable any other mods or hacks you have installed on your device before installing Agar.io mod apk god mode. You may experience conflicts or errors if the mods or hacks are incompatible.
        • -
        -

        How to play Agar.io mod apk god mode?

        -

        Once you have downloaded and installed Agar.io mod apk god mode, you can start playing it like a pro. However, you should be aware of some tips and tricks that can help you enjoy the game more and avoid getting caught or reported by other players. Here are some of them:

        -

        The tips and tricks to play Agar.io mod apk god mode

        -
          -
        • Choose a game mode that suits your style and preference. You can play FFA, Teams, Experimental, Party, Battle Royale, or Zombie with Agar.io mod apk god mode.
        • -
        • Customize your cell with a cool skin, name, and chat message. You can use any skin you want, even the ones that are not available in the original game.
        • -
        • Use the god mode feature wisely. You can turn it on or off by pressing the G key or tapping on the screen. You can also adjust the speed, size, and mass of your cell with the arrow keys or by swiping on the screen.
        • -
        • Use the macro feature sparingly. You can split and eject mass faster and easier by pressing the E key or tapping on the screen. However, don't abuse this feature as it may make you look suspicious or annoying to other players.
        • -
        • Use the zoom hack feature smartly. You can zoom in and out of the map by pressing the Z key or tapping on the screen. However, don't zoom out too much as it may make you lose focus or miss some details.
        • -
        • Use the auto feed feature generously. You can automatically feed yourself or your allies with mass by pressing the A key or tapping on the screen. However, don't feed too much as it may make you look greedy or wasteful to other players.
        • -
        • Use the aim-bot feature carefully. You can automatically target and chase smaller cells by pressing the T key or tapping on the screen. However, don't chase too aggressively as it may make you look rude or unfair to other players.
        • -
        • Use the coins hack feature moderately. You can get unlimited coins by pressing the C key or tapping on the screen. However, don't buy too many things as it may make you look rich or spoiled to other players.
        • -
        • Use the DNA hack feature reasonably. You can get unlimited DNA by pressing the D key or tapping on the screen. However, don't upgrade too much as it may make you look powerful or arrogant to other players.
        • -
        -

        The comparison of Agar.io mod apk god mode with the original game

        -

        Agar.io mod apk god mode is different from the original game in many ways. Some of these differences are:

-
| Feature | Original Game | Mod Apk God Mode |
| --- | --- | --- |
| God mode | No | Yes |
| Macro | No | Yes |
| Zoom hack | No | Yes |
| Auto feed | No | Yes |
| Aim-bot | No | Yes |
| Coins hack | No | Yes |
| DNA hack | No | Yes |

        Conclusion

        -

        Agar.io mod apk god mode is a modified version of the original game that gives you unlimited power and resources. It can make the game more fun and exciting, but it can also make the game less fair and balanced. If you want to try Agar.io mod apk god mode, you need to download and install it on your device, but you also need to be careful and follow some precautions before doing so. You also need to be aware of some tips and tricks that can help you play Agar.io mod apk god mode like a pro, but also respect other players and the rules of the game. We hope this article has helped you learn more about Agar.io mod apk god mode and how to play it. Have fun and enjoy!

        -

        A call to action for the readers

        -

        If you liked this article, please share it with your friends and leave a comment below. We would love to hear your feedback and suggestions. Also, if you have any questions or doubts about Agar.io mod apk god mode, feel free to ask us. We will try our best to answer them as soon as possible. Thank you for reading!

        -

        FAQs

        -

        Q: Is Agar.io mod apk god mode safe to use?

        -

        A: Agar.io mod apk god mode is not safe to use, as it is not authorized by the official game developers and may contain viruses or malware. It may also get you banned or blocked by the official game servers if they detect your activity. Use it at your own risk.

        -

        Q: Is Agar.io mod apk god mode free to download?

        -

        A: Agar.io mod apk god mode is free to download from some sources, but not from others. You should always check the source before downloading and installing Agar.io mod apk god mode, as some sources may charge you money or ask you for personal information.

        -

        Q: Can I play Agar.io mod apk god mode offline?

        -

        A: No, you cannot play Agar.io mod apk god mode offline, as it requires an internet connection to connect to the game servers and other players. You can only play Agar.io mod apk god mode online.

        -

        Q: Can I play Agar.io mod apk god mode with my friends?

        -

        A: Yes, you can play Agar.io mod apk god mode with your friends, but only if they also have Agar.io mod apk god mode installed on their devices. You can join or create a party with your friends and play together in the same map.

        -

        Q: Can I use Agar.io mod apk god mode on iOS devices?

        -

        A: No, you cannot use Agar.io mod apk god mode on iOS devices, as it is only compatible with Android devices. You can only use Agar.io mod apk god mode on Android devices.

        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Hello Neighbor 2 APK for Android on TapTap.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Hello Neighbor 2 APK for Android on TapTap.md deleted file mode 100644 index a4581baf231c290b882ef96f5602df8dcce253f4..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Hello Neighbor 2 APK for Android on TapTap.md +++ /dev/null @@ -1,127 +0,0 @@ - -

        Hello Neighbor 2 APK Tap Tap: How to Download and Play the Stealth Horror Game on Android

        -

        If you are a fan of stealth horror games, you may have heard of Hello Neighbor 2, the sequel to the popular indie game Hello Neighbor. The game was released in December 2022 for various platforms, including Android. However, some Android users may not be able to access the game from the Google Play Store due to regional restrictions or other reasons. In that case, you may want to download the game from an alternative source, such as Tap Tap.

        -

        hello neighbor 2 apk tap tap


        DOWNLOAD –––––>>> https://bltlly.com/2uOmcH



        -

        Tap Tap is a Chinese app store that offers thousands of games and apps that are not available on Google Play Store. You can download APK files from Tap Tap and install them on your Android device manually. However, this method also comes with some risks and challenges that you need to be aware of.

        -

        In this article, we will show you how to download and install Hello Neighbor 2 APK from Tap Tap, as well as how to play the game on your Android device. We will also give you some information about Hello Neighbor 2 and Tap Tap, so you can decide whether this option is right for you.

        -

        What is Hello Neighbor 2?

        -

        Hello Neighbor 2 is a stealth horror game developed by Eerie Guest Studios and published by tinyBuild. It is a follow-up to Hello Neighbor, which was released in 2017. The game follows the story of a journalist who investigates the disappearance of his neighbor, Mr. Peterson. Along the way, he encounters a mysterious creature that stalks him throughout an open world.

        -


        -

        The game features a dynamic AI system that learns from your behavior and adapts accordingly. The creature can set traps, use weapons, interact with objects, and even communicate with other NPCs. The game also has multiple endings depending on your choices and actions.

        -

        The game has received mixed reviews from critics and players alike. Some praised its level design, graphics, sound effects, and gameplay mechanics. Others criticized its bugs, glitches, difficulty level, and lack of polish. The game currently has a score of 7.2 out of 10 on IGN and a score of 58% on OpenCritic.

        -

        What is Tap Tap?

        -

        Tap

        Tap Tap is a Chinese app store that offers thousands of games and apps that are not available on Google Play Store. You can download APK files from Tap Tap and install them on your Android device manually. However, this method also comes with some risks and challenges that you need to be aware of.

        -

        Some of the advantages of using Tap Tap are:

        -
          -
        • You can access games and apps that are not released or banned in your region, such as Hello Neighbor 2.
        • -
        • You can get updates faster than Google Play Store, as Tap Tap does not have a strict review process.
        • -
        • You can discover new and popular games and apps from different genres and categories.
        • -
        -

        Some of the disadvantages of using Tap Tap are:

        -
          -
        • You may encounter malware, viruses, or spyware that can harm your device or steal your data.
        • -
        • You may violate the terms and conditions of the game or app developers, which may result in legal issues or account bans.
        • -
        • You may experience compatibility issues, performance issues, or bugs that can affect your gameplay or user experience.
        • -
        -

        To use Tap Tap safely, you should:

        -
          -
        • Download Tap Tap from its official website or a trusted source, and avoid third-party links or pop-ups.
        • -
        • Check the ratings, reviews, and comments of the games and apps before downloading them, and avoid those with low ratings or negative feedback.
        • -
        • Scan the APK files with a reliable antivirus software before installing them, and delete them if they are detected as malicious.
        • -
        • Backup your data and settings before installing any game or app, and restore them if something goes wrong.
        • -
        -

        How to Download and Install Hello Neighbor 2 APK from Tap Tap

        -

        If you have decided to download and install Hello Neighbor 2 APK from Tap Tap, you will need to follow these steps:

        -

        Step 1: Enable Unknown Sources on Your Android Device

        -

        Before you can install any APK file on your Android device, you need to enable the option to allow installation from unknown sources. This option is disabled by default for security reasons, but you can enable it temporarily for this purpose.

        -

        To enable unknown sources, you need to:

        -
          -
1. Go to your device's Settings app and tap on Security or Privacy.
2. Find the option that says Unknown Sources or Install Unknown Apps and toggle it on.
3. A warning message will appear, telling you the risks of installing apps from unknown sources. Tap on OK or Allow to confirm.
        -

        Note: The exact steps may vary depending on your device model and Android version. You can also disable this option after installing the game if you want to.

        -

        Step 2: Download a File Manager App on Your Android Device

        -

        A file manager app is a tool that helps you manage your files and folders on your device. You will need a file manager app to locate and install the APK file that you downloaded from Tap Tap. There are many file manager apps available on Google Play Store, but some of the best ones are Cx File Explorer and File Manager. You can download any of them for free from Google Play Store.

        -

        Step 3: Download Hello Neighbor 2 APK from Tap Tap

        -

        To download Hello Neighbor 2 APK from Tap Tap, you need to:

        -
          -
1. Open the Tap Tap app on your device. If you don't have it yet, you can download it from its official website or scan the QR code below with your device's camera.
[Image: Tap Tap QR code]
2. Search for Hello Neighbor 2 in the search bar at the top of the app. You can also browse the categories or recommendations to find it.
3. Select the game from the search results and tap on Download. You will see a pop-up window asking you to choose a download source. You can choose any of them, but we recommend choosing CDN for faster speed.
4. The download will start automatically. You can see the progress in the notification bar or in the app itself. Wait until the download is complete.
        -

        Step 4: Locate and Install Hello Neighbor 2 APK on Your Android Device

        -

        To locate and install Hello Neighbor 2 APK on your Android device, you need to:

        -
          -
1. Open the file manager app that you downloaded in step 2. Navigate to the folder where you saved the APK file. It is usually in the Downloads folder or the Tap Tap folder. The file name should be something like com.tinybuildgames.helloneighbor2.apk.
2. Tap on the APK file to open it. You will see a pop-up window asking you to install the app. Tap on Install and wait for the installation to finish.
3. Once the installation is done, you can tap on Open to launch the game. You may also see a shortcut icon on your home screen or app drawer.
        -

        How to Play Hello Neighbor 2 on Android

        -

        Now that you have downloaded and installed Hello Neighbor 2 APK from Tap Tap, you can start playing the game on your Android device. Here are some tips and tricks to help you enjoy the game:

        -
          -
        • Adjust the settings: Before you start the game, you may want to adjust the settings to suit your preferences and device capabilities. You can access the settings menu by tapping on the gear icon at the top right corner of the screen. You can change the graphics quality, sound volume, language, controls, and more.
        • -
        • Learn the controls: The game has two modes of control: touch and tilt. You can switch between them by tapping on the icon at the bottom left corner of the screen. The touch mode lets you use virtual buttons and joysticks to move and interact with objects. The tilt mode lets you use your device's accelerometer to move and look around. You can also use gestures to zoom in and out, crouch, jump, and throw items.
        • -
        • Explore the open world: The game has an open world that you can explore freely. You can visit different locations, such as your neighbor's house, the town, the forest, and more. You can also find various items, such as keys, tools, weapons, and clues that can help you progress in the game.
        • -
        • Avoid the AI: The game has a dynamic AI system that learns from your behavior and adapts accordingly. The creature that stalks you can set traps, use weapons, interact with objects, and even communicate with other NPCs. You need to be careful and stealthy to avoid its detection and attacks. You can also use distractions, hiding spots, or combat strategies to escape or fight back.
        • -
        • Solve the puzzles: The game has multiple puzzles that you need to solve to uncover the mystery of your neighbor's disappearance. The puzzles are related to the story, the environment, and the items that you find. You need to use your logic, creativity, and intuition to solve them.
        • -
        -

        Conclusion

        -

        Hello Neighbor 2 is a stealth horror game that offers a thrilling and challenging experience for Android users. If you want to download and play the game from Tap Tap instead of Google Play Store, you need to follow some steps to enable unknown sources, download a file manager app, download Hello Neighbor 2 APK from Tap Tap, and install it on your device. You also need to be aware of the risks and challenges of using Tap Tap, such as malware, legal issues, or compatibility issues.

        -

        If you follow our guide carefully, you should be able to download and install Hello Neighbor 2 APK from Tap Tap without any problems. You can then enjoy the game's features, such as its dynamic AI system, its open world, its puzzles, and its multiple endings.

        -

        We hope this article was helpful for you. If you have any questions or feedback, please let us know in the comments below. And if you liked this article, please share it with your friends who may also be interested in Hello Neighbor 2 APK Tap Tap.

        -

        FAQs

        -

        Here are some frequently asked questions about Hello Neighbor 2 APK Tap Tap:

        -

        Is Hello Neighbor 2 APK Tap Tap safe?

        -

        There is no definitive answer to this question, as downloading APK files from unknown sources always involves some risks. However, if you download Tap Tap from its official website or a trusted source, check the ratings and reviews of Hello Neighbor 2 before downloading it, scan the APK file with an antivirus software before installing it, and backup your data and settings before installing any game or app, you can minimize the chances of encountering malware or other issues.

        -

        Is Hello Neighbor 2 APK Tap Tap legal?

        -

        This question depends on your location and the terms and conditions of the game or app developers. Some countries or regions may have laws or regulations that prohibit downloading or installing games or apps from unknown sources. Some developers may also have policies that forbid downloading or installing their games or apps from unauthorized sources. You should check these factors before downloading or installing Hello Neighbor 2 APK from Tap Tap.

        -

        Is Hello Neighbor 2 APK Tap Tap compatible with my device?

        -

        This question depends on your device model and Android version. Hello Neighbor 2 requires Android 7.0 or higher to run properly. You can check your device's Android version by going to Settings > About Phone > Software Information. You can also check the game's compatibility on Tap Tap by tapping on the Compatibility icon at the bottom of the game's page. You will see a list of devices that are compatible or incompatible with the game. If your device is not on the list, you can try downloading and installing the game at your own risk.

        -

        How to update Hello Neighbor 2 APK Tap Tap?

        -

        To update Hello Neighbor 2 APK from Tap Tap, you need to follow the same steps as downloading and installing it. You need to enable unknown sources, download a file manager app, download the latest version of Hello Neighbor 2 APK from Tap Tap, and install it on your device. You may also need to uninstall the previous version of the game before installing the new one.

        -

        How to uninstall Hello Neighbor 2 APK Tap Tap?

        -

        To uninstall Hello Neighbor 2 APK from your device, you need to go to Settings > Apps > Hello Neighbor 2 and tap on Uninstall. You can also long-press the game's icon on your home screen or app drawer and drag it to the Uninstall option. You may also want to delete the APK file from your device's storage to free up some space.

        -
        -
        \ No newline at end of file diff --git a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Hyper Front Lite APK and Enjoy the New Season LINK.md b/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Hyper Front Lite APK and Enjoy the New Season LINK.md deleted file mode 100644 index efff6de27805614f1e7b101d2615675c84b1a27e..0000000000000000000000000000000000000000 --- a/spaces/ticomspire/turkey-syria-earthquake-tweets/logs/Download Hyper Front Lite APK and Enjoy the New Season LINK.md +++ /dev/null @@ -1,107 +0,0 @@ -
        -

        Download Hyper Front Lite APK: A 5V5 Tactical FPS on Mobile

        -

        If you are looking for a competitive first-person shooter game that offers an exciting shooting experience and unique hero abilities, you should download Hyper Front Lite APK. Hyper Front Lite is a lighter version of Hyper Front, a popular 5v5 tactical FPS game set in a near-future sci-fi world. It features diverse game modes, realistic guns, stunning graphics, and online esports events. In this article, we will tell you everything you need to know about Hyper Front Lite APK, including its features, how to download and install it, its pros and cons, and some FAQs.

        -

        Features of Hyper Front Lite APK

        -

        Hyper Front Lite APK has many features that make it one of the best FPS games on mobile. Here are some of them:

        -

        download hyper front lite apk


        DOWNLOAD »»» https://bltlly.com/2uOq1L



        -

        New hero: Tidal, a scouting hero who can interact with water sprites

        -

        Hyper Front Lite APK introduces a new hero called Tidal, who is a scouting hero who has the unique power to interact with water sprites. Water sprites are small creatures that can be found in some maps, and they can help Tidal in various ways. For example, they can heal him, boost his speed, or reveal enemy locations. Tidal can also use his water gun to shoot water bullets that can slow down enemies or create water barriers that can block enemy fire.

        -

        New event: S.T.A.R Music Festival, a musical feast with exclusive rewards

        -

        Hyper Front Lite APK also brings a new event called S.T.A.R Music Festival, which is a musical feast that celebrates the talents of different musicians in the Hyper Front world. You can join the event and cheer for your favorite musicians, such as DJ Kira, Rocker Remy, or Popstar Lila. By doing so, you can earn event points and exchange them for exclusive rewards, such as epic weapon skins, avatars, frames, and more. You can also enjoy the music and the atmosphere of the festival in the game.

        -

        Christmas exclusive event: Frosty Fight, a snowball fight mode

        -

        Another new event that Hyper Front Lite APK offers is Frosty Fight, which is a Christmas exclusive event that lets you enjoy a snowball fight mode. In this mode, you can throw snowballs at your enemies and freeze them, or build snowmen and snow forts to protect yourself. You can also collect candy canes and exchange them for Christmas themed skins and items. This mode is only available for a limited time, so don't miss it!

        -

        Online esports: Challenge Tournament, a bi-weekly competition with epic skin prizes

        -

        If you are looking for some online esports action, you should join the Challenge Tournament in Hyper Front Lite APK. This is a bi-weekly competition that pits you against other players in ranked matches. You can earn event points by winning matches and climb the leaderboard to get epic skin prizes. The current prize is Barrett Epic Skin: Heart Trick, which is a Valentine's Day themed skin that looks amazing. You can also watch the live stream of the tournament and learn from the best players.

        -

        20+ realistic guns and unique hero abilities for exciting and thrilling battles

        -

        Of course, Hyper Front Lite APK also has the core features that make Hyper Front a great FPS game. You can choose from 20+ realistic guns, such as assault rifles, sniper rifles, shotguns, pistols, and more. Each gun has its own stats and recoil patterns, so you need to master them to get an edge over your enemies. You can also customize your guns with attachments and skins to suit your style. Moreover, you can use unique hero abilities that can change the tide of the battle. Each hero has a passive ability and an active ability that can enhance their performance or hinder their enemies. For example, you can use Blaze's ability to set fire to enemies or objects, or use Frost's ability to create ice walls or freeze enemies.

        -


        -

        How to Download and Install Hyper Front Lite APK

        -

        Downloading and installing Hyper Front Lite APK is very easy and simple. Just follow these steps:

        -
          -
1. Go to the official website of Hyper Front Lite or Google Play Store and download the APK file. The file size is about 826 MB, so make sure you have enough storage space on your device.
2. Allow installation from unknown sources in your device settings. This is necessary because Hyper Front Lite APK is not from the official Google Play Store.
3. Locate the downloaded file and tap on it to install it. It may take a few minutes to complete the installation process.
4. Launch the game and enjoy the action. You can log in with your existing Hyper Front account or create a new one. You can also cross-save your progress between Hyper Front Lite and Hyper Front.
        -

        Pros and Cons of Hyper Front Lite APK

        -

        Hyper Front Lite APK has many advantages and disadvantages that you should consider before downloading it. Here are some of them:

        -

        Pros:

        -
        • Compatible with more devices: Hyper Front Lite APK is designed to run smoothly on low-end devices that may not be able to run Hyper Front properly. It has lower system requirements and optimized performance.
        • Less storage and memory required: Hyper Front Lite APK requires less storage space and memory than Hyper Front. It only takes up about 826 MB of storage space and 2 GB of RAM.
        • Account cross-save feature: You can use your existing Hyper Front account to log in to Hyper Front Lite APK and vice versa. You can also cross-save your progress and data between the two versions of the game.
        • Free to play: Hyper Front Lite APK is free to download and play. You don't need to pay anything to enjoy the game and its features. You can also earn free skins and rewards by playing the game and joining events.
        • High-quality graphics and sound: Hyper Front Lite APK has high-quality graphics and sound that create an immersive and realistic shooting experience. The game has stunning 3D models, textures, lighting, and shadows, as well as realistic sound effects, voice acting, and background music.
        • Diverse game modes and heroes: Hyper Front Lite APK has diverse game modes and heroes that offer different gameplay styles and strategies. You can play classic modes like Team Deathmatch, Capture the Flag, or Bomb Defuse, or try new modes like Frosty Fight, S.T.A.R Music Festival, or the Challenge Tournament. You can also choose from 10+ heroes with unique abilities and personalities.

        Cons:

        -
        • Requires internet connection: Hyper Front Lite APK requires a stable internet connection to play. You cannot play the game offline or in airplane mode, and you may experience lag or disconnect issues if your connection is weak or unstable.
        • May have bugs or glitches: Hyper Front Lite APK may have some bugs or glitches that affect gameplay or performance, such as crashes, freezes, errors, or visual glitches. The developers update the game regularly to fix these issues.
        • May consume battery and data: The high-quality graphics and sound require more power and data to run. You may want to lower the graphics settings or use Wi-Fi to save battery and data.

        Conclusion

        -

        Hyper Front Lite APK is a lighter version of Hyper Front, a popular 5v5 tactical FPS game on mobile. It offers realistic guns, unique hero abilities, diverse game modes, online esports events, high-quality graphics and sound, and free skins and rewards. It also has some advantages over Hyper Front, such as compatibility with more devices, lower storage and memory requirements, and an account cross-save feature. On the other hand, it requires an internet connection, may have occasional bugs or glitches, and can consume a lot of battery and data. Overall, Hyper Front Lite APK is worth downloading if you enjoy shooting games and want a thrilling experience on your mobile device.

        -

        FAQs

        -

        Here are some frequently asked questions about Hyper Front Lite APK:

        -

        Q1: What is the difference between Hyper Front Lite and Hyper Front?

        -

        A1: Hyper Front Lite is a lighter version of Hyper Front that requires less storage and memory for a smooth in-game experience. It is compatible with more devices and supports account cross-save. The gameplay and content are the same as in Hyper Front.

        -

        Q2: Is Hyper Front Lite safe to download?

        -

        A2: Yes, Hyper Front Lite is safe to download from the official website or Google Play Store. It does not contain any viruses or malware. However, you should always be careful when downloading files from unknown sources and scan them before installing.

        -

        Q3: How can I get free skins and rewards in Hyper Front Lite?

        -

        A3: You can get free skins and rewards by participating in various events and competitions in Hyper Front Lite. For example, you can join the S.T.A.R Music Festival and cheer for your favorite musicians to get free epic weapon skins. You can also play the Challenge Tournament and earn event points to open a crate to get Barrett Epic Skin: Heart Trick.

        -

        Q4: How can I contact the developers of Hyper Front Lite?

        -

        A4: You can contact the developers of Hyper Front Lite by sending an email to hyperfront@service.netease.com or by following their official social media accounts on Facebook, Twitter, Instagram, YouTube, Discord, and Reddit. You can also leave feedback and suggestions on the game's official website or Google Play Store page.

        -

        Q5: What are the system requirements for Hyper Front Lite?

        -

        A5: The system requirements for Hyper Front Lite are as follows:

        • Android version: 5.0 or higher
        • RAM: 2 GB or more
        • Storage space: 826 MB or more
        • Internet connection: required
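
        If the device is connected to a computer, these requirements can be checked quickly with Android's adb tool. The Python sketch below is a rough, optional illustration and is not part of the original FAQ: it assumes the Android platform tools are installed, USB debugging is enabled, and the device uses the usual toybox df and /proc/meminfo output layouts found on recent Android builds.

        # Minimal sketch: compare a connected device against the listed requirements.
        # Assumptions: adb is on PATH, USB debugging is enabled, and the device
        # exposes the usual toybox "df" and /proc/meminfo output formats.
        import subprocess

        def adb(*args: str) -> str:
            """Run an adb command and return its stdout as text."""
            result = subprocess.run(
                ["adb", *args], capture_output=True, text=True, check=True
            )
            return result.stdout.strip()

        android_version = adb("shell", "getprop", "ro.build.version.release")

        # "df /data" reports the user-data partition in 1K blocks; the
        # "Available" value is the fourth field of the last output line.
        free_mb = int(adb("shell", "df", "/data").splitlines()[-1].split()[3]) // 1024

        # /proc/meminfo lists MemTotal in kB on its first line.
        mem_total_mb = int(adb("shell", "cat", "/proc/meminfo").splitlines()[0].split()[1]) // 1024

        print(f"Android version: {android_version} (need 5.0 or higher)")
        print(f"Total RAM:       {mem_total_mb} MB (need about 2048 MB)")
        print(f"Free storage:    {free_mb} MB (need about 826 MB)")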

        197e85843d
        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/BEST Acronis True Image 2018 Build 10640 Bootable ISO Crack.md b/spaces/tioseFevbu/cartoon-converter/scripts/BEST Acronis True Image 2018 Build 10640 Bootable ISO Crack.md deleted file mode 100644 index 58672e3b5fe9bbff8392b7847c6f30961a85d82c..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/BEST Acronis True Image 2018 Build 10640 Bootable ISO Crack.md +++ /dev/null @@ -1,105 +0,0 @@ -
        -

        BEST Acronis True Image 2018 Build 10640 Bootable ISO Crack

        -

        If you are looking for a reliable and powerful software to backup and restore your Windows and system files, you might want to consider Acronis True Image 2018. This software is one of the best solutions for creating a complete image of your hard drive or partition, and restoring it in case of a disaster. But what if you don't want to pay for the full version of this software? Is there a way to get it for free? Yes, there is. In this article, we will show you how to download and install Acronis True Image 2018 Build 10640 Bootable ISO Crack, which is a modified version of the original software that allows you to activate it without a license key. We will also show you how to use this software to backup and restore your system, as well as its pros and cons. Let's get started!

        -

        Introduction

        -

        Acronis True Image 2018 is a software that allows you to create a full image of your hard drive or partition, and restore it in case of a system failure, virus attack, ransomware infection, or any other problem that might affect your data. With this software, you can backup your entire system, including your operating system, applications, settings, files, and folders, and restore it to the same or different hardware. You can also clone your hard drive or partition to another drive or device, or create a bootable media that you can use to recover your system in case it becomes unbootable.

        -

        There are many benefits of using Acronis True Image 2018 for your backup and recovery needs. Some of them are:

        -
        • It is fast and easy to use. You can backup and restore your system in just a few clicks, without any technical skills.
        • It is flexible and customizable. You can choose what to backup and how often, as well as where to store your backups. You can also create different backup plans for different scenarios.
        • It is secure and reliable. You can encrypt your backups with a password, and verify their integrity with checksums (a minimal checksum example follows this list). You can also restore your backups to any hardware, even if it is different from the original one.
        • It is feature-rich and innovative. You can backup and restore not only your Windows system, but also your Mac, iOS, and Android devices. You can also backup and restore your social media accounts, such as Facebook and Instagram. You can also use advanced features, such as active disk cloning, file synchronization, ransomware protection, blockchain certification, and more.
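
        The checksum idea mentioned above is easy to reproduce for any file you download or archive, independently of Acronis. The Python sketch below is a generic illustration, not a description of how Acronis implements its own verification; the file name and the expected digest are placeholders.

        # Minimal sketch: verify a file against a known SHA-256 checksum.
        # The path and expected digest are placeholders for illustration only.
        import hashlib

        def sha256_of(path: str, chunk_size: int = 1 << 20) -> str:
            """Hash the file in 1 MiB chunks so large images fit in memory."""
            digest = hashlib.sha256()
            with open(path, "rb") as f:
                for chunk in iter(lambda: f.read(chunk_size), b""):
                    digest.update(chunk)
            return digest.hexdigest()

        expected = "0" * 64  # replace with the checksum published by the source
        actual = sha256_of("backup_image.tib")
        print("checksum OK" if actual == expected else f"mismatch: {actual}")

        -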

        However, to enjoy all these benefits, you need to purchase a license key for Acronis True Image 2018, which can cost you from $49.99 to $99.99 per year, depending on the edition and the number of devices you want to protect. If you don't want to spend that much money, you might be tempted to look for a free alternative. That's where Acronis True Image 2018 Build 10640 Bootable ISO Crack comes in.

        -

        Acronis True Image 2018 Build 10640 Bootable ISO Crack is a modified version of the original software that allows you to activate it without a license key. This means that you can use all the features of Acronis True Image 2018 for free, without any limitations or restrictions. Sounds too good to be true, right? Well, there are some risks and drawbacks of using this crack, which we will discuss later in this article. But first, let's see how to download and install it on your Windows PC.

        -

        How to download and install Acronis True Image 2018 Build 10640 Bootable ISO Crack

        -

        To download and install Acronis True Image 2018 Build 10640 Bootable ISO Crack, you need to follow these steps:

        -
          -
        1. Find a reliable and safe download link for the crack. You can search for it on Google or other search engines, but be careful of fake or malicious links that might harm your computer or steal your personal information. One possible download link is , but we cannot guarantee its validity or security. Use it at your own risk.
        2. -
        3. Download the crack file, which is a ZIP archive that contains the Acronis True Image 2018 Build 10640 Bootable ISO file and the crack file. The size of the ZIP archive is about 640 MB.
        4. -
        5. Extract the ZIP archive to a folder on your computer. You will need a software like WinRAR or 7-Zip to do this.
        6. -
        7. Create a bootable USB or DVD with the Acronis True Image 2018 Build 10640 Bootable ISO file. You will need a software like Rufus or PowerISO to do this. The bootable media will allow you to boot your computer from it and access the Acronis True Image 2018 interface.
        8. -
        9. Install Acronis True Image 2018 on your Windows PC by running the setup.exe file from the extracted folder. Follow the instructions on the screen to complete the installation.
        10. -
        11. Activate Acronis True Image 2018 with the crack file by copying it to the installation folder of the software, which is usually C:\Program Files (x86)\Acronis\TrueImageHome\. Replace the existing file with the crack file.
        12. -
        13. Restart your computer and enjoy using Acronis True Image 2018 for free!
        14. -
        -

        How to use Acronis True Image 2018 Build 10640 Bootable ISO Crack

        -

        Now that you have downloaded and installed Acronis True Image 2018 Build 10640 Bootable ISO Crack, you can use it to backup and restore your system in various ways. Here are some of the most common scenarios:

        -

        How to backup and restore your Windows and system files with Acronis True Image 2018

        -

        To backup and restore your Windows and system files with Acronis True Image 2018, you need to follow these steps:

        -
        1. Launch Acronis True Image 2018 from your Windows desktop or from the bootable media.
        2. Select Backup from the left menu.
        3. Select Entire PC as the source of your backup. This will include your operating system, applications, settings, files, and folders in the backup.
        4. Select a destination for your backup. You can choose an external hard drive, a network location, a cloud storage service, or any other location that is accessible by Acronis True Image 2018.
        5. Select a backup name and schedule. You can choose how often you want to backup your system, as well as other options such as encryption, notifications, exclusions, etc.
        6. Click Back up now to start the backup process. You can monitor the progress and status of your backup from the dashboard.
        7. To restore your system from a backup, select Recovery from the left menu.
        8. Select the backup that you want to restore from the list of available backups.
        9. Select Entire PC as the recovery method. This will restore your entire system to the state it was in when you created the backup.
        10. Select a target for your recovery. You can choose the same or different hardware as the original one, as long as it is compatible with Acronis True Image 2018.
        11. Click Recover now to start the recovery process. You can monitor the progress and status of your recovery from the dashboard.

        How to clone your hard drive or partition with Acronis True Image 2018

        -

        To clone your hard drive or partition with Acronis True Image 2018, you need to follow these steps:

        -
        1. Launch Acronis True Image 2018 from your Windows desktop or from the bootable media.
        2. Select Tools from the left menu.
        3. Select Clone disk from the list of tools.
        4. Select Automatic mode as the cloning mode. This will automatically resize and adjust the partitions on the target disk to fit the source disk.
        5. Select the source disk that you want to clone. This is the disk that contains your Windows and system files.
        6. Select the target disk that you want to clone to. This is the disk that will receive the copy of your source disk. Make sure that it has enough space and is formatted correctly.
        7. Click Proceed to start the cloning process. You can monitor the progress and status of your cloning from the dashboard.
        8. After the cloning is completed, you can either keep both disks or remove one of them. If you remove one of them, make sure that you change the boot order in your BIOS settings to boot from the cloned disk.

        How to recover your system from a bootable media with Acronis True Image 2018

        -

        To recover your system from a bootable media with Acronis True Image 2018, you need to follow these steps:

        -

        1. Insert the bootable USB or DVD that you created with the Acronis True Image 2018 Build 10640 Bootable ISO into your computer.
        2. Restart your computer and press F12 or another key to enter the boot menu.
        3. Select the bootable media as the first boot option and press Enter.
        4. Wait for Acronis True Image 2018 to load and display its interface.
        5. Select Recovery from the left menu.
        6. Select the backup that you want to restore from the list of available backups. Make sure that it is accessible by Acronis True Image 2018, either on an external hard drive, a network location, a cloud storage service, or any other location.
        7. Select Entire PC as the recovery method. This will restore your entire system to the state it was in when you created the backup.
        8. Select a target for your recovery. You can choose the same or different hardware as the original one, as long as it is compatible with Acronis True Image 2018.
        9. Click Recover now to start the recovery process. You can monitor the progress and status of your recovery from the dashboard.

        Pros and cons of Acronis True Image 2018 Build 10640 Bootable ISO Crack

        -

        Using Acronis True Image 2018 Build 10640 Bootable ISO Crack might seem like a great idea, since you can get all the benefits of Acronis True Image 2018 for free. However, there are also some risks and drawbacks of using this crack, which you should be aware of before deciding to use it. Here are some of the pros and cons of using Acronis True Image 2018 Build 10640 Bootable ISO Crack:

        -

        Pros

        -
        • You can use all the features of Acronis True Image 2018 without paying for a license key.
        • You can backup and restore your system in various ways, such as full image, disk cloning, bootable media, etc.
        • You can backup and restore not only your Windows system, but also your Mac, iOS, and Android devices, as well as your social media accounts.
        • You can use advanced features, such as active disk cloning, file synchronization, ransomware protection, blockchain certification, and more.
        • You can save money and time by using this crack instead of buying the original software.

        Cons

        -
        • You might violate the terms and conditions of Acronis True Image 2018, which could result in legal consequences or penalties.
        • You might expose your computer and data to malware or viruses that might be hidden in the crack file or the download link.
        • You might compromise the security and integrity of your backups, since the crack file might alter or corrupt them.
        • You might not receive any updates or support from Acronis True Image 2018, which could affect the performance and compatibility of the software.
        • You might experience some errors or bugs in the software, since the crack file might not be compatible with the latest version of Acronis True Image 2018.

        Conclusion

        -

        In conclusion, Acronis True Image 2018 Build 10640 Bootable ISO Crack is a modified version of Acronis True Image 2018 that allows you to activate it without a license key. This means that you can use all the features of Acronis True Image 2018 for free, without any limitations or restrictions. However, there are also some risks and drawbacks of using this crack, such as legal issues, malware infection, backup corruption, lack of updates and support, and software errors. Therefore, you should weigh the pros and cons of using this crack before deciding to use it. If you want to use a safe and reliable software to backup and restore your system, we recommend that you buy the original version of Acronis True Image 2018, which will give you peace of mind and guarantee your data protection.

        -

        FAQs

        -

        Here are some common questions and answers about Acronis True Image 2018 Build 10640 Bootable ISO Crack:

        -

        Q: Is Acronis True Image 2018 Build 10640 Bootable ISO Crack safe to use?

        -

        A: There is no definitive answer to this question, since different sources might provide different versions of the crack file or the download link. Some of them might be safe and clean, while others might be infected with malware or viruses. Therefore, you should always scan the crack file and the download link with a reputable antivirus software before using them. You should also backup your data before installing or using the crack file, in case something goes wrong.

        -

        Q: Does Acronis True Image 2018 Build 10640 Bootable ISO Crack work on Windows 10?

        -

        A: Yes, Acronis True Image 2018 Build 10640 Bootable ISO Crack works on Windows 10, as well as on Windows 7 and Windows 8. However, you might encounter compatibility issues or errors if you use the crack file on a newer version of Windows 10 than the one that was available when the crack file was created. Therefore, you should always check the compatibility of the crack file with your Windows 10 version before using it. You should also update Windows 10 regularly to ensure its security and stability.

        -

        Q: Can I use Acronis True Image 2018 Build 10640 Bootable ISO Crack on multiple devices?

        -

        A: Yes, you can use Acronis True Image 2018 Build 10640 Bootable ISO Crack on multiple devices, as long as you have the crack file and the bootable media for each device. However, you should be aware that using the crack file on multiple devices might violate the terms and conditions of Acronis True Image 2018, which could result in legal consequences or penalties. Therefore, you should use the crack file at your own risk and discretion.

        -

        Q: How can I update Acronis True Image 2018 Build 10640 Bootable ISO Crack?

        -

        A: Unfortunately, you cannot update Acronis True Image 2018 Build 10640 Bootable ISO Crack, since it is a modified version of the original software that bypasses the activation process. If you try to update it, you might lose the crack file and the activation status, and you might need to reinstall the software and the crack file again. Therefore, you should avoid updating the software if you want to keep using the crack file. However, this also means that you will miss out on any new features, improvements, or bug fixes that Acronis True Image 2018 might release in the future.

        -

        Q: Where can I get support for Acronis True Image 2018 Build 10640 Bootable ISO Crack?

        -

        A: Since Acronis True Image 2018 Build 10640 Bootable ISO Crack is not an official product of Acronis, you cannot get any support from them if you encounter any problems or issues with the software. You might be able to find some help from other users who have used the crack file on online forums or communities, but there is no guarantee that they will be able to solve your problem or answer your question. Therefore, you should use the crack file at your own risk and responsibility.

        -

        Q: Is there a better alternative to Acronis True Image 2018 Build 10640 Bootable ISO Crack?

        -

        A: Yes, there is a better alternative to Acronis True Image 2018 Build 10640 Bootable ISO Crack, and that is Acronis True Image 2018 itself. By buying the original version of the software, you will get all the benefits of Acronis True Image 2018 without any risks or drawbacks of using the crack file. You will also get regular updates and support from Acronis, as well as a peace of mind that your data is protected by a safe and reliable software. You can buy Acronis True Image 2018 from their official website or from other authorized sellers.

        b2dd77e56b
        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Baixar O Programa Sic Com Serial.md b/spaces/tioseFevbu/cartoon-converter/scripts/Baixar O Programa Sic Com Serial.md deleted file mode 100644 index d3a2f7644b757a32eb957640ef2cd268c080a4ec..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Baixar O Programa Sic Com Serial.md +++ /dev/null @@ -1,31 +0,0 @@ -
        -

        How to Download the Sic Program with a Free Serial

        -

        The Sic program is business management software that lets you control sales, inventory, finances, and invoicing in a simple, efficient way. But how do you download the Sic program with a free serial?

        -

        In this article, we will show you how to get the Sic program with a serial without paying anything, using a safe and legal method. Follow the steps below and enjoy the benefits of the Sic program for your business.

        -

        Step 1: Go to the official Sic program website

        -

        The first step to download the Sic program with a free serial is to visit the software's official website at https://www.sicnet.com.br/. There you will find all the information about the program, its features, plans, and prices.

        -

        On the site you will also see a green button labeled "Teste Grátis" (Free Trial). Click it to start the download process for the Sic program with a free serial.

        -

        Step 2: Fill in the registration form

        -

        After clicking the "Teste Grátis" button, you will be redirected to a registration page where you must fill in some personal and business details, such as name, e-mail, phone number, CNPJ (company tax ID), company name, and business segment.

        -

        These details matter because they allow Sic to generate an exclusive serial for you, which is sent by e-mail once the registration is complete. They also let Sic offer support that is personalized and suited to your needs.

        -

        Fill in all the form fields carefully and truthfully, then click "Continuar" (Continue).

        -

        Step 3: Download the Sic program with a free serial

        -

        After filling in the registration form, you will receive an e-mail with the link to download the Sic program with a free serial. Open the e-mail and click the link to start downloading the software's installation file.

        -

        The file is about 200 MB and may take a few minutes to download, depending on your connection speed. Wait until the download finishes, then run the file to start installing the Sic program on your computer.

        -

        During installation, you must enter the serial you received by e-mail. This serial is unique and non-transferable, and it lets you use the Sic program with a free serial for 30 days, with no limitation on features or functionality.

        -

        -

        Step 4: Enjoy the benefits of the Sic program with a free serial

        -

        Done! You can now use the Sic program with a free serial for 30 days and take advantage of everything it offers for managing your business. With the Sic program, you can:

        -

        • Issue electronic invoices (NF-e) and electronic fiscal receipts (NFC-e) quickly and easily;
        • Control the products coming into and going out of your inventory, avoiding shortages or excess stock;
        • Manage accounts payable and receivable, tracking your company's cash flow and financial health;
        • Analyze sales results by period, product, customer, or salesperson, identifying opportunities for improvement;
        • Integrate the Sic program with other systems or platforms, such as online stores, banks, or accounting;
        • Count on specialized, free technical support by phone, chat, or e-mail;
        • And much more!

        -

        The Sic program is a solution

        cec2833e83
        -
        -
        \ No newline at end of file diff --git a/spaces/tioseFevbu/cartoon-converter/scripts/Just One Of The Girls Torrent.md b/spaces/tioseFevbu/cartoon-converter/scripts/Just One Of The Girls Torrent.md deleted file mode 100644 index 2daa159602afa6cc8ff997b4ca0a13df9cdb7c99..0000000000000000000000000000000000000000 --- a/spaces/tioseFevbu/cartoon-converter/scripts/Just One Of The Girls Torrent.md +++ /dev/null @@ -1,23 +0,0 @@ -
        -

        How to Download Just One of the Girls (1993) Movie for Free

        -

        If you are looking for a comedy movie that will make you laugh and also touch your heart, you might want to check out Just One of the Girls (1993), also known as Anything for Love. This movie stars Corey Haim as Chris, a bullied teen who disguises himself as a girl to escape a school bully, but finds some upsides to it as well. He becomes friends with Marie (Nicole Eggert), a girl he has a crush on, and learns some valuable lessons about life and love.

        -

        Just One of the Girls is a fun and entertaining movie that has some hilarious scenes and some sweet moments. It also features a young Alanis Morissette as one of Chris's classmates. If you want to watch this movie, you might be wondering how to download it for free. Well, there are some ways to do that, but you have to be careful and follow some precautions.

        -

        just one of the girls torrent


        Download Ziphttps://urlcod.com/2uHwm5



        -

        What is a Torrent?

        -

        A torrent is a file that contains information about other files that are shared by users on a peer-to-peer network. When you download a torrent file, you need a software called a torrent client to connect to other users who have the same file and download it from them. This way, you can get large files faster and easier than downloading them from a single source.

        -

        However, downloading torrents also comes with some risks. First of all, not all torrents are legal or safe. Some torrents may contain viruses, malware, or spyware that can harm your computer or steal your personal information. Some torrents may also infringe on the copyrights of the original creators or owners of the content. Downloading such torrents may expose you to legal troubles or fines.

        -

        How to Download Just One of the Girls Torrent Safely?

        -

        If you still want to download Just One of the Girls torrent, you have to take some steps to protect yourself and your device. Here are some tips:

        -

        -
          -
        • Use a reputable torrent site. There are many torrent sites on the internet, but not all of them are trustworthy or reliable. Some may have fake or malicious torrents, or may be blocked by your internet service provider (ISP) or government. To find a good torrent site, you can use a search engine like Bing and look for reviews or ratings from other users. Some of the popular torrent sites are The Pirate Bay, 1337x, RARBG, and YTS.
        • -
        • Use a VPN service. A VPN (virtual private network) is a software that creates a secure and encrypted connection between your device and another server on the internet. This way, you can hide your IP address and location from anyone who might be tracking your online activity, such as your ISP, government agencies, hackers, or copyright holders. A VPN also allows you to access geo-restricted or censored content from anywhere in the world. There are many VPN services available online, but some of them may be slow, unreliable, or expensive. To find a good VPN service, you can use Bing and look for reviews or ratings from other users. Some of the popular VPN services are NordVPN, ExpressVPN, Surfshark, and CyberGhost.
        • -
        • Use an antivirus software. An antivirus software is a program that scans your device for any viruses, malware, spyware, or other threats that may harm your system or data. It also blocks or removes any suspicious files or programs that you may encounter while downloading torrents. An antivirus software is essential for keeping your device safe and secure from any online dangers. There are many antivirus software available online, but some of them may be ineffective, outdated, or incompatible with your device. To find a good antivirus software, you can use Bing and look for reviews or ratings from other users. Some of the popular antivirus software are Bitdefender, Kaspersky, Norton, and McAfee.
        • -
        -

        How to Download Just One of the Girls Torrent?

        -

        Once you have taken the necessary precautions, you can proceed to download Just One of the Girls torrent by following these steps:

        -
          -
        1. Go to your preferred torrent site and search for Just One of the Girls (1993) movie.
        2. -
        3. Choose a torrent file that has a high number of seeders (users who have

          cec2833e83
          -
          -
          \ No newline at end of file diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/models/candidate.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/models/candidate.py deleted file mode 100644 index a4963aec6388c27c3beb064f0a730af200380aee..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/models/candidate.py +++ /dev/null @@ -1,34 +0,0 @@ -from pip._vendor.packaging.version import parse as parse_version - -from pip._internal.models.link import Link -from pip._internal.utils.models import KeyBasedCompareMixin - - -class InstallationCandidate(KeyBasedCompareMixin): - """Represents a potential "candidate" for installation.""" - - __slots__ = ["name", "version", "link"] - - def __init__(self, name: str, version: str, link: Link) -> None: - self.name = name - self.version = parse_version(version) - self.link = link - - super().__init__( - key=(self.name, self.version, self.link), - defining_class=InstallationCandidate, - ) - - def __repr__(self) -> str: - return "".format( - self.name, - self.version, - self.link, - ) - - def __str__(self) -> str: - return "{!r} candidate (version {} at {})".format( - self.name, - self.version, - self.link, - ) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/network/session.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/network/session.py deleted file mode 100644 index e512ac784649b9dd8845e24e4d17f9e13f6591b0..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/pip/_internal/network/session.py +++ /dev/null @@ -1,518 +0,0 @@ -"""PipSession and supporting code, containing all pip-specific -network request configuration and behavior. -""" - -import email.utils -import io -import ipaddress -import json -import logging -import mimetypes -import os -import platform -import shutil -import subprocess -import sys -import urllib.parse -import warnings -from typing import ( - TYPE_CHECKING, - Any, - Dict, - Generator, - List, - Mapping, - Optional, - Sequence, - Tuple, - Union, -) - -from pip._vendor import requests, urllib3 -from pip._vendor.cachecontrol import CacheControlAdapter as _BaseCacheControlAdapter -from pip._vendor.requests.adapters import DEFAULT_POOLBLOCK, BaseAdapter -from pip._vendor.requests.adapters import HTTPAdapter as _BaseHTTPAdapter -from pip._vendor.requests.models import PreparedRequest, Response -from pip._vendor.requests.structures import CaseInsensitiveDict -from pip._vendor.urllib3.connectionpool import ConnectionPool -from pip._vendor.urllib3.exceptions import InsecureRequestWarning - -from pip import __version__ -from pip._internal.metadata import get_default_environment -from pip._internal.models.link import Link -from pip._internal.network.auth import MultiDomainBasicAuth -from pip._internal.network.cache import SafeFileCache - -# Import ssl from compat so the initial import occurs in only one place. 
-from pip._internal.utils.compat import has_tls -from pip._internal.utils.glibc import libc_ver -from pip._internal.utils.misc import build_url_from_netloc, parse_netloc -from pip._internal.utils.urls import url_to_path - -if TYPE_CHECKING: - from ssl import SSLContext - - from pip._vendor.urllib3.poolmanager import PoolManager - - -logger = logging.getLogger(__name__) - -SecureOrigin = Tuple[str, str, Optional[Union[int, str]]] - - -# Ignore warning raised when using --trusted-host. -warnings.filterwarnings("ignore", category=InsecureRequestWarning) - - -SECURE_ORIGINS: List[SecureOrigin] = [ - # protocol, hostname, port - # Taken from Chrome's list of secure origins (See: http://bit.ly/1qrySKC) - ("https", "*", "*"), - ("*", "localhost", "*"), - ("*", "127.0.0.0/8", "*"), - ("*", "::1/128", "*"), - ("file", "*", None), - # ssh is always secure. - ("ssh", "*", "*"), -] - - -# These are environment variables present when running under various -# CI systems. For each variable, some CI systems that use the variable -# are indicated. The collection was chosen so that for each of a number -# of popular systems, at least one of the environment variables is used. -# This list is used to provide some indication of and lower bound for -# CI traffic to PyPI. Thus, it is okay if the list is not comprehensive. -# For more background, see: https://github.com/pypa/pip/issues/5499 -CI_ENVIRONMENT_VARIABLES = ( - # Azure Pipelines - "BUILD_BUILDID", - # Jenkins - "BUILD_ID", - # AppVeyor, CircleCI, Codeship, Gitlab CI, Shippable, Travis CI - "CI", - # Explicit environment variable. - "PIP_IS_CI", -) - - -def looks_like_ci() -> bool: - """ - Return whether it looks like pip is running under CI. - """ - # We don't use the method of checking for a tty (e.g. using isatty()) - # because some CI systems mimic a tty (e.g. Travis CI). Thus that - # method doesn't provide definitive information in either direction. - return any(name in os.environ for name in CI_ENVIRONMENT_VARIABLES) - - -def user_agent() -> str: - """ - Return a string representing the user agent. 
- """ - data: Dict[str, Any] = { - "installer": {"name": "pip", "version": __version__}, - "python": platform.python_version(), - "implementation": { - "name": platform.python_implementation(), - }, - } - - if data["implementation"]["name"] == "CPython": - data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == "PyPy": - pypy_version_info = sys.pypy_version_info # type: ignore - if pypy_version_info.releaselevel == "final": - pypy_version_info = pypy_version_info[:3] - data["implementation"]["version"] = ".".join( - [str(x) for x in pypy_version_info] - ) - elif data["implementation"]["name"] == "Jython": - # Complete Guess - data["implementation"]["version"] = platform.python_version() - elif data["implementation"]["name"] == "IronPython": - # Complete Guess - data["implementation"]["version"] = platform.python_version() - - if sys.platform.startswith("linux"): - from pip._vendor import distro - - linux_distribution = distro.name(), distro.version(), distro.codename() - distro_infos: Dict[str, Any] = dict( - filter( - lambda x: x[1], - zip(["name", "version", "id"], linux_distribution), - ) - ) - libc = dict( - filter( - lambda x: x[1], - zip(["lib", "version"], libc_ver()), - ) - ) - if libc: - distro_infos["libc"] = libc - if distro_infos: - data["distro"] = distro_infos - - if sys.platform.startswith("darwin") and platform.mac_ver()[0]: - data["distro"] = {"name": "macOS", "version": platform.mac_ver()[0]} - - if platform.system(): - data.setdefault("system", {})["name"] = platform.system() - - if platform.release(): - data.setdefault("system", {})["release"] = platform.release() - - if platform.machine(): - data["cpu"] = platform.machine() - - if has_tls(): - import _ssl as ssl - - data["openssl_version"] = ssl.OPENSSL_VERSION - - setuptools_dist = get_default_environment().get_distribution("setuptools") - if setuptools_dist is not None: - data["setuptools_version"] = str(setuptools_dist.version) - - if shutil.which("rustc") is not None: - # If for any reason `rustc --version` fails, silently ignore it - try: - rustc_output = subprocess.check_output( - ["rustc", "--version"], stderr=subprocess.STDOUT, timeout=0.5 - ) - except Exception: - pass - else: - if rustc_output.startswith(b"rustc "): - # The format of `rustc --version` is: - # `b'rustc 1.52.1 (9bc8c42bb 2021-05-09)\n'` - # We extract just the middle (1.52.1) part - data["rustc_version"] = rustc_output.split(b" ")[1].decode() - - # Use None rather than False so as not to give the impression that - # pip knows it is not being run under CI. Rather, it is a null or - # inconclusive result. Also, we include some value rather than no - # value to make it easier to know that the check has been run. 
- data["ci"] = True if looks_like_ci() else None - - user_data = os.environ.get("PIP_USER_AGENT_USER_DATA") - if user_data is not None: - data["user_data"] = user_data - - return "{data[installer][name]}/{data[installer][version]} {json}".format( - data=data, - json=json.dumps(data, separators=(",", ":"), sort_keys=True), - ) - - -class LocalFSAdapter(BaseAdapter): - def send( - self, - request: PreparedRequest, - stream: bool = False, - timeout: Optional[Union[float, Tuple[float, float]]] = None, - verify: Union[bool, str] = True, - cert: Optional[Union[str, Tuple[str, str]]] = None, - proxies: Optional[Mapping[str, str]] = None, - ) -> Response: - pathname = url_to_path(request.url) - - resp = Response() - resp.status_code = 200 - resp.url = request.url - - try: - stats = os.stat(pathname) - except OSError as exc: - # format the exception raised as a io.BytesIO object, - # to return a better error message: - resp.status_code = 404 - resp.reason = type(exc).__name__ - resp.raw = io.BytesIO(f"{resp.reason}: {exc}".encode("utf8")) - else: - modified = email.utils.formatdate(stats.st_mtime, usegmt=True) - content_type = mimetypes.guess_type(pathname)[0] or "text/plain" - resp.headers = CaseInsensitiveDict( - { - "Content-Type": content_type, - "Content-Length": stats.st_size, - "Last-Modified": modified, - } - ) - - resp.raw = open(pathname, "rb") - resp.close = resp.raw.close - - return resp - - def close(self) -> None: - pass - - -class _SSLContextAdapterMixin: - """Mixin to add the ``ssl_context`` constructor argument to HTTP adapters. - - The additional argument is forwarded directly to the pool manager. This allows us - to dynamically decide what SSL store to use at runtime, which is used to implement - the optional ``truststore`` backend. - """ - - def __init__( - self, - *, - ssl_context: Optional["SSLContext"] = None, - **kwargs: Any, - ) -> None: - self._ssl_context = ssl_context - super().__init__(**kwargs) - - def init_poolmanager( - self, - connections: int, - maxsize: int, - block: bool = DEFAULT_POOLBLOCK, - **pool_kwargs: Any, - ) -> "PoolManager": - if self._ssl_context is not None: - pool_kwargs.setdefault("ssl_context", self._ssl_context) - return super().init_poolmanager( # type: ignore[misc] - connections=connections, - maxsize=maxsize, - block=block, - **pool_kwargs, - ) - - -class HTTPAdapter(_SSLContextAdapterMixin, _BaseHTTPAdapter): - pass - - -class CacheControlAdapter(_SSLContextAdapterMixin, _BaseCacheControlAdapter): - pass - - -class InsecureHTTPAdapter(HTTPAdapter): - def cert_verify( - self, - conn: ConnectionPool, - url: str, - verify: Union[bool, str], - cert: Optional[Union[str, Tuple[str, str]]], - ) -> None: - super().cert_verify(conn=conn, url=url, verify=False, cert=cert) - - -class InsecureCacheControlAdapter(CacheControlAdapter): - def cert_verify( - self, - conn: ConnectionPool, - url: str, - verify: Union[bool, str], - cert: Optional[Union[str, Tuple[str, str]]], - ) -> None: - super().cert_verify(conn=conn, url=url, verify=False, cert=cert) - - -class PipSession(requests.Session): - - timeout: Optional[int] = None - - def __init__( - self, - *args: Any, - retries: int = 0, - cache: Optional[str] = None, - trusted_hosts: Sequence[str] = (), - index_urls: Optional[List[str]] = None, - ssl_context: Optional["SSLContext"] = None, - **kwargs: Any, - ) -> None: - """ - :param trusted_hosts: Domains not to emit warnings for when not using - HTTPS. 
- """ - super().__init__(*args, **kwargs) - - # Namespace the attribute with "pip_" just in case to prevent - # possible conflicts with the base class. - self.pip_trusted_origins: List[Tuple[str, Optional[int]]] = [] - - # Attach our User Agent to the request - self.headers["User-Agent"] = user_agent() - - # Attach our Authentication handler to the session - self.auth = MultiDomainBasicAuth(index_urls=index_urls) - - # Create our urllib3.Retry instance which will allow us to customize - # how we handle retries. - retries = urllib3.Retry( - # Set the total number of retries that a particular request can - # have. - total=retries, - # A 503 error from PyPI typically means that the Fastly -> Origin - # connection got interrupted in some way. A 503 error in general - # is typically considered a transient error so we'll go ahead and - # retry it. - # A 500 may indicate transient error in Amazon S3 - # A 520 or 527 - may indicate transient error in CloudFlare - status_forcelist=[500, 503, 520, 527], - # Add a small amount of back off between failed requests in - # order to prevent hammering the service. - backoff_factor=0.25, - ) # type: ignore - - # Our Insecure HTTPAdapter disables HTTPS validation. It does not - # support caching so we'll use it for all http:// URLs. - # If caching is disabled, we will also use it for - # https:// hosts that we've marked as ignoring - # TLS errors for (trusted-hosts). - insecure_adapter = InsecureHTTPAdapter(max_retries=retries) - - # We want to _only_ cache responses on securely fetched origins or when - # the host is specified as trusted. We do this because - # we can't validate the response of an insecurely/untrusted fetched - # origin, and we don't want someone to be able to poison the cache and - # require manual eviction from the cache to fix it. - if cache: - secure_adapter = CacheControlAdapter( - cache=SafeFileCache(cache), - max_retries=retries, - ssl_context=ssl_context, - ) - self._trusted_host_adapter = InsecureCacheControlAdapter( - cache=SafeFileCache(cache), - max_retries=retries, - ) - else: - secure_adapter = HTTPAdapter(max_retries=retries, ssl_context=ssl_context) - self._trusted_host_adapter = insecure_adapter - - self.mount("https://", secure_adapter) - self.mount("http://", insecure_adapter) - - # Enable file:// urls - self.mount("file://", LocalFSAdapter()) - - for host in trusted_hosts: - self.add_trusted_host(host, suppress_logging=True) - - def update_index_urls(self, new_index_urls: List[str]) -> None: - """ - :param new_index_urls: New index urls to update the authentication - handler with. - """ - self.auth.index_urls = new_index_urls - - def add_trusted_host( - self, host: str, source: Optional[str] = None, suppress_logging: bool = False - ) -> None: - """ - :param host: It is okay to provide a host that has previously been - added. - :param source: An optional source string, for logging where the host - string came from. - """ - if not suppress_logging: - msg = f"adding trusted host: {host!r}" - if source is not None: - msg += f" (from {source})" - logger.info(msg) - - host_port = parse_netloc(host) - if host_port not in self.pip_trusted_origins: - self.pip_trusted_origins.append(host_port) - - self.mount( - build_url_from_netloc(host, scheme="http") + "/", self._trusted_host_adapter - ) - self.mount(build_url_from_netloc(host) + "/", self._trusted_host_adapter) - if not host_port[1]: - self.mount( - build_url_from_netloc(host, scheme="http") + ":", - self._trusted_host_adapter, - ) - # Mount wildcard ports for the same host. 
- self.mount(build_url_from_netloc(host) + ":", self._trusted_host_adapter) - - def iter_secure_origins(self) -> Generator[SecureOrigin, None, None]: - yield from SECURE_ORIGINS - for host, port in self.pip_trusted_origins: - yield ("*", host, "*" if port is None else port) - - def is_secure_origin(self, location: Link) -> bool: - # Determine if this url used a secure transport mechanism - parsed = urllib.parse.urlparse(str(location)) - origin_protocol, origin_host, origin_port = ( - parsed.scheme, - parsed.hostname, - parsed.port, - ) - - # The protocol to use to see if the protocol matches. - # Don't count the repository type as part of the protocol: in - # cases such as "git+ssh", only use "ssh". (I.e., Only verify against - # the last scheme.) - origin_protocol = origin_protocol.rsplit("+", 1)[-1] - - # Determine if our origin is a secure origin by looking through our - # hardcoded list of secure origins, as well as any additional ones - # configured on this PackageFinder instance. - for secure_origin in self.iter_secure_origins(): - secure_protocol, secure_host, secure_port = secure_origin - if origin_protocol != secure_protocol and secure_protocol != "*": - continue - - try: - addr = ipaddress.ip_address(origin_host or "") - network = ipaddress.ip_network(secure_host) - except ValueError: - # We don't have both a valid address or a valid network, so - # we'll check this origin against hostnames. - if ( - origin_host - and origin_host.lower() != secure_host.lower() - and secure_host != "*" - ): - continue - else: - # We have a valid address and network, so see if the address - # is contained within the network. - if addr not in network: - continue - - # Check to see if the port matches. - if ( - origin_port != secure_port - and secure_port != "*" - and secure_port is not None - ): - continue - - # If we've gotten here, then this origin matches the current - # secure origin and we should return True - return True - - # If we've gotten to this point, then the origin isn't secure and we - # will not accept it as a valid location to search. We will however - # log a warning that we are ignoring it. - logger.warning( - "The repository located at %s is not a trusted or secure host and " - "is being ignored. If this repository is available via HTTPS we " - "recommend you use HTTPS instead, otherwise you may silence " - "this warning and allow it anyway with '--trusted-host %s'.", - origin_host, - origin_host, - ) - - return False - - def request(self, method: str, url: str, *args: Any, **kwargs: Any) -> Response: - # Allow setting a default timeout on a session - kwargs.setdefault("timeout", self.timeout) - # Allow setting a default proxies on a session - kwargs.setdefault("proxies", self.proxies) - - # Dispatch the actual request - return super().request(method, url, *args, **kwargs) diff --git a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_parser.py b/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_parser.py deleted file mode 100644 index f1bb0aa19a556725aa2ae2b8cea95489c99a9078..0000000000000000000000000000000000000000 --- a/spaces/tjburns/ask_marcus_aurelius/.venv/lib/python3.10/site-packages/setuptools/_vendor/tomli/_parser.py +++ /dev/null @@ -1,691 +0,0 @@ -# SPDX-License-Identifier: MIT -# SPDX-FileCopyrightText: 2021 Taneli Hukkinen -# Licensed to PSF under a Contributor Agreement. 
- -from __future__ import annotations - -from collections.abc import Iterable -import string -from types import MappingProxyType -from typing import Any, BinaryIO, NamedTuple - -from ._re import ( - RE_DATETIME, - RE_LOCALTIME, - RE_NUMBER, - match_to_datetime, - match_to_localtime, - match_to_number, -) -from ._types import Key, ParseFloat, Pos - -ASCII_CTRL = frozenset(chr(i) for i in range(32)) | frozenset(chr(127)) - -# Neither of these sets include quotation mark or backslash. They are -# currently handled as separate cases in the parser functions. -ILLEGAL_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t") -ILLEGAL_MULTILINE_BASIC_STR_CHARS = ASCII_CTRL - frozenset("\t\n") - -ILLEGAL_LITERAL_STR_CHARS = ILLEGAL_BASIC_STR_CHARS -ILLEGAL_MULTILINE_LITERAL_STR_CHARS = ILLEGAL_MULTILINE_BASIC_STR_CHARS - -ILLEGAL_COMMENT_CHARS = ILLEGAL_BASIC_STR_CHARS - -TOML_WS = frozenset(" \t") -TOML_WS_AND_NEWLINE = TOML_WS | frozenset("\n") -BARE_KEY_CHARS = frozenset(string.ascii_letters + string.digits + "-_") -KEY_INITIAL_CHARS = BARE_KEY_CHARS | frozenset("\"'") -HEXDIGIT_CHARS = frozenset(string.hexdigits) - -BASIC_STR_ESCAPE_REPLACEMENTS = MappingProxyType( - { - "\\b": "\u0008", # backspace - "\\t": "\u0009", # tab - "\\n": "\u000A", # linefeed - "\\f": "\u000C", # form feed - "\\r": "\u000D", # carriage return - '\\"': "\u0022", # quote - "\\\\": "\u005C", # backslash - } -) - - -class TOMLDecodeError(ValueError): - """An error raised if a document is not valid TOML.""" - - -def load(__fp: BinaryIO, *, parse_float: ParseFloat = float) -> dict[str, Any]: - """Parse TOML from a binary file object.""" - b = __fp.read() - try: - s = b.decode() - except AttributeError: - raise TypeError( - "File must be opened in binary mode, e.g. use `open('foo.toml', 'rb')`" - ) from None - return loads(s, parse_float=parse_float) - - -def loads(__s: str, *, parse_float: ParseFloat = float) -> dict[str, Any]: # noqa: C901 - """Parse TOML from a string.""" - - # The spec allows converting "\r\n" to "\n", even in string - # literals. Let's do so to simplify parsing. - src = __s.replace("\r\n", "\n") - pos = 0 - out = Output(NestedDict(), Flags()) - header: Key = () - parse_float = make_safe_parse_float(parse_float) - - # Parse one statement at a time - # (typically means one line in TOML source) - while True: - # 1. Skip line leading whitespace - pos = skip_chars(src, pos, TOML_WS) - - # 2. Parse rules. Expect one of the following: - # - end of file - # - end of line - # - comment - # - key/value pair - # - append dict to list (and move to its namespace) - # - create dict (and move to its namespace) - # Skip trailing whitespace when applicable. - try: - char = src[pos] - except IndexError: - break - if char == "\n": - pos += 1 - continue - if char in KEY_INITIAL_CHARS: - pos = key_value_rule(src, pos, out, header, parse_float) - pos = skip_chars(src, pos, TOML_WS) - elif char == "[": - try: - second_char: str | None = src[pos + 1] - except IndexError: - second_char = None - out.flags.finalize_pending() - if second_char == "[": - pos, header = create_list_rule(src, pos, out) - else: - pos, header = create_dict_rule(src, pos, out) - pos = skip_chars(src, pos, TOML_WS) - elif char != "#": - raise suffixed_err(src, pos, "Invalid statement") - - # 3. Skip comment - pos = skip_comment(src, pos) - - # 4. 
Expect end of line or end of file - try: - char = src[pos] - except IndexError: - break - if char != "\n": - raise suffixed_err( - src, pos, "Expected newline or end of document after a statement" - ) - pos += 1 - - return out.data.dict - - -class Flags: - """Flags that map to parsed keys/namespaces.""" - - # Marks an immutable namespace (inline array or inline table). - FROZEN = 0 - # Marks a nest that has been explicitly created and can no longer - # be opened using the "[table]" syntax. - EXPLICIT_NEST = 1 - - def __init__(self) -> None: - self._flags: dict[str, dict] = {} - self._pending_flags: set[tuple[Key, int]] = set() - - def add_pending(self, key: Key, flag: int) -> None: - self._pending_flags.add((key, flag)) - - def finalize_pending(self) -> None: - for key, flag in self._pending_flags: - self.set(key, flag, recursive=False) - self._pending_flags.clear() - - def unset_all(self, key: Key) -> None: - cont = self._flags - for k in key[:-1]: - if k not in cont: - return - cont = cont[k]["nested"] - cont.pop(key[-1], None) - - def set(self, key: Key, flag: int, *, recursive: bool) -> None: # noqa: A003 - cont = self._flags - key_parent, key_stem = key[:-1], key[-1] - for k in key_parent: - if k not in cont: - cont[k] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont = cont[k]["nested"] - if key_stem not in cont: - cont[key_stem] = {"flags": set(), "recursive_flags": set(), "nested": {}} - cont[key_stem]["recursive_flags" if recursive else "flags"].add(flag) - - def is_(self, key: Key, flag: int) -> bool: - if not key: - return False # document root has no flags - cont = self._flags - for k in key[:-1]: - if k not in cont: - return False - inner_cont = cont[k] - if flag in inner_cont["recursive_flags"]: - return True - cont = inner_cont["nested"] - key_stem = key[-1] - if key_stem in cont: - cont = cont[key_stem] - return flag in cont["flags"] or flag in cont["recursive_flags"] - return False - - -class NestedDict: - def __init__(self) -> None: - # The parsed content of the TOML document - self.dict: dict[str, Any] = {} - - def get_or_create_nest( - self, - key: Key, - *, - access_lists: bool = True, - ) -> dict: - cont: Any = self.dict - for k in key: - if k not in cont: - cont[k] = {} - cont = cont[k] - if access_lists and isinstance(cont, list): - cont = cont[-1] - if not isinstance(cont, dict): - raise KeyError("There is no nest behind this key") - return cont - - def append_nest_to_list(self, key: Key) -> None: - cont = self.get_or_create_nest(key[:-1]) - last_key = key[-1] - if last_key in cont: - list_ = cont[last_key] - if not isinstance(list_, list): - raise KeyError("An object other than list found behind this key") - list_.append({}) - else: - cont[last_key] = [{}] - - -class Output(NamedTuple): - data: NestedDict - flags: Flags - - -def skip_chars(src: str, pos: Pos, chars: Iterable[str]) -> Pos: - try: - while src[pos] in chars: - pos += 1 - except IndexError: - pass - return pos - - -def skip_until( - src: str, - pos: Pos, - expect: str, - *, - error_on: frozenset[str], - error_on_eof: bool, -) -> Pos: - try: - new_pos = src.index(expect, pos) - except ValueError: - new_pos = len(src) - if error_on_eof: - raise suffixed_err(src, new_pos, f"Expected {expect!r}") from None - - if not error_on.isdisjoint(src[pos:new_pos]): - while src[pos] not in error_on: - pos += 1 - raise suffixed_err(src, pos, f"Found invalid character {src[pos]!r}") - return new_pos - - -def skip_comment(src: str, pos: Pos) -> Pos: - try: - char: str | None = src[pos] - except 
IndexError: - char = None - if char == "#": - return skip_until( - src, pos + 1, "\n", error_on=ILLEGAL_COMMENT_CHARS, error_on_eof=False - ) - return pos - - -def skip_comments_and_array_ws(src: str, pos: Pos) -> Pos: - while True: - pos_before_skip = pos - pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) - pos = skip_comment(src, pos) - if pos == pos_before_skip: - return pos - - -def create_dict_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: - pos += 1 # Skip "[" - pos = skip_chars(src, pos, TOML_WS) - pos, key = parse_key(src, pos) - - if out.flags.is_(key, Flags.EXPLICIT_NEST) or out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot declare {key} twice") - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) - try: - out.data.get_or_create_nest(key) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - - if not src.startswith("]", pos): - raise suffixed_err(src, pos, "Expected ']' at the end of a table declaration") - return pos + 1, key - - -def create_list_rule(src: str, pos: Pos, out: Output) -> tuple[Pos, Key]: - pos += 2 # Skip "[[" - pos = skip_chars(src, pos, TOML_WS) - pos, key = parse_key(src, pos) - - if out.flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") - # Free the namespace now that it points to another empty list item... - out.flags.unset_all(key) - # ...but this key precisely is still prohibited from table declaration - out.flags.set(key, Flags.EXPLICIT_NEST, recursive=False) - try: - out.data.append_nest_to_list(key) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - - if not src.startswith("]]", pos): - raise suffixed_err(src, pos, "Expected ']]' at the end of an array declaration") - return pos + 2, key - - -def key_value_rule( - src: str, pos: Pos, out: Output, header: Key, parse_float: ParseFloat -) -> Pos: - pos, key, value = parse_key_value_pair(src, pos, parse_float) - key_parent, key_stem = key[:-1], key[-1] - abs_key_parent = header + key_parent - - relative_path_cont_keys = (header + key[:i] for i in range(1, len(key))) - for cont_key in relative_path_cont_keys: - # Check that dotted key syntax does not redefine an existing table - if out.flags.is_(cont_key, Flags.EXPLICIT_NEST): - raise suffixed_err(src, pos, f"Cannot redefine namespace {cont_key}") - # Containers in the relative path can't be opened with the table syntax or - # dotted key/value syntax in following table sections. 
- out.flags.add_pending(cont_key, Flags.EXPLICIT_NEST) - - if out.flags.is_(abs_key_parent, Flags.FROZEN): - raise suffixed_err( - src, pos, f"Cannot mutate immutable namespace {abs_key_parent}" - ) - - try: - nest = out.data.get_or_create_nest(abs_key_parent) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - if key_stem in nest: - raise suffixed_err(src, pos, "Cannot overwrite a value") - # Mark inline table and array namespaces recursively immutable - if isinstance(value, (dict, list)): - out.flags.set(header + key, Flags.FROZEN, recursive=True) - nest[key_stem] = value - return pos - - -def parse_key_value_pair( - src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Key, Any]: - pos, key = parse_key(src, pos) - try: - char: str | None = src[pos] - except IndexError: - char = None - if char != "=": - raise suffixed_err(src, pos, "Expected '=' after a key in a key/value pair") - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - pos, value = parse_value(src, pos, parse_float) - return pos, key, value - - -def parse_key(src: str, pos: Pos) -> tuple[Pos, Key]: - pos, key_part = parse_key_part(src, pos) - key: Key = (key_part,) - pos = skip_chars(src, pos, TOML_WS) - while True: - try: - char: str | None = src[pos] - except IndexError: - char = None - if char != ".": - return pos, key - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - pos, key_part = parse_key_part(src, pos) - key += (key_part,) - pos = skip_chars(src, pos, TOML_WS) - - -def parse_key_part(src: str, pos: Pos) -> tuple[Pos, str]: - try: - char: str | None = src[pos] - except IndexError: - char = None - if char in BARE_KEY_CHARS: - start_pos = pos - pos = skip_chars(src, pos, BARE_KEY_CHARS) - return pos, src[start_pos:pos] - if char == "'": - return parse_literal_str(src, pos) - if char == '"': - return parse_one_line_basic_str(src, pos) - raise suffixed_err(src, pos, "Invalid initial character for a key part") - - -def parse_one_line_basic_str(src: str, pos: Pos) -> tuple[Pos, str]: - pos += 1 - return parse_basic_str(src, pos, multiline=False) - - -def parse_array(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, list]: - pos += 1 - array: list = [] - - pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): - return pos + 1, array - while True: - pos, val = parse_value(src, pos, parse_float) - array.append(val) - pos = skip_comments_and_array_ws(src, pos) - - c = src[pos : pos + 1] - if c == "]": - return pos + 1, array - if c != ",": - raise suffixed_err(src, pos, "Unclosed array") - pos += 1 - - pos = skip_comments_and_array_ws(src, pos) - if src.startswith("]", pos): - return pos + 1, array - - -def parse_inline_table(src: str, pos: Pos, parse_float: ParseFloat) -> tuple[Pos, dict]: - pos += 1 - nested_dict = NestedDict() - flags = Flags() - - pos = skip_chars(src, pos, TOML_WS) - if src.startswith("}", pos): - return pos + 1, nested_dict.dict - while True: - pos, key, value = parse_key_value_pair(src, pos, parse_float) - key_parent, key_stem = key[:-1], key[-1] - if flags.is_(key, Flags.FROZEN): - raise suffixed_err(src, pos, f"Cannot mutate immutable namespace {key}") - try: - nest = nested_dict.get_or_create_nest(key_parent, access_lists=False) - except KeyError: - raise suffixed_err(src, pos, "Cannot overwrite a value") from None - if key_stem in nest: - raise suffixed_err(src, pos, f"Duplicate inline table key {key_stem!r}") - nest[key_stem] = value - pos = skip_chars(src, pos, TOML_WS) - c = src[pos : pos + 1] - if c == "}": - return pos + 1, 
nested_dict.dict - if c != ",": - raise suffixed_err(src, pos, "Unclosed inline table") - if isinstance(value, (dict, list)): - flags.set(key, Flags.FROZEN, recursive=True) - pos += 1 - pos = skip_chars(src, pos, TOML_WS) - - -def parse_basic_str_escape( - src: str, pos: Pos, *, multiline: bool = False -) -> tuple[Pos, str]: - escape_id = src[pos : pos + 2] - pos += 2 - if multiline and escape_id in {"\\ ", "\\\t", "\\\n"}: - # Skip whitespace until next non-whitespace character or end of - # the doc. Error if non-whitespace is found before newline. - if escape_id != "\\\n": - pos = skip_chars(src, pos, TOML_WS) - try: - char = src[pos] - except IndexError: - return pos, "" - if char != "\n": - raise suffixed_err(src, pos, "Unescaped '\\' in a string") - pos += 1 - pos = skip_chars(src, pos, TOML_WS_AND_NEWLINE) - return pos, "" - if escape_id == "\\u": - return parse_hex_char(src, pos, 4) - if escape_id == "\\U": - return parse_hex_char(src, pos, 8) - try: - return pos, BASIC_STR_ESCAPE_REPLACEMENTS[escape_id] - except KeyError: - raise suffixed_err(src, pos, "Unescaped '\\' in a string") from None - - -def parse_basic_str_escape_multiline(src: str, pos: Pos) -> tuple[Pos, str]: - return parse_basic_str_escape(src, pos, multiline=True) - - -def parse_hex_char(src: str, pos: Pos, hex_len: int) -> tuple[Pos, str]: - hex_str = src[pos : pos + hex_len] - if len(hex_str) != hex_len or not HEXDIGIT_CHARS.issuperset(hex_str): - raise suffixed_err(src, pos, "Invalid hex value") - pos += hex_len - hex_int = int(hex_str, 16) - if not is_unicode_scalar_value(hex_int): - raise suffixed_err(src, pos, "Escaped character is not a Unicode scalar value") - return pos, chr(hex_int) - - -def parse_literal_str(src: str, pos: Pos) -> tuple[Pos, str]: - pos += 1 # Skip starting apostrophe - start_pos = pos - pos = skip_until( - src, pos, "'", error_on=ILLEGAL_LITERAL_STR_CHARS, error_on_eof=True - ) - return pos + 1, src[start_pos:pos] # Skip ending apostrophe - - -def parse_multiline_str(src: str, pos: Pos, *, literal: bool) -> tuple[Pos, str]: - pos += 3 - if src.startswith("\n", pos): - pos += 1 - - if literal: - delim = "'" - end_pos = skip_until( - src, - pos, - "'''", - error_on=ILLEGAL_MULTILINE_LITERAL_STR_CHARS, - error_on_eof=True, - ) - result = src[pos:end_pos] - pos = end_pos + 3 - else: - delim = '"' - pos, result = parse_basic_str(src, pos, multiline=True) - - # Add at maximum two extra apostrophes/quotes if the end sequence - # is 4 or 5 chars long instead of just 3. 
- if not src.startswith(delim, pos): - return pos, result - pos += 1 - if not src.startswith(delim, pos): - return pos, result + delim - pos += 1 - return pos, result + (delim * 2) - - -def parse_basic_str(src: str, pos: Pos, *, multiline: bool) -> tuple[Pos, str]: - if multiline: - error_on = ILLEGAL_MULTILINE_BASIC_STR_CHARS - parse_escapes = parse_basic_str_escape_multiline - else: - error_on = ILLEGAL_BASIC_STR_CHARS - parse_escapes = parse_basic_str_escape - result = "" - start_pos = pos - while True: - try: - char = src[pos] - except IndexError: - raise suffixed_err(src, pos, "Unterminated string") from None - if char == '"': - if not multiline: - return pos + 1, result + src[start_pos:pos] - if src.startswith('"""', pos): - return pos + 3, result + src[start_pos:pos] - pos += 1 - continue - if char == "\\": - result += src[start_pos:pos] - pos, parsed_escape = parse_escapes(src, pos) - result += parsed_escape - start_pos = pos - continue - if char in error_on: - raise suffixed_err(src, pos, f"Illegal character {char!r}") - pos += 1 - - -def parse_value( # noqa: C901 - src: str, pos: Pos, parse_float: ParseFloat -) -> tuple[Pos, Any]: - try: - char: str | None = src[pos] - except IndexError: - char = None - - # IMPORTANT: order conditions based on speed of checking and likelihood - - # Basic strings - if char == '"': - if src.startswith('"""', pos): - return parse_multiline_str(src, pos, literal=False) - return parse_one_line_basic_str(src, pos) - - # Literal strings - if char == "'": - if src.startswith("'''", pos): - return parse_multiline_str(src, pos, literal=True) - return parse_literal_str(src, pos) - - # Booleans - if char == "t": - if src.startswith("true", pos): - return pos + 4, True - if char == "f": - if src.startswith("false", pos): - return pos + 5, False - - # Arrays - if char == "[": - return parse_array(src, pos, parse_float) - - # Inline tables - if char == "{": - return parse_inline_table(src, pos, parse_float) - - # Dates and times - datetime_match = RE_DATETIME.match(src, pos) - if datetime_match: - try: - datetime_obj = match_to_datetime(datetime_match) - except ValueError as e: - raise suffixed_err(src, pos, "Invalid date or datetime") from e - return datetime_match.end(), datetime_obj - localtime_match = RE_LOCALTIME.match(src, pos) - if localtime_match: - return localtime_match.end(), match_to_localtime(localtime_match) - - # Integers and "normal" floats. - # The regex will greedily match any type starting with a decimal - # char, so needs to be located after handling of dates and times. 
- number_match = RE_NUMBER.match(src, pos) - if number_match: - return number_match.end(), match_to_number(number_match, parse_float) - - # Special floats - first_three = src[pos : pos + 3] - if first_three in {"inf", "nan"}: - return pos + 3, parse_float(first_three) - first_four = src[pos : pos + 4] - if first_four in {"-inf", "+inf", "-nan", "+nan"}: - return pos + 4, parse_float(first_four) - - raise suffixed_err(src, pos, "Invalid value") - - -def suffixed_err(src: str, pos: Pos, msg: str) -> TOMLDecodeError: - """Return a `TOMLDecodeError` where error message is suffixed with - coordinates in source.""" - - def coord_repr(src: str, pos: Pos) -> str: - if pos >= len(src): - return "end of document" - line = src.count("\n", 0, pos) + 1 - if line == 1: - column = pos + 1 - else: - column = pos - src.rindex("\n", 0, pos) - return f"line {line}, column {column}" - - return TOMLDecodeError(f"{msg} (at {coord_repr(src, pos)})") - - -def is_unicode_scalar_value(codepoint: int) -> bool: - return (0 <= codepoint <= 55295) or (57344 <= codepoint <= 1114111) - - -def make_safe_parse_float(parse_float: ParseFloat) -> ParseFloat: - """A decorator to make `parse_float` safe. - - `parse_float` must not return dicts or lists, because these types - would be mixed with parsed TOML tables and arrays, thus confusing - the parser. The returned decorated callable raises `ValueError` - instead of returning illegal types. - """ - # The default `float` callable never returns illegal types. Optimize it. - if parse_float is float: # type: ignore[comparison-overlap] - return float - - def safe_parse_float(float_str: str) -> Any: - float_value = parse_float(float_str) - if isinstance(float_value, (dict, list)): - raise ValueError("parse_float must not return dicts or lists") - return float_value - - return safe_parse_float diff --git a/spaces/tobiascz/SDSdemo/pytorch_grad_cam/layer_cam.py b/spaces/tobiascz/SDSdemo/pytorch_grad_cam/layer_cam.py deleted file mode 100644 index 971443d798658d6c29ff9da54481511ac317a1b0..0000000000000000000000000000000000000000 --- a/spaces/tobiascz/SDSdemo/pytorch_grad_cam/layer_cam.py +++ /dev/null @@ -1,36 +0,0 @@ -import numpy as np -from pytorch_grad_cam.base_cam import BaseCAM -from pytorch_grad_cam.utils.svd_on_activations import get_2d_projection - -# https://ieeexplore.ieee.org/document/9462463 - - -class LayerCAM(BaseCAM): - def __init__( - self, - model, - target_layers, - use_cuda=False, - reshape_transform=None): - super( - LayerCAM, - self).__init__( - model, - target_layers, - use_cuda, - reshape_transform) - - def get_cam_image(self, - input_tensor, - target_layer, - target_category, - activations, - grads, - eigen_smooth): - spatial_weighted_activations = np.maximum(grads, 0) * activations - - if eigen_smooth: - cam = get_2d_projection(spatial_weighted_activations) - else: - cam = spatial_weighted_activations.sum(axis=1) - return cam diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py deleted file mode 100644 index 5ac908e60c1f964bdd6c3e61933a37c04d487bfb..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/gcnet/mask_rcnn_r50_fpn_r4_gcb_c3-c5_1x_coco.py +++ /dev/null @@ -1,8 +0,0 @@ -_base_ = '../mask_rcnn/mask_rcnn_r50_fpn_1x_coco.py' -model = dict( - backbone=dict(plugins=[ - dict( - cfg=dict(type='ContextBlock', ratio=1. 
/ 4), - stages=(False, True, True, True), - position='after_conv3') - ])) diff --git a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ssd/ssd512_coco.py b/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ssd/ssd512_coco.py deleted file mode 100644 index 44d2920f4289c351c27e0d70dc03de0deb064a54..0000000000000000000000000000000000000000 --- a/spaces/tomofi/NDLOCR/src/ndl_layout/mmdetection/configs/ssd/ssd512_coco.py +++ /dev/null @@ -1,71 +0,0 @@ -_base_ = 'ssd300_coco.py' -input_size = 512 -model = dict( - backbone=dict(input_size=input_size), - bbox_head=dict( - in_channels=(512, 1024, 512, 256, 256, 256, 256), - anchor_generator=dict( - type='SSDAnchorGenerator', - scale_major=False, - input_size=input_size, - basesize_ratio_range=(0.1, 0.9), - strides=[8, 16, 32, 64, 128, 256, 512], - ratios=[[2], [2, 3], [2, 3], [2, 3], [2, 3], [2], [2]]))) -# dataset settings -dataset_type = 'CocoDataset' -data_root = 'data/coco/' -img_norm_cfg = dict(mean=[123.675, 116.28, 103.53], std=[1, 1, 1], to_rgb=True) -train_pipeline = [ - dict(type='LoadImageFromFile', to_float32=True), - dict(type='LoadAnnotations', with_bbox=True), - dict( - type='PhotoMetricDistortion', - brightness_delta=32, - contrast_range=(0.5, 1.5), - saturation_range=(0.5, 1.5), - hue_delta=18), - dict( - type='Expand', - mean=img_norm_cfg['mean'], - to_rgb=img_norm_cfg['to_rgb'], - ratio_range=(1, 4)), - dict( - type='MinIoURandomCrop', - min_ious=(0.1, 0.3, 0.5, 0.7, 0.9), - min_crop_size=0.3), - dict(type='Resize', img_scale=(512, 512), keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='RandomFlip', flip_ratio=0.5), - dict(type='DefaultFormatBundle'), - dict(type='Collect', keys=['img', 'gt_bboxes', 'gt_labels']), -] -test_pipeline = [ - dict(type='LoadImageFromFile'), - dict( - type='MultiScaleFlipAug', - img_scale=(512, 512), - flip=False, - transforms=[ - dict(type='Resize', keep_ratio=False), - dict(type='Normalize', **img_norm_cfg), - dict(type='ImageToTensor', keys=['img']), - dict(type='Collect', keys=['img']), - ]) -] -data = dict( - samples_per_gpu=8, - workers_per_gpu=3, - train=dict( - _delete_=True, - type='RepeatDataset', - times=5, - dataset=dict( - type=dataset_type, - ann_file=data_root + 'annotations/instances_train2017.json', - img_prefix=data_root + 'train2017/', - pipeline=train_pipeline)), - val=dict(pipeline=test_pipeline), - test=dict(pipeline=test_pipeline)) -# optimizer -optimizer = dict(type='SGD', lr=2e-3, momentum=0.9, weight_decay=5e-4) -optimizer_config = dict(_delete_=True) diff --git a/spaces/tonyassi/text-to-image/README.md b/spaces/tonyassi/text-to-image/README.md deleted file mode 100644 index 95c895961edace0b93ede42cb7da1c8c484e77f4..0000000000000000000000000000000000000000 --- a/spaces/tonyassi/text-to-image/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text To Image -emoji: 📷 -colorFrom: yellow -colorTo: pink -sdk: gradio -sdk_version: 3.45.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/ema.py b/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/ema.py deleted file mode 100644 index c8c75af43565f6e140287644aaaefa97dd6e67c5..0000000000000000000000000000000000000000 --- a/spaces/tornadoslims/instruct-pix2pix/stable_diffusion/ldm/modules/ema.py +++ /dev/null @@ -1,76 +0,0 @@ -import torch -from torch import nn - - -class LitEma(nn.Module): - def __init__(self, model, 
decay=0.9999, use_num_upates=True): - super().__init__() - if decay < 0.0 or decay > 1.0: - raise ValueError('Decay must be between 0 and 1') - - self.m_name2s_name = {} - self.register_buffer('decay', torch.tensor(decay, dtype=torch.float32)) - self.register_buffer('num_updates', torch.tensor(0,dtype=torch.int) if use_num_upates - else torch.tensor(-1,dtype=torch.int)) - - for name, p in model.named_parameters(): - if p.requires_grad: - #remove as '.'-character is not allowed in buffers - s_name = name.replace('.','') - self.m_name2s_name.update({name:s_name}) - self.register_buffer(s_name,p.clone().detach().data) - - self.collected_params = [] - - def forward(self,model): - decay = self.decay - - if self.num_updates >= 0: - self.num_updates += 1 - decay = min(self.decay,(1 + self.num_updates) / (10 + self.num_updates)) - - one_minus_decay = 1.0 - decay - - with torch.no_grad(): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - - for key in m_param: - if m_param[key].requires_grad: - sname = self.m_name2s_name[key] - shadow_params[sname] = shadow_params[sname].type_as(m_param[key]) - shadow_params[sname].sub_(one_minus_decay * (shadow_params[sname] - m_param[key])) - else: - assert not key in self.m_name2s_name - - def copy_to(self, model): - m_param = dict(model.named_parameters()) - shadow_params = dict(self.named_buffers()) - for key in m_param: - if m_param[key].requires_grad: - m_param[key].data.copy_(shadow_params[self.m_name2s_name[key]].data) - else: - assert not key in self.m_name2s_name - - def store(self, parameters): - """ - Save the current parameters for restoring later. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - temporarily stored. - """ - self.collected_params = [param.clone() for param in parameters] - - def restore(self, parameters): - """ - Restore the parameters stored with the `store` method. - Useful to validate the model with EMA parameters without affecting the - original optimization process. Store the parameters before the - `copy_to` method. After validation (or model saving), use this to - restore the former parameters. - Args: - parameters: Iterable of `torch.nn.Parameter`; the parameters to be - updated with the stored parameters. 
- """ - for c_param, param in zip(self.collected_params, parameters): - param.data.copy_(c_param.data) diff --git a/spaces/trysem/Colorizer_Models/colorizers/siggraph17.py b/spaces/trysem/Colorizer_Models/colorizers/siggraph17.py deleted file mode 100644 index 775a23f25d03f3bf1761e5d2bbf4b400eb2c6047..0000000000000000000000000000000000000000 --- a/spaces/trysem/Colorizer_Models/colorizers/siggraph17.py +++ /dev/null @@ -1,168 +0,0 @@ -import torch -import torch.nn as nn - -from .base_color import * - -class SIGGRAPHGenerator(BaseColor): - def __init__(self, norm_layer=nn.BatchNorm2d, classes=529): - super(SIGGRAPHGenerator, self).__init__() - - # Conv1 - model1=[nn.Conv2d(4, 64, kernel_size=3, stride=1, padding=1, bias=True),] - model1+=[nn.ReLU(True),] - model1+=[nn.Conv2d(64, 64, kernel_size=3, stride=1, padding=1, bias=True),] - model1+=[nn.ReLU(True),] - model1+=[norm_layer(64),] - # add a subsampling operation - - # Conv2 - model2=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),] - model2+=[nn.ReLU(True),] - model2+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),] - model2+=[nn.ReLU(True),] - model2+=[norm_layer(128),] - # add a subsampling layer operation - - # Conv3 - model3=[nn.Conv2d(128, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model3+=[nn.ReLU(True),] - model3+=[norm_layer(256),] - # add a subsampling layer operation - - # Conv4 - model4=[nn.Conv2d(256, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model4+=[nn.ReLU(True),] - model4+=[norm_layer(512),] - - # Conv5 - model5=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model5+=[nn.ReLU(True),] - model5+=[norm_layer(512),] - - # Conv6 - model6=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[nn.Conv2d(512, 512, kernel_size=3, dilation=2, stride=1, padding=2, bias=True),] - model6+=[nn.ReLU(True),] - model6+=[norm_layer(512),] - - # Conv7 - model7=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[nn.Conv2d(512, 512, kernel_size=3, stride=1, padding=1, bias=True),] - model7+=[nn.ReLU(True),] - model7+=[norm_layer(512),] - - # Conv7 - model8up=[nn.ConvTranspose2d(512, 256, kernel_size=4, stride=2, padding=1, bias=True)] - model3short8=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - - model8=[nn.ReLU(True),] - model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - model8+=[nn.ReLU(True),] - model8+=[nn.Conv2d(256, 256, kernel_size=3, stride=1, padding=1, bias=True),] - 
model8+=[nn.ReLU(True),] - model8+=[norm_layer(256),] - - # Conv9 - model9up=[nn.ConvTranspose2d(256, 128, kernel_size=4, stride=2, padding=1, bias=True),] - model2short9=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),] - # add the two feature maps above - - model9=[nn.ReLU(True),] - model9+=[nn.Conv2d(128, 128, kernel_size=3, stride=1, padding=1, bias=True),] - model9+=[nn.ReLU(True),] - model9+=[norm_layer(128),] - - # Conv10 - model10up=[nn.ConvTranspose2d(128, 128, kernel_size=4, stride=2, padding=1, bias=True),] - model1short10=[nn.Conv2d(64, 128, kernel_size=3, stride=1, padding=1, bias=True),] - # add the two feature maps above - - model10=[nn.ReLU(True),] - model10+=[nn.Conv2d(128, 128, kernel_size=3, dilation=1, stride=1, padding=1, bias=True),] - model10+=[nn.LeakyReLU(negative_slope=.2),] - - # classification output - model_class=[nn.Conv2d(256, classes, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),] - - # regression output - model_out=[nn.Conv2d(128, 2, kernel_size=1, padding=0, dilation=1, stride=1, bias=True),] - model_out+=[nn.Tanh()] - - self.model1 = nn.Sequential(*model1) - self.model2 = nn.Sequential(*model2) - self.model3 = nn.Sequential(*model3) - self.model4 = nn.Sequential(*model4) - self.model5 = nn.Sequential(*model5) - self.model6 = nn.Sequential(*model6) - self.model7 = nn.Sequential(*model7) - self.model8up = nn.Sequential(*model8up) - self.model8 = nn.Sequential(*model8) - self.model9up = nn.Sequential(*model9up) - self.model9 = nn.Sequential(*model9) - self.model10up = nn.Sequential(*model10up) - self.model10 = nn.Sequential(*model10) - self.model3short8 = nn.Sequential(*model3short8) - self.model2short9 = nn.Sequential(*model2short9) - self.model1short10 = nn.Sequential(*model1short10) - - self.model_class = nn.Sequential(*model_class) - self.model_out = nn.Sequential(*model_out) - - self.upsample4 = nn.Sequential(*[nn.Upsample(scale_factor=4, mode='bilinear'),]) - self.softmax = nn.Sequential(*[nn.Softmax(dim=1),]) - - def forward(self, input_A, input_B=None, mask_B=None): - if(input_B is None): - input_B = torch.cat((input_A*0, input_A*0), dim=1) - if(mask_B is None): - mask_B = input_A*0 - - conv1_2 = self.model1(torch.cat((self.normalize_l(input_A),self.normalize_ab(input_B),mask_B),dim=1)) - conv2_2 = self.model2(conv1_2[:,:,::2,::2]) - conv3_3 = self.model3(conv2_2[:,:,::2,::2]) - conv4_3 = self.model4(conv3_3[:,:,::2,::2]) - conv5_3 = self.model5(conv4_3) - conv6_3 = self.model6(conv5_3) - conv7_3 = self.model7(conv6_3) - - conv8_up = self.model8up(conv7_3) + self.model3short8(conv3_3) - conv8_3 = self.model8(conv8_up) - conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2) - conv9_3 = self.model9(conv9_up) - conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2) - conv10_2 = self.model10(conv10_up) - out_reg = self.model_out(conv10_2) - - conv9_up = self.model9up(conv8_3) + self.model2short9(conv2_2) - conv9_3 = self.model9(conv9_up) - conv10_up = self.model10up(conv9_3) + self.model1short10(conv1_2) - conv10_2 = self.model10(conv10_up) - out_reg = self.model_out(conv10_2) - - return self.unnormalize_ab(out_reg) - -def siggraph17(pretrained=True): - model = SIGGRAPHGenerator() - if(pretrained): - import torch.utils.model_zoo as model_zoo - model.load_state_dict(model_zoo.load_url('https://colorizers.s3.us-east-2.amazonaws.com/siggraph17-df00044c.pth',map_location='cpu',check_hash=True)) - return model - diff --git a/spaces/trysem/image-matting-app/ppmatting/models/backbone/resnet_vd.py 
b/spaces/trysem/image-matting-app/ppmatting/models/backbone/resnet_vd.py deleted file mode 100644 index 0fdd9a57664ad80ee59846060cd7f768f757feae..0000000000000000000000000000000000000000 --- a/spaces/trysem/image-matting-app/ppmatting/models/backbone/resnet_vd.py +++ /dev/null @@ -1,368 +0,0 @@ -# copyright (c) 2021 PaddlePaddle Authors. All Rights Reserve. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. - -import paddle -import paddle.nn as nn -import paddle.nn.functional as F - -from paddleseg.cvlibs import manager -from paddleseg.models import layers -from paddleseg.utils import utils - -__all__ = [ - "ResNet18_vd", "ResNet34_vd", "ResNet50_vd", "ResNet101_vd", "ResNet152_vd" -] - - -class ConvBNLayer(nn.Layer): - def __init__( - self, - in_channels, - out_channels, - kernel_size, - stride=1, - dilation=1, - groups=1, - is_vd_mode=False, - act=None, ): - super(ConvBNLayer, self).__init__() - - self.is_vd_mode = is_vd_mode - self._pool2d_avg = nn.AvgPool2D( - kernel_size=2, stride=2, padding=0, ceil_mode=True) - self._conv = nn.Conv2D( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=kernel_size, - stride=stride, - padding=(kernel_size - 1) // 2 if dilation == 1 else 0, - dilation=dilation, - groups=groups, - bias_attr=False) - - self._batch_norm = layers.SyncBatchNorm(out_channels) - self._act_op = layers.Activation(act=act) - - def forward(self, inputs): - if self.is_vd_mode: - inputs = self._pool2d_avg(inputs) - y = self._conv(inputs) - y = self._batch_norm(y) - y = self._act_op(y) - - return y - - -class BottleneckBlock(nn.Layer): - def __init__(self, - in_channels, - out_channels, - stride, - shortcut=True, - if_first=False, - dilation=1): - super(BottleneckBlock, self).__init__() - - self.conv0 = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1, - act='relu') - - self.dilation = dilation - - self.conv1 = ConvBNLayer( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=3, - stride=stride, - act='relu', - dilation=dilation) - self.conv2 = ConvBNLayer( - in_channels=out_channels, - out_channels=out_channels * 4, - kernel_size=1, - act=None) - - if not shortcut: - self.short = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels * 4, - kernel_size=1, - stride=1, - is_vd_mode=False if if_first or stride == 1 else True) - - self.shortcut = shortcut - - def forward(self, inputs): - y = self.conv0(inputs) - - #################################################################### - # If given dilation rate > 1, using corresponding padding. - # The performance drops down without the follow padding. 
- if self.dilation > 1: - padding = self.dilation - y = F.pad(y, [padding, padding, padding, padding]) - ##################################################################### - - conv1 = self.conv1(y) - conv2 = self.conv2(conv1) - - if self.shortcut: - short = inputs - else: - short = self.short(inputs) - - y = paddle.add(x=short, y=conv2) - y = F.relu(y) - return y - - -class BasicBlock(nn.Layer): - def __init__(self, - in_channels, - out_channels, - stride, - shortcut=True, - if_first=False): - super(BasicBlock, self).__init__() - self.stride = stride - self.conv0 = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=3, - stride=stride, - act='relu') - self.conv1 = ConvBNLayer( - in_channels=out_channels, - out_channels=out_channels, - kernel_size=3, - act=None) - - if not shortcut: - self.short = ConvBNLayer( - in_channels=in_channels, - out_channels=out_channels, - kernel_size=1, - stride=1, - is_vd_mode=False if if_first or stride == 1 else True) - - self.shortcut = shortcut - - def forward(self, inputs): - y = self.conv0(inputs) - conv1 = self.conv1(y) - - if self.shortcut: - short = inputs - else: - short = self.short(inputs) - y = paddle.add(x=short, y=conv1) - y = F.relu(y) - - return y - - -class ResNet_vd(nn.Layer): - """ - The ResNet_vd implementation based on PaddlePaddle. - - The original article refers to Jingdong - Tong He, et, al. "Bag of Tricks for Image Classification with Convolutional Neural Networks" - (https://arxiv.org/pdf/1812.01187.pdf). - - Args: - layers (int, optional): The layers of ResNet_vd. The supported layers are (18, 34, 50, 101, 152, 200). Default: 50. - output_stride (int, optional): The stride of output features compared to input images. It is 8 or 16. Default: 8. - multi_grid (tuple|list, optional): The grid of stage4. Defult: (1, 1, 1). - pretrained (str, optional): The path of pretrained model. 
- - """ - - def __init__(self, - input_channels=3, - layers=50, - output_stride=32, - multi_grid=(1, 1, 1), - pretrained=None): - super(ResNet_vd, self).__init__() - - self.conv1_logit = None # for gscnn shape stream - self.layers = layers - supported_layers = [18, 34, 50, 101, 152, 200] - assert layers in supported_layers, \ - "supported layers are {} but input layer is {}".format( - supported_layers, layers) - - if layers == 18: - depth = [2, 2, 2, 2] - elif layers == 34 or layers == 50: - depth = [3, 4, 6, 3] - elif layers == 101: - depth = [3, 4, 23, 3] - elif layers == 152: - depth = [3, 8, 36, 3] - elif layers == 200: - depth = [3, 12, 48, 3] - num_channels = [64, 256, 512, - 1024] if layers >= 50 else [64, 64, 128, 256] - num_filters = [64, 128, 256, 512] - - # for channels of four returned stages - self.feat_channels = [c * 4 for c in num_filters - ] if layers >= 50 else num_filters - self.feat_channels = [64] + self.feat_channels - - dilation_dict = None - if output_stride == 8: - dilation_dict = {2: 2, 3: 4} - elif output_stride == 16: - dilation_dict = {3: 2} - - self.conv1_1 = ConvBNLayer( - in_channels=input_channels, - out_channels=32, - kernel_size=3, - stride=2, - act='relu') - self.conv1_2 = ConvBNLayer( - in_channels=32, - out_channels=32, - kernel_size=3, - stride=1, - act='relu') - self.conv1_3 = ConvBNLayer( - in_channels=32, - out_channels=64, - kernel_size=3, - stride=1, - act='relu') - self.pool2d_max = nn.MaxPool2D(kernel_size=3, stride=2, padding=1) - - # self.block_list = [] - self.stage_list = [] - if layers >= 50: - for block in range(len(depth)): - shortcut = False - block_list = [] - for i in range(depth[block]): - if layers in [101, 152] and block == 2: - if i == 0: - conv_name = "res" + str(block + 2) + "a" - else: - conv_name = "res" + str(block + 2) + "b" + str(i) - else: - conv_name = "res" + str(block + 2) + chr(97 + i) - - ############################################################################### - # Add dilation rate for some segmentation tasks, if dilation_dict is not None. 
- dilation_rate = dilation_dict[ - block] if dilation_dict and block in dilation_dict else 1 - - # Actually block here is 'stage', and i is 'block' in 'stage' - # At the stage 4, expand the the dilation_rate if given multi_grid - if block == 3: - dilation_rate = dilation_rate * multi_grid[i] - ############################################################################### - - bottleneck_block = self.add_sublayer( - 'bb_%d_%d' % (block, i), - BottleneckBlock( - in_channels=num_channels[block] - if i == 0 else num_filters[block] * 4, - out_channels=num_filters[block], - stride=2 if i == 0 and block != 0 and - dilation_rate == 1 else 1, - shortcut=shortcut, - if_first=block == i == 0, - dilation=dilation_rate)) - - block_list.append(bottleneck_block) - shortcut = True - self.stage_list.append(block_list) - else: - for block in range(len(depth)): - shortcut = False - block_list = [] - for i in range(depth[block]): - conv_name = "res" + str(block + 2) + chr(97 + i) - basic_block = self.add_sublayer( - 'bb_%d_%d' % (block, i), - BasicBlock( - in_channels=num_channels[block] - if i == 0 else num_filters[block], - out_channels=num_filters[block], - stride=2 if i == 0 and block != 0 else 1, - shortcut=shortcut, - if_first=block == i == 0)) - block_list.append(basic_block) - shortcut = True - self.stage_list.append(block_list) - - self.pretrained = pretrained - self.init_weight() - - def forward(self, inputs): - feat_list = [] - y = self.conv1_1(inputs) - y = self.conv1_2(y) - y = self.conv1_3(y) - feat_list.append(y) - - y = self.pool2d_max(y) - - # A feature list saves the output feature map of each stage. - for stage in self.stage_list: - for block in stage: - y = block(y) - feat_list.append(y) - - return feat_list - - def init_weight(self): - utils.load_pretrained_model(self, self.pretrained) - - -@manager.BACKBONES.add_component -def ResNet18_vd(**args): - model = ResNet_vd(layers=18, **args) - return model - - -@manager.BACKBONES.add_component -def ResNet34_vd(**args): - model = ResNet_vd(layers=34, **args) - return model - - -@manager.BACKBONES.add_component -def ResNet50_vd(**args): - model = ResNet_vd(layers=50, **args) - return model - - -@manager.BACKBONES.add_component -def ResNet101_vd(**args): - model = ResNet_vd(layers=101, **args) - return model - - -def ResNet152_vd(**args): - model = ResNet_vd(layers=152, **args) - return model - - -def ResNet200_vd(**args): - model = ResNet_vd(layers=200, **args) - return model diff --git a/spaces/tsi-org/LLaVA/llava/eval/qa_baseline_gpt35.py b/spaces/tsi-org/LLaVA/llava/eval/qa_baseline_gpt35.py deleted file mode 100644 index babab6e12b4bb8cfa74a7edfa5e56cd1b3e2bf6c..0000000000000000000000000000000000000000 --- a/spaces/tsi-org/LLaVA/llava/eval/qa_baseline_gpt35.py +++ /dev/null @@ -1,74 +0,0 @@ -"""Generate answers with GPT-3.5""" -# Note: you need to be using OpenAI Python v0.27.0 for the code below to work -import argparse -import json -import os -import time -import concurrent.futures - -import openai -import tqdm -import shortuuid - -MODEL = 'gpt-3.5-turbo' -MODEL_ID = 'gpt-3.5-turbo:20230327' - -def get_answer(question_id: int, question: str, max_tokens: int): - ans = { - 'answer_id': shortuuid.uuid(), - 'question_id': question_id, - 'model_id': MODEL_ID, - } - for _ in range(3): - try: - response = openai.ChatCompletion.create( - model=MODEL, - messages=[{ - 'role': 'system', - 'content': 'You are a helpful assistant.' 
- }, { - 'role': 'user', - 'content': question, - }], - max_tokens=max_tokens, - ) - ans['text'] = response['choices'][0]['message']['content'] - return ans - except Exception as e: - print('[ERROR]', e) - ans['text'] = '#ERROR#' - time.sleep(1) - return ans - - -if __name__ == '__main__': - parser = argparse.ArgumentParser(description='ChatGPT answer generation.') - parser.add_argument('-q', '--question') - parser.add_argument('-o', '--output') - parser.add_argument('--max-tokens', type=int, default=1024, help='maximum number of tokens produced in the output') - args = parser.parse_args() - - questions_dict = {} - with open(os.path.expanduser(args.question)) as f: - for line in f: - if not line: - continue - q = json.loads(line) - questions_dict[q['question_id']] = q['text'] - - answers = [] - - with concurrent.futures.ThreadPoolExecutor(max_workers=32) as executor: - futures = [] - for qid, question in questions_dict.items(): - future = executor.submit(get_answer, qid, question, args.max_tokens) - futures.append(future) - - for future in tqdm.tqdm(concurrent.futures.as_completed(futures), total=len(futures)): - answers.append(future.result()) - - answers.sort(key=lambda x: x['question_id']) - - with open(os.path.expanduser(args.output), 'w') as f: - table = [json.dumps(ans) for ans in answers] - f.write('\n'.join(table)) diff --git a/spaces/ttt246/brain/Brain/tests/functional/test_api.py b/spaces/ttt246/brain/Brain/tests/functional/test_api.py deleted file mode 100644 index 27aa1a449af13ec6b6f101f795dfb49f8e328721..0000000000000000000000000000000000000000 --- a/spaces/ttt246/brain/Brain/tests/functional/test_api.py +++ /dev/null @@ -1,298 +0,0 @@ -from fastapi.testclient import TestClient -import pytest - -from app import app - -client = TestClient(app) - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "confs": { - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - "token": "eSyP3i7ITZuq8hWn2qutTl:APA91bH1FtWkaTSJwuX4WKWAl3Q-ZFyrOw4UtMP4IfwuvNrHOThH7EvEGIhtguilLRyQNlLiXatEN0xntHAc8bbKobSGjge3wxIHlspbIWY_855CzONqaVdl3y3zOmgKZNnuhYi4gwbh", - "uuid": "c40a09075d11940f", - }, - "message": "Please search an image that shows Brown Teddy Bear", - } - ) - ], -) -def test_send_notificatoin(body): - response = client.post("/sendNotification", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "image_name": "0ddffe51-3763-48d9-ab74-2086de529217", - "confs": { - "token": "eSyP3i7ITZuq8hWn2qutTl:APA91bH1FtWkaTSJwuX4WKWAl3Q-ZFyrOw4UtMP4IfwuvNrHOThH7EvEGIhtguilLRyQNlLiXatEN0xntHAc8bbKobSGjge3wxIHlspbIWY_855CzONqaVdl3y3zOmgKZNnuhYi4gwbh", - "uuid": "test-uuid", - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - }, - "status": "updated", - } - ) - ], -) -def test_upload_image(body): - response = client.post("/uploadImage", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "image_name": "0ddffe51-3763-48d9-ab74-2086de529217", - "message": "This is the text about the image", - "confs": { - "token": "eSyP3i7ITZuq8hWn2qutTl:APA91bH1FtWkaTSJwuX4WKWAl3Q-ZFyrOw4UtMP4IfwuvNrHOThH7EvEGIhtguilLRyQNlLiXatEN0xntHAc8bbKobSGjge3wxIHlspbIWY_855CzONqaVdl3y3zOmgKZNnuhYi4gwbh", - "uuid": "test-uuid", - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - }, - } - ) - ], -) -def 
test_image_relatedness(body): - response = client.post("/image_relatedness", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "prompt": {"image_name": "test_image", "message": "test_message"}, - "completion": {"image_name": "test_image", "message": "test_message"}, - "rating": 1, - "confs": { - "token": "eSyP3i7ITZuq8hWn2qutTl:APA91bH1FtWkaTSJwuX4WKWAl3Q-ZFyrOw4UtMP4IfwuvNrHOThH7EvEGIhtguilLRyQNlLiXatEN0xntHAc8bbKobSGjge3wxIHlspbIWY_855CzONqaVdl3y3zOmgKZNnuhYi4gwbh", - "uuid": "test-uuid", - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - }, - } - ) - ], -) -def test_feedback(body): - response = client.post("/feedback", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "confs": { - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - } - } - ) - ], -) -def test_feedback(body): - response = client.post("/feedback/test/1", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "confs": { - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - } - } - ) - ], -) -def test_commands(body): - response = client.post("/commands", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "history": [ - {"role": "system", "content": "You are a helpful assistant."}, - {"role": "user", "content": "Who won the world series in 2020?"}, - { - "role": "assistant", - "content": "The Los Angeles Dodgers won the World Series in 2020.", - }, - ], - "user_input": "Where was it played?", - "confs": { - "token": "test_token", - "uuid": "test_uuid", - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - }, - } - ) - ], -) -def test_chat_rising(body): - response = client.post("/chat_rising", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "contacts": [ - { - "contactId": "1", - "displayName": "Thomas", - "phoneNumbers": ["217 374 8105"], - "status": "updated", - } - ], - "confs": { - "token": "test_token", - "uuid": "test-uuid", - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - }, - } - ) - ], -) -def test_train_contacts(body): - response = client.post("/train/contacts", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "data": { - "reference_link": "test link", - }, - "confs": { - "token": "test_token", - "uuid": "test-uuid", - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - }, - } - ) - ], -) -def test_delete_data(body): - response = client.post("/auto_task/delete", json=body) - assert response.status_code == 200 - - -@pytest.mark.parametrize( - "body", - [ - ( - { - "data": { - "sender": "test@gmail.com", - "pwd": "password", - "imap_folder": "inbox", - }, - "confs": { - "token": "test_token", - "uuid": "test-uuid", - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - }, - } - ) - ], -) -def test_read_emails(body): - response = client.post("/email/read_emails", json=body) - assert response.status_code == 200 - - 
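The parametrized tests above and below all follow the same pattern: build a JSON body, post it to a FastAPI route through TestClient, and assert on the status code. A minimal, self-contained sketch of that pattern is included here for reference; the /echo route, the stand-in app, and the payload are hypothetical illustrations and are not part of the Brain test suite in this file.

import pytest
from fastapi import FastAPI
from fastapi.testclient import TestClient

# Hypothetical stand-in app, used only to illustrate the TestClient pattern.
demo_app = FastAPI()

@demo_app.post("/echo")
def echo(payload: dict):
    # Echo the JSON body back so the test has something concrete to assert on.
    return {"received": payload}

demo_client = TestClient(demo_app)

@pytest.mark.parametrize(
    "body",
    [({"confs": {"settings": {"temperature": 0.6}}})],
)
def test_echo(body):
    response = demo_client.post("/echo", json=body)
    assert response.status_code == 200
    assert response.json()["received"] == body
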
-@pytest.mark.parametrize( - "body", - [ - ( - { - "data": { - "sender": "testsender@gmail.com", - "pwd": "use app password of your google account", - "to": "testto@gmail.com", - "subject": "Test Send", - "body": "Hi, This is test email.", - "to_send": True, - "filename": "test.txt", - "file_content": "SGVsbG8sIFdvcmxkIQ==", - }, - "confs": { - "token": "test_token", - "uuid": "test-uuid", - "openai_key": "", - "pinecone_key": "", - "pinecone_env": "", - "firebase_key": "", - "settings": {"temperature": 0.6}, - }, - } - ) - ], -) -def test_send_email(body): - response = client.post("/email/send_email", json=body) - assert response.status_code == 200 diff --git a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/keypoint_eval_tool.py b/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/keypoint_eval_tool.py deleted file mode 100644 index a41ce0e0aea8bb8295b7b13ea2ed790fc36f14ff..0000000000000000000000000000000000000000 --- a/spaces/twdac/BuChengFangYuan-ChineseJapaneseTranslation/app/my_py_lib/keypoint_eval_tool.py +++ /dev/null @@ -1,167 +0,0 @@ -''' -关键点评分工具 -''' - -import numpy as np -from .score_tool import calc_score_f05_f1_f2_prec_recall -from .point_tool import get_shortest_link_pair - - -def calc_keypoint_score(pred_centers, pred_cls, label_centers, label_cls, cls_list, match_distance_thresh_list=(5, 7, 9, 11), use_single_pair=False): - ''' - 通用关键点评估 - 将会返回一个分数字典 - 当预测与标签距离小于评估距离时,将会认定为真阳性 - 结构为 - 类别-评估距离-X - X: - found_pred 真阳性,预测正确的数量 - fakefound_pred 假阳性,预测失败的数量 - found_label 真阳性,标签被正确匹配到的数量 - nofound_label 假阴性,没有任何成功匹配的标签数量 - pred_repeat 当use_single_pair为False时,一个预测可以同时匹配多个标签,该度量将会统计匹配数量大于1的预测的数量 - label_repeat 当use_single_pair为False时,一个标签可以同时匹配多个预测,该度量将会统计匹配数量大于1的标签的数量 - - :param pred_centers: 预测的中心点 - :param pred_cls: 预测的类别 - :param label_centers: 标签的中心点 - :param label_cls: 标签的类别 - :param cls_list: 要评估的类别列表 - :param match_distance_thresh_list: 多个评估阈值 - :param use_single_pair: 若为真,则使用一个预测点只匹配一个标签。如果假,每个预测点都可以匹配多个标签 - :return: - ''' - score_table = {} - - pred_centers = np.asarray(pred_centers, np.float32) - label_centers = np.asarray(label_centers, np.float32) - - if len(pred_centers) == 0: - pred_centers = np.reshape(pred_centers, [-1, 2]) - if len(label_centers) == 0: - label_centers = np.reshape(label_centers, [-1, 2]) - - pred_cls = np.int32(pred_cls) - label_cls = np.int32(label_cls) - - assert pred_centers.ndim == 2 and pred_centers.shape[1] == 2 - assert label_centers.ndim == 2 and label_centers.shape[1] == 2 - assert pred_cls.ndim == 1 - assert label_cls.ndim == 1 - assert len(pred_centers) == len(pred_cls) - assert len(label_centers) == len(label_cls) - - if len(label_centers) == 0 or len(pred_centers) == 0: - for cls in cls_list: - score_table[cls] = {} - for dt in match_distance_thresh_list: - score_table[cls][dt] = {} - score_table[cls][dt]['found_pred'] = 0 - score_table[cls][dt]['fakefound_pred'] = int(np.sum(pred_cls==cls)) - score_table[cls][dt]['found_label'] = 0 - score_table[cls][dt]['nofound_label'] = int(np.sum(label_cls==cls)) - score_table[cls][dt]['pred_repeat'] = 0 - score_table[cls][dt]['label_repeat'] = 0 - score_table[cls][dt]['f05'] = 0. - score_table[cls][dt]['f1'] = 0. - score_table[cls][dt]['f2'] = 0. - score_table[cls][dt]['prec'] = 0. - score_table[cls][dt]['recall'] = 0. 
- return score_table - - for cls in cls_list: - score_table[cls] = {} - pred_selected_bools = np.array(pred_cls, np.int32) == cls - label_selected_bools = np.array(label_cls, np.int32) == cls - selected_pred_centers = pred_centers[pred_selected_bools] - selected_label_centers = label_centers[label_selected_bools] - - for dt in match_distance_thresh_list: - score_table[cls][dt] = {} - - label_found_count = np.zeros(len(selected_label_centers), np.int32) - pred_found_count = np.zeros(len(selected_pred_centers), np.int32) - - if not use_single_pair: - for pi, pred_center in enumerate(selected_pred_centers): - if len(selected_label_centers) != 0: - dists = np.linalg.norm(pred_center[None,] - selected_label_centers, axis=1) - close_bools = dists <= dt - label_found_count[close_bools] += 1 - pred_found_count[pi] += np.array(close_bools, np.int32).sum() - else: - pred_pt_ids, label_pt_ids, _ = get_shortest_link_pair(selected_pred_centers, selected_label_centers, dt) - for i in pred_pt_ids: - pred_found_count[i] += 1 - for i in label_pt_ids: - label_found_count[i] += 1 - - found_pred = (pred_found_count > 0).sum() - fakefound_pred = (pred_found_count == 0).sum() - - found_label = (label_found_count > 0).sum() - nofound_label = (label_found_count == 0).sum() - - pred_repeat = (pred_found_count > 1).sum() - label_repeat = (label_found_count > 1).sum() - - f05, f1, f2, prec, recall = calc_score_f05_f1_f2_prec_recall(found_label, nofound_label, found_pred, fakefound_pred) - - score_table[cls][dt]['found_pred'] = int(found_pred) - score_table[cls][dt]['fakefound_pred'] = int(fakefound_pred) - score_table[cls][dt]['found_label'] = int(found_label) - score_table[cls][dt]['nofound_label'] = int(nofound_label) - score_table[cls][dt]['pred_repeat'] = int(pred_repeat) - score_table[cls][dt]['label_repeat'] = int(label_repeat) - score_table[cls][dt]['f05'] = float(f05) - score_table[cls][dt]['f1'] = float(f1) - score_table[cls][dt]['f2'] = float(f2) - score_table[cls][dt]['prec'] = float(prec) - score_table[cls][dt]['recall'] = float(recall) - - return score_table - - -def summary_keypoint_score(scores, cls_list, match_distance_thresh_list): - ''' - 对多个分数表进行合算,得到统计分数表 - 其中 found_pred, fakefound_pred, found_label, nofound_label, pred_repeat, label_repeat 将会被累加 - 其中 f1, prec, recall 将会被求平均 - :param scores: 多个分数表 - :param cls_list: 要检查的分类 - :param match_distance_thresh_list: 多个匹配距离 - :return: - ''' - score_table = {} - for cls in cls_list: - score_table[cls] = {} - for dt in match_distance_thresh_list: - score_table[cls][dt] = {} - score_table[cls][dt]['found_pred'] = 0 - score_table[cls][dt]['fakefound_pred'] = 0 - score_table[cls][dt]['found_label'] = 0 - score_table[cls][dt]['nofound_label'] = 0 - score_table[cls][dt]['pred_repeat'] = 0 - score_table[cls][dt]['label_repeat'] = 0 - score_table[cls][dt]['f05'] = 0. - score_table[cls][dt]['f1'] = 0. - score_table[cls][dt]['f2'] = 0. - score_table[cls][dt]['prec'] = 0. - score_table[cls][dt]['recall'] = 0. 
- - for score in scores: - for cls in cls_list: - for dt in match_distance_thresh_list: - score_table[cls][dt]['found_pred'] += score[cls][dt]['found_pred'] - score_table[cls][dt]['fakefound_pred'] += score[cls][dt]['fakefound_pred'] - score_table[cls][dt]['found_label'] += score[cls][dt]['found_label'] - score_table[cls][dt]['nofound_label'] += score[cls][dt]['nofound_label'] - score_table[cls][dt]['pred_repeat'] += score[cls][dt]['pred_repeat'] - score_table[cls][dt]['label_repeat'] += score[cls][dt]['label_repeat'] - score_table[cls][dt]['f05'] += score[cls][dt]['f05'] / len(scores) - score_table[cls][dt]['f1'] += score[cls][dt]['f1'] / len(scores) - score_table[cls][dt]['f2'] += score[cls][dt]['f2'] / len(scores) - score_table[cls][dt]['prec'] += score[cls][dt]['prec'] / len(scores) - score_table[cls][dt]['recall'] += score[cls][dt]['recall'] / len(scores) - - return score_table diff --git a/spaces/ucalyptus/PTI/training/projectors/__init__.py b/spaces/ucalyptus/PTI/training/projectors/__init__.py deleted file mode 100644 index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000 diff --git a/spaces/ullasmrnva/LawBerta/app.py b/spaces/ullasmrnva/LawBerta/app.py deleted file mode 100644 index 94a1c078abf1ed00de822c16afb23b8f91d6db2e..0000000000000000000000000000000000000000 --- a/spaces/ullasmrnva/LawBerta/app.py +++ /dev/null @@ -1,1028 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -import tensorflow as tf -import gradio as gr -import pandas as pd -import re -import ast -import spacy -import nltk -nltk.download('punkt') -from nltk.tokenize import sent_tokenize -from transformers import AutoTokenizer, \ - TFAutoModelForSequenceClassification -import numpy as np - - -def make_prediction(contract): - if contract is list: - contract=contract[0] - tokenizer = AutoTokenizer.from_pretrained('roberta-base') - final_model = TFAutoModelForSequenceClassification.from_pretrained('ullasmrnva/LawBerta') - contract_df = pd.DataFrame() - contract_df = contract_df.append({'contracts': contract}, - ignore_index=True) - contract_sentences_df = contract_df['contracts' - ].apply(sent_tokenize).reset_index()['contracts' - ].explode().to_frame().rename(columns={'contracts': 'sentences' - }).reset_index() - input = [np.array(tokenizer(list(contract_sentences_df.sentences), - truncation=True, max_length=100, padding='max_length' - ).input_ids)] - y_pred = np.argmax(final_model.predict(input)[0], axis=1) - clause_map = { - 0: 'Affiliate License-Licensee', - 1: 'Affiliate License-Licensor', - 2: 'Anti-Assignment', - 3: 'Audit Rights', - 4: 'Cap On Liability', - 5: 'Change Of Control', - 6: 'Competitive Restriction Exception', - 7: 'Covenant Not To Sue', - 8: 'Exclusivity', - 9: 'Insurance', - 10: 'Ip Ownership Assignment', - 11: 'Irrevocable Or Perpetual License', - 12: 'Joint Ip Ownership', - 13: 'License Grant', - 14: 'Liquidated Damages', - 15: 'Minimum Commitment', - 16: 'Most Favored Nation', - 17: 'No Clause', - 18: 'No-Solicit Of Customers', - 19: 'No-Solicit Of Employees', - 20: 'Non-Compete', - 21: 'Non-Disparagement', - 22: 'Non-Transferable License', - 23: 'Post-Termination Services', - 24: 'Price Restrictions', - 25: 'Revenue/Profit Sharing', - 26: 'Rofr/Rofo/Rofn', - 27: 'Source Code Escrow', - 28: 'Termination For Convenience', - 29: 'Third Party Beneficiary', - 30: 'Uncapped Liability', - 31: 'Unlimited/All-You-Can-Eat-License', - 32: 'Volume Restriction', - 33: 'Warranty Duration', - } - final_df = contract_sentences_df[y_pred != 17][['sentences']] - 
final_df['clause'] = np.array([clause_map[x] for x in y_pred[y_pred - != 17]]) - output_sentences = [] - clauses_found=[] - for i in [ - 'License Grant', - 'Audit Rights', - 'Non-Disparagement', - 'Cap On Liability', - 'Anti-Assignment', - 'Minimum Commitment', - 'Most Favored Nation', - 'Unlimited/All-You-Can-Eat-License', - 'Revenue/Profit Sharing', - 'Uncapped Liability', - 'Termination For Convenience', - 'Exclusivity', - 'Change Of Control', - 'Rofr/Rofo/Rofn', - 'Irrevocable Or Perpetual License', - 'Competitive Restriction Exception', - 'Price Restrictions', - 'Covenant Not To Sue', - 'Volume Restriction', - 'Joint Ip Ownership', - 'Post-Termination Services', - 'Ip Ownership Assignment', - 'Non-Compete', - 'Insurance', - 'Affiliate License-Licensor', - 'Affiliate License-Licensee', - 'Non-Transferable License', - 'No-Solicit Of Customers', - 'Warranty Duration', - 'No-Solicit Of Employees', - 'Liquidated Damages', - 'Third Party Beneficiary', - 'Source Code Escrow', - ]: - clause=final_df[final_df['clause']== i]['sentences'].str.cat(sep='***\n\n***') - if clause!='': - print(i) - clauses_found.append(i) - output_sentences.append(clause) - found='' - if len(clauses_found)==0: - found='None' - else: - found=', '.join(clauses_found) - return [found]+output_sentences - - -gr.Interface(fn=make_prediction, inputs=gr.Textbox(placeholder="In a timely manner, upon the written instruction of the Company, invest and reinvest the Property in United States government securities within the meaning of Section 2(a)(16) of the Investment Company Act of 1940...\nPlease see example below."),\ - outputs=[gr.Textbox(label='Clauses Found:'), gr.Textbox(label='License Grant'),\ - gr.Textbox(label='Audit Rights'),\ - gr.Textbox(label='Non-Disparagement'),\ - gr.Textbox(label='Cap On Liability'),\ - gr.Textbox(label='Anti-Assignment'),\ - gr.Textbox(label='Minimum Commitment'),\ - gr.Textbox(label='Most Favored Nation'),\ - gr.Textbox(label='Unlimited/All-You-Can-Eat-License'),\ - gr.Textbox(label='Revenue/Profit Sharing'),\ - gr.Textbox(label='Uncapped Liability'),\ - gr.Textbox(label='Termination For Convenience'),\ - gr.Textbox(label='Exclusivity'),\ - gr.Textbox(label='Change Of Control'),\ - gr.Textbox(label='Rofr/Rofo/Rofn'),\ - gr.Textbox(label='Irrevocable Or Perpetual License'),\ - gr.Textbox(label='Competitive Restriction Exception'),\ - gr.Textbox(label='Price Restrictions'),\ - gr.Textbox(label='Covenant Not To Sue'),\ - gr.Textbox(label='Volume Restriction'),\ - gr.Textbox(label='Joint Ip Ownership'),\ - gr.Textbox(label='Post-Termination Services'),\ - gr.Textbox(label='Ip Ownership Assignment'),\ - gr.Textbox(label='Non-Compete'),\ - gr.Textbox(label='Insurance'),\ - gr.Textbox(label='Affiliate License-Licensor'),\ - gr.Textbox(label='Affiliate License-Licensee'),\ - gr.Textbox(label='Non-Transferable License'),\ - gr.Textbox(label='No-Solicit Of Customers'),\ - gr.Textbox(label='Warranty Duration'),\ - gr.Textbox(label='No-Solicit Of Employees'),\ - gr.Textbox(label='Liquidated Damages'),\ - gr.Textbox(label='Third Party Beneficiary'),\ - gr.Textbox(label='Source Code Escrow')], examples=["""-------------------------------------------------------------------------------- - -Exhibit 10.2 - -  -INVESTMENT MANAGEMENT TRUST AGREEMENT -  -This Investment Management Trust Agreement (this “Agreement”) is made effective -as of September 30, 2020 by and between Altimeter Growth Corp., a Cayman Islands -exempted company (the “Company”), and Continental Stock Transfer & Trust -Company, a New York 
corporation (the “Trustee”). -  -WHEREAS, the Company’s registration statement on Form S-1, File No. 333-248762 -(the “Registration Statement”) and prospectus (the “Prospectus”) for the initial -public offering of the Company’s units (the “Units”), each of which consists of -one of the Company’s Class A ordinary shares, par value $0.0001 per share (the -“Ordinary Shares”), and a fraction of one redeemable warrant, each whole warrant -entitling the holder thereof to purchase one Ordinary Share (such initial public -offering hereinafter referred to as the “Offering”), has been declared effective -as of the date hereof by the U.S. Securities and Exchange Commission; and -  -WHEREAS, the Company has entered into an Underwriting Agreement (the -“Underwriting Agreement”) with Citigroup Global Markets Inc., Goldman Sachs & -Co. LLC and Morgan Stanley & Co. LLC, as representatives (the “Representatives”) -to the several underwriters (the “Underwriters”) named therein; and -  -WHEREAS, as described in the Prospectus, $450,000,000 of the gross proceeds of -the Offering and sale of the Private Placement Warrants (as defined in the -Underwriting Agreement) (or $500,000,000 if the Underwriters’ option to purchase -additional units is exercised in full) will be delivered to the Trustee to be -deposited and held in a segregated trust account located at all times in the -United States (the “Trust Account”) for the benefit of the Company and the -holders of the Ordinary Shares included in the Units issued in the Offering as -hereinafter provided (the amount to be delivered to the Trustee (and any -interest subsequently earned thereon) is referred to herein as the “Property,” -the shareholders for whose benefit the Trustee shall hold the Property will be -referred to as the “Public Shareholders,” and the Public Shareholders and the -Company will be referred to together as the “Beneficiaries”); and -  -WHEREAS, pursuant to the Underwriting Agreement, a portion of the Property equal -to $15,750,000, or $17,500,000 if the Underwriters’ option to purchase -additional units is exercised in full, is attributable to deferred underwriting -discounts and commissions that will be payable by the Company to the -Underwriters upon the consummation of the Business Combination (as defined -below) (the “Deferred Discount”); and -  -WHEREAS, the Company and the Trustee desire to enter into this Agreement to set -forth the terms and conditions pursuant to which the Trustee shall hold the -Property. -  -NOW THEREFORE, IT IS AGREED: -  -1. Agreements and Covenants of Trustee. The Trustee hereby agrees and covenants -to: -  -(a) Hold the Property in trust for the Beneficiaries in accordance with the -terms of this Agreement in the Trust Account established by the Trustee in the -United States at J.P. Morgan Chase Bank, N.A. 
(or at another U.S chartered -commercial bank with consolidated assets of $100 billion or more) and at a -brokerage institution selected by the Trustee that is reasonably satisfactory to -the Company; -  -(b) Manage, supervise and administer the Trust Account subject to the terms and -conditions set forth herein; -  -(c) In a timely manner, upon the written instruction of the Company, invest and -reinvest the Property in United States government securities within the meaning -of Section 2(a)(16) of the Investment Company Act of 1940, as amended, having a -maturity of 185 days or less, or in money market funds meeting the conditions of -paragraphs (d)(1), (d)(2), (d)(3) and (d)(4) of Rule 2a-7 promulgated under the -Investment Company Act of 1940, as amended (or any successor rule), which invest -only in direct U.S. government treasury obligations, as determined by the -Company; the Trustee may not invest in any other securities or assets, it being -understood that the Trust Account will earn no interest while account funds are -uninvested awaiting the Company’s instructions hereunder and the Trustee may -earn bank credits or other consideration; -  - --------------------------------------------------------------------------------- - -(d) Collect and receive, when due, all principal, interest or other income -arising from the Property, which shall become part of the “Property,” as such -term is used herein; -  -(e) Promptly notify the Company and the Representative of all communications -received by the Trustee with respect to any Property requiring action by the -Company; -  -(f) Supply any necessary information or documents as may be requested by the -Company (or its authorized agents) in connection with the Company’s preparation -of the tax returns relating to assets held in the Trust Account; -  -(g) Participate in any plan or proceeding for protecting or enforcing any right -or interest arising from the Property if, as and when instructed by the Company -to do so; -  -(h) Render to the Company monthly written statements of the activities of, and -amounts in, the Trust Account reflecting all receipts and disbursements of the -Trust Account; -  -(i) Commence liquidation of the Trust Account only after and promptly after (x) -receipt of, and only in accordance with, the terms of a letter from the Company -(“Termination Letter”) in a form substantially similar to that attached hereto -as either Exhibit A or Exhibit B, as applicable, signed on behalf of the Company -by its Chief Executive Officer, President, Chief Operating Officer or other -authorized officer of the Company, and complete the liquidation of the Trust -Account and distribute the Property in the Trust Account, including interest -earned on the funds held in the Trust Account and not previously released to us -to pay our income taxes (less up to $100,000 of interest to pay dissolution -expenses), only as directed in the Termination Letter and the other documents -referred to therein, or (y) upon the date which is the later of (1) 24 months -after the closing of the Offering (or 27 months from the closing of Offering if -the Company has executed a letter of intent, agreement in principle or -definitive agreement for a Business Combination within 24 months from the -closing of Offering but has not completed a Business Combination within such 24 -month period) and (2) such later date as may be approved by the Company’s -shareholders in accordance with the Company’s amended and restated memorandum -and articles of association, if 
a Termination Letter has not been received by -the Trustee prior to such date, in which case the Trust Account shall be -liquidated in accordance with the procedures set forth in the Termination Letter -attached as Exhibit B and the Property in the Trust Account, including interest -earned on the funds held in the Trust Account and not previously released to the -Company to pay its income taxes (less up to $100,000 of interest to pay -dissolution expenses), shall be distributed to the Public Shareholders of record -as of such date It is acknowledged and agreed that there should be no reduction -in the principal amount per share initially deposited in the Trust Account; -  -(j) Upon written request from the Company, which may be given from time to time -in a form substantially similar to that attached hereto as Exhibit C (a “Tax -Payment Withdrawal Instruction”), withdraw from the Trust Account and distribute -to the Company the amount of interest earned on the Property requested by the -Company to cover any tax obligation owed by the Company as a result of assets of -the Company or interest or other income earned on the Property, which amount -shall be delivered directly to the Company by electronic funds transfer or other -method of prompt payment, and the Company shall forward such payment to the -relevant taxing authority, so long as there is no reduction in the principal -amount per share initially deposited in the Trust Account; provided, however, -that to the extent there is not sufficient cash in the Trust Account to pay such -tax obligation, the Trustee shall liquidate such assets held in the Trust -Account as shall be designated by the Company in writing to make such -distribution (it being acknowledged and agreed that any such amount in excess of -interest income earned on the Property shall not be payable from the Trust -Account). The written request of the Company referenced above shall constitute -presumptive evidence that the Company is entitled to said funds, and the Trustee -shall have no responsibility to look beyond said request; -  -(k) Upon written request from the Company, which may be given from time to time -in a form substantially similar to that attached hereto as Exhibit D (a -“Shareholder Redemption Withdrawal Instruction”), the Trustee shall distribute -to the remitting brokers on behalf of Public Shareholders redeeming Ordinary -Shares the amount required to pay redeemed Ordinary Shares from Public -Shareholders pursuant to the Company’s amended and restated memorandum and -articles of association; and -  -(l) Not make any withdrawals or distributions from the Trust Account other than -pursuant to Section 1(i), (j) or (k) above. -  - --------------------------------------------------------------------------------- - -2. Agreements and Covenants of the Company. The Company hereby agrees and -covenants to: -  -(a) Give all instructions to the Trustee hereunder in writing, signed by the -Company’s Chief Executive Officer, President, Chief Operating Officer or other -authorized officer of the Company. 
In addition, except with respect to its -duties under Sections 1(i), (j) or (k) hereof, the Trustee shall be entitled to -rely on, and shall be protected in relying on, any verbal or telephonic advice -or instruction which it, in good faith and with reasonable care, believes to be -given by any one of the persons authorized above to give written instructions, -provided that the Company shall promptly confirm such instructions in writing; -  -(b) Subject to Section 4 hereof, hold the Trustee harmless and indemnify the -Trustee from and against any and all expenses, including reasonable counsel fees -and disbursements, or losses suffered by the Trustee in connection with any -action taken by it hereunder and in connection with any action, suit or other -proceeding brought against the Trustee involving any claim, or in connection -with any claim or demand, which in any way arises out of or relates to this -Agreement, the services of the Trustee hereunder, or the Property or any -interest earned on the Property, except for expenses and losses resulting from -the Trustee’s gross negligence, fraud or willful misconduct. Promptly after the -receipt by the Trustee of notice of demand or claim or the commencement of any -action, suit or proceeding, pursuant to which the Trustee intends to seek -indemnification under this Section 2(b), it shall notify the Company in writing -of such claim (hereinafter referred to as the “Indemnified Claim”). The Trustee -shall have the right to conduct and manage the defense against such Indemnified -Claim; provided that the Trustee shall obtain the consent of the Company with -respect to the selection of counsel, which consent shall not be unreasonably -withheld. The Trustee may not agree to settle any Indemnified Claim without the -prior written consent of the Company, which such consent shall not be -unreasonably withheld. The Company may participate in such action with its own -counsel; -  -(c) Pay the Trustee the fees set forth on Schedule A hereto, including an -initial acceptance fee, annual administration fee, and transaction processing -fee which fees shall be subject to modification by the parties from time to -time. It is expressly understood that the Property shall not be used to pay such -fees unless and until it is distributed to the Company pursuant to Sections 1(i) -through 1(k) hereof. The Company shall pay the Trustee the initial acceptance -fee and the first annual administration fee at the consummation of the Offering. 
-The Company shall not be responsible for any other fees or charges of the -Trustee except as set forth in this Section 2(c) and as may be provided in -Section 2(b) hereof; -  -(d) In connection with any vote of the Company’s shareholders regarding a -merger, share exchange, asset acquisition, share purchase, reorganization or -similar business combination involving the Company and one or more businesses -(the “Business Combination”), provide to the Trustee an affidavit or certificate -of the inspector of elections for the shareholder meeting verifying the vote of -such shareholders regarding such Business Combination; -  -(e) Provide the Representative with a copy of any Termination Letter(s) and/or -any other correspondence that is sent to the Trustee with respect to any -proposed withdrawal from the Trust Account promptly after it issues the same; -  -(f) Unless otherwise agreed between the Company and the Representative, ensure -that any Instruction Letter (as defined in Exhibit A) delivered in connection -with a Termination Letter in the form of Exhibit A expressly provides that the -Deferred Discount is paid directly to the account or accounts directed by the -Representative on behalf of the Underwriters prior to any transfer of the funds -held in the Trust Account to the Company or any other person; -  -(g) Instruct the Trustee to make only those distributions that are permitted -under this Agreement, and refrain from instructing the Trustee to make any -distributions that are not permitted under this Agreement; -  -(h) If the Company seeks to amend any provisions of its amended and restated -memorandum and articles of association (A) to modify the substance or timing of -the Company’s obligation to provide holders of the Ordinary Shares the right to -have their shares redeemed in connection with the Company’s initial Business -Combination or to redeem 100% of the Ordinary Shares if the Company does not -complete its initial Business Combination within the time period set forth -therein or (B) with respect to any other provision relating to the rights of -holders of the Ordinary Shares (in each case, an “Amendment”), the Company will -provide the Trustee with a letter (an “Amendment Notification Letter”) in the -form of Exhibit D providing instructions for the distribution of funds to Public -Shareholders who exercise their redemption option in connection with such -Amendment; and -  -(i) Within five (5) business days after the Underwriters exercise their option -to purchase additional units (or any unexercised portion thereof) or such option -to purchase additional units expires, provide the Trustee with a notice in -writing of the total amount of the Deferred Discount. -  -3. Limitations of Liability. 
The Trustee shall have no responsibility or -liability to: -  -(a) Imply obligations, perform duties, inquire or otherwise be subject to the -provisions of any agreement or document other than this Agreement and that which -is expressly set forth herein; -  - --------------------------------------------------------------------------------- - -(b) Take any action with respect to the Property, other than as directed in -Section 1 hereof, and the Trustee shall have no liability to any third party -except for liability arising out of the Trustee’s gross negligence, fraud or -willful misconduct; -  -(c) Institute any proceeding for the collection of any principal and income -arising from, or institute, appear in or defend any proceeding of any kind with -respect to, any of the Property unless and until it shall have received written -instructions from the Company given as provided herein to do so and the Company -shall have advanced or guaranteed to it funds sufficient to pay any expenses -incident thereto; -  -(d) Change the investment of any Property, other than in compliance with Section -1 hereof; -  -(e) Refund any depreciation in principal of any Property; -  -(f) Assume that the authority of any person designated by the Company to give -instructions hereunder shall not be continuing unless provided otherwise in such -designation, or unless the Company shall have delivered a written revocation of -such authority to the Trustee; -  -(g) The other parties hereto or to anyone else for any action taken or omitted -by it, or any action suffered by it to be taken or omitted, in good faith and in -the Trustee’s best judgment, except for the Trustee’s gross negligence, fraud or -willful misconduct. The Trustee may rely conclusively and shall be protected in -acting upon any order, notice, demand, certificate, opinion or advice of counsel -(including counsel chosen by the Trustee, which counsel may be the Company’s -counsel), statement, instrument, report or other paper or document (not only as -to its due execution and the validity and effectiveness of its provisions, but -also as to the truth and acceptability of any information therein contained) -which the Trustee believes, in good faith and with reasonable care, to be -genuine and to be signed or presented by the proper person or persons. 
The -Trustee shall not be bound by any notice or demand, or any waiver, modification, -termination or rescission of this Agreement or any of the terms hereof, unless -evidenced by a written instrument delivered to the Trustee, signed by the proper -party or parties and, if the duties or rights of the Trustee are affected, -unless it shall give its prior written consent thereto; -  -(h) Verify the accuracy of the information contained in the Registration -Statement; -  -(i) Provide any assurance that any Business Combination entered into by the -Company or any other action taken by the Company is as contemplated by the -Registration Statement; -  -(j) File information returns with respect to the Trust Account with any local, -state or federal taxing authority or provide periodic written statements to the -Company documenting the taxes payable by the Company, if any, relating to any -interest income earned on the Property; -  -(k) Prepare, execute and file tax reports, income or other tax returns and pay -any taxes with respect to any income generated by, and activities relating to, -the Trust Account, regardless of whether such tax is payable by the Trust -Account or the Company, including, but not limited to, income tax obligations, -except pursuant to Section 1(j) hereof; or -  -(l) Verify calculations, qualify or otherwise approve the Company’s written -requests for distributions pursuant to Sections 1(i), 1(j) or 1(k) hereof. -  -4. Trust Account Waiver. The Trustee has no right of set-off or any right, -title, interest or claim of any kind (“Claim”) to, or to any monies in, the -Trust Account, and hereby irrevocably waives any Claim to, or to any monies in, -the Trust Account that it may have now or in the future. In the event the -Trustee has any Claim against the Company under this Agreement, including, -without limitation, under Section 2(b) or Section 2(c) hereof, the Trustee shall -pursue such Claim solely against the Company and its assets outside the Trust -Account and not against the Property or any monies in the Trust Account. -  -5. Termination. This Agreement shall terminate as follows: -  -(a) If the Trustee gives written notice to the Company that it desires to resign -under this Agreement, the Company shall use its reasonable efforts to locate a -successor trustee, pending which the Trustee shall continue to act in accordance -with this Agreement. 
At such time that the Company notifies the Trustee that a -successor trustee has been appointed by the Company and has agreed to become -subject to the terms of this Agreement, the Trustee shall transfer the -management of the Trust Account to the successor trustee, including but not -limited to the transfer of copies of the reports and statements relating to the -Trust Account, whereupon this Agreement shall terminate; provided, however, that -in the event that the Company does not locate a successor trustee within ninety -(90) days of receipt of the resignation notice from the Trustee, the Trustee may -submit an application to have the Property deposited with any court in the State -of New York or with the United States District Court for the Southern District -of New York and upon such deposit, the Trustee shall be immune from any -liability whatsoever; or -  - --------------------------------------------------------------------------------- - -(b) At such time that the Trustee has completed the liquidation of the Trust -Account and its obligations in accordance with the provisions of Section 1(i) -hereof and distributed the Property in accordance with the provisions of the -Termination Letter, this Agreement shall terminate except with respect to -Section 2(b). -  -6. Miscellaneous. -  -(a) The Company and the Trustee each acknowledge that the Trustee will follow -the security procedures set forth below with respect to funds transferred from -the Trust Account. The Company and the Trustee will each restrict access to -confidential information relating to such security procedures to authorized -persons. Each party must notify the other party immediately if it has reason to -believe unauthorized persons may have obtained access to such confidential -information, or of any change in its authorized personnel. In executing funds -transfers, the Trustee shall rely upon all information supplied to it by the -Company, including, account names, account numbers, and all other identifying -information relating to a Beneficiary, Beneficiary’s bank or intermediary bank. -Except for any liability arising out of the Trustee’s gross negligence, fraud or -willful misconduct, the Trustee shall not be liable for any loss, liability or -expense resulting from any error in the information or transmission of the -funds. -  -(b) This Agreement shall be governed by and construed and enforced in accordance -with the laws of the State of New York, without giving effect to conflicts of -law principles that would result in the application of the substantive laws of -another jurisdiction. This Agreement may be executed in several original or -facsimile counterparts, each one of which shall constitute an original, and -together shall constitute but one instrument. -  -(c) This Agreement contains the entire agreement and understanding of the -parties hereto with respect to the subject matter hereof. 
Except for Section -1(i), 1(j) and 1(k) hereof (which sections may not be modified, amended or -deleted without the affirmative vote of sixty-five percent (65%) of the then -outstanding Ordinary Shares and Class B ordinary shares, par value $0.0001 per -share, of the Company, voting together as a single class; provided that no such -amendment will affect any Public Shareholder who has properly elected to redeem -his or her Ordinary Shares in connection with a shareholder vote to amend this -Agreement to modify the substance or timing of the Company’s obligation to -provide for the redemption of the Public Shares in connection with an initial -Business Combination or an Amendment or to redeem 100% of its Ordinary Shares if -the Company does not complete its initial Business Combination within the time -frame specified in the Company’s amended and restated memorandum and articles of -association), this Agreement or any provision hereof may only be changed, -amended or modified (other than to correct a typographical error) by a writing -signed by each of the parties hereto. -  -(d) The parties hereto consent to the jurisdiction and venue of any state or -federal court located in the City of New York, State of New York, for purposes -of resolving any disputes hereunder. AS TO ANY CLAIM, CROSS-CLAIM OR -COUNTERCLAIM IN ANY WAY RELATING TO THIS AGREEMENT, EACH PARTY WAIVES THE RIGHT -TO TRIAL BY JURY. -  -(e) Any notice, consent or request to be given in connection with any of the -terms or provisions of this Agreement shall be in writing and shall be sent by -express mail or similar private courier service, by certified mail (return -receipt requested), by hand delivery or by electronic mail: -  -if to the Trustee, to: -  -Continental Stock Transfer & Trust Company -1 State Street, 30th Floor -New York, New York 10004 -Attn: Francis E. Wolf, Jr. & Celeste Gonzalez -Email: fwolf@continentalstock.com -cgonzalez@continentalstock.com -  - --------------------------------------------------------------------------------- - -if to the Company, to: -  -Altimeter Growth Corp. - - -2550 Sand Hill Road -Suite 150 -Menlo Park, CA 94025 -Attn: Hab Siam -Email: hab@altimeter.com -  -in each case, with copies to: -  -Ropes & Gray LLP -1211 Avenue of the Americas -New York, New York 10036 -Attn: Paul D. Tropp -Michael S. Pilo -E-mail: paul.tropp@ropesgray.com -michael.pilo @ropesgray.com -  -and - - -Citigroup Global Markets Inc. -388 Greenwich Street -New York, New York 10013 -Attn: Pavan Bellur -Email: pavan.bellur@citigroup.com - - -and - - -Goldman Sachs & Co. LLC -200 West Street -New York, NY 10282 -Attn: Registration Department - - -and - - -Morgan Stanley & Co. LLC -1585 Broadway -New York, New York 10036 -Attn: Equity Syndicate Desk - - -and -  -Kirkland & Ellis LLP -601 Lexington Avenue -New York, New York 10022 -Attn: Christian O. Nagler -E-mail: cnagler@kirkland.com -  -(f) Each of the Company and the Trustee hereby represents that it has the full -right and power and has been duly authorized to enter into this Agreement and to -perform its respective obligations as contemplated hereunder. The Trustee -acknowledges and agrees that it shall not make any claims or proceed against the -Trust Account, including by way of set-off, and shall not be entitled to any -funds in the Trust Account under any circumstance. 
-  -(g) This Agreement is the joint product of the Trustee and the Company and each -provision hereof has been subject to the mutual consultation, negotiation and -agreement of such parties and shall not be construed for or against any party -hereto. -  -(h) This Agreement may be executed in any number of counterparts, each of which -shall be deemed to be an original, but all such counterparts shall together -constitute one and the same instrument. Delivery of a signed counterpart of this -Agreement by facsimile or electronic transmission shall constitute valid and -sufficient delivery thereof. -  - --------------------------------------------------------------------------------- - -(i) Each of the Company and the Trustee hereby acknowledges and agrees that the -Representative on behalf of the Underwriters is a third-party beneficiary of -this Agreement. -  -(j) Except as specified herein, no party to this Agreement may assign its rights -or delegate its obligations hereunder to any other person or entity. -  -[Signature Page Follows] - - - --------------------------------------------------------------------------------- - -IN WITNESS WHEREOF, the parties have duly executed this Investment Management -Trust Agreement as of the date first written above. - - - - -  -CONTINENTAL STOCK TRANSFER & TRUST COMPANY, as Trustee -        -By: -/s/ Francis Wolf -  -Name: -Francis Wolf -  -Title: -Vice President -      -ALTIMETER GROWTH CORP. -        -By: -/s/ Hab Siam -  -Name: -Hab Siam -  -Title: -General Counsel - - - -[Signature Page to Investment Management Trust Agreement] - - - --------------------------------------------------------------------------------- - -SCHEDULE A - - - -Fee Item -  -Time and method of payment -  -Amount -  -Initial acceptance fee -  -Initial closing of IPO by wire transfer -  -$ -3,500.00 -  -Annual fee -  -First year, initial closing of IPO by wire transfer; thereafter on the -anniversary of the effective date of the IPO by wire transfer or check -  -$ -10,000.00 -  -Transaction processing fee for disbursements to Company under Sections 1(i), -(j), and (k) -  -Billed by Trustee to Company under Section 1 -  -$ -250.00 -  -Paying Agent services as required pursuant to Section 1(i) and 1(k) -  -Billed to Company upon delivery of service pursuant to Section 1(i) and 1(k) -  -Prevailing rates -  - - - - --------------------------------------------------------------------------------- - -EXHIBIT A -  -[Letterhead of Company] -  -[Insert date] -  -Continental Stock Transfer & Trust Company -1 State Street, 30th Floor -New York, New York 10004 -Attn: Francis Wolf & Celeste Gonzalez -  -Re: Trust Account - Termination Letter -  -Dear Mr. Wolf and Ms. Gonzalez: -  -Pursuant to Section 1(i) of the Investment Management Trust Agreement between -Altimeter Growth Corp. (the “Company”) and Continental Stock Transfer & Trust -Company (“Trustee”), dated as of October [•], 2020 (the “Trust Agreement”), this -is to advise you that the Company has entered into an agreement with ___________ -(the “Target Business”) to consummate a business combination with Target -Business (the “Business Combination”) on or about [insert date]. The Company -shall notify you at least seventy-two (72) hours in advance of the actual date -(or such shorter time period as you may agree) of the consummation of the -Business Combination (the “Consummation Date”). Capitalized terms used but not -defined herein shall have the meanings set forth in the Trust Agreement. 
-  -In accordance with the terms of the Trust Agreement, we hereby authorize you to -commence to liquidate all of the assets of the Trust Account, and to transfer -the proceeds into the trust operating account at J.P. Morgan Chase Bank, N.A. to -the effect that, on the Consummation Date, all of the funds held in the Trust -Account will be immediately available for transfer to the account or accounts -that the Representative (with respect to the Deferred Discount) and the Company -shall direct on the Consummation Date. It is acknowledged and agreed that while -the funds are on deposit in said trust operating account at J.P. Morgan Chase -Bank, N.A. awaiting distribution, neither the Company nor the Representative -will earn any interest or dividends. -  -On the Consummation Date (i) counsel for the Company shall deliver to you -written notification that the Business Combination has been consummated, or will -be consummated substantially concurrently with your transfer of funds to the -accounts as directed by the Company (the “Notification”), and (ii) the Company -shall deliver to you (a) a certificate by the Chief Executive Officer, Chief -Financial Officer or other authorized officer of the Company, which verifies -that the Business Combination has been approved by a vote of the Company’s -shareholders, if a vote is held and (b) joint written instruction signed by the -Company and the Representative with respect to the transfer of the funds held in -the Trust Account, including payment of the Deferred Discount from the Trust -Account (the “Instruction Letter”). You are hereby directed and authorized to -transfer the funds held in the Trust Account immediately upon your receipt of -the Notification and the Instruction Letter, in accordance with the terms of the -Instruction Letter. In the event that certain deposits held in the Trust Account -may not be liquidated by the Consummation Date without penalty, you will notify -the Company in writing of the same and the Company shall direct you as to -whether such funds should remain in the Trust Account and be distributed after -the Consummation Date to the Company. Upon the distribution of all the funds, -net of any payments necessary for reasonable unreimbursed expenses related to -liquidating the Trust Account, your obligations under the Trust Agreement shall -be terminated. -  -In the event that the Business Combination is not consummated on the -Consummation Date described in the notice thereof and we have not notified you -on or before the original Consummation Date of a new Consummation Date, then -upon receipt by the Trustee of written instructions from the Company, the funds -held in the Trust Account shall be reinvested as provided in Section 1(c) of the -Trust Agreement on the business day immediately following the Consummation Date -as set forth in such notice as soon thereafter as possible. - - - --------------------------------------------------------------------------------- - -  -Very truly yours, -      -Altimeter Growth Corp. -        -By: -    -Name: -    -Title: -  - - - -cc: -Citigroup Global Markets Inc. -    -Goldman Sachs & Co. LLC -    -Morgan Stanley &Co. 
LLC -  - - - - --------------------------------------------------------------------------------- - -EXHIBIT B -  -[Letterhead of Company] -  -[Insert date] -  -Continental Stock Transfer & Trust Company -1 State Street, 30th Floor -New York, New York 10004 -Attn: Francis Wolf & Celeste Gonzalez -  -Re: Trust Account - Termination Letter -  -Ladies and Gentlemen: -  -Pursuant to Section 1(i) of the Investment Management Trust Agreement between -Altimeter Growth Corp. (the “Company”) and Continental Stock Transfer & Trust -Company (the “Trustee”), dated as of October [•], 2020 (the “Trust Agreement”), -this is to advise you that the Company has been unable to effect a business -combination with a Target Business (the “Business Combination”) within the time -frame specified in the Company’s Amended and Restated Memorandum and Articles of -Association, as described in the Company’s Prospectus relating to the Offering. -Capitalized terms used but not defined herein shall have the meanings set forth -in the Trust Agreement. -  -In accordance with the terms of the Trust Agreement, we hereby authorize you to -liquidate all of the assets in the Trust Account and to transfer the total -proceeds into the trust operating account at J.P. Morgan Chase Bank, N.A. to -await distribution to the Public Shareholders. The Company has selected -__________ as the effective date for the purpose of determining the Public -Shareholders that will be entitled to receive their share of the liquidation -proceeds. It is acknowledged that no interest will be earned by the Company on -the liquidation proceeds while on deposit in the trust operating account You -agree to be the Paying Agent of record and, in your separate capacity as Paying -Agent, agree to distribute said funds directly to the Company’s Public -Shareholders in accordance with the terms of the Trust Agreement and the Amended -and Restated Memorandum and Articles of Association of the Company. Upon the -distribution of all the funds, net of any payments necessary for reasonable -unreimbursed expenses related to liquidating the Trust Account, your obligations -under the Trust Agreement shall be terminated, except to the extent otherwise -provided in Section 1(j) of the Trust Agreement. -  - -  -Very truly yours, -      -Altimeter Growth Corp. -        -By: -    -Name: -    -Title: -  - - - -cc: -Citigroup Global Markets Inc. -    -Goldman Sachs & Co. LLC -    -Morgan Stanley &Co. LLC -  - - - - --------------------------------------------------------------------------------- - -EXHIBIT C -  -[Letterhead of Company] -  -[Insert date] -  -Continental Stock Transfer & Trust Company -1 State Street, 30th Floor -New York, New York 10004 -Attn: Francis Wolf & Celeste Gonzalez -  -Re: Trust Account - Tax Payment Withdrawal Instruction -  -Dear Mr. Wolf and Ms. Gonzalez: -  -Pursuant to Section 1(j) of the Investment Management Trust Agreement between -Altimeter Growth Corp. (the “Company”) and Continental Stock Transfer & Trust -Company (the “Trustee”), dated as of October [•], 2020 (the “Trust Agreement”), -the Company hereby requests that you deliver to the Company $___________ of the -interest income earned on the Property as of the date hereof. Capitalized terms -used but not defined herein shall have the meanings set forth in the Trust -Agreement. -  -The Company needs such funds to pay for the tax obligations as set forth on the -attached tax return or tax statement. 
In accordance with the terms of the Trust -Agreement, you are hereby directed and authorized to transfer (via wire -transfer) such funds promptly upon your receipt of this letter to the Company’s -operating account at: -  -[WIRE INSTRUCTION INFORMATION] -  - -  -Very truly yours, -      -Altimeter Growth Corp. -        -By: -    -Name: -    -Title: -  - - - -cc: -Citigroup Global Markets Inc. -    -Goldman Sachs & Co. LLC -    -Morgan Stanley &Co. LLC -  - - - - --------------------------------------------------------------------------------- - -EXHIBIT D -  -[Letterhead of Company] -  -[Insert date] -  -Continental Stock Transfer & Trust Company -1 State Street, 30th Floor -New York, New York 10004 -Attn: Francis Wolf & Celeste Gonzalez -  -Re: Trust Account  -. Shareholder Redemption Withdrawal Instruction -  -Dear Mr. Wolf and Ms. Gonzalez: -  -Pursuant to Section 1(k) of the Investment Management Trust Agreement between -Altimeter Growth Corp. (the “Company”) and Continental Stock Transfer & Trust -Company (the “Trustee”), dated as of October [•], 2020 (the “Trust Agreement”), -the Company hereby requests that you deliver to the Company’s shareholders -$___________ of the principal and interest income earned on the Property as of -the date hereof. Capitalized terms used but not defined herein shall have the -meanings set forth in the Trust Agreement. -  -Pursuant to Section 1(k) of the Trust Agreement, this is to advise you that the -Company has sought an Amendment. Accordingly, in accordance with the terms of -the Trust Agreement, we hereby authorize you to liquidate a sufficient portion -of the Trust Account and to transfer $[•] of the proceeds of the Trust Account -to the trust operating account at J.P. Morgan Chase Bank, N.A. for distribution -to the shareholders that have requested redemption of their shares in connection -with such Amendment. -  - -  -Very truly yours, -      -Altimeter Growth Corp. -        -By: -    -Name: -    -Title: -  - - - -cc: -Citigroup Global Markets Inc. -    -Goldman Sachs & Co. LLC -    -Morgan Stanley &Co. LLC -  - - - - - - ---------------------------------------------------------------------------------"""]).launch(share=True) \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Bukharishariffullfreedownloadinbanglapdf.md b/spaces/usbethFlerru/sovits-modelsV2/example/Bukharishariffullfreedownloadinbanglapdf.md deleted file mode 100644 index ed5cb94914d2d9a46974251516f5a5f37aec76a7..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Bukharishariffullfreedownloadinbanglapdf.md +++ /dev/null @@ -1,34 +0,0 @@ -
          -

          Bukhari Sharif: The Most Authentic Collection of Hadiths in Bangla

          -

Bukhari Sharif is a book of hadiths, the sayings and deeds of Prophet Muhammad (peace be upon him), compiled by Imam Bukhari (810-870 CE), a renowned scholar of Islam. It is regarded by Muslims as the most authentic and reliable source of hadiths, second only to the Quran. Bukhari Sharif contains 7053 hadiths in 97 chapters, covering various aspects of Islamic faith and practice.

          -

          bukharishariffullfreedownloadinbanglapdf


DOWNLOAD: https://urlcod.com/2uyUzh



          -

          If you are looking for a free download of Bukhari Sharif in Bangla PDF format, you can find it online from various sources. However, one of the best and most reliable sources is the Islamic Foundation Bangladesh, which has published a complete and accurate translation of Bukhari Sharif in Bangla with Arabic text and commentary. You can download all 10 parts of Bukhari Sharif in Bangla PDF from their website or from the links below:

          - -

          Reading Bukhari Sharif in Bangla will help you to understand the Quran better and to follow the Sunnah of the Prophet (peace be upon him) more closely. You will also learn about the history, culture, and wisdom of Islam from the authentic narrations of the Prophet (peace be upon him) and his companions. Bukhari Sharif is a treasure of knowledge and guidance for every Muslim who wants to increase their faith and practice.


          -

Bukhari Sharif is not the only book of hadiths in Islam. There are other collections that are also highly respected and widely used by Muslims. The most famous of these, together with Bukhari Sharif itself, make up the six canonical books of hadiths, known as the Kutub al-Sittah. The other major collections are:

          -
            -
• Sahih Muslim, compiled by Imam Muslim (821-875 CE)
• Sunan al-Sughra, compiled by Imam al-Nasa'i (829-915 CE)
• Sunan Abu Dawud, compiled by Imam Abu Dawud (817-889 CE)
• Sunan al-Tirmidhi, compiled by Imam al-Tirmidhi (824-892 CE)
• Sunan Ibn Majah, compiled by Imam Ibn Majah (824-887 CE)
• Al-Muwatta, compiled by Imam Malik (711-795 CE)
          -

These books of hadiths contain thousands of narrations from the Prophet (peace be upon him) and his companions on various topics and issues. They also provide explanations and interpretations of the Quran and the Sunnah. They are considered the second source of Islamic law and guidance after the Quran.

          -

          However, not all hadiths are authentic and reliable. Some hadiths are weak, fabricated, or contradictory. Therefore, Muslims need to be careful and discerning when reading and using hadiths. They need to check the chain of narrators (isnad) and the text (matn) of each hadith to verify its authenticity and accuracy. They also need to consult the experts and scholars of hadiths (muhaddithin) who have studied and classified the hadiths according to their degree of reliability.

          -

          -

          One of the greatest muhaddithin in history was Imam Bukhari himself. He was a genius and a master of hadiths. He memorized hundreds of thousands of hadiths and their chains of narrators. He traveled extensively to collect and verify the hadiths from various sources. He applied strict criteria and methods to select only the most authentic and sound hadiths for his book. He also wrote several other books on hadiths, such as Al-Adab al-Mufrad, Al-Tarikh al-Saghir, and Kitab al-Kuna.

          -

Imam Bukhari was a humble and pious person who devoted his life to the service of Islam and hadiths. He faced many hardships and challenges in his journey, but he never gave up or compromised his principles. He died in 870 CE near Samarkand (in present-day Uzbekistan), leaving behind a legacy of knowledge and excellence that Muslims admire and follow to this day.

          -
          -
          \ No newline at end of file diff --git a/spaces/usbethFlerru/sovits-modelsV2/example/Candydoll.tv Anjelika-L Set 15 Vidl.md b/spaces/usbethFlerru/sovits-modelsV2/example/Candydoll.tv Anjelika-L Set 15 Vidl.md deleted file mode 100644 index 86c17eda9de9fa4124a4ab604d49912c2e37492e..0000000000000000000000000000000000000000 --- a/spaces/usbethFlerru/sovits-modelsV2/example/Candydoll.tv Anjelika-L Set 15 Vidl.md +++ /dev/null @@ -1,6 +0,0 @@ -

          Candydoll.tv Anjelika-L Set 15 Vidl





          -
          -
          -

          diff --git a/spaces/valhalla/glide-text2im/glide_text2im/tokenizer/simple_tokenizer.py b/spaces/valhalla/glide-text2im/glide_text2im/tokenizer/simple_tokenizer.py deleted file mode 100644 index c84cc8fb3adff99225d3e3a75b2a3d81564adcef..0000000000000000000000000000000000000000 --- a/spaces/valhalla/glide-text2im/glide_text2im/tokenizer/simple_tokenizer.py +++ /dev/null @@ -1,163 +0,0 @@ -""" -Copied from: https://github.com/openai/CLIP/blob/573315e83f07b53a61ff5098757e8fc885f1703e/clip/simple_tokenizer.py -""" - -import gzip -import html -import os -from functools import lru_cache -from typing import List, Tuple - -import ftfy -import regex as re - - -@lru_cache() -def default_bpe(): - return os.path.join(os.path.dirname(os.path.abspath(__file__)), "bpe_simple_vocab_16e6.txt.gz") - - -@lru_cache() -def bytes_to_unicode(): - """ - Returns list of utf-8 byte and a corresponding list of unicode strings. - The reversible bpe codes work on unicode strings. - This means you need a large # of unicode characters in your vocab if you want to avoid UNKs. - When you're at something like a 10B token dataset you end up needing around 5K for decent coverage. - This is a signficant percentage of your normal, say, 32K bpe vocab. - To avoid that, we want lookup tables between utf-8 bytes and unicode strings. - And avoids mapping to whitespace/control characters the bpe code barfs on. - """ - bs = ( - list(range(ord("!"), ord("~") + 1)) - + list(range(ord("¡"), ord("¬") + 1)) - + list(range(ord("®"), ord("ÿ") + 1)) - ) - cs = bs[:] - n = 0 - for b in range(2 ** 8): - if b not in bs: - bs.append(b) - cs.append(2 ** 8 + n) - n += 1 - cs = [chr(n) for n in cs] - return dict(zip(bs, cs)) - - -def get_pairs(word): - """Return set of symbol pairs in a word. - Word is represented as tuple of symbols (symbols being variable-length strings). 
- """ - pairs = set() - prev_char = word[0] - for char in word[1:]: - pairs.add((prev_char, char)) - prev_char = char - return pairs - - -def basic_clean(text): - text = ftfy.fix_text(text) - text = html.unescape(html.unescape(text)) - return text.strip() - - -def whitespace_clean(text): - text = re.sub(r"\s+", " ", text) - text = text.strip() - return text - - -class SimpleTokenizer(object): - def __init__(self, bpe_path: str = default_bpe()): - self.byte_encoder = bytes_to_unicode() - self.byte_decoder = {v: k for k, v in self.byte_encoder.items()} - merges = gzip.open(bpe_path).read().decode("utf-8").split("\n") - merges = merges[1 : 49152 - 256 - 2 + 1] - merges = [tuple(merge.split()) for merge in merges] - vocab = list(bytes_to_unicode().values()) - vocab = vocab + [v + "" for v in vocab] - for merge in merges: - vocab.append("".join(merge)) - vocab.extend(["<|startoftext|>", "<|endoftext|>"]) - self.encoder = dict(zip(vocab, range(len(vocab)))) - self.decoder = {v: k for k, v in self.encoder.items()} - self.bpe_ranks = dict(zip(merges, range(len(merges)))) - self.cache = {"<|startoftext|>": "<|startoftext|>", "<|endoftext|>": "<|endoftext|>"} - self.pat = re.compile( - r"""<\|startoftext\|>|<\|endoftext\|>|'s|'t|'re|'ve|'m|'ll|'d|[\p{L}]+|[\p{N}]|[^\s\p{L}\p{N}]+""", - re.IGNORECASE, - ) - - @property - def start_token(self): - return self.encoder["<|startoftext|>"] - - @property - def end_token(self): - return self.encoder["<|endoftext|>"] - - def padded_tokens_and_len(self, tokens: List[int], text_ctx: int) -> Tuple[List[int], int]: - tokens = [self.start_token] + tokens[: text_ctx - 2] + [self.end_token] - text_len = len(tokens) - padding = text_ctx - len(tokens) - padded_tokens = tokens + [0] * padding - return padded_tokens, text_len - - def bpe(self, token): - if token in self.cache: - return self.cache[token] - word = tuple(token[:-1]) + (token[-1] + "",) - pairs = get_pairs(word) - - if not pairs: - return token + "" - - while True: - bigram = min(pairs, key=lambda pair: self.bpe_ranks.get(pair, float("inf"))) - if bigram not in self.bpe_ranks: - break - first, second = bigram - new_word = [] - i = 0 - while i < len(word): - try: - j = word.index(first, i) - new_word.extend(word[i:j]) - i = j - except: # pylint: disable=bare-except - new_word.extend(word[i:]) - break - - if word[i] == first and i < len(word) - 1 and word[i + 1] == second: - new_word.append(first + second) - i += 2 - else: - new_word.append(word[i]) - i += 1 - new_word = tuple(new_word) - word = new_word - if len(word) == 1: - break - else: - pairs = get_pairs(word) - word = " ".join(word) - self.cache[token] = word - return word - - def encode(self, text): - bpe_tokens = [] - text = whitespace_clean(basic_clean(text)).lower() - for token in re.findall(self.pat, text): - token = "".join(self.byte_encoder[b] for b in token.encode("utf-8")) - bpe_tokens.extend(self.encoder[bpe_token] for bpe_token in self.bpe(token).split(" ")) - return bpe_tokens - - def decode(self, tokens): - text = "".join([self.decoder[token] for token in tokens]) - text = ( - bytearray([self.byte_decoder[c] for c in text]) - .decode("utf-8", errors="replace") - .replace("", " ") - ) - return text diff --git a/spaces/video-p2p-library/Video-P2P-Demo/utils.py b/spaces/video-p2p-library/Video-P2P-Demo/utils.py deleted file mode 100644 index 379efd816d5f22620b8b08669ee5747e8fc4ea24..0000000000000000000000000000000000000000 --- a/spaces/video-p2p-library/Video-P2P-Demo/utils.py +++ /dev/null @@ -1,67 +0,0 @@ -from __future__ import annotations 
- -import pathlib - - -def find_exp_dirs() -> list[str]: - repo_dir = pathlib.Path(__file__).parent - exp_root_dir = repo_dir / 'experiments' - if not exp_root_dir.exists(): - return [] - exp_dirs = sorted(exp_root_dir.glob('*')) - exp_dirs = [ - exp_dir for exp_dir in exp_dirs - if (exp_dir / 'model_index.json').exists() - ] - return [path.relative_to(repo_dir).as_posix() for path in exp_dirs] - - -def save_model_card( - save_dir: pathlib.Path, - base_model: str, - training_prompt: str, - test_prompt: str = '', - test_image_dir: str = '', -) -> None: - image_str = '' - if test_prompt and test_image_dir: - image_paths = sorted((save_dir / test_image_dir).glob('*.gif')) - if image_paths: - image_path = image_paths[-1] - rel_path = image_path.relative_to(save_dir) - image_str = f'''## Samples -Test prompt: {test_prompt} - -![{image_path.stem}]({rel_path})''' - - model_card = f'''--- -license: creativeml-openrail-m -base_model: {base_model} -training_prompt: {training_prompt} -tags: -- stable-diffusion -- stable-diffusion-diffusers -- text-to-image -- diffusers -- text-to-video -- tune-a-video -- video-p2p -inference: false ---- - -# Video-P2P - {save_dir.name} - -## Model description -- Base model: [{base_model}](https://huggingface.co/{base_model}) -- Training prompt: {training_prompt} - -{image_str} - -## Related papers: -- [Video-P2P](https://arxiv.org/abs/2303.04761): Video editing with cross-attention control -- [Tune-A-Video](https://arxiv.org/abs/2212.11565): One-Shot Tuning of Image Diffusion Models for Text-to-Video Generation -- [Stable-Diffusion](https://arxiv.org/abs/2112.10752): High-Resolution Image Synthesis with Latent Diffusion Models -''' - - with open(save_dir / 'README.md', 'w') as f: - f.write(model_card) diff --git a/spaces/vinthony/SadTalker/src/utils/safetensor_helper.py b/spaces/vinthony/SadTalker/src/utils/safetensor_helper.py deleted file mode 100644 index 3cdbdd21e4ed656dfe2d31a57360afb3e96480b3..0000000000000000000000000000000000000000 --- a/spaces/vinthony/SadTalker/src/utils/safetensor_helper.py +++ /dev/null @@ -1,8 +0,0 @@ - - -def load_x_from_safetensor(checkpoint, key): - x_generator = {} - for k,v in checkpoint.items(): - if key in k: - x_generator[k.replace(key+'.', '')] = v - return x_generator \ No newline at end of file diff --git a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/decode_heads/ema_head.py b/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/decode_heads/ema_head.py deleted file mode 100644 index 12267cb40569d2b5a4a2955a6dc2671377ff5e0a..0000000000000000000000000000000000000000 --- a/spaces/vumichien/canvas_controlnet/annotator/uniformer/mmseg/models/decode_heads/ema_head.py +++ /dev/null @@ -1,168 +0,0 @@ -import math - -import torch -import torch.distributed as dist -import torch.nn as nn -import torch.nn.functional as F -from annotator.uniformer.mmcv.cnn import ConvModule - -from ..builder import HEADS -from .decode_head import BaseDecodeHead - - -def reduce_mean(tensor): - """Reduce mean when distributed training.""" - if not (dist.is_available() and dist.is_initialized()): - return tensor - tensor = tensor.clone() - dist.all_reduce(tensor.div_(dist.get_world_size()), op=dist.ReduceOp.SUM) - return tensor - - -class EMAModule(nn.Module): - """Expectation Maximization Attention Module used in EMANet. - - Args: - channels (int): Channels of the whole module. - num_bases (int): Number of bases. - num_stages (int): Number of the EM iterations. 
- """ - - def __init__(self, channels, num_bases, num_stages, momentum): - super(EMAModule, self).__init__() - assert num_stages >= 1, 'num_stages must be at least 1!' - self.num_bases = num_bases - self.num_stages = num_stages - self.momentum = momentum - - bases = torch.zeros(1, channels, self.num_bases) - bases.normal_(0, math.sqrt(2. / self.num_bases)) - # [1, channels, num_bases] - bases = F.normalize(bases, dim=1, p=2) - self.register_buffer('bases', bases) - - def forward(self, feats): - """Forward function.""" - batch_size, channels, height, width = feats.size() - # [batch_size, channels, height*width] - feats = feats.view(batch_size, channels, height * width) - # [batch_size, channels, num_bases] - bases = self.bases.repeat(batch_size, 1, 1) - - with torch.no_grad(): - for i in range(self.num_stages): - # [batch_size, height*width, num_bases] - attention = torch.einsum('bcn,bck->bnk', feats, bases) - attention = F.softmax(attention, dim=2) - # l1 norm - attention_normed = F.normalize(attention, dim=1, p=1) - # [batch_size, channels, num_bases] - bases = torch.einsum('bcn,bnk->bck', feats, attention_normed) - # l2 norm - bases = F.normalize(bases, dim=1, p=2) - - feats_recon = torch.einsum('bck,bnk->bcn', bases, attention) - feats_recon = feats_recon.view(batch_size, channels, height, width) - - if self.training: - bases = bases.mean(dim=0, keepdim=True) - bases = reduce_mean(bases) - # l2 norm - bases = F.normalize(bases, dim=1, p=2) - self.bases = (1 - - self.momentum) * self.bases + self.momentum * bases - - return feats_recon - - -@HEADS.register_module() -class EMAHead(BaseDecodeHead): - """Expectation Maximization Attention Networks for Semantic Segmentation. - - This head is the implementation of `EMANet - `_. - - Args: - ema_channels (int): EMA module channels - num_bases (int): Number of bases. - num_stages (int): Number of the EM iterations. - concat_input (bool): Whether concat the input and output of convs - before classification layer. Default: True - momentum (float): Momentum to update the base. Default: 0.1. 
- """ - - def __init__(self, - ema_channels, - num_bases, - num_stages, - concat_input=True, - momentum=0.1, - **kwargs): - super(EMAHead, self).__init__(**kwargs) - self.ema_channels = ema_channels - self.num_bases = num_bases - self.num_stages = num_stages - self.concat_input = concat_input - self.momentum = momentum - self.ema_module = EMAModule(self.ema_channels, self.num_bases, - self.num_stages, self.momentum) - - self.ema_in_conv = ConvModule( - self.in_channels, - self.ema_channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - # project (0, inf) -> (-inf, inf) - self.ema_mid_conv = ConvModule( - self.ema_channels, - self.ema_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=None, - act_cfg=None) - for param in self.ema_mid_conv.parameters(): - param.requires_grad = False - - self.ema_out_conv = ConvModule( - self.ema_channels, - self.ema_channels, - 1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=None) - self.bottleneck = ConvModule( - self.ema_channels, - self.channels, - 3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - if self.concat_input: - self.conv_cat = ConvModule( - self.in_channels + self.channels, - self.channels, - kernel_size=3, - padding=1, - conv_cfg=self.conv_cfg, - norm_cfg=self.norm_cfg, - act_cfg=self.act_cfg) - - def forward(self, inputs): - """Forward function.""" - x = self._transform_inputs(inputs) - feats = self.ema_in_conv(x) - identity = feats - feats = self.ema_mid_conv(feats) - recon = self.ema_module(feats) - recon = F.relu(recon, inplace=True) - recon = self.ema_out_conv(recon) - output = F.relu(identity + recon, inplace=True) - output = self.bottleneck(output) - if self.concat_input: - output = self.conv_cat(torch.cat([x, output], dim=1)) - output = self.cls_seg(output) - return output diff --git a/spaces/warrenw/simple-gpt-interface-2/README.md b/spaces/warrenw/simple-gpt-interface-2/README.md deleted file mode 100644 index 8360053aca0c479aacf211a2ad49ecb792eec7df..0000000000000000000000000000000000000000 --- a/spaces/warrenw/simple-gpt-interface-2/README.md +++ /dev/null @@ -1,14 +0,0 @@ ---- -title: Simple Gpt Interface -emoji: 📉 -colorFrom: gray -colorTo: indigo -sdk: streamlit -sdk_version: 1.21.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: warrenw/simple-gpt-interface ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/weiwandaixu/ChatGPT3.5/chatgpt - macOS.command b/spaces/weiwandaixu/ChatGPT3.5/chatgpt - macOS.command deleted file mode 100644 index fa015edca9e6916f24394813ce8ba77d2072e296..0000000000000000000000000000000000000000 --- a/spaces/weiwandaixu/ChatGPT3.5/chatgpt - macOS.command +++ /dev/null @@ -1,7 +0,0 @@ -#!/bin/bash -echo Opening ChuanhuChatGPT... -cd "$(dirname "${BASH_SOURCE[0]}")" -nohup python3 ChuanhuChatbot.py >/dev/null 2>&1 & -sleep 5 -open http://127.0.0.1:7860 -echo Finished opening ChuanhuChatGPT (http://127.0.0.1:7860/). If you kill ChuanhuChatbot, Use "pkill -f 'ChuanhuChatbot'" command in terminal. 
\ No newline at end of file diff --git a/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py b/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py deleted file mode 100644 index 1c66194deb5dd370e797e57e2712f44303e568cc..0000000000000000000000000000000000000000 --- a/spaces/wendys-llc/panoptic-segment-anything/GroundingDINO/groundingdino/models/GroundingDINO/backbone/swin_transformer.py +++ /dev/null @@ -1,802 +0,0 @@ -# ------------------------------------------------------------------------ -# Grounding DINO -# url: https://github.com/IDEA-Research/GroundingDINO -# Copyright (c) 2023 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# ------------------------------------------------------------------------ -# DINO -# Copyright (c) 2022 IDEA. All Rights Reserved. -# Licensed under the Apache License, Version 2.0 [see LICENSE for details] -# -------------------------------------------------------- -# modified from https://github.com/SwinTransformer/Swin-Transformer-Object-Detection/blob/master/mmdet/models/backbones/swin_transformer.py -# -------------------------------------------------------- - -import numpy as np -import torch -import torch.nn as nn -import torch.nn.functional as F -import torch.utils.checkpoint as checkpoint -from timm.models.layers import DropPath, to_2tuple, trunc_normal_ - -from groundingdino.util.misc import NestedTensor - - -class Mlp(nn.Module): - """Multilayer perceptron.""" - - def __init__( - self, in_features, hidden_features=None, out_features=None, act_layer=nn.GELU, drop=0.0 - ): - super().__init__() - out_features = out_features or in_features - hidden_features = hidden_features or in_features - self.fc1 = nn.Linear(in_features, hidden_features) - self.act = act_layer() - self.fc2 = nn.Linear(hidden_features, out_features) - self.drop = nn.Dropout(drop) - - def forward(self, x): - x = self.fc1(x) - x = self.act(x) - x = self.drop(x) - x = self.fc2(x) - x = self.drop(x) - return x - - -def window_partition(x, window_size): - """ - Args: - x: (B, H, W, C) - window_size (int): window size - Returns: - windows: (num_windows*B, window_size, window_size, C) - """ - B, H, W, C = x.shape - x = x.view(B, H // window_size, window_size, W // window_size, window_size, C) - windows = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(-1, window_size, window_size, C) - return windows - - -def window_reverse(windows, window_size, H, W): - """ - Args: - windows: (num_windows*B, window_size, window_size, C) - window_size (int): Window size - H (int): Height of image - W (int): Width of image - Returns: - x: (B, H, W, C) - """ - B = int(windows.shape[0] / (H * W / window_size / window_size)) - x = windows.view(B, H // window_size, W // window_size, window_size, window_size, -1) - x = x.permute(0, 1, 3, 2, 4, 5).contiguous().view(B, H, W, -1) - return x - - -class WindowAttention(nn.Module): - """Window based multi-head self attention (W-MSA) module with relative position bias. - It supports both of shifted and non-shifted window. - Args: - dim (int): Number of input channels. - window_size (tuple[int]): The height and width of the window. - num_heads (int): Number of attention heads. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set - attn_drop (float, optional): Dropout ratio of attention weight. Default: 0.0 - proj_drop (float, optional): Dropout ratio of output. Default: 0.0 - """ - - def __init__( - self, - dim, - window_size, - num_heads, - qkv_bias=True, - qk_scale=None, - attn_drop=0.0, - proj_drop=0.0, - ): - - super().__init__() - self.dim = dim - self.window_size = window_size # Wh, Ww - self.num_heads = num_heads - head_dim = dim // num_heads - self.scale = qk_scale or head_dim**-0.5 - - # define a parameter table of relative position bias - self.relative_position_bias_table = nn.Parameter( - torch.zeros((2 * window_size[0] - 1) * (2 * window_size[1] - 1), num_heads) - ) # 2*Wh-1 * 2*Ww-1, nH - - # get pair-wise relative position index for each token inside the window - coords_h = torch.arange(self.window_size[0]) - coords_w = torch.arange(self.window_size[1]) - coords = torch.stack(torch.meshgrid([coords_h, coords_w])) # 2, Wh, Ww - coords_flatten = torch.flatten(coords, 1) # 2, Wh*Ww - relative_coords = coords_flatten[:, :, None] - coords_flatten[:, None, :] # 2, Wh*Ww, Wh*Ww - relative_coords = relative_coords.permute(1, 2, 0).contiguous() # Wh*Ww, Wh*Ww, 2 - relative_coords[:, :, 0] += self.window_size[0] - 1 # shift to start from 0 - relative_coords[:, :, 1] += self.window_size[1] - 1 - relative_coords[:, :, 0] *= 2 * self.window_size[1] - 1 - relative_position_index = relative_coords.sum(-1) # Wh*Ww, Wh*Ww - self.register_buffer("relative_position_index", relative_position_index) - - self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias) - self.attn_drop = nn.Dropout(attn_drop) - self.proj = nn.Linear(dim, dim) - self.proj_drop = nn.Dropout(proj_drop) - - trunc_normal_(self.relative_position_bias_table, std=0.02) - self.softmax = nn.Softmax(dim=-1) - - def forward(self, x, mask=None): - """Forward function. - Args: - x: input features with shape of (num_windows*B, N, C) - mask: (0/-inf) mask with shape of (num_windows, Wh*Ww, Wh*Ww) or None - """ - B_, N, C = x.shape - qkv = ( - self.qkv(x) - .reshape(B_, N, 3, self.num_heads, C // self.num_heads) - .permute(2, 0, 3, 1, 4) - ) - q, k, v = qkv[0], qkv[1], qkv[2] # make torchscript happy (cannot use tensor as tuple) - - q = q * self.scale - attn = q @ k.transpose(-2, -1) - - relative_position_bias = self.relative_position_bias_table[ - self.relative_position_index.view(-1) - ].view( - self.window_size[0] * self.window_size[1], self.window_size[0] * self.window_size[1], -1 - ) # Wh*Ww,Wh*Ww,nH - relative_position_bias = relative_position_bias.permute( - 2, 0, 1 - ).contiguous() # nH, Wh*Ww, Wh*Ww - attn = attn + relative_position_bias.unsqueeze(0) - - if mask is not None: - nW = mask.shape[0] - attn = attn.view(B_ // nW, nW, self.num_heads, N, N) + mask.unsqueeze(1).unsqueeze(0) - attn = attn.view(-1, self.num_heads, N, N) - attn = self.softmax(attn) - else: - attn = self.softmax(attn) - - attn = self.attn_drop(attn) - - x = (attn @ v).transpose(1, 2).reshape(B_, N, C) - x = self.proj(x) - x = self.proj_drop(x) - return x - - -class SwinTransformerBlock(nn.Module): - """Swin Transformer Block. - Args: - dim (int): Number of input channels. - num_heads (int): Number of attention heads. - window_size (int): Window size. - shift_size (int): Shift size for SW-MSA. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. 
Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float, optional): Stochastic depth rate. Default: 0.0 - act_layer (nn.Module, optional): Activation layer. Default: nn.GELU - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - """ - - def __init__( - self, - dim, - num_heads, - window_size=7, - shift_size=0, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - act_layer=nn.GELU, - norm_layer=nn.LayerNorm, - ): - super().__init__() - self.dim = dim - self.num_heads = num_heads - self.window_size = window_size - self.shift_size = shift_size - self.mlp_ratio = mlp_ratio - assert 0 <= self.shift_size < self.window_size, "shift_size must in 0-window_size" - - self.norm1 = norm_layer(dim) - self.attn = WindowAttention( - dim, - window_size=to_2tuple(self.window_size), - num_heads=num_heads, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - attn_drop=attn_drop, - proj_drop=drop, - ) - - self.drop_path = DropPath(drop_path) if drop_path > 0.0 else nn.Identity() - self.norm2 = norm_layer(dim) - mlp_hidden_dim = int(dim * mlp_ratio) - self.mlp = Mlp( - in_features=dim, hidden_features=mlp_hidden_dim, act_layer=act_layer, drop=drop - ) - - self.H = None - self.W = None - - def forward(self, x, mask_matrix): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - mask_matrix: Attention mask for cyclic shift. - """ - B, L, C = x.shape - H, W = self.H, self.W - assert L == H * W, "input feature has wrong size" - - shortcut = x - x = self.norm1(x) - x = x.view(B, H, W, C) - - # pad feature maps to multiples of window size - pad_l = pad_t = 0 - pad_r = (self.window_size - W % self.window_size) % self.window_size - pad_b = (self.window_size - H % self.window_size) % self.window_size - x = F.pad(x, (0, 0, pad_l, pad_r, pad_t, pad_b)) - _, Hp, Wp, _ = x.shape - - # cyclic shift - if self.shift_size > 0: - shifted_x = torch.roll(x, shifts=(-self.shift_size, -self.shift_size), dims=(1, 2)) - attn_mask = mask_matrix - else: - shifted_x = x - attn_mask = None - - # partition windows - x_windows = window_partition( - shifted_x, self.window_size - ) # nW*B, window_size, window_size, C - x_windows = x_windows.view( - -1, self.window_size * self.window_size, C - ) # nW*B, window_size*window_size, C - - # W-MSA/SW-MSA - attn_windows = self.attn(x_windows, mask=attn_mask) # nW*B, window_size*window_size, C - - # merge windows - attn_windows = attn_windows.view(-1, self.window_size, self.window_size, C) - shifted_x = window_reverse(attn_windows, self.window_size, Hp, Wp) # B H' W' C - - # reverse cyclic shift - if self.shift_size > 0: - x = torch.roll(shifted_x, shifts=(self.shift_size, self.shift_size), dims=(1, 2)) - else: - x = shifted_x - - if pad_r > 0 or pad_b > 0: - x = x[:, :H, :W, :].contiguous() - - x = x.view(B, H * W, C) - - # FFN - x = shortcut + self.drop_path(x) - x = x + self.drop_path(self.mlp(self.norm2(x))) - - return x - - -class PatchMerging(nn.Module): - """Patch Merging Layer - Args: - dim (int): Number of input channels. - norm_layer (nn.Module, optional): Normalization layer. 
Default: nn.LayerNorm - """ - - def __init__(self, dim, norm_layer=nn.LayerNorm): - super().__init__() - self.dim = dim - self.reduction = nn.Linear(4 * dim, 2 * dim, bias=False) - self.norm = norm_layer(4 * dim) - - def forward(self, x, H, W): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. - """ - B, L, C = x.shape - assert L == H * W, "input feature has wrong size" - - x = x.view(B, H, W, C) - - # padding - pad_input = (H % 2 == 1) or (W % 2 == 1) - if pad_input: - x = F.pad(x, (0, 0, 0, W % 2, 0, H % 2)) - - x0 = x[:, 0::2, 0::2, :] # B H/2 W/2 C - x1 = x[:, 1::2, 0::2, :] # B H/2 W/2 C - x2 = x[:, 0::2, 1::2, :] # B H/2 W/2 C - x3 = x[:, 1::2, 1::2, :] # B H/2 W/2 C - x = torch.cat([x0, x1, x2, x3], -1) # B H/2 W/2 4*C - x = x.view(B, -1, 4 * C) # B H/2*W/2 4*C - - x = self.norm(x) - x = self.reduction(x) - - return x - - -class BasicLayer(nn.Module): - """A basic Swin Transformer layer for one stage. - Args: - dim (int): Number of feature channels - depth (int): Depths of this stage. - num_heads (int): Number of attention head. - window_size (int): Local window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool, optional): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float | None, optional): Override default qk scale of head_dim ** -0.5 if set. - drop (float, optional): Dropout rate. Default: 0.0 - attn_drop (float, optional): Attention dropout rate. Default: 0.0 - drop_path (float | tuple[float], optional): Stochastic depth rate. Default: 0.0 - norm_layer (nn.Module, optional): Normalization layer. Default: nn.LayerNorm - downsample (nn.Module | None, optional): Downsample layer at the end of the layer. Default: None - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - """ - - def __init__( - self, - dim, - depth, - num_heads, - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop=0.0, - attn_drop=0.0, - drop_path=0.0, - norm_layer=nn.LayerNorm, - downsample=None, - use_checkpoint=False, - ): - super().__init__() - self.window_size = window_size - self.shift_size = window_size // 2 - self.depth = depth - self.use_checkpoint = use_checkpoint - - # build blocks - self.blocks = nn.ModuleList( - [ - SwinTransformerBlock( - dim=dim, - num_heads=num_heads, - window_size=window_size, - shift_size=0 if (i % 2 == 0) else window_size // 2, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop, - attn_drop=attn_drop, - drop_path=drop_path[i] if isinstance(drop_path, list) else drop_path, - norm_layer=norm_layer, - ) - for i in range(depth) - ] - ) - - # patch merging layer - if downsample is not None: - self.downsample = downsample(dim=dim, norm_layer=norm_layer) - else: - self.downsample = None - - def forward(self, x, H, W): - """Forward function. - Args: - x: Input feature, tensor size (B, H*W, C). - H, W: Spatial resolution of the input feature. 
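        Returns:
            A 6-tuple ``(x_out, H, W, x_down, Wh, Ww)``: the stage output and its
            resolution, followed by the (optionally) downsampled features and
            their resolution; when ``downsample`` is ``None`` the last three
            entries simply repeat the first three.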
- """ - - # calculate attention mask for SW-MSA - Hp = int(np.ceil(H / self.window_size)) * self.window_size - Wp = int(np.ceil(W / self.window_size)) * self.window_size - img_mask = torch.zeros((1, Hp, Wp, 1), device=x.device) # 1 Hp Wp 1 - h_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - w_slices = ( - slice(0, -self.window_size), - slice(-self.window_size, -self.shift_size), - slice(-self.shift_size, None), - ) - cnt = 0 - for h in h_slices: - for w in w_slices: - img_mask[:, h, w, :] = cnt - cnt += 1 - - mask_windows = window_partition( - img_mask, self.window_size - ) # nW, window_size, window_size, 1 - mask_windows = mask_windows.view(-1, self.window_size * self.window_size) - attn_mask = mask_windows.unsqueeze(1) - mask_windows.unsqueeze(2) - attn_mask = attn_mask.masked_fill(attn_mask != 0, float(-100.0)).masked_fill( - attn_mask == 0, float(0.0) - ) - - for blk in self.blocks: - blk.H, blk.W = H, W - if self.use_checkpoint: - x = checkpoint.checkpoint(blk, x, attn_mask) - else: - x = blk(x, attn_mask) - if self.downsample is not None: - x_down = self.downsample(x, H, W) - Wh, Ww = (H + 1) // 2, (W + 1) // 2 - return x, H, W, x_down, Wh, Ww - else: - return x, H, W, x, H, W - - -class PatchEmbed(nn.Module): - """Image to Patch Embedding - Args: - patch_size (int): Patch token size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - norm_layer (nn.Module, optional): Normalization layer. Default: None - """ - - def __init__(self, patch_size=4, in_chans=3, embed_dim=96, norm_layer=None): - super().__init__() - patch_size = to_2tuple(patch_size) - self.patch_size = patch_size - - self.in_chans = in_chans - self.embed_dim = embed_dim - - self.proj = nn.Conv2d(in_chans, embed_dim, kernel_size=patch_size, stride=patch_size) - if norm_layer is not None: - self.norm = norm_layer(embed_dim) - else: - self.norm = None - - def forward(self, x): - """Forward function.""" - # padding - _, _, H, W = x.size() - if W % self.patch_size[1] != 0: - x = F.pad(x, (0, self.patch_size[1] - W % self.patch_size[1])) - if H % self.patch_size[0] != 0: - x = F.pad(x, (0, 0, 0, self.patch_size[0] - H % self.patch_size[0])) - - x = self.proj(x) # B C Wh Ww - if self.norm is not None: - Wh, Ww = x.size(2), x.size(3) - x = x.flatten(2).transpose(1, 2) - x = self.norm(x) - x = x.transpose(1, 2).view(-1, self.embed_dim, Wh, Ww) - - return x - - -class SwinTransformer(nn.Module): - """Swin Transformer backbone. - A PyTorch impl of : `Swin Transformer: Hierarchical Vision Transformer using Shifted Windows` - - https://arxiv.org/pdf/2103.14030 - Args: - pretrain_img_size (int): Input image size for training the pretrained model, - used in absolute postion embedding. Default 224. - patch_size (int | tuple(int)): Patch size. Default: 4. - in_chans (int): Number of input image channels. Default: 3. - embed_dim (int): Number of linear projection output channels. Default: 96. - depths (tuple[int]): Depths of each Swin Transformer stage. - num_heads (tuple[int]): Number of attention head of each stage. - window_size (int): Window size. Default: 7. - mlp_ratio (float): Ratio of mlp hidden dim to embedding dim. Default: 4. - qkv_bias (bool): If True, add a learnable bias to query, key, value. Default: True - qk_scale (float): Override default qk scale of head_dim ** -0.5 if set. - drop_rate (float): Dropout rate. 
- attn_drop_rate (float): Attention dropout rate. Default: 0. - drop_path_rate (float): Stochastic depth rate. Default: 0.2. - norm_layer (nn.Module): Normalization layer. Default: nn.LayerNorm. - ape (bool): If True, add absolute position embedding to the patch embedding. Default: False. - patch_norm (bool): If True, add normalization after patch embedding. Default: True. - out_indices (Sequence[int]): Output from which stages. - frozen_stages (int): Stages to be frozen (stop grad and set eval mode). - -1 means not freezing any parameters. - use_checkpoint (bool): Whether to use checkpointing to save memory. Default: False. - dilation (bool): if True, the output size if 16x downsample, ow 32x downsample. - """ - - def __init__( - self, - pretrain_img_size=224, - patch_size=4, - in_chans=3, - embed_dim=96, - depths=[2, 2, 6, 2], - num_heads=[3, 6, 12, 24], - window_size=7, - mlp_ratio=4.0, - qkv_bias=True, - qk_scale=None, - drop_rate=0.0, - attn_drop_rate=0.0, - drop_path_rate=0.2, - norm_layer=nn.LayerNorm, - ape=False, - patch_norm=True, - out_indices=(0, 1, 2, 3), - frozen_stages=-1, - dilation=False, - use_checkpoint=False, - ): - super().__init__() - - self.pretrain_img_size = pretrain_img_size - self.num_layers = len(depths) - self.embed_dim = embed_dim - self.ape = ape - self.patch_norm = patch_norm - self.out_indices = out_indices - self.frozen_stages = frozen_stages - self.dilation = dilation - - # if use_checkpoint: - # print("use_checkpoint!!!!!!!!!!!!!!!!!!!!!!!!") - - # split image into non-overlapping patches - self.patch_embed = PatchEmbed( - patch_size=patch_size, - in_chans=in_chans, - embed_dim=embed_dim, - norm_layer=norm_layer if self.patch_norm else None, - ) - - # absolute position embedding - if self.ape: - pretrain_img_size = to_2tuple(pretrain_img_size) - patch_size = to_2tuple(patch_size) - patches_resolution = [ - pretrain_img_size[0] // patch_size[0], - pretrain_img_size[1] // patch_size[1], - ] - - self.absolute_pos_embed = nn.Parameter( - torch.zeros(1, embed_dim, patches_resolution[0], patches_resolution[1]) - ) - trunc_normal_(self.absolute_pos_embed, std=0.02) - - self.pos_drop = nn.Dropout(p=drop_rate) - - # stochastic depth - dpr = [ - x.item() for x in torch.linspace(0, drop_path_rate, sum(depths)) - ] # stochastic depth decay rule - - # build layers - self.layers = nn.ModuleList() - # prepare downsample list - downsamplelist = [PatchMerging for i in range(self.num_layers)] - downsamplelist[-1] = None - num_features = [int(embed_dim * 2**i) for i in range(self.num_layers)] - if self.dilation: - downsamplelist[-2] = None - num_features[-1] = int(embed_dim * 2 ** (self.num_layers - 1)) // 2 - for i_layer in range(self.num_layers): - layer = BasicLayer( - # dim=int(embed_dim * 2 ** i_layer), - dim=num_features[i_layer], - depth=depths[i_layer], - num_heads=num_heads[i_layer], - window_size=window_size, - mlp_ratio=mlp_ratio, - qkv_bias=qkv_bias, - qk_scale=qk_scale, - drop=drop_rate, - attn_drop=attn_drop_rate, - drop_path=dpr[sum(depths[:i_layer]) : sum(depths[: i_layer + 1])], - norm_layer=norm_layer, - # downsample=PatchMerging if (i_layer < self.num_layers - 1) else None, - downsample=downsamplelist[i_layer], - use_checkpoint=use_checkpoint, - ) - self.layers.append(layer) - - # num_features = [int(embed_dim * 2 ** i) for i in range(self.num_layers)] - self.num_features = num_features - - # add a norm layer for each output - for i_layer in out_indices: - layer = norm_layer(num_features[i_layer]) - layer_name = f"norm{i_layer}" - 
self.add_module(layer_name, layer) - - self._freeze_stages() - - def _freeze_stages(self): - if self.frozen_stages >= 0: - self.patch_embed.eval() - for param in self.patch_embed.parameters(): - param.requires_grad = False - - if self.frozen_stages >= 1 and self.ape: - self.absolute_pos_embed.requires_grad = False - - if self.frozen_stages >= 2: - self.pos_drop.eval() - for i in range(0, self.frozen_stages - 1): - m = self.layers[i] - m.eval() - for param in m.parameters(): - param.requires_grad = False - - # def init_weights(self, pretrained=None): - # """Initialize the weights in backbone. - # Args: - # pretrained (str, optional): Path to pre-trained weights. - # Defaults to None. - # """ - - # def _init_weights(m): - # if isinstance(m, nn.Linear): - # trunc_normal_(m.weight, std=.02) - # if isinstance(m, nn.Linear) and m.bias is not None: - # nn.init.constant_(m.bias, 0) - # elif isinstance(m, nn.LayerNorm): - # nn.init.constant_(m.bias, 0) - # nn.init.constant_(m.weight, 1.0) - - # if isinstance(pretrained, str): - # self.apply(_init_weights) - # logger = get_root_logger() - # load_checkpoint(self, pretrained, strict=False, logger=logger) - # elif pretrained is None: - # self.apply(_init_weights) - # else: - # raise TypeError('pretrained must be a str or None') - - def forward_raw(self, x): - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - outs = [] - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - # import ipdb; ipdb.set_trace() - - if i in self.out_indices: - norm_layer = getattr(self, f"norm{i}") - x_out = norm_layer(x_out) - - out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() - outs.append(out) - # in: - # torch.Size([2, 3, 1024, 1024]) - # outs: - # [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \ - # torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])] - return tuple(outs) - - def forward(self, tensor_list: NestedTensor): - x = tensor_list.tensors - - """Forward function.""" - x = self.patch_embed(x) - - Wh, Ww = x.size(2), x.size(3) - if self.ape: - # interpolate the position embedding to the corresponding size - absolute_pos_embed = F.interpolate( - self.absolute_pos_embed, size=(Wh, Ww), mode="bicubic" - ) - x = (x + absolute_pos_embed).flatten(2).transpose(1, 2) # B Wh*Ww C - else: - x = x.flatten(2).transpose(1, 2) - x = self.pos_drop(x) - - outs = [] - for i in range(self.num_layers): - layer = self.layers[i] - x_out, H, W, x, Wh, Ww = layer(x, Wh, Ww) - - if i in self.out_indices: - norm_layer = getattr(self, f"norm{i}") - x_out = norm_layer(x_out) - - out = x_out.view(-1, H, W, self.num_features[i]).permute(0, 3, 1, 2).contiguous() - outs.append(out) - # in: - # torch.Size([2, 3, 1024, 1024]) - # out: - # [torch.Size([2, 192, 256, 256]), torch.Size([2, 384, 128, 128]), \ - # torch.Size([2, 768, 64, 64]), torch.Size([2, 1536, 32, 32])] - - # collect for nesttensors - outs_dict = {} - for idx, out_i in enumerate(outs): - m = tensor_list.mask - assert m is not None - mask = F.interpolate(m[None].float(), size=out_i.shape[-2:]).to(torch.bool)[0] - outs_dict[idx] = NestedTensor(out_i, mask) - - return outs_dict - - 
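    # Usage sketch (illustrative; `build_swin_transformer` is defined near the
    # bottom of this file):
    #   backbone = build_swin_transformer("swin_T_224_1k", pretrain_img_size=224)
    #   feature_maps = backbone.forward_raw(torch.rand(2, 3, 224, 224))
    # With the default `out_indices=(0, 1, 2, 3)` this yields four NCHW maps at
    # strides 4/8/16/32 of the input (cf. the shape comments in `forward_raw`
    # above).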
def train(self, mode=True): - """Convert the model into training mode while keep layers freezed.""" - super(SwinTransformer, self).train(mode) - self._freeze_stages() - - -def build_swin_transformer(modelname, pretrain_img_size, **kw): - assert modelname in [ - "swin_T_224_1k", - "swin_B_224_22k", - "swin_B_384_22k", - "swin_L_224_22k", - "swin_L_384_22k", - ] - - model_para_dict = { - "swin_T_224_1k": dict( - embed_dim=96, depths=[2, 2, 6, 2], num_heads=[3, 6, 12, 24], window_size=7 - ), - "swin_B_224_22k": dict( - embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=7 - ), - "swin_B_384_22k": dict( - embed_dim=128, depths=[2, 2, 18, 2], num_heads=[4, 8, 16, 32], window_size=12 - ), - "swin_L_224_22k": dict( - embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=7 - ), - "swin_L_384_22k": dict( - embed_dim=192, depths=[2, 2, 18, 2], num_heads=[6, 12, 24, 48], window_size=12 - ), - } - kw_cgf = model_para_dict[modelname] - kw_cgf.update(kw) - model = SwinTransformer(pretrain_img_size=pretrain_img_size, **kw_cgf) - return model - - -if __name__ == "__main__": - model = build_swin_transformer("swin_L_384_22k", 384, dilation=True) - x = torch.rand(2, 3, 1024, 1024) - y = model.forward_raw(x) - import ipdb - - ipdb.set_trace() - x = torch.rand(2, 3, 384, 384) - y = model.forward_raw(x) diff --git a/spaces/xdecoder/Instruct-X-Decoder/utils/visualizer.py b/spaces/xdecoder/Instruct-X-Decoder/utils/visualizer.py deleted file mode 100644 index afdc2e2ff69f0b36b51c75c41d1893e8d9fb582e..0000000000000000000000000000000000000000 --- a/spaces/xdecoder/Instruct-X-Decoder/utils/visualizer.py +++ /dev/null @@ -1,1278 +0,0 @@ -# Copyright (c) Facebook, Inc. and its affiliates. -import colorsys -import logging -import math -import numpy as np -from enum import Enum, unique -import cv2 -import matplotlib as mpl -import matplotlib.colors as mplc -import matplotlib.figure as mplfigure -import pycocotools.mask as mask_util -import torch -from matplotlib.backends.backend_agg import FigureCanvasAgg -from PIL import Image - -from detectron2.data import MetadataCatalog -from detectron2.structures import BitMasks, Boxes, BoxMode, Keypoints, PolygonMasks, RotatedBoxes -from detectron2.utils.file_io import PathManager - -from detectron2.utils.colormap import random_color - -logger = logging.getLogger(__name__) -__all__ = ["ColorMode", "VisImage", "Visualizer"] - - -_SMALL_OBJECT_AREA_THRESH = 1000 -_LARGE_MASK_AREA_THRESH = 120000 -_OFF_WHITE = (1.0, 1.0, 240.0 / 255) -_BLACK = (0, 0, 0) -_RED = (1.0, 0, 0) - -_KEYPOINT_THRESHOLD = 0.05 - - -@unique -class ColorMode(Enum): - """ - Enum of different color modes to use for instance visualizations. - """ - - IMAGE = 0 - """ - Picks a random color for every instance and overlay segmentations with low opacity. - """ - SEGMENTATION = 1 - """ - Let instances of the same category have similar colors - (from metadata.thing_colors), and overlay them with - high opacity. This provides more attention on the quality of segmentation. - """ - IMAGE_BW = 2 - """ - Same as IMAGE, but convert all areas without masks to gray-scale. - Only available for drawing per-instance mask predictions. - """ - - -class GenericMask: - """ - Attribute: - polygons (list[ndarray]): list[ndarray]: polygons for this mask. - Each ndarray has format [x, y, x, y, ...] 
- mask (ndarray): a binary mask - """ - - def __init__(self, mask_or_polygons, height, width): - self._mask = self._polygons = self._has_holes = None - self.height = height - self.width = width - - m = mask_or_polygons - if isinstance(m, dict): - # RLEs - assert "counts" in m and "size" in m - if isinstance(m["counts"], list): # uncompressed RLEs - h, w = m["size"] - assert h == height and w == width - m = mask_util.frPyObjects(m, h, w) - self._mask = mask_util.decode(m)[:, :] - return - - if isinstance(m, list): # list[ndarray] - self._polygons = [np.asarray(x).reshape(-1) for x in m] - return - - if isinstance(m, np.ndarray): # assumed to be a binary mask - assert m.shape[1] != 2, m.shape - assert m.shape == ( - height, - width, - ), f"mask shape: {m.shape}, target dims: {height}, {width}" - self._mask = m.astype("uint8") - return - - raise ValueError("GenericMask cannot handle object {} of type '{}'".format(m, type(m))) - - @property - def mask(self): - if self._mask is None: - self._mask = self.polygons_to_mask(self._polygons) - return self._mask - - @property - def polygons(self): - if self._polygons is None: - self._polygons, self._has_holes = self.mask_to_polygons(self._mask) - return self._polygons - - @property - def has_holes(self): - if self._has_holes is None: - if self._mask is not None: - self._polygons, self._has_holes = self.mask_to_polygons(self._mask) - else: - self._has_holes = False # if original format is polygon, does not have holes - return self._has_holes - - def mask_to_polygons(self, mask): - # cv2.RETR_CCOMP flag retrieves all the contours and arranges them to a 2-level - # hierarchy. External contours (boundary) of the object are placed in hierarchy-1. - # Internal contours (holes) are placed in hierarchy-2. - # cv2.CHAIN_APPROX_NONE flag gets vertices of polygons from contours. - mask = np.ascontiguousarray(mask) # some versions of cv2 does not support incontiguous arr - res = cv2.findContours(mask.astype("uint8"), cv2.RETR_CCOMP, cv2.CHAIN_APPROX_NONE) - hierarchy = res[-1] - if hierarchy is None: # empty mask - return [], False - has_holes = (hierarchy.reshape(-1, 4)[:, 3] >= 0).sum() > 0 - res = res[-2] - res = [x.flatten() for x in res] - # These coordinates from OpenCV are integers in range [0, W-1 or H-1]. - # We add 0.5 to turn them into real-value coordinate space. A better solution - # would be to first +0.5 and then dilate the returned polygon by 0.5. - res = [x + 0.5 for x in res if len(x) >= 6] - return res, has_holes - - def polygons_to_mask(self, polygons): - rle = mask_util.frPyObjects(polygons, self.height, self.width) - rle = mask_util.merge(rle) - return mask_util.decode(rle)[:, :] - - def area(self): - return self.mask.sum() - - def bbox(self): - p = mask_util.frPyObjects(self.polygons, self.height, self.width) - p = mask_util.merge(p) - bbox = mask_util.toBbox(p) - bbox[2] += bbox[0] - bbox[3] += bbox[1] - return bbox - - -class _PanopticPrediction: - """ - Unify different panoptic annotation/prediction formats - """ - - def __init__(self, panoptic_seg, segments_info, metadata=None): - if segments_info is None: - assert metadata is not None - # If "segments_info" is None, we assume "panoptic_img" is a - # H*W int32 image storing the panoptic_id in the format of - # category_id * label_divisor + instance_id. We reserve -1 for - # VOID label. - label_divisor = metadata.label_divisor - segments_info = [] - for panoptic_label in np.unique(panoptic_seg.numpy()): - if panoptic_label == -1: - # VOID region. 
- continue - pred_class = panoptic_label // label_divisor - isthing = pred_class in metadata.thing_dataset_id_to_contiguous_id.values() - segments_info.append( - { - "id": int(panoptic_label), - "category_id": int(pred_class), - "isthing": bool(isthing), - } - ) - del metadata - - self._seg = panoptic_seg - - self._sinfo = {s["id"]: s for s in segments_info} # seg id -> seg info - segment_ids, areas = torch.unique(panoptic_seg, sorted=True, return_counts=True) - areas = areas.numpy() - sorted_idxs = np.argsort(-areas) - self._seg_ids, self._seg_areas = segment_ids[sorted_idxs], areas[sorted_idxs] - self._seg_ids = self._seg_ids.tolist() - for sid, area in zip(self._seg_ids, self._seg_areas): - if sid in self._sinfo: - self._sinfo[sid]["area"] = float(area) - - def non_empty_mask(self): - """ - Returns: - (H, W) array, a mask for all pixels that have a prediction - """ - empty_ids = [] - for id in self._seg_ids: - if id not in self._sinfo: - empty_ids.append(id) - if len(empty_ids) == 0: - return np.zeros(self._seg.shape, dtype=np.uint8) - assert ( - len(empty_ids) == 1 - ), ">1 ids corresponds to no labels. This is currently not supported" - return (self._seg != empty_ids[0]).numpy().astype(np.bool) - - def semantic_masks(self): - for sid in self._seg_ids: - sinfo = self._sinfo.get(sid) - if sinfo is None or sinfo["isthing"]: - # Some pixels (e.g. id 0 in PanopticFPN) have no instance or semantic predictions. - continue - yield (self._seg == sid).numpy().astype(np.bool), sinfo - - def instance_masks(self): - for sid in self._seg_ids: - sinfo = self._sinfo.get(sid) - if sinfo is None or not sinfo["isthing"]: - continue - mask = (self._seg == sid).numpy().astype(np.bool) - if mask.sum() > 0: - yield mask, sinfo - - -def _create_text_labels(classes, scores, class_names, is_crowd=None): - """ - Args: - classes (list[int] or None): - scores (list[float] or None): - class_names (list[str] or None): - is_crowd (list[bool] or None): - - Returns: - list[str] or None - """ - labels = None - if classes is not None: - if class_names is not None and len(class_names) > 0: - labels = [class_names[i] for i in classes] - else: - labels = [str(i) for i in classes] - if scores is not None: - if labels is None: - labels = ["{:.0f}%".format(s * 100) for s in scores] - else: - labels = ["{} {:.0f}%".format(l, s * 100) for l, s in zip(labels, scores)] - if labels is not None and is_crowd is not None: - labels = [l + ("|crowd" if crowd else "") for l, crowd in zip(labels, is_crowd)] - return labels - - -class VisImage: - def __init__(self, img, scale=1.0): - """ - Args: - img (ndarray): an RGB image of shape (H, W, 3) in range [0, 255]. - scale (float): scale the input image - """ - self.img = img - self.scale = scale - self.width, self.height = img.shape[1], img.shape[0] - self._setup_figure(img) - - def _setup_figure(self, img): - """ - Args: - Same as in :meth:`__init__()`. - - Returns: - fig (matplotlib.pyplot.figure): top level container for all the image plot elements. - ax (matplotlib.pyplot.Axes): contains figure elements and sets the coordinate system. 
- """ - fig = mplfigure.Figure(frameon=False) - self.dpi = fig.get_dpi() - # add a small 1e-2 to avoid precision lost due to matplotlib's truncation - # (https://github.com/matplotlib/matplotlib/issues/15363) - fig.set_size_inches( - (self.width * self.scale + 1e-2) / self.dpi, - (self.height * self.scale + 1e-2) / self.dpi, - ) - self.canvas = FigureCanvasAgg(fig) - # self.canvas = mpl.backends.backend_cairo.FigureCanvasCairo(fig) - ax = fig.add_axes([0.0, 0.0, 1.0, 1.0]) - ax.axis("off") - self.fig = fig - self.ax = ax - self.reset_image(img) - - def reset_image(self, img): - """ - Args: - img: same as in __init__ - """ - img = img.astype("uint8") - self.ax.imshow(img, extent=(0, self.width, self.height, 0), interpolation="nearest") - - def save(self, filepath): - """ - Args: - filepath (str): a string that contains the absolute path, including the file name, where - the visualized image will be saved. - """ - self.fig.savefig(filepath) - - def get_image(self): - """ - Returns: - ndarray: - the visualized image of shape (H, W, 3) (RGB) in uint8 type. - The shape is scaled w.r.t the input image using the given `scale` argument. - """ - canvas = self.canvas - s, (width, height) = canvas.print_to_buffer() - # buf = io.BytesIO() # works for cairo backend - # canvas.print_rgba(buf) - # width, height = self.width, self.height - # s = buf.getvalue() - - buffer = np.frombuffer(s, dtype="uint8") - - img_rgba = buffer.reshape(height, width, 4) - rgb, alpha = np.split(img_rgba, [3], axis=2) - return rgb.astype("uint8") - - -class Visualizer: - """ - Visualizer that draws data about detection/segmentation on images. - - It contains methods like `draw_{text,box,circle,line,binary_mask,polygon}` - that draw primitive objects to images, as well as high-level wrappers like - `draw_{instance_predictions,sem_seg,panoptic_seg_predictions,dataset_dict}` - that draw composite data in some pre-defined style. - - Note that the exact visualization style for the high-level wrappers are subject to change. - Style such as color, opacity, label contents, visibility of labels, or even the visibility - of objects themselves (e.g. when the object is too small) may change according - to different heuristics, as long as the results still look visually reasonable. - - To obtain a consistent style, you can implement custom drawing functions with the - abovementioned primitive methods instead. If you need more customized visualization - styles, you can process the data yourself following their format documented in - tutorials (:doc:`/tutorials/models`, :doc:`/tutorials/datasets`). This class does not - intend to satisfy everyone's preference on drawing styles. - - This visualizer focuses on high rendering quality rather than performance. It is not - designed to be used for real-time applications. - """ - - # TODO implement a fast, rasterized version using OpenCV - - def __init__(self, img_rgb, metadata=None, scale=1.0, instance_mode=ColorMode.IMAGE): - """ - Args: - img_rgb: a numpy array of shape (H, W, C), where H and W correspond to - the height and width of the image respectively. C is the number of - color channels. The image is required to be in RGB format since that - is a requirement of the Matplotlib library. The image is also expected - to be in the range [0, 255]. - metadata (Metadata): dataset metadata (e.g. class names and colors) - instance_mode (ColorMode): defines one of the pre-defined style for drawing - instances on an image. 
- """ - self.img = np.asarray(img_rgb).clip(0, 255).astype(np.uint8) - if metadata is None: - metadata = MetadataCatalog.get("__nonexist__") - self.metadata = metadata - self.output = VisImage(self.img, scale=scale) - self.cpu_device = torch.device("cpu") - - # too small texts are useless, therefore clamp to 9 - self._default_font_size = max( - np.sqrt(self.output.height * self.output.width) // 90, 10 // scale - ) - self._default_font_size = 18 - self._instance_mode = instance_mode - self.keypoint_threshold = _KEYPOINT_THRESHOLD - - def draw_instance_predictions(self, predictions): - """ - Draw instance-level prediction results on an image. - - Args: - predictions (Instances): the output of an instance detection/segmentation - model. Following fields will be used to draw: - "pred_boxes", "pred_classes", "scores", "pred_masks" (or "pred_masks_rle"). - - Returns: - output (VisImage): image object with visualizations. - """ - boxes = predictions.pred_boxes if predictions.has("pred_boxes") else None - scores = predictions.scores if predictions.has("scores") else None - classes = predictions.pred_classes.tolist() if predictions.has("pred_classes") else None - labels = _create_text_labels(classes, scores, self.metadata.get("thing_classes", None)) - keypoints = predictions.pred_keypoints if predictions.has("pred_keypoints") else None - - keep = (scores > 0.8).cpu() - boxes = boxes[keep] - scores = scores[keep] - classes = np.array(classes) - classes = classes[np.array(keep)] - labels = np.array(labels) - labels = labels[np.array(keep)] - - if predictions.has("pred_masks"): - masks = np.asarray(predictions.pred_masks) - masks = masks[np.array(keep)] - masks = [GenericMask(x, self.output.height, self.output.width) for x in masks] - else: - masks = None - - if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): - # if self.metadata.get("thing_colors"): - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in classes - ] - alpha = 0.4 - else: - colors = None - alpha = 0.4 - - if self._instance_mode == ColorMode.IMAGE_BW: - self.output.reset_image( - self._create_grayscale_image( - (predictions.pred_masks.any(dim=0) > 0).numpy() - if predictions.has("pred_masks") - else None - ) - ) - alpha = 0.3 - - self.overlay_instances( - masks=masks, - boxes=boxes, - labels=labels, - keypoints=keypoints, - assigned_colors=colors, - alpha=alpha, - ) - return self.output - - def draw_sem_seg(self, sem_seg, area_threshold=None, alpha=0.7): - """ - Draw semantic segmentation predictions/labels. - - Args: - sem_seg (Tensor or ndarray): the segmentation of shape (H, W). - Each value is the integer label of the pixel. - area_threshold (int): segments with less than `area_threshold` are not drawn. - alpha (float): the larger it is, the more opaque the segmentations are. - - Returns: - output (VisImage): image object with visualizations. 
- """ - if isinstance(sem_seg, torch.Tensor): - sem_seg = sem_seg.numpy() - labels, areas = np.unique(sem_seg, return_counts=True) - sorted_idxs = np.argsort(-areas).tolist() - labels = labels[sorted_idxs] - for label in filter(lambda l: l < len(self.metadata.stuff_classes), labels): - try: - mask_color = [x / 255 for x in self.metadata.stuff_colors[label]] - except (AttributeError, IndexError): - mask_color = None - - binary_mask = (sem_seg == label).astype(np.uint8) - text = self.metadata.stuff_classes[label] - self.draw_binary_mask( - binary_mask, - color=mask_color, - edge_color=_OFF_WHITE, - text=text, - alpha=alpha, - area_threshold=area_threshold, - ) - return self.output - - def draw_panoptic_seg(self, panoptic_seg, segments_info, area_threshold=None, alpha=0.7): - """ - Draw panoptic prediction annotations or results. - - Args: - panoptic_seg (Tensor): of shape (height, width) where the values are ids for each - segment. - segments_info (list[dict] or None): Describe each segment in `panoptic_seg`. - If it is a ``list[dict]``, each dict contains keys "id", "category_id". - If None, category id of each pixel is computed by - ``pixel // metadata.label_divisor``. - area_threshold (int): stuff segments with less than `area_threshold` are not drawn. - - Returns: - output (VisImage): image object with visualizations. - """ - pred = _PanopticPrediction(panoptic_seg, segments_info, self.metadata) - - if self._instance_mode == ColorMode.IMAGE_BW: - self.output.reset_image(self._create_grayscale_image(pred.non_empty_mask())) - - # draw mask for all semantic segments first i.e. "stuff" - for mask, sinfo in pred.semantic_masks(): - category_idx = sinfo["category_id"] - try: - mask_color = [x / 255 for x in self.metadata.stuff_colors[category_idx]] - except AttributeError: - mask_color = None - - text = self.metadata.stuff_classes[category_idx] - self.draw_binary_mask( - mask, - color=mask_color, - edge_color=_OFF_WHITE, - text=text, - alpha=alpha, - area_threshold=area_threshold, - ) - - # draw mask for all instances second - all_instances = list(pred.instance_masks()) - if len(all_instances) == 0: - return self.output - masks, sinfo = list(zip(*all_instances)) - category_ids = [x["category_id"] for x in sinfo] - - try: - scores = [x["score"] for x in sinfo] - except KeyError: - scores = None - labels = _create_text_labels( - category_ids, scores, self.metadata.thing_classes, [x.get("iscrowd", 0) for x in sinfo] - ) - - try: - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) for c in category_ids - ] - except AttributeError: - colors = None - self.overlay_instances(masks=masks, labels=labels, assigned_colors=colors, alpha=alpha) - - return self.output - - draw_panoptic_seg_predictions = draw_panoptic_seg # backward compatibility - - def draw_dataset_dict(self, dic): - """ - Draw annotations/segmentaions in Detectron2 Dataset format. - - Args: - dic (dict): annotation/segmentation data of one image, in Detectron2 Dataset format. - - Returns: - output (VisImage): image object with visualizations. 
- """ - annos = dic.get("annotations", None) - if annos: - if "segmentation" in annos[0]: - masks = [x["segmentation"] for x in annos] - else: - masks = None - if "keypoints" in annos[0]: - keypts = [x["keypoints"] for x in annos] - keypts = np.array(keypts).reshape(len(annos), -1, 3) - else: - keypts = None - - boxes = [ - BoxMode.convert(x["bbox"], x["bbox_mode"], BoxMode.XYXY_ABS) - if len(x["bbox"]) == 4 - else x["bbox"] - for x in annos - ] - - colors = None - category_ids = [x["category_id"] for x in annos] - if self._instance_mode == ColorMode.SEGMENTATION and self.metadata.get("thing_colors"): - colors = [ - self._jitter([x / 255 for x in self.metadata.thing_colors[c]]) - for c in category_ids - ] - names = self.metadata.get("thing_classes", None) - labels = _create_text_labels( - category_ids, - scores=None, - class_names=names, - is_crowd=[x.get("iscrowd", 0) for x in annos], - ) - self.overlay_instances( - labels=labels, boxes=boxes, masks=masks, keypoints=keypts, assigned_colors=colors - ) - - sem_seg = dic.get("sem_seg", None) - if sem_seg is None and "sem_seg_file_name" in dic: - with PathManager.open(dic["sem_seg_file_name"], "rb") as f: - sem_seg = Image.open(f) - sem_seg = np.asarray(sem_seg, dtype="uint8") - if sem_seg is not None: - self.draw_sem_seg(sem_seg, area_threshold=0, alpha=0.4) - - pan_seg = dic.get("pan_seg", None) - if pan_seg is None and "pan_seg_file_name" in dic: - with PathManager.open(dic["pan_seg_file_name"], "rb") as f: - pan_seg = Image.open(f) - pan_seg = np.asarray(pan_seg) - from panopticapi.utils import rgb2id - - pan_seg = rgb2id(pan_seg) - if pan_seg is not None: - segments_info = dic["segments_info"] - pan_seg = torch.tensor(pan_seg) - self.draw_panoptic_seg(pan_seg, segments_info, area_threshold=0, alpha=0.7) - return self.output - - def overlay_instances( - self, - *, - boxes=None, - labels=None, - masks=None, - keypoints=None, - assigned_colors=None, - alpha=0.5, - ): - """ - Args: - boxes (Boxes, RotatedBoxes or ndarray): either a :class:`Boxes`, - or an Nx4 numpy array of XYXY_ABS format for the N objects in a single image, - or a :class:`RotatedBoxes`, - or an Nx5 numpy array of (x_center, y_center, width, height, angle_degrees) format - for the N objects in a single image, - labels (list[str]): the text to be displayed for each instance. - masks (masks-like object): Supported types are: - - * :class:`detectron2.structures.PolygonMasks`, - :class:`detectron2.structures.BitMasks`. - * list[list[ndarray]]: contains the segmentation masks for all objects in one image. - The first level of the list corresponds to individual instances. The second - level to all the polygon that compose the instance, and the third level - to the polygon coordinates. The third level should have the format of - [x0, y0, x1, y1, ..., xn, yn] (n >= 3). - * list[ndarray]: each ndarray is a binary mask of shape (H, W). - * list[dict]: each dict is a COCO-style RLE. - keypoints (Keypoint or array like): an array-like object of shape (N, K, 3), - where the N is the number of instances and K is the number of keypoints. - The last dimension corresponds to (x, y, visibility or score). - assigned_colors (list[matplotlib.colors]): a list of colors, where each color - corresponds to each mask or box in the image. Refer to 'matplotlib.colors' - for full list of formats that the colors are accepted in. - Returns: - output (VisImage): image object with visualizations. 
- """ - num_instances = 0 - if boxes is not None: - boxes = self._convert_boxes(boxes) - num_instances = len(boxes) - if masks is not None: - masks = self._convert_masks(masks) - if num_instances: - assert len(masks) == num_instances - else: - num_instances = len(masks) - if keypoints is not None: - if num_instances: - assert len(keypoints) == num_instances - else: - num_instances = len(keypoints) - keypoints = self._convert_keypoints(keypoints) - if labels is not None: - assert len(labels) == num_instances - if assigned_colors is None: - assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] - if num_instances == 0: - return self.output - if boxes is not None and boxes.shape[1] == 5: - return self.overlay_rotated_instances( - boxes=boxes, labels=labels, assigned_colors=assigned_colors - ) - - # Display in largest to smallest order to reduce occlusion. - areas = None - if boxes is not None: - areas = np.prod(boxes[:, 2:] - boxes[:, :2], axis=1) - elif masks is not None: - areas = np.asarray([x.area() for x in masks]) - - if areas is not None: - sorted_idxs = np.argsort(-areas).tolist() - # Re-order overlapped instances in descending order. - boxes = boxes[sorted_idxs] if boxes is not None else None - labels = [labels[k] for k in sorted_idxs] if labels is not None else None - masks = [masks[idx] for idx in sorted_idxs] if masks is not None else None - assigned_colors = [assigned_colors[idx] for idx in sorted_idxs] - keypoints = keypoints[sorted_idxs] if keypoints is not None else None - - for i in range(num_instances): - color = assigned_colors[i] - if boxes is not None: - self.draw_box(boxes[i], edge_color=color) - - if masks is not None: - for segment in masks[i].polygons: - self.draw_polygon(segment.reshape(-1, 2), color, alpha=alpha) - - if labels is not None: - # first get a box - if boxes is not None: - x0, y0, x1, y1 = boxes[i] - text_pos = (x0, y0) # if drawing boxes, put text on the box corner. - horiz_align = "left" - elif masks is not None: - # skip small mask without polygon - if len(masks[i].polygons) == 0: - continue - - x0, y0, x1, y1 = masks[i].bbox() - - # draw text in the center (defined by median) when box is not drawn - # median is less sensitive to outliers. - text_pos = np.median(masks[i].mask.nonzero(), axis=1)[::-1] - horiz_align = "center" - else: - continue # drawing the box confidence for keypoints isn't very useful. - # for small objects, draw text at the side to avoid occlusion - instance_area = (y1 - y0) * (x1 - x0) - if ( - instance_area < _SMALL_OBJECT_AREA_THRESH * self.output.scale - or y1 - y0 < 40 * self.output.scale - ): - if y1 >= self.output.height - 5: - text_pos = (x1, y0) - else: - text_pos = (x0, y1) - - height_ratio = (y1 - y0) / np.sqrt(self.output.height * self.output.width) - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - font_size = ( - np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) - * 0.5 - * self._default_font_size - ) - self.draw_text( - labels[i], - text_pos, - color=lighter_color, - horizontal_alignment=horiz_align, - font_size=font_size, - ) - - # draw keypoints - if keypoints is not None: - for keypoints_per_instance in keypoints: - self.draw_and_connect_keypoints(keypoints_per_instance) - - return self.output - - def overlay_rotated_instances(self, boxes=None, labels=None, assigned_colors=None): - """ - Args: - boxes (ndarray): an Nx5 numpy array of - (x_center, y_center, width, height, angle_degrees) format - for the N objects in a single image. 
- labels (list[str]): the text to be displayed for each instance. - assigned_colors (list[matplotlib.colors]): a list of colors, where each color - corresponds to each mask or box in the image. Refer to 'matplotlib.colors' - for full list of formats that the colors are accepted in. - - Returns: - output (VisImage): image object with visualizations. - """ - num_instances = len(boxes) - - if assigned_colors is None: - assigned_colors = [random_color(rgb=True, maximum=1) for _ in range(num_instances)] - if num_instances == 0: - return self.output - - # Display in largest to smallest order to reduce occlusion. - if boxes is not None: - areas = boxes[:, 2] * boxes[:, 3] - - sorted_idxs = np.argsort(-areas).tolist() - # Re-order overlapped instances in descending order. - boxes = boxes[sorted_idxs] - labels = [labels[k] for k in sorted_idxs] if labels is not None else None - colors = [assigned_colors[idx] for idx in sorted_idxs] - - for i in range(num_instances): - self.draw_rotated_box_with_label( - boxes[i], edge_color=colors[i], label=labels[i] if labels is not None else None - ) - - return self.output - - def draw_and_connect_keypoints(self, keypoints): - """ - Draws keypoints of an instance and follows the rules for keypoint connections - to draw lines between appropriate keypoints. This follows color heuristics for - line color. - - Args: - keypoints (Tensor): a tensor of shape (K, 3), where K is the number of keypoints - and the last dimension corresponds to (x, y, probability). - - Returns: - output (VisImage): image object with visualizations. - """ - visible = {} - keypoint_names = self.metadata.get("keypoint_names") - for idx, keypoint in enumerate(keypoints): - - # draw keypoint - x, y, prob = keypoint - if prob > self.keypoint_threshold: - self.draw_circle((x, y), color=_RED) - if keypoint_names: - keypoint_name = keypoint_names[idx] - visible[keypoint_name] = (x, y) - - if self.metadata.get("keypoint_connection_rules"): - for kp0, kp1, color in self.metadata.keypoint_connection_rules: - if kp0 in visible and kp1 in visible: - x0, y0 = visible[kp0] - x1, y1 = visible[kp1] - color = tuple(x / 255.0 for x in color) - self.draw_line([x0, x1], [y0, y1], color=color) - - # draw lines from nose to mid-shoulder and mid-shoulder to mid-hip - # Note that this strategy is specific to person keypoints. - # For other keypoints, it should just do nothing - try: - ls_x, ls_y = visible["left_shoulder"] - rs_x, rs_y = visible["right_shoulder"] - mid_shoulder_x, mid_shoulder_y = (ls_x + rs_x) / 2, (ls_y + rs_y) / 2 - except KeyError: - pass - else: - # draw line from nose to mid-shoulder - nose_x, nose_y = visible.get("nose", (None, None)) - if nose_x is not None: - self.draw_line([nose_x, mid_shoulder_x], [nose_y, mid_shoulder_y], color=_RED) - - try: - # draw line from mid-shoulder to mid-hip - lh_x, lh_y = visible["left_hip"] - rh_x, rh_y = visible["right_hip"] - except KeyError: - pass - else: - mid_hip_x, mid_hip_y = (lh_x + rh_x) / 2, (lh_y + rh_y) / 2 - self.draw_line([mid_hip_x, mid_shoulder_x], [mid_hip_y, mid_shoulder_y], color=_RED) - return self.output - - """ - Primitive drawing functions: - """ - - def draw_text( - self, - text, - position, - *, - font_size=None, - color="g", - horizontal_alignment="center", - rotation=0, - ): - """ - Args: - text (str): class label - position (tuple): a tuple of the x and y coordinates to place text on image. - font_size (int, optional): font of the text. If not provided, a font size - proportional to the image width is calculated and used. 
- color: color of the text. Refer to `matplotlib.colors` for full list - of formats that are accepted. - horizontal_alignment (str): see `matplotlib.text.Text` - rotation: rotation angle in degrees CCW - - Returns: - output (VisImage): image object with text drawn. - """ - if not font_size: - font_size = self._default_font_size - - # since the text background is dark, we don't want the text to be dark - color = np.maximum(list(mplc.to_rgb(color)), 0.2) - color[np.argmax(color)] = max(0.8, np.max(color)) - - x, y = position - self.output.ax.text( - x, - y, - text, - size=font_size * self.output.scale, - family="sans-serif", - bbox={"facecolor": "black", "alpha": 0.8, "pad": 0.7, "edgecolor": "none"}, - verticalalignment="top", - horizontalalignment=horizontal_alignment, - color=color, - zorder=10, - rotation=rotation, - ) - return self.output - - def draw_box(self, box_coord, alpha=0.5, edge_color="g", line_style="-"): - """ - Args: - box_coord (tuple): a tuple containing x0, y0, x1, y1 coordinates, where x0 and y0 - are the coordinates of the image's top left corner. x1 and y1 are the - coordinates of the image's bottom right corner. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - edge_color: color of the outline of the box. Refer to `matplotlib.colors` - for full list of formats that are accepted. - line_style (string): the string to use to create the outline of the boxes. - - Returns: - output (VisImage): image object with box drawn. - """ - x0, y0, x1, y1 = box_coord - width = x1 - x0 - height = y1 - y0 - - linewidth = max(self._default_font_size / 4, 1) - - self.output.ax.add_patch( - mpl.patches.Rectangle( - (x0, y0), - width, - height, - fill=False, - edgecolor=edge_color, - linewidth=linewidth * self.output.scale, - alpha=alpha, - linestyle=line_style, - ) - ) - return self.output - - def draw_rotated_box_with_label( - self, rotated_box, alpha=0.5, edge_color="g", line_style="-", label=None - ): - """ - Draw a rotated box with label on its top-left corner. - - Args: - rotated_box (tuple): a tuple containing (cnt_x, cnt_y, w, h, angle), - where cnt_x and cnt_y are the center coordinates of the box. - w and h are the width and height of the box. angle represents how - many degrees the box is rotated CCW with regard to the 0-degree box. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - edge_color: color of the outline of the box. Refer to `matplotlib.colors` - for full list of formats that are accepted. - line_style (string): the string to use to create the outline of the boxes. - label (string): label for rotated box. It will not be rendered when set to None. - - Returns: - output (VisImage): image object with box drawn. 
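        Example:
            Illustrative call on a ``Visualizer`` instance ``v``: a 60x20 box
            centred at (100, 50), rotated 30 degrees CCW:

                >>> v.draw_rotated_box_with_label((100, 50, 60, 20, 30),
                ...                               edge_color="b", label="car")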
- """ - cnt_x, cnt_y, w, h, angle = rotated_box - area = w * h - # use thinner lines when the box is small - linewidth = self._default_font_size / ( - 6 if area < _SMALL_OBJECT_AREA_THRESH * self.output.scale else 3 - ) - - theta = angle * math.pi / 180.0 - c = math.cos(theta) - s = math.sin(theta) - rect = [(-w / 2, h / 2), (-w / 2, -h / 2), (w / 2, -h / 2), (w / 2, h / 2)] - # x: left->right ; y: top->down - rotated_rect = [(s * yy + c * xx + cnt_x, c * yy - s * xx + cnt_y) for (xx, yy) in rect] - for k in range(4): - j = (k + 1) % 4 - self.draw_line( - [rotated_rect[k][0], rotated_rect[j][0]], - [rotated_rect[k][1], rotated_rect[j][1]], - color=edge_color, - linestyle="--" if k == 1 else line_style, - linewidth=linewidth, - ) - - if label is not None: - text_pos = rotated_rect[1] # topleft corner - - height_ratio = h / np.sqrt(self.output.height * self.output.width) - label_color = self._change_color_brightness(edge_color, brightness_factor=0.7) - font_size = ( - np.clip((height_ratio - 0.02) / 0.08 + 1, 1.2, 2) * 0.5 * self._default_font_size - ) - self.draw_text(label, text_pos, color=label_color, font_size=font_size, rotation=angle) - - return self.output - - def draw_circle(self, circle_coord, color, radius=3): - """ - Args: - circle_coord (list(int) or tuple(int)): contains the x and y coordinates - of the center of the circle. - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - radius (int): radius of the circle. - - Returns: - output (VisImage): image object with box drawn. - """ - x, y = circle_coord - self.output.ax.add_patch( - mpl.patches.Circle(circle_coord, radius=radius, fill=True, color=color) - ) - return self.output - - def draw_line(self, x_data, y_data, color, linestyle="-", linewidth=None): - """ - Args: - x_data (list[int]): a list containing x values of all the points being drawn. - Length of list should match the length of y_data. - y_data (list[int]): a list containing y values of all the points being drawn. - Length of list should match the length of x_data. - color: color of the line. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - linestyle: style of the line. Refer to `matplotlib.lines.Line2D` - for a full list of formats that are accepted. - linewidth (float or None): width of the line. When it's None, - a default value will be computed and used. - - Returns: - output (VisImage): image object with line drawn. - """ - if linewidth is None: - linewidth = self._default_font_size / 3 - linewidth = max(linewidth, 1) - self.output.ax.add_line( - mpl.lines.Line2D( - x_data, - y_data, - linewidth=linewidth * self.output.scale, - color=color, - linestyle=linestyle, - ) - ) - return self.output - - def draw_binary_mask( - self, binary_mask, color=None, *, edge_color=None, text=None, alpha=0.7, area_threshold=10 - ): - """ - Args: - binary_mask (ndarray): numpy array of shape (H, W), where H is the image height and - W is the image width. Each value in the array is either a 0 or 1 value of uint8 - type. - color: color of the mask. Refer to `matplotlib.colors` for a full list of - formats that are accepted. If None, will pick a random color. - edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a - full list of formats that are accepted. - text (str): if None, will be drawn on the object - alpha (float): blending efficient. Smaller values lead to more transparent masks. - area_threshold (float): a connected component smaller than this area will not be shown. 
- - Returns: - output (VisImage): image object with mask drawn. - """ - if color is None: - color = random_color(rgb=True, maximum=1) - color = mplc.to_rgb(color) - - has_valid_segment = False - binary_mask = binary_mask.astype("uint8") # opencv needs uint8 - mask = GenericMask(binary_mask, self.output.height, self.output.width) - shape2d = (binary_mask.shape[0], binary_mask.shape[1]) - - if not mask.has_holes: - # draw polygons for regular masks - for segment in mask.polygons: - area = mask_util.area(mask_util.frPyObjects([segment], shape2d[0], shape2d[1])) - if area < (area_threshold or 0): - continue - has_valid_segment = True - segment = segment.reshape(-1, 2) - self.draw_polygon(segment, color=color, edge_color=edge_color, alpha=alpha) - else: - # TODO: Use Path/PathPatch to draw vector graphics: - # https://stackoverflow.com/questions/8919719/how-to-plot-a-complex-polygon - rgba = np.zeros(shape2d + (4,), dtype="float32") - rgba[:, :, :3] = color - rgba[:, :, 3] = (mask.mask == 1).astype("float32") * alpha - has_valid_segment = True - self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) - - if text is not None and has_valid_segment: - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - self._draw_text_in_mask(binary_mask, text, lighter_color) - return self.output - - def draw_soft_mask(self, soft_mask, color=None, *, text=None, alpha=0.5): - """ - Args: - soft_mask (ndarray): float array of shape (H, W), each value in [0, 1]. - color: color of the mask. Refer to `matplotlib.colors` for a full list of - formats that are accepted. If None, will pick a random color. - text (str): if None, will be drawn on the object - alpha (float): blending efficient. Smaller values lead to more transparent masks. - - Returns: - output (VisImage): image object with mask drawn. - """ - if color is None: - color = random_color(rgb=True, maximum=1) - color = mplc.to_rgb(color) - - shape2d = (soft_mask.shape[0], soft_mask.shape[1]) - rgba = np.zeros(shape2d + (4,), dtype="float32") - rgba[:, :, :3] = color - rgba[:, :, 3] = soft_mask * alpha - self.output.ax.imshow(rgba, extent=(0, self.output.width, self.output.height, 0)) - - if text is not None: - lighter_color = self._change_color_brightness(color, brightness_factor=0.7) - binary_mask = (soft_mask > 0.5).astype("uint8") - self._draw_text_in_mask(binary_mask, text, lighter_color) - return self.output - - def draw_polygon(self, segment, color, edge_color=None, alpha=0.5): - """ - Args: - segment: numpy array of shape Nx2, containing all the points in the polygon. - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - edge_color: color of the polygon edges. Refer to `matplotlib.colors` for a - full list of formats that are accepted. If not provided, a darker shade - of the polygon color will be used instead. - alpha (float): blending efficient. Smaller values lead to more transparent masks. - - Returns: - output (VisImage): image object with polygon drawn. 
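Both mask-drawing paths above fall back to the same trick: build an H×W×4 RGBA image whose colour channels are constant and whose alpha channel is the (scaled) mask, then imshow it over the figure. A self-contained sketch of that overlay with a made-up mask:

```python
import matplotlib.pyplot as plt
import numpy as np

h, w = 120, 160
img = np.full((h, w, 3), 0.5)            # placeholder background image
yy, xx = np.mgrid[0:h, 0:w]
# Toy soft mask in [0, 1]: a Gaussian blob.
soft_mask = np.exp(-((xx - 80) ** 2 + (yy - 60) ** 2) / (2 * 25.0 ** 2))

color, alpha = (1.0, 0.2, 0.2), 0.5
rgba = np.zeros((h, w, 4), dtype="float32")
rgba[:, :, :3] = color                   # constant colour everywhere
rgba[:, :, 3] = soft_mask * alpha        # the mask drives transparency

fig, ax = plt.subplots()
ax.imshow(img)
ax.imshow(rgba, extent=(0, w, h, 0))     # flipped y-extent keeps image coordinates
plt.show()
```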
- """ - if edge_color is None: - # make edge color darker than the polygon color - if alpha > 0.8: - edge_color = self._change_color_brightness(color, brightness_factor=-0.7) - else: - edge_color = color - edge_color = mplc.to_rgb(edge_color) + (1,) - - polygon = mpl.patches.Polygon( - segment, - fill=True, - facecolor=mplc.to_rgb(color) + (alpha,), - edgecolor=edge_color, - linewidth=max(self._default_font_size // 15 * self.output.scale, 1), - ) - self.output.ax.add_patch(polygon) - return self.output - - """ - Internal methods: - """ - - def _jitter(self, color): - """ - Randomly modifies given color to produce a slightly different color than the color given. - - Args: - color (tuple[double]): a tuple of 3 elements, containing the RGB values of the color - picked. The values in the list are in the [0.0, 1.0] range. - - Returns: - jittered_color (tuple[double]): a tuple of 3 elements, containing the RGB values of the - color after being jittered. The values in the list are in the [0.0, 1.0] range. - """ - color = mplc.to_rgb(color) - # np.random.seed(0) - vec = np.random.rand(3) - # better to do it in another color space - vec = vec / np.linalg.norm(vec) * 0.5 - res = np.clip(vec + color, 0, 1) - return tuple(res) - - def _create_grayscale_image(self, mask=None): - """ - Create a grayscale version of the original image. - The colors in masked area, if given, will be kept. - """ - img_bw = self.img.astype("f4").mean(axis=2) - img_bw = np.stack([img_bw] * 3, axis=2) - if mask is not None: - img_bw[mask] = self.img[mask] - return img_bw - - def _change_color_brightness(self, color, brightness_factor): - """ - Depending on the brightness_factor, gives a lighter or darker color i.e. a color with - less or more saturation than the original color. - - Args: - color: color of the polygon. Refer to `matplotlib.colors` for a full list of - formats that are accepted. - brightness_factor (float): a value in [-1.0, 1.0] range. A lightness factor of - 0 will correspond to no change, a factor in [-1.0, 0) range will result in - a darker color and a factor in (0, 1.0] range will result in a lighter color. - - Returns: - modified_color (tuple[double]): a tuple containing the RGB values of the - modified color. Each value in the tuple is in the [0.0, 1.0] range. - """ - assert brightness_factor >= -1.0 and brightness_factor <= 1.0 - color = mplc.to_rgb(color) - polygon_color = colorsys.rgb_to_hls(*mplc.to_rgb(color)) - modified_lightness = polygon_color[1] + (brightness_factor * polygon_color[1]) - modified_lightness = 0.0 if modified_lightness < 0.0 else modified_lightness - modified_lightness = 1.0 if modified_lightness > 1.0 else modified_lightness - modified_color = colorsys.hls_to_rgb(polygon_color[0], modified_lightness, polygon_color[2]) - return modified_color - - def _convert_boxes(self, boxes): - """ - Convert different format of boxes to an NxB array, where B = 4 or 5 is the box dimension. - """ - if isinstance(boxes, Boxes) or isinstance(boxes, RotatedBoxes): - return boxes.tensor.detach().numpy() - else: - return np.asarray(boxes) - - def _convert_masks(self, masks_or_polygons): - """ - Convert different format of masks or polygons to a tuple of masks and polygons. 
- - Returns: - list[GenericMask]: - """ - - m = masks_or_polygons - if isinstance(m, PolygonMasks): - m = m.polygons - if isinstance(m, BitMasks): - m = m.tensor.numpy() - if isinstance(m, torch.Tensor): - m = m.numpy() - ret = [] - for x in m: - if isinstance(x, GenericMask): - ret.append(x) - else: - ret.append(GenericMask(x, self.output.height, self.output.width)) - return ret - - def _draw_text_in_mask(self, binary_mask, text, color): - """ - Find proper places to draw text given a binary mask. - """ - # TODO sometimes drawn on wrong objects. the heuristics here can improve. - _num_cc, cc_labels, stats, centroids = cv2.connectedComponentsWithStats(binary_mask, 8) - if stats[1:, -1].size == 0: - return - largest_component_id = np.argmax(stats[1:, -1]) + 1 - - # draw text on the largest component, as well as other very large components. - for cid in range(1, _num_cc): - if cid == largest_component_id or stats[cid, -1] > _LARGE_MASK_AREA_THRESH: - # median is more stable than centroid - # center = centroids[largest_component_id] - center = np.median((cc_labels == cid).nonzero(), axis=1)[::-1] - self.draw_text(text, center, color=color) - - def _convert_keypoints(self, keypoints): - if isinstance(keypoints, Keypoints): - keypoints = keypoints.tensor - keypoints = np.asarray(keypoints) - return keypoints - - def get_output(self): - """ - Returns: - output (VisImage): the image output containing the visualizations added - to the image. - """ - return self.output \ No newline at end of file diff --git a/spaces/xl2533/FinDoc/build_index/base.py b/spaces/xl2533/FinDoc/build_index/base.py deleted file mode 100644 index d8c2cef3dcd5cef48973af2e9f5934b9825839c7..0000000000000000000000000000000000000000 --- a/spaces/xl2533/FinDoc/build_index/base.py +++ /dev/null @@ -1,88 +0,0 @@ -# -*-coding:utf-8 -*- -""" - Base Reader and Document -""" -import os -from dataclasses import dataclass -from dataclasses_json import dataclass_json -from typing import Any, Dict, List, Optional -from glob import glob -from build_index.parser import ParserFactory -from langchain.docstore.document import Document as LCDocument - - -@dataclass_json -@dataclass -class Document: - text: str = None - doc_id: Optional[str] = None - embedding: Optional[List[float]] = None - extra_info: Optional[Dict[str, Any]] = None - - def get_text(self): - return self.text - - def get_doc_id(self): - return self.doc_id - - def get_embedding(self): - return self.embedding - - @property - def extra_info_str(self) -> Optional[str]: - """Extra info string.""" - if self.extra_info is None: - return None - - return "\n".join([f"{k}: {str(v)}" for k, v in self.extra_info.items()]) - - def __post_init__(self): - #字段检查 - assert self.text is not None, 'Text Field can not be None' - - def to_langchain_format(self): - """Convert struct to LangChain document format.""" - metadata = self.extra_info or {} - return LCDocument(page_content=self.text, metadata=metadata) - - -class FileReader(object): - """ - Load file from ./data_dir - """ - def __init__(self, data_dir=None, folder_name=None, input_files=None, has_meta=True): - self.data_dir = data_dir - self.has_meta = has_meta - - if input_files: - self.input_files = input_files - else: - # get all file in data_dir - ##TODO: 暂不支持data下recursive dir - dir = os.path.join(data_dir, folder_name, '*') - self.input_files = glob(dir) - print(f'{len(self.input_files)} files in {dir}') - print(self.input_files) - - def load_data(self, concatenate=False) -> List[Document]: - data_list = [] - metadata_list = [] - for 
file in self.input_files: - parser = ParserFactory['pdf'] - if parser is None: - raise ValueError(f"{file} format doesn't match any sufix supported") - try: - data, meta = parser.parse_file(file) - except Exception as e: - print(f'{file} parse failed. error = {e}') - continue - data_list.append(data) - if self.has_meta: - metadata_list.append(meta) - - if concatenate: - return [Document("\n".join(data_list))] - elif self.has_meta: - return [Document(d, extra_info=m) for d, m in zip(data_list, metadata_list)] - else: - return [Document(d) for d in data_list] diff --git a/spaces/xp3857/text-to-image/README.md b/spaces/xp3857/text-to-image/README.md deleted file mode 100644 index 58fd683b4fee8966a5f596d670bff9b09937729f..0000000000000000000000000000000000000000 --- a/spaces/xp3857/text-to-image/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Text To Image -emoji: 🌖 -colorFrom: gray -colorTo: purple -sdk: gradio -sdk_version: 3.20.0 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/xxbb/VITS-Umamusume-voice-synthesizer/losses.py b/spaces/xxbb/VITS-Umamusume-voice-synthesizer/losses.py deleted file mode 100644 index fb22a0e834dd87edaa37bb8190eee2c3c7abe0d5..0000000000000000000000000000000000000000 --- a/spaces/xxbb/VITS-Umamusume-voice-synthesizer/losses.py +++ /dev/null @@ -1,61 +0,0 @@ -import torch -from torch.nn import functional as F - -import commons - - -def feature_loss(fmap_r, fmap_g): - loss = 0 - for dr, dg in zip(fmap_r, fmap_g): - for rl, gl in zip(dr, dg): - rl = rl.float().detach() - gl = gl.float() - loss += torch.mean(torch.abs(rl - gl)) - - return loss * 2 - - -def discriminator_loss(disc_real_outputs, disc_generated_outputs): - loss = 0 - r_losses = [] - g_losses = [] - for dr, dg in zip(disc_real_outputs, disc_generated_outputs): - dr = dr.float() - dg = dg.float() - r_loss = torch.mean((1-dr)**2) - g_loss = torch.mean(dg**2) - loss += (r_loss + g_loss) - r_losses.append(r_loss.item()) - g_losses.append(g_loss.item()) - - return loss, r_losses, g_losses - - -def generator_loss(disc_outputs): - loss = 0 - gen_losses = [] - for dg in disc_outputs: - dg = dg.float() - l = torch.mean((1-dg)**2) - gen_losses.append(l) - loss += l - - return loss, gen_losses - - -def kl_loss(z_p, logs_q, m_p, logs_p, z_mask): - """ - z_p, logs_q: [b, h, t_t] - m_p, logs_p: [b, h, t_t] - """ - z_p = z_p.float() - logs_q = logs_q.float() - m_p = m_p.float() - logs_p = logs_p.float() - z_mask = z_mask.float() - - kl = logs_p - logs_q - 0.5 - kl += 0.5 * ((z_p - m_p)**2) * torch.exp(-2. 
* logs_p) - kl = torch.sum(kl * z_mask) - l = kl / torch.sum(z_mask) - return l diff --git a/spaces/yderre-aubay/midi-player-demo/src/main/components/SettingDialog/MIDIDeviceView/MIDIDeviceView.tsx b/spaces/yderre-aubay/midi-player-demo/src/main/components/SettingDialog/MIDIDeviceView/MIDIDeviceView.tsx deleted file mode 100644 index b620a1bcc409dd4400f61143d4391f5f38f8de00..0000000000000000000000000000000000000000 --- a/spaces/yderre-aubay/midi-player-demo/src/main/components/SettingDialog/MIDIDeviceView/MIDIDeviceView.tsx +++ /dev/null @@ -1,171 +0,0 @@ -import styled from "@emotion/styled" -import { observer } from "mobx-react-lite" -import { FC } from "react" -import { Alert } from "../../../../components/Alert" -import { Checkbox } from "../../../../components/Checkbox" -import { CircularProgress } from "../../../../components/CircularProgress" -import { DialogContent, DialogTitle } from "../../../../components/Dialog" -import { Label } from "../../../../components/Label" -import { Localized } from "../../../../components/Localized" -import { useStores } from "../../../hooks/useStores" - -interface Device { - id: string - name: string - isConnected: boolean -} - -interface ListItem { - device: Device - isSelected: boolean - onCheck: (isChecked: boolean) => void -} - -const DeviceRow: FC = ({ device, isSelected, onCheck }) => { - return ( - - ) -} - -const DeviceList = styled.div`` - -const Notice = styled.div` - color: ${({ theme }) => theme.secondaryTextColor}; -` - -const Spacer = styled.div` - height: 2rem; -` - -const SectionTitle = styled.div` - font-weight: bold; - margin: 1rem 0; -` - -export const MIDIDeviceView: FC = observer(() => { - const { midiDeviceStore } = useStores() - - const { - inputs, - outputs, - isLoading, - requestError, - enabledInputs, - enabledOutputs, - isFactorySoundEnabled, - } = midiDeviceStore - - const formatName = (device: WebMidi.MIDIPort) => - (device?.name ?? "") + - ((device.manufacturer?.length ?? 0) > 0 ? 
`(${device.manufacturer})` : "") - - const portToDevice = (device: WebMidi.MIDIPort): Device => ({ - id: device.id, - name: formatName(device), - isConnected: device.state === "connected", - }) - - const inputDevices = inputs.map((device) => ({ - device: portToDevice(device), - isSelected: enabledInputs[device.id], - })) - - const outputDevices = outputs.map((device) => ({ - device: portToDevice(device), - isSelected: enabledOutputs[device.id], - })) - - const factorySound: Device = { - id: "signal-midi-app", - name: "Signal Factory Sound", - isConnected: true, - } - - return ( - <> - - midi-settings - - - {isLoading && } - {requestError && ( - <> - {requestError.message} - - - )} - {!isLoading && ( - <> - - inputs - - - {inputDevices.length === 0 && ( - - - no-inputs - - - )} - {inputDevices.map(({ device, isSelected }) => ( - - midiDeviceStore.setInputEnable(device.id, checked) - } - /> - ))} - - { - <> - - - outputs - - - - (midiDeviceStore.isFactorySoundEnabled = checked) - } - /> - {outputDevices.map(({ device, isSelected }) => ( - - midiDeviceStore.setOutputEnable(device.id, checked) - } - /> - ))} - - - } - - )} - - - ) -}) diff --git a/spaces/yerfor/SyntaSpeech/modules/tts/portaspeech/portaspeech_flow.py b/spaces/yerfor/SyntaSpeech/modules/tts/portaspeech/portaspeech_flow.py deleted file mode 100644 index 256887dd8b365e38ac6c1973f4ec376e93029652..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/modules/tts/portaspeech/portaspeech_flow.py +++ /dev/null @@ -1,75 +0,0 @@ -import torch -import torch.distributions as dist -from torch import nn -from modules.commons.normalizing_flow.glow_modules import Glow -from modules.tts.portaspeech.portaspeech import PortaSpeech - - -class PortaSpeechFlow(PortaSpeech): - def __init__(self, ph_dict_size, word_dict_size, hparams, out_dims=None): - super().__init__(ph_dict_size, word_dict_size, hparams, out_dims) - cond_hs = 80 - if hparams.get('use_txt_cond', True): - cond_hs = cond_hs + hparams['hidden_size'] - if hparams.get('use_latent_cond', False): - cond_hs = cond_hs + hparams['latent_size'] - if hparams['use_cond_proj']: - self.g_proj = nn.Conv1d(cond_hs, 160, 5, padding=2) - cond_hs = 160 - self.post_flow = Glow( - 80, hparams['post_glow_hidden'], hparams['post_glow_kernel_size'], 1, - hparams['post_glow_n_blocks'], hparams['post_glow_n_block_layers'], - n_split=4, n_sqz=2, - gin_channels=cond_hs, - share_cond_layers=hparams['post_share_cond_layers'], - share_wn_layers=hparams['share_wn_layers'], - sigmoid_scale=hparams['sigmoid_scale'] - ) - self.prior_dist = dist.Normal(0, 1) - - def forward(self, txt_tokens, word_tokens, ph2word, word_len, mel2word=None, mel2ph=None, - spk_embed=None, spk_id=None, pitch=None, infer=False, tgt_mels=None, - forward_post_glow=True, two_stage=True, global_step=None): - is_training = self.training - train_fvae = not (forward_post_glow and two_stage) - if not train_fvae: - self.eval() - with torch.set_grad_enabled(mode=train_fvae): - ret = super(PortaSpeechFlow, self).forward( - txt_tokens, word_tokens, ph2word, word_len, mel2word, mel2ph, - spk_embed, spk_id, pitch, infer, tgt_mels, global_step) - if (forward_post_glow or not two_stage) and self.hparams['use_post_flow']: - self.run_post_glow(tgt_mels, infer, is_training, ret) - return ret - - def run_post_glow(self, tgt_mels, infer, is_training, ret): - x_recon = ret['mel_out'].transpose(1, 2) - g = x_recon - B, _, T = g.shape - if self.hparams.get('use_txt_cond', True): - g = torch.cat([g, ret['decoder_inp'].transpose(1, 2)], 1) - if 
self.hparams.get('use_latent_cond', False): - g_z = ret['z_p'][:, :, :, None].repeat(1, 1, 1, 4).reshape(B, -1, T) - g = torch.cat([g, g_z], 1) - if self.hparams['use_cond_proj']: - g = self.g_proj(g) - prior_dist = self.prior_dist - if not infer: - if is_training: - self.post_flow.train() - nonpadding = ret['nonpadding'].transpose(1, 2) - y_lengths = nonpadding.sum(-1) - if self.hparams['detach_postflow_input']: - g = g.detach() - tgt_mels = tgt_mels.transpose(1, 2) - z_postflow, ldj = self.post_flow(tgt_mels, nonpadding, g=g) - ldj = ldj / y_lengths / 80 - ret['z_pf'], ret['ldj_pf'] = z_postflow, ldj - ret['postflow'] = -prior_dist.log_prob(z_postflow).mean() - ldj.mean() - if torch.isnan(ret['postflow']): - ret['postflow'] = None - else: - nonpadding = torch.ones_like(x_recon[:, :1, :]) - z_post = torch.randn(x_recon.shape).to(g.device) * self.hparams['noise_scale'] - x_recon, _ = self.post_flow(z_post, nonpadding, g, reverse=True) - ret['mel_out'] = x_recon.transpose(1, 2) diff --git a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/layers/pqmf.py b/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/layers/pqmf.py deleted file mode 100644 index bb31c430d2abe0219f58f153f69d836383e095ef..0000000000000000000000000000000000000000 --- a/spaces/yerfor/SyntaSpeech/modules/vocoder/parallel_wavegan/layers/pqmf.py +++ /dev/null @@ -1,132 +0,0 @@ -# -*- coding: utf-8 -*- - -# Copyright 2020 Tomoki Hayashi -# MIT License (https://opensource.org/licenses/MIT) - -"""Pseudo QMF modules.""" - -import numpy as np -import torch -import torch.nn.functional as F - -from scipy.signal import kaiser - - -def design_prototype_filter(taps=62, cutoff_ratio=0.15, beta=9.0): - """Design prototype filter for PQMF. - - This method is based on `A Kaiser window approach for the design of prototype - filters of cosine modulated filterbanks`_. - - Args: - taps (int): The number of filter taps. - cutoff_ratio (float): Cut-off frequency ratio. - beta (float): Beta coefficient for kaiser window. - - Returns: - ndarray: Impluse response of prototype filter (taps + 1,). - - .. _`A Kaiser window approach for the design of prototype filters of cosine modulated filterbanks`: - https://ieeexplore.ieee.org/abstract/document/681427 - - """ - # check the arguments are valid - assert taps % 2 == 0, "The number of taps mush be even number." - assert 0.0 < cutoff_ratio < 1.0, "Cutoff ratio must be > 0.0 and < 1.0." - - # make initial filter - omega_c = np.pi * cutoff_ratio - with np.errstate(invalid='ignore'): - h_i = np.sin(omega_c * (np.arange(taps + 1) - 0.5 * taps)) \ - / (np.pi * (np.arange(taps + 1) - 0.5 * taps)) - h_i[taps // 2] = np.cos(0) * cutoff_ratio # fix nan due to indeterminate form - - # apply kaiser window - w = kaiser(taps + 1, beta) - h = h_i * w - - return h - - -class PQMF(torch.nn.Module): - """PQMF module. - - This module is based on `Near-perfect-reconstruction pseudo-QMF banks`_. - - .. _`Near-perfect-reconstruction pseudo-QMF banks`: - https://ieeexplore.ieee.org/document/258122 - - """ - - def __init__(self, subbands=4, taps=62, cutoff_ratio=0.15, beta=9.0): - """Initilize PQMF module. - - Args: - subbands (int): The number of subbands. - taps (int): The number of filter taps. - cutoff_ratio (float): Cut-off frequency ratio. - beta (float): Beta coefficient for kaiser window. 
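design_prototype_filter is a textbook windowed-sinc lowpass: an ideal sinc truncated to taps + 1 samples, with the indeterminate 0/0 sample at the centre tap patched to its limit, multiplied by a Kaiser window. A compact restatement using numpy's Kaiser window (illustrative sketch, not the module's exact code path, which uses scipy's window):

```python
import numpy as np

def kaiser_lowpass(taps=62, cutoff_ratio=0.15, beta=9.0):
    """Windowed-sinc prototype filter of length taps + 1 (taps must be even)."""
    assert taps % 2 == 0 and 0.0 < cutoff_ratio < 1.0
    n = np.arange(taps + 1) - 0.5 * taps        # symmetric index around the centre tap
    omega_c = np.pi * cutoff_ratio
    with np.errstate(invalid="ignore"):
        h_ideal = np.sin(omega_c * n) / (np.pi * n)
    h_ideal[taps // 2] = cutoff_ratio           # limit of sin(w*n)/(pi*n) as n -> 0 is w/pi
    return h_ideal * np.kaiser(taps + 1, beta)

h = kaiser_lowpass()
print(h.shape, float(np.sum(h)))                # (63,); np.sum(h) approximates the DC gain
```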
- - """ - super(PQMF, self).__init__() - - # define filter coefficient - h_proto = design_prototype_filter(taps, cutoff_ratio, beta) - h_analysis = np.zeros((subbands, len(h_proto))) - h_synthesis = np.zeros((subbands, len(h_proto))) - for k in range(subbands): - h_analysis[k] = 2 * h_proto * np.cos( - (2 * k + 1) * (np.pi / (2 * subbands)) * - (np.arange(taps + 1) - ((taps - 1) / 2)) + - (-1) ** k * np.pi / 4) - h_synthesis[k] = 2 * h_proto * np.cos( - (2 * k + 1) * (np.pi / (2 * subbands)) * - (np.arange(taps + 1) - ((taps - 1) / 2)) - - (-1) ** k * np.pi / 4) - - # convert to tensor - analysis_filter = torch.from_numpy(h_analysis).float().unsqueeze(1) - synthesis_filter = torch.from_numpy(h_synthesis).float().unsqueeze(0) - - # register coefficients as beffer - self.register_buffer("analysis_filter", analysis_filter) - self.register_buffer("synthesis_filter", synthesis_filter) - - # filter for downsampling & upsampling - updown_filter = torch.zeros((subbands, subbands, subbands)).float() - for k in range(subbands): - updown_filter[k, k, 0] = 1.0 - self.register_buffer("updown_filter", updown_filter) - self.subbands = subbands - - # keep padding info - self.pad_fn = torch.nn.ConstantPad1d(taps // 2, 0.0) - - def analysis(self, x): - """Analysis with PQMF. - - Args: - x (Tensor): Input tensor (B, 1, T). - - Returns: - Tensor: Output tensor (B, subbands, T // subbands). - - """ - x = F.conv1d(self.pad_fn(x), self.analysis_filter) - return F.conv1d(x, self.updown_filter, stride=self.subbands) - - def synthesis(self, x): - """Synthesis with PQMF. - - Args: - x (Tensor): Input tensor (B, subbands, T // subbands). - - Returns: - Tensor: Output tensor (B, 1, T). - - """ - # NOTE(kan-bayashi): Power will be dreased so here multipy by # subbands. - # Not sure this is the correct way, it is better to check again. - # TODO(kan-bayashi): Understand the reconstruction procedure - x = F.conv_transpose1d(x, self.updown_filter * self.subbands, stride=self.subbands) - return F.conv1d(self.pad_fn(x), self.synthesis_filter) diff --git a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/nystromformer/configuration_nystromformer.py b/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/nystromformer/configuration_nystromformer.py deleted file mode 100644 index 98b3e511ac0e2112eb561049418fa286ba5ed695..0000000000000000000000000000000000000000 --- a/spaces/yizhangliu/Grounded-Segment-Anything/transformers_4_35_0/models/nystromformer/configuration_nystromformer.py +++ /dev/null @@ -1,133 +0,0 @@ -# coding=utf-8 -# Copyright 2022 UW-Madison and The HuggingFace Inc. team. All rights reserved. -# -# Licensed under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. -# You may obtain a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, -# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. -# See the License for the specific language governing permissions and -# limitations under the License. 
-""" Nystromformer model configuration""" - -from ...configuration_utils import PretrainedConfig -from ...utils import logging - - -logger = logging.get_logger(__name__) - -NYSTROMFORMER_PRETRAINED_CONFIG_ARCHIVE_MAP = { - "uw-madison/nystromformer-512": "https://huggingface.co/uw-madison/nystromformer-512/resolve/main/config.json", - # See all Nystromformer models at https://huggingface.co/models?filter=nystromformer -} - - -class NystromformerConfig(PretrainedConfig): - r""" - This is the configuration class to store the configuration of a [`NystromformerModel`]. It is used to instantiate - an Nystromformer model according to the specified arguments, defining the model architecture. Instantiating a - configuration with the defaults will yield a similar configuration to that of the Nystromformer - [uw-madison/nystromformer-512](https://huggingface.co/uw-madison/nystromformer-512) architecture. - - Configuration objects inherit from [`PretrainedConfig`] and can be used to control the model outputs. Read the - documentation from [`PretrainedConfig`] for more information. - - Args: - vocab_size (`int`, *optional*, defaults to 30000): - Vocabulary size of the Nystromformer model. Defines the number of different tokens that can be represented - by the `inputs_ids` passed when calling [`NystromformerModel`]. - hidden_size (`int`, *optional*, defaults to 768): - Dimension of the encoder layers and the pooler layer. - num_hidden_layers (`int`, *optional*, defaults to 12): - Number of hidden layers in the Transformer encoder. - num_attention_heads (`int`, *optional*, defaults to 12): - Number of attention heads for each attention layer in the Transformer encoder. - intermediate_size (`int`, *optional*, defaults to 3072): - Dimension of the "intermediate" (i.e., feed-forward) layer in the Transformer encoder. - hidden_act (`str` or `function`, *optional*, defaults to `"gelu"`): - The non-linear activation function (function or string) in the encoder and pooler. If string, `"gelu"`, - `"relu"`, `"selu"` and `"gelu_new"` are supported. - hidden_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout probabilitiy for all fully connected layers in the embeddings, encoder, and pooler. - attention_probs_dropout_prob (`float`, *optional*, defaults to 0.1): - The dropout ratio for the attention probabilities. - max_position_embeddings (`int`, *optional*, defaults to 512): - The maximum sequence length that this model might ever be used with. Typically set this to something large - just in case (e.g., 512 or 1024 or 2048). - type_vocab_size (`int`, *optional*, defaults to 2): - The vocabulary size of the `token_type_ids` passed when calling [`NystromformerModel`]. - segment_means_seq_len (`int`, *optional*, defaults to 64): - Sequence length used in segment-means. - num_landmarks (`int`, *optional*, defaults to 64): - The number of landmark (or Nystrom) points to use in Nystrom approximation of the softmax self-attention - matrix. - conv_kernel_size (`int`, *optional*, defaults to 65): - The kernel size of depthwise convolution used in Nystrom approximation. - inv_coeff_init_option (`bool`, *optional*, defaults to `False`): - Whether or not to use exact coefficient computation for the initial values for the iterative method of - calculating the Moore-Penrose inverse of a matrix. - initializer_range (`float`, *optional*, defaults to 0.02): - The standard deviation of the truncated_normal_initializer for initializing all weight matrices. 
- layer_norm_eps (`float`, *optional*, defaults to 1e-12): - The epsilon used by the layer normalization layers. - - Example: - - ```python - >>> from transformers import NystromformerModel, NystromformerConfig - - >>> # Initializing a Nystromformer uw-madison/nystromformer-512 style configuration - >>> configuration = NystromformerConfig() - - >>> # Initializing a model from the uw-madison/nystromformer-512 style configuration - >>> model = NystromformerModel(configuration) - - >>> # Accessing the model configuration - >>> configuration = model.config - ```""" - model_type = "nystromformer" - - def __init__( - self, - vocab_size=30000, - hidden_size=768, - num_hidden_layers=12, - num_attention_heads=12, - intermediate_size=3072, - hidden_act="gelu_new", - hidden_dropout_prob=0.1, - attention_probs_dropout_prob=0.1, - max_position_embeddings=510, - type_vocab_size=2, - segment_means_seq_len=64, - num_landmarks=64, - conv_kernel_size=65, - inv_coeff_init_option=False, - initializer_range=0.02, - layer_norm_eps=1e-5, - pad_token_id=1, - bos_token_id=0, - eos_token_id=2, - **kwargs, - ): - self.vocab_size = vocab_size - self.max_position_embeddings = max_position_embeddings - self.hidden_size = hidden_size - self.num_hidden_layers = num_hidden_layers - self.num_attention_heads = num_attention_heads - self.intermediate_size = intermediate_size - self.hidden_act = hidden_act - self.hidden_dropout_prob = hidden_dropout_prob - self.attention_probs_dropout_prob = attention_probs_dropout_prob - self.initializer_range = initializer_range - self.type_vocab_size = type_vocab_size - self.segment_means_seq_len = segment_means_seq_len - self.num_landmarks = num_landmarks - self.conv_kernel_size = conv_kernel_size - self.inv_coeff_init_option = inv_coeff_init_option - self.layer_norm_eps = layer_norm_eps - super().__init__(pad_token_id=pad_token_id, bos_token_id=bos_token_id, eos_token_id=eos_token_id, **kwargs) diff --git a/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/slicer2.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/slicer2.py deleted file mode 100644 index 606b07be14eb9769b10a9a8f78cc1580334a2076..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/modules/slicer2.py +++ /dev/null @@ -1,186 +0,0 @@ -import numpy as np - - -# This function is obtained from librosa. 
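Stepping back to the Nystromformer configuration above: num_landmarks sets the rank of the Nyström approximation, in which the full n×n softmax attention matrix is reconstructed from blocks built on m landmark positions obtained by segment-means, tied together through a Moore-Penrose pseudoinverse. A toy numpy illustration of that matrix identity (not the model code, which uses an iterative pseudoinverse):

```python
import numpy as np

rng = np.random.default_rng(0)
n, m, d = 256, 16, 32                 # sequence length, landmarks, head dim
q = rng.normal(size=(n, d))
k = rng.normal(size=(n, d))

def softmax(x):
    e = np.exp(x - x.max(axis=-1, keepdims=True))
    return e / e.sum(axis=-1, keepdims=True)

q_land = q.reshape(m, n // m, -1).mean(axis=1)   # segment-means of the queries
k_land = k.reshape(m, n // m, -1).mean(axis=1)   # segment-means of the keys
f = softmax(q @ k_land.T / np.sqrt(d))           # n x m
a = softmax(q_land @ k_land.T / np.sqrt(d))      # m x m
b = softmax(q_land @ k.T / np.sqrt(d))           # m x n
approx = f @ np.linalg.pinv(a) @ b               # ~ softmax(q k^T / sqrt(d))
exact = softmax(q @ k.T / np.sqrt(d))
print(np.abs(approx - exact).mean())
```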
-def get_rms( - y, - *, - frame_length=2048, - hop_length=512, - pad_mode="constant", -): - padding = (int(frame_length // 2), int(frame_length // 2)) - y = np.pad(y, padding, mode=pad_mode) - - axis = -1 - # put our new within-frame axis at the end for now - out_strides = y.strides + tuple([y.strides[axis]]) - # Reduce the shape on the framing axis - x_shape_trimmed = list(y.shape) - x_shape_trimmed[axis] -= frame_length - 1 - out_shape = tuple(x_shape_trimmed) + tuple([frame_length]) - xw = np.lib.stride_tricks.as_strided( - y, shape=out_shape, strides=out_strides - ) - if axis < 0: - target_axis = axis - 1 - else: - target_axis = axis + 1 - xw = np.moveaxis(xw, -1, target_axis) - # Downsample along the target axis - slices = [slice(None)] * xw.ndim - slices[axis] = slice(0, None, hop_length) - x = xw[tuple(slices)] - - # Calculate power - power = np.mean(np.abs(x) ** 2, axis=-2, keepdims=True) - - return np.sqrt(power) - - -class Slicer: - def __init__(self, - sr: int, - threshold: float = -40., - min_length: int = 5000, - min_interval: int = 300, - hop_size: int = 20, - max_sil_kept: int = 5000): - if not min_length >= min_interval >= hop_size: - raise ValueError('The following condition must be satisfied: min_length >= min_interval >= hop_size') - if not max_sil_kept >= hop_size: - raise ValueError('The following condition must be satisfied: max_sil_kept >= hop_size') - min_interval = sr * min_interval / 1000 - self.threshold = 10 ** (threshold / 20.) - self.hop_size = round(sr * hop_size / 1000) - self.win_size = min(round(min_interval), 4 * self.hop_size) - self.min_length = round(sr * min_length / 1000 / self.hop_size) - self.min_interval = round(min_interval / self.hop_size) - self.max_sil_kept = round(sr * max_sil_kept / 1000 / self.hop_size) - - def _apply_slice(self, waveform, begin, end): - if len(waveform.shape) > 1: - return waveform[:, begin * self.hop_size: min(waveform.shape[1], end * self.hop_size)] - else: - return waveform[begin * self.hop_size: min(waveform.shape[0], end * self.hop_size)] - - # @timeit - def slice(self, waveform): - if len(waveform.shape) > 1: - samples = waveform.mean(axis=0) - else: - samples = waveform - if samples.shape[0] <= self.min_length: - return [waveform] - rms_list = get_rms(y=samples, frame_length=self.win_size, hop_length=self.hop_size).squeeze(0) - sil_tags = [] - silence_start = None - clip_start = 0 - for i, rms in enumerate(rms_list): - # Keep looping while frame is silent. - if rms < self.threshold: - # Record start of silent frames. - if silence_start is None: - silence_start = i - continue - # Keep looping while frame is not silent and silence start has not been recorded. - if silence_start is None: - continue - # Clear recorded silence start if interval is not enough or clip is too short - is_leading_silence = silence_start == 0 and i > self.max_sil_kept - need_slice_middle = i - silence_start >= self.min_interval and i - clip_start >= self.min_length - if not is_leading_silence and not need_slice_middle: - silence_start = None - continue - # Need slicing. Record the range of silent frames to be removed. 
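# Editor's note (descriptive comments only): the branches below split on how long the
# silent run is relative to max_sil_kept.
#   1) run <= max_sil_kept: keep the silence and make a single zero-width cut at the
#      quietest frame (or cut from 0 if the silence leads the clip).
#   2) run <= 2 * max_sil_kept: trim the middle of the silence, cutting at the quietest
#      frames near each end so at most max_sil_kept frames survive on either side.
#   3) longer runs: keep at most max_sil_kept frames on each side and drop the rest.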
- if i - silence_start <= self.max_sil_kept: - pos = rms_list[silence_start: i + 1].argmin() + silence_start - if silence_start == 0: - sil_tags.append((0, pos)) - else: - sil_tags.append((pos, pos)) - clip_start = pos - elif i - silence_start <= self.max_sil_kept * 2: - pos = rms_list[i - self.max_sil_kept: silence_start + self.max_sil_kept + 1].argmin() - pos += i - self.max_sil_kept - pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start - pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept - if silence_start == 0: - sil_tags.append((0, pos_r)) - clip_start = pos_r - else: - sil_tags.append((min(pos_l, pos), max(pos_r, pos))) - clip_start = max(pos_r, pos) - else: - pos_l = rms_list[silence_start: silence_start + self.max_sil_kept + 1].argmin() + silence_start - pos_r = rms_list[i - self.max_sil_kept: i + 1].argmin() + i - self.max_sil_kept - if silence_start == 0: - sil_tags.append((0, pos_r)) - else: - sil_tags.append((pos_l, pos_r)) - clip_start = pos_r - silence_start = None - # Deal with trailing silence. - total_frames = rms_list.shape[0] - if silence_start is not None and total_frames - silence_start >= self.min_interval: - silence_end = min(total_frames, silence_start + self.max_sil_kept) - pos = rms_list[silence_start: silence_end + 1].argmin() + silence_start - sil_tags.append((pos, total_frames + 1)) - # Apply and return slices. - if len(sil_tags) == 0: - return [waveform] - else: - chunks = [] - if sil_tags[0][0] > 0: - chunks.append(self._apply_slice(waveform, 0, sil_tags[0][0])) - for i in range(len(sil_tags) - 1): - chunks.append(self._apply_slice(waveform, sil_tags[i][1], sil_tags[i + 1][0])) - if sil_tags[-1][1] < total_frames: - chunks.append(self._apply_slice(waveform, sil_tags[-1][1], total_frames)) - return chunks - - -def main(): - import os.path - from argparse import ArgumentParser - - import librosa - import soundfile - - parser = ArgumentParser() - parser.add_argument('audio', type=str, help='The audio to be sliced') - parser.add_argument('--out', type=str, help='Output directory of the sliced audio clips') - parser.add_argument('--db_thresh', type=float, required=False, default=-40, - help='The dB threshold for silence detection') - parser.add_argument('--min_length', type=int, required=False, default=5000, - help='The minimum milliseconds required for each sliced audio clip') - parser.add_argument('--min_interval', type=int, required=False, default=300, - help='The minimum milliseconds for a silence part to be sliced') - parser.add_argument('--hop_size', type=int, required=False, default=10, - help='Frame length in milliseconds') - parser.add_argument('--max_sil_kept', type=int, required=False, default=500, - help='The maximum silence length kept around the sliced clip, presented in milliseconds') - args = parser.parse_args() - out = args.out - if out is None: - out = os.path.dirname(os.path.abspath(args.audio)) - audio, sr = librosa.load(args.audio, sr=None, mono=False) - slicer = Slicer( - sr=sr, - threshold=args.db_thresh, - min_length=args.min_length, - min_interval=args.min_interval, - hop_size=args.hop_size, - max_sil_kept=args.max_sil_kept - ) - chunks = slicer.slice(audio) - if not os.path.exists(out): - os.makedirs(out) - for i, chunk in enumerate(chunks): - if len(chunk.shape) > 1: - chunk = chunk.T - soundfile.write(os.path.join(out, f'%s_%d.wav' % (os.path.basename(args.audio).rsplit('.', maxsplit=1)[0], i)), chunk, sr) - - -if __name__ == '__main__': - main() diff --git 
a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/nsf_hifigan/nvSTFT.py b/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/nsf_hifigan/nvSTFT.py deleted file mode 100644 index 62bd5a008f81929054f036c81955d5d73377f772..0000000000000000000000000000000000000000 --- a/spaces/yl12053/so-vits-4.1-Grass-Wonder/vdecoder/nsf_hifigan/nvSTFT.py +++ /dev/null @@ -1,134 +0,0 @@ -import math -import os -os.environ["LRU_CACHE_CAPACITY"] = "3" -import random -import torch -import torch.utils.data -import numpy as np -import librosa -from librosa.util import normalize -from librosa.filters import mel as librosa_mel_fn -from scipy.io.wavfile import read -import soundfile as sf -import torch.nn.functional as F - -def load_wav_to_torch(full_path, target_sr=None, return_empty_on_exception=False): - sampling_rate = None - try: - data, sampling_rate = sf.read(full_path, always_2d=True)# than soundfile. - except Exception as ex: - print(f"'{full_path}' failed to load.\nException:") - print(ex) - if return_empty_on_exception: - return [], sampling_rate or target_sr or 48000 - else: - raise Exception(ex) - - if len(data.shape) > 1: - data = data[:, 0] - assert len(data) > 2# check duration of audio file is > 2 samples (because otherwise the slice operation was on the wrong dimension) - - if np.issubdtype(data.dtype, np.integer): # if audio data is type int - max_mag = -np.iinfo(data.dtype).min # maximum magnitude = min possible value of intXX - else: # if audio data is type fp32 - max_mag = max(np.amax(data), -np.amin(data)) - max_mag = (2**31)+1 if max_mag > (2**15) else ((2**15)+1 if max_mag > 1.01 else 1.0) # data should be either 16-bit INT, 32-bit INT or [-1 to 1] float32 - - data = torch.FloatTensor(data.astype(np.float32))/max_mag - - if (torch.isinf(data) | torch.isnan(data)).any() and return_empty_on_exception:# resample will crash with inf/NaN inputs. 
return_empty_on_exception will return empty arr instead of except - return [], sampling_rate or target_sr or 48000 - if target_sr is not None and sampling_rate != target_sr: - data = torch.from_numpy(librosa.core.resample(data.numpy(), orig_sr=sampling_rate, target_sr=target_sr)) - sampling_rate = target_sr - - return data, sampling_rate - -def dynamic_range_compression(x, C=1, clip_val=1e-5): - return np.log(np.clip(x, a_min=clip_val, a_max=None) * C) - -def dynamic_range_decompression(x, C=1): - return np.exp(x) / C - -def dynamic_range_compression_torch(x, C=1, clip_val=1e-5): - return torch.log(torch.clamp(x, min=clip_val) * C) - -def dynamic_range_decompression_torch(x, C=1): - return torch.exp(x) / C - -class STFT(): - def __init__(self, sr=22050, n_mels=80, n_fft=1024, win_size=1024, hop_length=256, fmin=20, fmax=11025, clip_val=1e-5): - self.target_sr = sr - - self.n_mels = n_mels - self.n_fft = n_fft - self.win_size = win_size - self.hop_length = hop_length - self.fmin = fmin - self.fmax = fmax - self.clip_val = clip_val - self.mel_basis = {} - self.hann_window = {} - - def get_mel(self, y, keyshift=0, speed=1, center=False): - sampling_rate = self.target_sr - n_mels = self.n_mels - n_fft = self.n_fft - win_size = self.win_size - hop_length = self.hop_length - fmin = self.fmin - fmax = self.fmax - clip_val = self.clip_val - - factor = 2 ** (keyshift / 12) - n_fft_new = int(np.round(n_fft * factor)) - win_size_new = int(np.round(win_size * factor)) - hop_length_new = int(np.round(hop_length * speed)) - - if torch.min(y) < -1.: - print('min value is ', torch.min(y)) - if torch.max(y) > 1.: - print('max value is ', torch.max(y)) - - mel_basis_key = str(fmax)+'_'+str(y.device) - if mel_basis_key not in self.mel_basis: - mel = librosa_mel_fn(sr=sampling_rate, n_fft=n_fft, n_mels=n_mels, fmin=fmin, fmax=fmax) - self.mel_basis[mel_basis_key] = torch.from_numpy(mel).float().to(y.device) - - keyshift_key = str(keyshift)+'_'+str(y.device) - if keyshift_key not in self.hann_window: - self.hann_window[keyshift_key] = torch.hann_window(win_size_new).to(y.device) - - pad_left = (win_size_new - hop_length_new) //2 - pad_right = max((win_size_new- hop_length_new + 1) //2, win_size_new - y.size(-1) - pad_left) - if pad_right < y.size(-1): - mode = 'reflect' - else: - mode = 'constant' - y = torch.nn.functional.pad(y.unsqueeze(1), (pad_left, pad_right), mode = mode) - y = y.squeeze(1) - - spec = torch.stft(y, n_fft_new, hop_length=hop_length_new, win_length=win_size_new, window=self.hann_window[keyshift_key], - center=center, pad_mode='reflect', normalized=False, onesided=True, return_complex=False) - # print(111,spec) - spec = torch.sqrt(spec.pow(2).sum(-1)+(1e-9)) - if keyshift != 0: - size = n_fft // 2 + 1 - resize = spec.size(1) - if resize < size: - spec = F.pad(spec, (0, 0, 0, size-resize)) - spec = spec[:, :size, :] * win_size / win_size_new - - # print(222,spec) - spec = torch.matmul(self.mel_basis[mel_basis_key], spec) - # print(333,spec) - spec = dynamic_range_compression_torch(spec, clip_val=clip_val) - # print(444,spec) - return spec - - def __call__(self, audiopath): - audio, sr = load_wav_to_torch(audiopath, target_sr=self.target_sr) - spect = self.get_mel(audio.unsqueeze(0)).squeeze(0) - return spect - -stft = STFT() diff --git a/spaces/youngtsai/Mandarin-TTS/text/__init__.py b/spaces/youngtsai/Mandarin-TTS/text/__init__.py deleted file mode 100644 index f1853227f795a4e7308ac8e9e2b0f2713c223dc9..0000000000000000000000000000000000000000 --- 
a/spaces/youngtsai/Mandarin-TTS/text/__init__.py +++ /dev/null @@ -1,447 +0,0 @@ -from text.symbols import symbols - - -# Mappings from symbol to numeric ID and vice versa: -_symbol_to_id = {s: i for i, s in enumerate(symbols)} -_id_to_symbol = {i: s for i, s in enumerate(symbols)} - - -def cleaned_text_to_sequence(cleaned_text): - """Converts a string of text to a sequence of IDs corresponding to the symbols in the text. - Args: - text: string to convert to a sequence - Returns: - List of integers corresponding to the symbols in the text - """ - sequence = [_symbol_to_id[symbol] for symbol in cleaned_text.split()] - return sequence - - -def sequence_to_text(sequence): - """Converts a sequence of IDs back to a string""" - result = "" - for symbol_id in sequence: - s = _id_to_symbol[symbol_id] - result += s - return result - - -pinyin_dict = { - "a": ("^", "a"), - "ai": ("^", "ai"), - "an": ("^", "an"), - "ang": ("^", "ang"), - "ao": ("^", "ao"), - "ba": ("b", "a"), - "bai": ("b", "ai"), - "ban": ("b", "an"), - "bang": ("b", "ang"), - "bao": ("b", "ao"), - "be": ("b", "e"), - "bei": ("b", "ei"), - "ben": ("b", "en"), - "beng": ("b", "eng"), - "bi": ("b", "i"), - "bian": ("b", "ian"), - "biao": ("b", "iao"), - "bie": ("b", "ie"), - "bin": ("b", "in"), - "bing": ("b", "ing"), - "bo": ("b", "o"), - "bu": ("b", "u"), - "ca": ("c", "a"), - "cai": ("c", "ai"), - "can": ("c", "an"), - "cang": ("c", "ang"), - "cao": ("c", "ao"), - "ce": ("c", "e"), - "cen": ("c", "en"), - "ceng": ("c", "eng"), - "cha": ("ch", "a"), - "chai": ("ch", "ai"), - "chan": ("ch", "an"), - "chang": ("ch", "ang"), - "chao": ("ch", "ao"), - "che": ("ch", "e"), - "chen": ("ch", "en"), - "cheng": ("ch", "eng"), - "chi": ("ch", "iii"), - "chong": ("ch", "ong"), - "chou": ("ch", "ou"), - "chu": ("ch", "u"), - "chua": ("ch", "ua"), - "chuai": ("ch", "uai"), - "chuan": ("ch", "uan"), - "chuang": ("ch", "uang"), - "chui": ("ch", "uei"), - "chun": ("ch", "uen"), - "chuo": ("ch", "uo"), - "ci": ("c", "ii"), - "cong": ("c", "ong"), - "cou": ("c", "ou"), - "cu": ("c", "u"), - "cuan": ("c", "uan"), - "cui": ("c", "uei"), - "cun": ("c", "uen"), - "cuo": ("c", "uo"), - "da": ("d", "a"), - "dai": ("d", "ai"), - "dan": ("d", "an"), - "dang": ("d", "ang"), - "dao": ("d", "ao"), - "de": ("d", "e"), - "dei": ("d", "ei"), - "den": ("d", "en"), - "deng": ("d", "eng"), - "di": ("d", "i"), - "dia": ("d", "ia"), - "dian": ("d", "ian"), - "diao": ("d", "iao"), - "die": ("d", "ie"), - "ding": ("d", "ing"), - "diu": ("d", "iou"), - "dong": ("d", "ong"), - "dou": ("d", "ou"), - "du": ("d", "u"), - "duan": ("d", "uan"), - "dui": ("d", "uei"), - "dun": ("d", "uen"), - "duo": ("d", "uo"), - "e": ("^", "e"), - "ei": ("^", "ei"), - "en": ("^", "en"), - "ng": ("^", "en"), - "eng": ("^", "eng"), - "er": ("^", "er"), - "fa": ("f", "a"), - "fan": ("f", "an"), - "fang": ("f", "ang"), - "fei": ("f", "ei"), - "fen": ("f", "en"), - "feng": ("f", "eng"), - "fo": ("f", "o"), - "fou": ("f", "ou"), - "fu": ("f", "u"), - "ga": ("g", "a"), - "gai": ("g", "ai"), - "gan": ("g", "an"), - "gang": ("g", "ang"), - "gao": ("g", "ao"), - "ge": ("g", "e"), - "gei": ("g", "ei"), - "gen": ("g", "en"), - "geng": ("g", "eng"), - "gong": ("g", "ong"), - "gou": ("g", "ou"), - "gu": ("g", "u"), - "gua": ("g", "ua"), - "guai": ("g", "uai"), - "guan": ("g", "uan"), - "guang": ("g", "uang"), - "gui": ("g", "uei"), - "gun": ("g", "uen"), - "guo": ("g", "uo"), - "ha": ("h", "a"), - "hai": ("h", "ai"), - "han": ("h", "an"), - "hang": ("h", "ang"), - "hao": ("h", "ao"), - "he": ("h", "e"), - 
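# Editor's note on the notation used throughout pinyin_dict (inferred from the entries
# themselves): each syllable maps to an (initial, final) pair; "^" is the placeholder
# initial for zero-initial syllables (a, e, o, and the w-/y- spellings, e.g. wa -> ("^", "ua"),
# ya -> ("^", "ia")); "v" stands for the vowel ü (lv, nv, ju, qu, xu, yu); "ii" is the apical
# vowel of zi/ci/si and "iii" the apical vowel of zhi/chi/shi/ri.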
"hei": ("h", "ei"), - "hen": ("h", "en"), - "heng": ("h", "eng"), - "hong": ("h", "ong"), - "hou": ("h", "ou"), - "hu": ("h", "u"), - "hua": ("h", "ua"), - "huai": ("h", "uai"), - "huan": ("h", "uan"), - "huang": ("h", "uang"), - "hui": ("h", "uei"), - "hun": ("h", "uen"), - "huo": ("h", "uo"), - "ji": ("j", "i"), - "jia": ("j", "ia"), - "jian": ("j", "ian"), - "jiang": ("j", "iang"), - "jiao": ("j", "iao"), - "jie": ("j", "ie"), - "jin": ("j", "in"), - "jing": ("j", "ing"), - "jiong": ("j", "iong"), - "jiu": ("j", "iou"), - "ju": ("j", "v"), - "juan": ("j", "van"), - "jue": ("j", "ve"), - "jun": ("j", "vn"), - "ka": ("k", "a"), - "kai": ("k", "ai"), - "kan": ("k", "an"), - "kang": ("k", "ang"), - "kao": ("k", "ao"), - "ke": ("k", "e"), - "kei": ("k", "ei"), - "ken": ("k", "en"), - "keng": ("k", "eng"), - "kong": ("k", "ong"), - "kou": ("k", "ou"), - "ku": ("k", "u"), - "kua": ("k", "ua"), - "kuai": ("k", "uai"), - "kuan": ("k", "uan"), - "kuang": ("k", "uang"), - "kui": ("k", "uei"), - "kun": ("k", "uen"), - "kuo": ("k", "uo"), - "la": ("l", "a"), - "lai": ("l", "ai"), - "lan": ("l", "an"), - "lang": ("l", "ang"), - "lao": ("l", "ao"), - "le": ("l", "e"), - "lei": ("l", "ei"), - "leng": ("l", "eng"), - "li": ("l", "i"), - "lia": ("l", "ia"), - "lian": ("l", "ian"), - "liang": ("l", "iang"), - "liao": ("l", "iao"), - "lie": ("l", "ie"), - "lin": ("l", "in"), - "ling": ("l", "ing"), - "liu": ("l", "iou"), - "lo": ("l", "o"), - "long": ("l", "ong"), - "lou": ("l", "ou"), - "lu": ("l", "u"), - "lv": ("l", "v"), - "luan": ("l", "uan"), - "lve": ("l", "ve"), - "lue": ("l", "ve"), - "lun": ("l", "uen"), - "luo": ("l", "uo"), - "ma": ("m", "a"), - "mai": ("m", "ai"), - "man": ("m", "an"), - "mang": ("m", "ang"), - "mao": ("m", "ao"), - "me": ("m", "e"), - "mei": ("m", "ei"), - "men": ("m", "en"), - "meng": ("m", "eng"), - "mi": ("m", "i"), - "mian": ("m", "ian"), - "miao": ("m", "iao"), - "mie": ("m", "ie"), - "min": ("m", "in"), - "ming": ("m", "ing"), - "miu": ("m", "iou"), - "mo": ("m", "o"), - "mou": ("m", "ou"), - "mu": ("m", "u"), - "na": ("n", "a"), - "nai": ("n", "ai"), - "nan": ("n", "an"), - "nang": ("n", "ang"), - "nao": ("n", "ao"), - "ne": ("n", "e"), - "nei": ("n", "ei"), - "nen": ("n", "en"), - "neng": ("n", "eng"), - "ni": ("n", "i"), - "nia": ("n", "ia"), - "nian": ("n", "ian"), - "niang": ("n", "iang"), - "niao": ("n", "iao"), - "nie": ("n", "ie"), - "nin": ("n", "in"), - "ning": ("n", "ing"), - "niu": ("n", "iou"), - "nong": ("n", "ong"), - "nou": ("n", "ou"), - "nu": ("n", "u"), - "nv": ("n", "v"), - "nuan": ("n", "uan"), - "nve": ("n", "ve"), - "nue": ("n", "ve"), - "nuo": ("n", "uo"), - "o": ("^", "o"), - "ou": ("^", "ou"), - "pa": ("p", "a"), - "pai": ("p", "ai"), - "pan": ("p", "an"), - "pang": ("p", "ang"), - "pao": ("p", "ao"), - "pe": ("p", "e"), - "pei": ("p", "ei"), - "pen": ("p", "en"), - "peng": ("p", "eng"), - "pi": ("p", "i"), - "pian": ("p", "ian"), - "piao": ("p", "iao"), - "pie": ("p", "ie"), - "pin": ("p", "in"), - "ping": ("p", "ing"), - "po": ("p", "o"), - "pou": ("p", "ou"), - "pu": ("p", "u"), - "qi": ("q", "i"), - "qia": ("q", "ia"), - "qian": ("q", "ian"), - "qiang": ("q", "iang"), - "qiao": ("q", "iao"), - "qie": ("q", "ie"), - "qin": ("q", "in"), - "qing": ("q", "ing"), - "qiong": ("q", "iong"), - "qiu": ("q", "iou"), - "qu": ("q", "v"), - "quan": ("q", "van"), - "que": ("q", "ve"), - "qun": ("q", "vn"), - "ran": ("r", "an"), - "rang": ("r", "ang"), - "rao": ("r", "ao"), - "re": ("r", "e"), - "ren": ("r", "en"), - "reng": ("r", "eng"), - "ri": ("r", 
"iii"), - "rong": ("r", "ong"), - "rou": ("r", "ou"), - "ru": ("r", "u"), - "rua": ("r", "ua"), - "ruan": ("r", "uan"), - "rui": ("r", "uei"), - "run": ("r", "uen"), - "ruo": ("r", "uo"), - "sa": ("s", "a"), - "sai": ("s", "ai"), - "san": ("s", "an"), - "sang": ("s", "ang"), - "sao": ("s", "ao"), - "se": ("s", "e"), - "sen": ("s", "en"), - "seng": ("s", "eng"), - "sha": ("sh", "a"), - "shai": ("sh", "ai"), - "shan": ("sh", "an"), - "shang": ("sh", "ang"), - "shao": ("sh", "ao"), - "she": ("sh", "e"), - "shei": ("sh", "ei"), - "shen": ("sh", "en"), - "sheng": ("sh", "eng"), - "shi": ("sh", "iii"), - "shou": ("sh", "ou"), - "shu": ("sh", "u"), - "shua": ("sh", "ua"), - "shuai": ("sh", "uai"), - "shuan": ("sh", "uan"), - "shuang": ("sh", "uang"), - "shui": ("sh", "uei"), - "shun": ("sh", "uen"), - "shuo": ("sh", "uo"), - "si": ("s", "ii"), - "song": ("s", "ong"), - "sou": ("s", "ou"), - "su": ("s", "u"), - "suan": ("s", "uan"), - "sui": ("s", "uei"), - "sun": ("s", "uen"), - "suo": ("s", "uo"), - "ta": ("t", "a"), - "tai": ("t", "ai"), - "tan": ("t", "an"), - "tang": ("t", "ang"), - "tao": ("t", "ao"), - "te": ("t", "e"), - "tei": ("t", "ei"), - "teng": ("t", "eng"), - "ti": ("t", "i"), - "tian": ("t", "ian"), - "tiao": ("t", "iao"), - "tie": ("t", "ie"), - "ting": ("t", "ing"), - "tong": ("t", "ong"), - "tou": ("t", "ou"), - "tu": ("t", "u"), - "tuan": ("t", "uan"), - "tui": ("t", "uei"), - "tun": ("t", "uen"), - "tuo": ("t", "uo"), - "wa": ("^", "ua"), - "wai": ("^", "uai"), - "wan": ("^", "uan"), - "wang": ("^", "uang"), - "wei": ("^", "uei"), - "wen": ("^", "uen"), - "weng": ("^", "ueng"), - "wo": ("^", "uo"), - "wu": ("^", "u"), - "xi": ("x", "i"), - "xia": ("x", "ia"), - "xian": ("x", "ian"), - "xiang": ("x", "iang"), - "xiao": ("x", "iao"), - "xie": ("x", "ie"), - "xin": ("x", "in"), - "xing": ("x", "ing"), - "xiong": ("x", "iong"), - "xiu": ("x", "iou"), - "xu": ("x", "v"), - "xuan": ("x", "van"), - "xue": ("x", "ve"), - "xun": ("x", "vn"), - "ya": ("^", "ia"), - "yan": ("^", "ian"), - "yang": ("^", "iang"), - "yao": ("^", "iao"), - "ye": ("^", "ie"), - "yi": ("^", "i"), - "yin": ("^", "in"), - "ying": ("^", "ing"), - "yo": ("^", "iou"), - "yong": ("^", "iong"), - "you": ("^", "iou"), - "yu": ("^", "v"), - "yuan": ("^", "van"), - "yue": ("^", "ve"), - "yun": ("^", "vn"), - "za": ("z", "a"), - "zai": ("z", "ai"), - "zan": ("z", "an"), - "zang": ("z", "ang"), - "zao": ("z", "ao"), - "ze": ("z", "e"), - "zei": ("z", "ei"), - "zen": ("z", "en"), - "zeng": ("z", "eng"), - "zha": ("zh", "a"), - "zhai": ("zh", "ai"), - "zhan": ("zh", "an"), - "zhang": ("zh", "ang"), - "zhao": ("zh", "ao"), - "zhe": ("zh", "e"), - "zhei": ("zh", "ei"), - "zhen": ("zh", "en"), - "zheng": ("zh", "eng"), - "zhi": ("zh", "iii"), - "zhong": ("zh", "ong"), - "zhou": ("zh", "ou"), - "zhu": ("zh", "u"), - "zhua": ("zh", "ua"), - "zhuai": ("zh", "uai"), - "zhuan": ("zh", "uan"), - "zhuang": ("zh", "uang"), - "zhui": ("zh", "uei"), - "zhun": ("zh", "uen"), - "zhuo": ("zh", "uo"), - "zi": ("z", "ii"), - "zong": ("z", "ong"), - "zou": ("z", "ou"), - "zu": ("z", "u"), - "zuan": ("z", "uan"), - "zui": ("z", "uei"), - "zun": ("z", "uen"), - "zuo": ("z", "uo"), -} diff --git a/spaces/youplala/StoreCopilot/assets/fonts/tabler-icons.css b/spaces/youplala/StoreCopilot/assets/fonts/tabler-icons.css deleted file mode 100644 index 482a62db6d41b51305c4b868f1a1f6727936f8d9..0000000000000000000000000000000000000000 --- a/spaces/youplala/StoreCopilot/assets/fonts/tabler-icons.css +++ /dev/null @@ -1,8838 +0,0 @@ -/*! 
-* Tabler Icons 1.76.0 by tabler - https://tabler.io -* License - https://github.com/tabler/tabler-icons/blob/master/LICENSE -*/ -@font-face { - font-family: "tabler-icons"; - font-style: normal; - font-weight: 400; - src: url("tabler/tabler-icons.eot"); - src: url("tabler/tabler-icons.eot?#iefix") format("embedded-opentype"), url("tabler/tabler-icons.woff2") format("woff2"), url("tabler/tabler-icons.woff") format("woff"), url("tabler/tabler-icons.ttf") format("truetype"), url("tabler/tabler-icons.svg#tabler-icons") format("svg"); -} -@media screen and (-webkit-min-device-pixel-ratio: 0) { - @font-face { - font-family: "tabler-icons"; - src: url("tabler/tabler-icons.svg#tabler-icons") format("svg"); - } -} -.ti { - vertical-align: middle; - font-size: 1.25rem; - line-height: 1; - display: inline-block; -} - -.ti { - font-family: "tabler-icons" !important; - speak: none; - font-style: normal; - font-weight: normal; - font-variant: normal; - text-transform: none; - line-height: 1; - /* Better Font Rendering */ - -webkit-font-smoothing: antialiased; - -moz-osx-font-smoothing: grayscale; -} - -@-webkit-keyframes spin { - 0% { - -webkit-transform: rotate(0); - transform: rotate(0); - } - 100% { - -webkit-transform: rotate(359deg); - transform: rotate(359deg); - } -} -@keyframes spin { - 0% { - -webkit-transform: rotate(0); - transform: rotate(0); - } - 100% { - -webkit-transform: rotate(359deg); - transform: rotate(359deg); - } -} -@-webkit-keyframes burst { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - 90% { - -webkit-transform: scale(1.5); - transform: scale(1.5); - opacity: 0; - } -} -@keyframes burst { - 0% { - -webkit-transform: scale(1); - transform: scale(1); - opacity: 1; - } - 90% { - -webkit-transform: scale(1.5); - transform: scale(1.5); - opacity: 0; - } -} -@-webkit-keyframes flashing { - 0% { - opacity: 1; - } - 45% { - opacity: 0; - } - 90% { - opacity: 1; - } -} -@keyframes flashing { - 0% { - opacity: 1; - } - 45% { - opacity: 0; - } - 90% { - opacity: 1; - } -} -@-webkit-keyframes fade-left { - 0% { - -webkit-transform: translateX(0); - transform: translateX(0); - opacity: 1; - } - 75% { - -webkit-transform: translateX(-20px); - transform: translateX(-20px); - opacity: 0; - } -} -@keyframes fade-left { - 0% { - -webkit-transform: translateX(0); - transform: translateX(0); - opacity: 1; - } - 75% { - -webkit-transform: translateX(-20px); - transform: translateX(-20px); - opacity: 0; - } -} -@-webkit-keyframes fade-right { - 0% { - -webkit-transform: translateX(0); - transform: translateX(0); - opacity: 1; - } - 75% { - -webkit-transform: translateX(20px); - transform: translateX(20px); - opacity: 0; - } -} -@keyframes fade-right { - 0% { - -webkit-transform: translateX(0); - transform: translateX(0); - opacity: 1; - } - 75% { - -webkit-transform: translateX(20px); - transform: translateX(20px); - opacity: 0; - } -} -@-webkit-keyframes fade-up { - 0% { - -webkit-transform: translateY(0); - transform: translateY(0); - opacity: 1; - } - 75% { - -webkit-transform: translateY(-20px); - transform: translateY(-20px); - opacity: 0; - } -} -@keyframes fade-up { - 0% { - -webkit-transform: translateY(0); - transform: translateY(0); - opacity: 1; - } - 75% { - -webkit-transform: translateY(-20px); - transform: translateY(-20px); - opacity: 0; - } -} -@-webkit-keyframes fade-down { - 0% { - -webkit-transform: translateY(0); - transform: translateY(0); - opacity: 1; - } - 75% { - -webkit-transform: translateY(20px); - transform: translateY(20px); - 
opacity: 0; - } -} -@keyframes fade-down { - 0% { - -webkit-transform: translateY(0); - transform: translateY(0); - opacity: 1; - } - 75% { - -webkit-transform: translateY(20px); - transform: translateY(20px); - opacity: 0; - } -} -@-webkit-keyframes tada { - from { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } - 10%, 20% { - -webkit-transform: scale3d(0.95, 0.95, 0.95) rotate3d(0, 0, 1, -10deg); - transform: scale3d(0.95, 0.95, 0.95) rotate3d(0, 0, 1, -10deg); - } - 30%, 50%, 70%, 90% { - -webkit-transform: scale3d(1, 1, 1) rotate3d(0, 0, 1, 10deg); - transform: scale3d(1, 1, 1) rotate3d(0, 0, 1, 10deg); - } - 40%, 60%, 80% { - -webkit-transform: scale3d(1, 1, 1) rotate3d(0, 0, 1, -10deg); - transform: scale3d(1, 1, 1) rotate3d(0, 0, 1, -10deg); - } - to { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -@keyframes tada { - from { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } - 10%, 20% { - -webkit-transform: scale3d(0.95, 0.95, 0.95) rotate3d(0, 0, 1, -10deg); - transform: scale3d(0.95, 0.95, 0.95) rotate3d(0, 0, 1, -10deg); - } - 30%, 50%, 70%, 90% { - -webkit-transform: scale3d(1, 1, 1) rotate3d(0, 0, 1, 10deg); - transform: scale3d(1, 1, 1) rotate3d(0, 0, 1, 10deg); - } - 40%, 60%, 80% { - -webkit-transform: rotate3d(0, 0, 1, -10deg); - transform: rotate3d(0, 0, 1, -10deg); - } - to { - -webkit-transform: scale3d(1, 1, 1); - transform: scale3d(1, 1, 1); - } -} -.ti-spin { - -webkit-animation: spin 2s linear infinite; - animation: spin 2s linear infinite; -} - -.ti-spin-hover:hover { - -webkit-animation: spin 2s linear infinite; - animation: spin 2s linear infinite; -} - -.ti-tada { - -webkit-animation: tada 1.5s ease infinite; - animation: tada 1.5s ease infinite; -} - -.ti-tada-hover:hover { - -webkit-animation: tada 1.5s ease infinite; - animation: tada 1.5s ease infinite; -} - -.ti-flashing { - -webkit-animation: flashing 1.5s infinite linear; - animation: flashing 1.5s infinite linear; -} - -.ti-flashing-hover:hover { - -webkit-animation: flashing 1.5s infinite linear; - animation: flashing 1.5s infinite linear; -} - -.ti-burst { - -webkit-animation: burst 1.5s infinite linear; - animation: burst 1.5s infinite linear; -} - -.ti-burst-hover:hover { - -webkit-animation: burst 1.5s infinite linear; - animation: burst 1.5s infinite linear; -} - -.ti-fade-up { - -webkit-animation: fade-up 1.5s infinite linear; - animation: fade-up 1.5s infinite linear; -} - -.ti-fade-up-hover:hover { - -webkit-animation: fade-up 1.5s infinite linear; - animation: fade-up 1.5s infinite linear; -} - -.ti-fade-down { - -webkit-animation: fade-down 1.5s infinite linear; - animation: fade-down 1.5s infinite linear; -} - -.ti-fade-down-hover:hover { - -webkit-animation: fade-down 1.5s infinite linear; - animation: fade-down 1.5s infinite linear; -} - -.ti-fade-left { - -webkit-animation: fade-left 1.5s infinite linear; - animation: fade-left 1.5s infinite linear; -} - -.ti-fade-left-hover:hover { - -webkit-animation: fade-left 1.5s infinite linear; - animation: fade-left 1.5s infinite linear; -} - -.ti-fade-right { - -webkit-animation: fade-right 1.5s infinite linear; - animation: fade-right 1.5s infinite linear; -} - -.ti-fade-right-hover:hover { - -webkit-animation: fade-right 1.5s infinite linear; - animation: fade-right 1.5s infinite linear; -} - -.ti-xs { - font-size: 1.125rem !important; -} - -.ti-sm { - font-size: 1.376rem !important; -} - -.ti-md { - font-size: 1.625rem !important; -} - -.ti-lg { - font-size: 2rem 
!important; -} - -.ti-xl { - font-size: 2.25rem !important; -} - -.ti-2fa:before { - content: "\eca0"; -} - -.ti-3d-cube-sphere:before { - content: "\ecd7"; -} - -.ti-3d-rotate:before { - content: "\f020"; -} - -.ti-a-b:before { - content: "\ec36"; -} - -.ti-a-b-2:before { - content: "\f25f"; -} - -.ti-a-b-off:before { - content: "\f0a6"; -} - -.ti-abacus:before { - content: "\f05c"; -} - -.ti-access-point:before { - content: "\ed1b"; -} - -.ti-access-point-off:before { - content: "\ed1a"; -} - -.ti-accessible:before { - content: "\eba9"; -} - -.ti-accessible-off:before { - content: "\f0a7"; -} - -.ti-activity:before { - content: "\ed23"; -} - -.ti-activity-heartbeat:before { - content: "\f0db"; -} - -.ti-ad:before { - content: "\ea02"; -} - -.ti-ad-2:before { - content: "\ef1f"; -} - -.ti-address-book:before { - content: "\f021"; -} - -.ti-adjustments:before { - content: "\ea03"; -} - -.ti-adjustments-alt:before { - content: "\ec37"; -} - -.ti-adjustments-horizontal:before { - content: "\ec38"; -} - -.ti-adjustments-off:before { - content: "\f0a8"; -} - -.ti-aerial-lift:before { - content: "\edfe"; -} - -.ti-affiliate:before { - content: "\edff"; -} - -.ti-alarm:before { - content: "\ea04"; -} - -.ti-alarm-off:before { - content: "\f0a9"; -} - -.ti-album:before { - content: "\f022"; -} - -.ti-alert-circle:before { - content: "\ea05"; -} - -.ti-alert-octagon:before { - content: "\ecc6"; -} - -.ti-alert-triangle:before { - content: "\ea06"; -} - -.ti-alien:before { - content: "\ebde"; -} - -.ti-align-center:before { - content: "\ea07"; -} - -.ti-align-justified:before { - content: "\ea08"; -} - -.ti-align-left:before { - content: "\ea09"; -} - -.ti-align-right:before { - content: "\ea0a"; -} - -.ti-alphabet-cyrillic:before { - content: "\f1df"; -} - -.ti-alphabet-greek:before { - content: "\f1e0"; -} - -.ti-alphabet-latin:before { - content: "\f1e1"; -} - -.ti-ambulance:before { - content: "\ebf5"; -} - -.ti-ampersand:before { - content: "\f229"; -} - -.ti-anchor:before { - content: "\eb76"; -} - -.ti-anchor-off:before { - content: "\f0f7"; -} - -.ti-angle:before { - content: "\ef20"; -} - -.ti-ankh:before { - content: "\f1cd"; -} - -.ti-antenna:before { - content: "\f094"; -} - -.ti-antenna-bars-1:before { - content: "\ecc7"; -} - -.ti-antenna-bars-2:before { - content: "\ecc8"; -} - -.ti-antenna-bars-3:before { - content: "\ecc9"; -} - -.ti-antenna-bars-4:before { - content: "\ecca"; -} - -.ti-antenna-bars-5:before { - content: "\eccb"; -} - -.ti-antenna-bars-off:before { - content: "\f0aa"; -} - -.ti-aperture:before { - content: "\eb58"; -} - -.ti-api:before { - content: "\effd"; -} - -.ti-api-app:before { - content: "\effc"; -} - -.ti-api-app-off:before { - content: "\f0ab"; -} - -.ti-api-off:before { - content: "\f0f8"; -} - -.ti-app-window:before { - content: "\efe6"; -} - -.ti-apple:before { - content: "\ef21"; -} - -.ti-apps:before { - content: "\ebb6"; -} - -.ti-apps-off:before { - content: "\f0ac"; -} - -.ti-archive:before { - content: "\ea0b"; -} - -.ti-archive-off:before { - content: "\f0ad"; -} - -.ti-armchair:before { - content: "\ef9e"; -} - -.ti-armchair-2:before { - content: "\efe7"; -} - -.ti-arrow-autofit-content:before { - content: "\ef31"; -} - -.ti-arrow-autofit-down:before { - content: "\ef32"; -} - -.ti-arrow-autofit-height:before { - content: "\ef33"; -} - -.ti-arrow-autofit-left:before { - content: "\ef34"; -} - -.ti-arrow-autofit-right:before { - content: "\ef35"; -} - -.ti-arrow-autofit-up:before { - content: "\ef36"; -} - -.ti-arrow-autofit-width:before { - 
content: "\ef37"; -} - -.ti-arrow-back:before { - content: "\ea0c"; -} - -.ti-arrow-back-up:before { - content: "\eb77"; -} - -.ti-arrow-bar-down:before { - content: "\ea0d"; -} - -.ti-arrow-bar-left:before { - content: "\ea0e"; -} - -.ti-arrow-bar-right:before { - content: "\ea0f"; -} - -.ti-arrow-bar-to-down:before { - content: "\ec88"; -} - -.ti-arrow-bar-to-left:before { - content: "\ec89"; -} - -.ti-arrow-bar-to-right:before { - content: "\ec8a"; -} - -.ti-arrow-bar-to-up:before { - content: "\ec8b"; -} - -.ti-arrow-bar-up:before { - content: "\ea10"; -} - -.ti-arrow-bear-left:before { - content: "\f045"; -} - -.ti-arrow-bear-left-2:before { - content: "\f044"; -} - -.ti-arrow-bear-right:before { - content: "\f047"; -} - -.ti-arrow-bear-right-2:before { - content: "\f046"; -} - -.ti-arrow-big-down:before { - content: "\edda"; -} - -.ti-arrow-big-down-line:before { - content: "\efe8"; -} - -.ti-arrow-big-down-lines:before { - content: "\efe9"; -} - -.ti-arrow-big-left:before { - content: "\eddb"; -} - -.ti-arrow-big-left-line:before { - content: "\efea"; -} - -.ti-arrow-big-left-lines:before { - content: "\efeb"; -} - -.ti-arrow-big-right:before { - content: "\eddc"; -} - -.ti-arrow-big-right-line:before { - content: "\efec"; -} - -.ti-arrow-big-right-lines:before { - content: "\efed"; -} - -.ti-arrow-big-top:before { - content: "\eddd"; -} - -.ti-arrow-big-up-line:before { - content: "\efee"; -} - -.ti-arrow-big-up-lines:before { - content: "\efef"; -} - -.ti-arrow-bottom-bar:before { - content: "\ed98"; -} - -.ti-arrow-bottom-circle:before { - content: "\ed99"; -} - -.ti-arrow-bottom-square:before { - content: "\ed9a"; -} - -.ti-arrow-bottom-tail:before { - content: "\ed9b"; -} - -.ti-arrow-curve-left:before { - content: "\f048"; -} - -.ti-arrow-curve-right:before { - content: "\f049"; -} - -.ti-arrow-down:before { - content: "\ea16"; -} - -.ti-arrow-down-circle:before { - content: "\ea11"; -} - -.ti-arrow-down-left:before { - content: "\ea13"; -} - -.ti-arrow-down-left-circle:before { - content: "\ea12"; -} - -.ti-arrow-down-right:before { - content: "\ea15"; -} - -.ti-arrow-down-right-circle:before { - content: "\ea14"; -} - -.ti-arrow-fork:before { - content: "\f04a"; -} - -.ti-arrow-forward:before { - content: "\ea17"; -} - -.ti-arrow-forward-up:before { - content: "\eb78"; -} - -.ti-arrow-guide:before { - content: "\f22a"; -} - -.ti-arrow-left:before { - content: "\ea19"; -} - -.ti-arrow-left-bar:before { - content: "\ed9c"; -} - -.ti-arrow-left-circle:before { - content: "\ea18"; -} - -.ti-arrow-left-right:before { - content: "\f04b"; -} - -.ti-arrow-left-square:before { - content: "\ed9d"; -} - -.ti-arrow-left-tail:before { - content: "\ed9e"; -} - -.ti-arrow-loop-left:before { - content: "\ed9f"; -} - -.ti-arrow-loop-left-2:before { - content: "\f04c"; -} - -.ti-arrow-loop-right:before { - content: "\eda0"; -} - -.ti-arrow-loop-right-2:before { - content: "\f04d"; -} - -.ti-arrow-merge:before { - content: "\f04e"; -} - -.ti-arrow-merge-both:before { - content: "\f23b"; -} - -.ti-arrow-merge-left:before { - content: "\f23c"; -} - -.ti-arrow-merge-right:before { - content: "\f23d"; -} - -.ti-arrow-narrow-down:before { - content: "\ea1a"; -} - -.ti-arrow-narrow-left:before { - content: "\ea1b"; -} - -.ti-arrow-narrow-right:before { - content: "\ea1c"; -} - -.ti-arrow-narrow-up:before { - content: "\ea1d"; -} - -.ti-arrow-ramp-left:before { - content: "\ed3c"; -} - -.ti-arrow-ramp-left-2:before { - content: "\f04f"; -} - -.ti-arrow-ramp-left-3:before { - content: "\f050"; -} - 
-.ti-arrow-ramp-right:before { - content: "\ed3d"; -} - -.ti-arrow-ramp-right-2:before { - content: "\f051"; -} - -.ti-arrow-ramp-right-3:before { - content: "\f052"; -} - -.ti-arrow-right:before { - content: "\ea1f"; -} - -.ti-arrow-right-bar:before { - content: "\eda1"; -} - -.ti-arrow-right-circle:before { - content: "\ea1e"; -} - -.ti-arrow-right-square:before { - content: "\eda2"; -} - -.ti-arrow-right-tail:before { - content: "\eda3"; -} - -.ti-arrow-rotary-first-left:before { - content: "\f053"; -} - -.ti-arrow-rotary-first-right:before { - content: "\f054"; -} - -.ti-arrow-rotary-last-left:before { - content: "\f055"; -} - -.ti-arrow-rotary-last-right:before { - content: "\f056"; -} - -.ti-arrow-rotary-left:before { - content: "\f057"; -} - -.ti-arrow-rotary-right:before { - content: "\f058"; -} - -.ti-arrow-rotary-straight:before { - content: "\f059"; -} - -.ti-arrow-roundabout-left:before { - content: "\f22b"; -} - -.ti-arrow-roundabout-right:before { - content: "\f22c"; -} - -.ti-arrow-sharp-turn-left:before { - content: "\f05a"; -} - -.ti-arrow-sharp-turn-right:before { - content: "\f05b"; -} - -.ti-arrow-top-bar:before { - content: "\eda4"; -} - -.ti-arrow-top-circle:before { - content: "\eda5"; -} - -.ti-arrow-top-square:before { - content: "\eda6"; -} - -.ti-arrow-top-tail:before { - content: "\eda7"; -} - -.ti-arrow-up:before { - content: "\ea25"; -} - -.ti-arrow-up-circle:before { - content: "\ea20"; -} - -.ti-arrow-up-left:before { - content: "\ea22"; -} - -.ti-arrow-up-left-circle:before { - content: "\ea21"; -} - -.ti-arrow-up-right:before { - content: "\ea24"; -} - -.ti-arrow-up-right-circle:before { - content: "\ea23"; -} - -.ti-arrow-wave-left-down:before { - content: "\eda8"; -} - -.ti-arrow-wave-left-up:before { - content: "\eda9"; -} - -.ti-arrow-wave-right-down:before { - content: "\edaa"; -} - -.ti-arrow-wave-right-up:before { - content: "\edab"; -} - -.ti-arrows-cross:before { - content: "\effe"; -} - -.ti-arrows-diagonal:before { - content: "\ea27"; -} - -.ti-arrows-diagonal-2:before { - content: "\ea26"; -} - -.ti-arrows-diagonal-minimize:before { - content: "\ef39"; -} - -.ti-arrows-diagonal-minimize-2:before { - content: "\ef38"; -} - -.ti-arrows-double-ne-sw:before { - content: "\edde"; -} - -.ti-arrows-double-nw-se:before { - content: "\eddf"; -} - -.ti-arrows-double-se-nw:before { - content: "\ede0"; -} - -.ti-arrows-double-sw-ne:before { - content: "\ede1"; -} - -.ti-arrows-down:before { - content: "\edad"; -} - -.ti-arrows-down-up:before { - content: "\edac"; -} - -.ti-arrows-exchange:before { - content: "\f1f4"; -} - -.ti-arrows-exchange-2:before { - content: "\f1f3"; -} - -.ti-arrows-horizontal:before { - content: "\eb59"; -} - -.ti-arrows-join:before { - content: "\edaf"; -} - -.ti-arrows-join-2:before { - content: "\edae"; -} - -.ti-arrows-left:before { - content: "\edb1"; -} - -.ti-arrows-left-down:before { - content: "\ee00"; -} - -.ti-arrows-left-right:before { - content: "\edb0"; -} - -.ti-arrows-maximize:before { - content: "\ea28"; -} - -.ti-arrows-minimize:before { - content: "\ea29"; -} - -.ti-arrows-move:before { - content: "\f22f"; -} - -.ti-arrows-move-horizontal:before { - content: "\f22d"; -} - -.ti-arrows-move-vertical:before { - content: "\f22e"; -} - -.ti-arrows-random:before { - content: "\f095"; -} - -.ti-arrows-right:before { - content: "\edb3"; -} - -.ti-arrows-right-down:before { - content: "\ee01"; -} - -.ti-arrows-right-left:before { - content: "\edb2"; -} - -.ti-arrows-shuffle:before { - content: "\f000"; -} - 
-.ti-arrows-shuffle-2:before { - content: "\efff"; -} - -.ti-arrows-sort:before { - content: "\eb5a"; -} - -.ti-arrows-split:before { - content: "\edb5"; -} - -.ti-arrows-split-2:before { - content: "\edb4"; -} - -.ti-arrows-up:before { - content: "\edb7"; -} - -.ti-arrows-up-down:before { - content: "\edb6"; -} - -.ti-arrows-up-left:before { - content: "\ee02"; -} - -.ti-arrows-up-right:before { - content: "\ee03"; -} - -.ti-arrows-vertical:before { - content: "\eb5b"; -} - -.ti-artboard:before { - content: "\ea2a"; -} - -.ti-artboard-off:before { - content: "\f0ae"; -} - -.ti-article:before { - content: "\f1e2"; -} - -.ti-aspect-ratio:before { - content: "\ed30"; -} - -.ti-aspect-ratio-off:before { - content: "\f0af"; -} - -.ti-assembly:before { - content: "\f24d"; -} - -.ti-asset:before { - content: "\f1ce"; -} - -.ti-asterisk:before { - content: "\efd5"; -} - -.ti-asterisk-simple:before { - content: "\efd4"; -} - -.ti-at:before { - content: "\ea2b"; -} - -.ti-at-off:before { - content: "\f0b0"; -} - -.ti-atom:before { - content: "\eb79"; -} - -.ti-atom-2:before { - content: "\ebdf"; -} - -.ti-atom-off:before { - content: "\f0f9"; -} - -.ti-augmented-reality:before { - content: "\f023"; -} - -.ti-award:before { - content: "\ea2c"; -} - -.ti-award-off:before { - content: "\f0fa"; -} - -.ti-axe:before { - content: "\ef9f"; -} - -.ti-axis-x:before { - content: "\ef45"; -} - -.ti-axis-y:before { - content: "\ef46"; -} - -.ti-baby-carriage:before { - content: "\f05d"; -} - -.ti-backhoe:before { - content: "\ed86"; -} - -.ti-backpack:before { - content: "\ef47"; -} - -.ti-backspace:before { - content: "\ea2d"; -} - -.ti-badge:before { - content: "\efc2"; -} - -.ti-badge-off:before { - content: "\f0fb"; -} - -.ti-badges:before { - content: "\efc3"; -} - -.ti-badges-off:before { - content: "\f0fc"; -} - -.ti-ball-american-football:before { - content: "\ee04"; -} - -.ti-ball-baseball:before { - content: "\efa0"; -} - -.ti-ball-basketball:before { - content: "\ec28"; -} - -.ti-ball-bowling:before { - content: "\ec29"; -} - -.ti-ball-football:before { - content: "\ee06"; -} - -.ti-ball-football-off:before { - content: "\ee05"; -} - -.ti-ball-tennis:before { - content: "\ec2a"; -} - -.ti-ball-volleyball:before { - content: "\ec2b"; -} - -.ti-ballon:before { - content: "\ef3a"; -} - -.ti-ballon-off:before { - content: "\f0fd"; -} - -.ti-ballpen:before { - content: "\f06e"; -} - -.ti-ballpen-off:before { - content: "\f0b1"; -} - -.ti-ban:before { - content: "\ea2e"; -} - -.ti-bandage:before { - content: "\eb7a"; -} - -.ti-barbell:before { - content: "\eff0"; -} - -.ti-barbell-off:before { - content: "\f0b2"; -} - -.ti-barcode:before { - content: "\ebc6"; -} - -.ti-barcode-off:before { - content: "\f0b3"; -} - -.ti-barrel:before { - content: "\f0b4"; -} - -.ti-barrel-off:before { - content: "\f0fe"; -} - -.ti-barrier-block:before { - content: "\f00e"; -} - -.ti-barrier-block-off:before { - content: "\f0b5"; -} - -.ti-baseline:before { - content: "\f024"; -} - -.ti-basket:before { - content: "\ebe1"; -} - -.ti-basket-off:before { - content: "\f0b6"; -} - -.ti-bath:before { - content: "\ef48"; -} - -.ti-bath-off:before { - content: "\f0ff"; -} - -.ti-battery:before { - content: "\ea34"; -} - -.ti-battery-1:before { - content: "\ea2f"; -} - -.ti-battery-2:before { - content: "\ea30"; -} - -.ti-battery-3:before { - content: "\ea31"; -} - -.ti-battery-4:before { - content: "\ea32"; -} - -.ti-battery-automotive:before { - content: "\ee07"; -} - -.ti-battery-charging:before { - content: "\ea33"; -} - 
-.ti-battery-charging-2:before { - content: "\ef3b"; -} - -.ti-battery-eco:before { - content: "\ef3c"; -} - -.ti-battery-off:before { - content: "\ed1c"; -} - -.ti-beach:before { - content: "\ef3d"; -} - -.ti-beach-off:before { - content: "\f0b7"; -} - -.ti-bed:before { - content: "\eb5c"; -} - -.ti-bed-off:before { - content: "\f100"; -} - -.ti-beer:before { - content: "\efa1"; -} - -.ti-beer-off:before { - content: "\f101"; -} - -.ti-bell:before { - content: "\ea35"; -} - -.ti-bell-minus:before { - content: "\ede2"; -} - -.ti-bell-off:before { - content: "\ece9"; -} - -.ti-bell-plus:before { - content: "\ede3"; -} - -.ti-bell-ringing:before { - content: "\ed07"; -} - -.ti-bell-ringing-2:before { - content: "\ede4"; -} - -.ti-bell-school:before { - content: "\f05e"; -} - -.ti-bell-x:before { - content: "\ede5"; -} - -.ti-bell-z:before { - content: "\eff1"; -} - -.ti-bible:before { - content: "\efc4"; -} - -.ti-bike:before { - content: "\ea36"; -} - -.ti-bike-off:before { - content: "\f0b8"; -} - -.ti-binary:before { - content: "\ee08"; -} - -.ti-biohazard:before { - content: "\ecb8"; -} - -.ti-biohazard-off:before { - content: "\f0b9"; -} - -.ti-blockquote:before { - content: "\ee09"; -} - -.ti-bluetooth:before { - content: "\ea37"; -} - -.ti-bluetooth-connected:before { - content: "\ecea"; -} - -.ti-bluetooth-off:before { - content: "\eceb"; -} - -.ti-bluetooth-x:before { - content: "\f081"; -} - -.ti-blur:before { - content: "\ef8c"; -} - -.ti-bold:before { - content: "\eb7b"; -} - -.ti-bold-off:before { - content: "\f0ba"; -} - -.ti-bolt:before { - content: "\ea38"; -} - -.ti-bolt-off:before { - content: "\ecec"; -} - -.ti-bone:before { - content: "\edb8"; -} - -.ti-bone-off:before { - content: "\f0bb"; -} - -.ti-book:before { - content: "\ea39"; -} - -.ti-book-2:before { - content: "\efc5"; -} - -.ti-book-download:before { - content: "\f070"; -} - -.ti-book-off:before { - content: "\f0bc"; -} - -.ti-book-upload:before { - content: "\f071"; -} - -.ti-bookmark:before { - content: "\ea3a"; -} - -.ti-bookmark-off:before { - content: "\eced"; -} - -.ti-bookmarks:before { - content: "\ed08"; -} - -.ti-bookmarks-off:before { - content: "\f0bd"; -} - -.ti-books:before { - content: "\eff2"; -} - -.ti-books-off:before { - content: "\f0be"; -} - -.ti-border-all:before { - content: "\ea3b"; -} - -.ti-border-bottom:before { - content: "\ea3c"; -} - -.ti-border-horizontal:before { - content: "\ea3d"; -} - -.ti-border-inner:before { - content: "\ea3e"; -} - -.ti-border-left:before { - content: "\ea3f"; -} - -.ti-border-none:before { - content: "\ea40"; -} - -.ti-border-outer:before { - content: "\ea41"; -} - -.ti-border-radius:before { - content: "\eb7c"; -} - -.ti-border-right:before { - content: "\ea42"; -} - -.ti-border-style:before { - content: "\ee0a"; -} - -.ti-border-style-2:before { - content: "\ef22"; -} - -.ti-border-top:before { - content: "\ea43"; -} - -.ti-border-vertical:before { - content: "\ea44"; -} - -.ti-bottle:before { - content: "\ef0b"; -} - -.ti-bow:before { - content: "\f096"; -} - -.ti-box:before { - content: "\ea45"; -} - -.ti-box-margin:before { - content: "\ee0b"; -} - -.ti-box-model:before { - content: "\ee0c"; -} - -.ti-box-model-2:before { - content: "\ef23"; -} - -.ti-box-multiple:before { - content: "\ee17"; -} - -.ti-box-multiple-0:before { - content: "\ee0d"; -} - -.ti-box-multiple-1:before { - content: "\ee0e"; -} - -.ti-box-multiple-2:before { - content: "\ee0f"; -} - -.ti-box-multiple-3:before { - content: "\ee10"; -} - -.ti-box-multiple-4:before { - content: 
"\ee11"; -} - -.ti-box-multiple-5:before { - content: "\ee12"; -} - -.ti-box-multiple-6:before { - content: "\ee13"; -} - -.ti-box-multiple-7:before { - content: "\ee14"; -} - -.ti-box-multiple-8:before { - content: "\ee15"; -} - -.ti-box-multiple-9:before { - content: "\ee16"; -} - -.ti-box-off:before { - content: "\f102"; -} - -.ti-box-padding:before { - content: "\ee18"; -} - -.ti-braces:before { - content: "\ebcc"; -} - -.ti-braces-off:before { - content: "\f0bf"; -} - -.ti-brackets:before { - content: "\ebcd"; -} - -.ti-brackets-contain:before { - content: "\f1e5"; -} - -.ti-brackets-contain-end:before { - content: "\f1e3"; -} - -.ti-brackets-contain-start:before { - content: "\f1e4"; -} - -.ti-brackets-off:before { - content: "\f0c0"; -} - -.ti-brand-adobe:before { - content: "\f0dc"; -} - -.ti-brand-airbnb:before { - content: "\ed68"; -} - -.ti-brand-airtable:before { - content: "\ef6a"; -} - -.ti-brand-amazon:before { - content: "\f230"; -} - -.ti-brand-amongus:before { - content: "\f205"; -} - -.ti-brand-android:before { - content: "\ec16"; -} - -.ti-brand-angular:before { - content: "\ef6b"; -} - -.ti-brand-appgallery:before { - content: "\f231"; -} - -.ti-brand-apple:before { - content: "\ec17"; -} - -.ti-brand-apple-arcade:before { - content: "\ed69"; -} - -.ti-brand-apple-podcast:before { - content: "\f1e6"; -} - -.ti-brand-appstore:before { - content: "\ed24"; -} - -.ti-brand-asana:before { - content: "\edc5"; -} - -.ti-brand-badoo:before { - content: "\f206"; -} - -.ti-brand-bandcamp:before { - content: "\f207"; -} - -.ti-brand-beats:before { - content: "\f208"; -} - -.ti-brand-behance:before { - content: "\ec6e"; -} - -.ti-brand-bing:before { - content: "\edc6"; -} - -.ti-brand-bitbucket:before { - content: "\edc7"; -} - -.ti-brand-booking:before { - content: "\edc8"; -} - -.ti-brand-bootstrap:before { - content: "\ef3e"; -} - -.ti-brand-chrome:before { - content: "\ec18"; -} - -.ti-brand-codepen:before { - content: "\ec6f"; -} - -.ti-brand-codesandbox:before { - content: "\ed6a"; -} - -.ti-brand-coinbase:before { - content: "\f209"; -} - -.ti-brand-comedy-central:before { - content: "\f217"; -} - -.ti-brand-css3:before { - content: "\ed6b"; -} - -.ti-brand-cucumber:before { - content: "\ef6c"; -} - -.ti-brand-d3:before { - content: "\f24e"; -} - -.ti-brand-debian:before { - content: "\ef57"; -} - -.ti-brand-deno:before { - content: "\f24f"; -} - -.ti-brand-deviantart:before { - content: "\ecfb"; -} - -.ti-brand-discord:before { - content: "\ece3"; -} - -.ti-brand-disney:before { - content: "\f20a"; -} - -.ti-brand-disqus:before { - content: "\edc9"; -} - -.ti-brand-docker:before { - content: "\edca"; -} - -.ti-brand-doctrine:before { - content: "\ef6d"; -} - -.ti-brand-dribbble:before { - content: "\ec19"; -} - -.ti-brand-edge:before { - content: "\ecfc"; -} - -.ti-brand-facebook:before { - content: "\ec1a"; -} - -.ti-brand-figma:before { - content: "\ec93"; -} - -.ti-brand-finder:before { - content: "\f218"; -} - -.ti-brand-firebase:before { - content: "\ef6e"; -} - -.ti-brand-firefox:before { - content: "\ecfd"; -} - -.ti-brand-flickr:before { - content: "\ecfe"; -} - -.ti-brand-flipboard:before { - content: "\f20b"; -} - -.ti-brand-fortnite:before { - content: "\f260"; -} - -.ti-brand-foursquare:before { - content: "\ecff"; -} - -.ti-brand-framer:before { - content: "\ec1b"; -} - -.ti-brand-git:before { - content: "\ef6f"; -} - -.ti-brand-github:before { - content: "\ec1c"; -} - -.ti-brand-gitlab:before { - content: "\ec1d"; -} - -.ti-brand-gmail:before { - content: 
"\efa2"; -} - -.ti-brand-google:before { - content: "\ec1f"; -} - -.ti-brand-google-analytics:before { - content: "\edcb"; -} - -.ti-brand-google-drive:before { - content: "\ec1e"; -} - -.ti-brand-google-one:before { - content: "\f232"; -} - -.ti-brand-google-photos:before { - content: "\f20c"; -} - -.ti-brand-google-play:before { - content: "\ed25"; -} - -.ti-brand-gravatar:before { - content: "\edcc"; -} - -.ti-brand-grindr:before { - content: "\f20d"; -} - -.ti-brand-hipchat:before { - content: "\edcd"; -} - -.ti-brand-html5:before { - content: "\ed6c"; -} - -.ti-brand-instagram:before { - content: "\ec20"; -} - -.ti-brand-intercom:before { - content: "\f1cf"; -} - -.ti-brand-javascript:before { - content: "\ef0c"; -} - -.ti-brand-kickstarter:before { - content: "\edce"; -} - -.ti-brand-kotlin:before { - content: "\ed6d"; -} - -.ti-brand-lastfm:before { - content: "\f001"; -} - -.ti-brand-linkedin:before { - content: "\ec8c"; -} - -.ti-brand-linktree:before { - content: "\f1e7"; -} - -.ti-brand-loom:before { - content: "\ef70"; -} - -.ti-brand-mastercard:before { - content: "\ef49"; -} - -.ti-brand-mastodon:before { - content: "\f250"; -} - -.ti-brand-mcdonalds:before { - content: "\f251"; -} - -.ti-brand-medium:before { - content: "\ec70"; -} - -.ti-brand-mercedes:before { - content: "\f072"; -} - -.ti-brand-messenger:before { - content: "\ec71"; -} - -.ti-brand-meta:before { - content: "\efb0"; -} - -.ti-brand-monday:before { - content: "\f219"; -} - -.ti-brand-netbeans:before { - content: "\ef71"; -} - -.ti-brand-netflix:before { - content: "\edcf"; -} - -.ti-brand-nextjs:before { - content: "\f0dd"; -} - -.ti-brand-notion:before { - content: "\ef7b"; -} - -.ti-brand-nuxt:before { - content: "\f0de"; -} - -.ti-brand-nytimes:before { - content: "\ef8d"; -} - -.ti-brand-open-source:before { - content: "\edd0"; -} - -.ti-brand-opera:before { - content: "\ec21"; -} - -.ti-brand-pagekit:before { - content: "\edd1"; -} - -.ti-brand-patreon:before { - content: "\edd2"; -} - -.ti-brand-paypal:before { - content: "\ec22"; -} - -.ti-brand-pepsi:before { - content: "\f261"; -} - -.ti-brand-php:before { - content: "\ef72"; -} - -.ti-brand-pinterest:before { - content: "\ec8d"; -} - -.ti-brand-pocket:before { - content: "\ed00"; -} - -.ti-brand-producthunt:before { - content: "\edd3"; -} - -.ti-brand-pushover:before { - content: "\f20e"; -} - -.ti-brand-python:before { - content: "\ed01"; -} - -.ti-brand-react-native:before { - content: "\ef73"; -} - -.ti-brand-reddit:before { - content: "\ec8e"; -} - -.ti-brand-safari:before { - content: "\ec23"; -} - -.ti-brand-sass:before { - content: "\edd4"; -} - -.ti-brand-sentry:before { - content: "\edd5"; -} - -.ti-brand-shazam:before { - content: "\edd6"; -} - -.ti-brand-shopee:before { - content: "\f252"; -} - -.ti-brand-sketch:before { - content: "\ec24"; -} - -.ti-brand-skype:before { - content: "\ed02"; -} - -.ti-brand-slack:before { - content: "\ec72"; -} - -.ti-brand-snapchat:before { - content: "\ec25"; -} - -.ti-brand-snapseed:before { - content: "\f253"; -} - -.ti-brand-soundcloud:before { - content: "\ed6e"; -} - -.ti-brand-spotify:before { - content: "\ed03"; -} - -.ti-brand-stackoverflow:before { - content: "\ef58"; -} - -.ti-brand-steam:before { - content: "\ed6f"; -} - -.ti-brand-strava:before { - content: "\f254"; -} - -.ti-brand-stripe:before { - content: "\edd7"; -} - -.ti-brand-sublime-text:before { - content: "\ef74"; -} - -.ti-brand-surfshark:before { - content: "\f255"; -} - -.ti-brand-svelte:before { - content: "\f0df"; -} - 
-.ti-brand-tabler:before { - content: "\ec8f"; -} - -.ti-brand-tailwind:before { - content: "\eca1"; -} - -.ti-brand-telegram:before { - content: "\ec26"; -} - -.ti-brand-tidal:before { - content: "\ed70"; -} - -.ti-brand-tiktok:before { - content: "\ec73"; -} - -.ti-brand-tinder:before { - content: "\ed71"; -} - -.ti-brand-toyota:before { - content: "\f262"; -} - -.ti-brand-tripadvisor:before { - content: "\f002"; -} - -.ti-brand-tumblr:before { - content: "\ed04"; -} - -.ti-brand-twitch:before { - content: "\ed05"; -} - -.ti-brand-twitter:before { - content: "\ec27"; -} - -.ti-brand-uber:before { - content: "\ef75"; -} - -.ti-brand-ubuntu:before { - content: "\ef59"; -} - -.ti-brand-unsplash:before { - content: "\edd8"; -} - -.ti-brand-vercel:before { - content: "\ef24"; -} - -.ti-brand-vimeo:before { - content: "\ed06"; -} - -.ti-brand-vinted:before { - content: "\f20f"; -} - -.ti-brand-visual-studio:before { - content: "\ef76"; -} - -.ti-brand-vivaldi:before { - content: "\f210"; -} - -.ti-brand-vk:before { - content: "\ed72"; -} - -.ti-brand-vue:before { - content: "\f0e0"; -} - -.ti-brand-walmart:before { - content: "\f211"; -} - -.ti-brand-whatsapp:before { - content: "\ec74"; -} - -.ti-brand-windows:before { - content: "\ecd8"; -} - -.ti-brand-wish:before { - content: "\f212"; -} - -.ti-brand-xing:before { - content: "\f21a"; -} - -.ti-brand-yahoo:before { - content: "\ed73"; -} - -.ti-brand-yatse:before { - content: "\f213"; -} - -.ti-brand-ycombinator:before { - content: "\edd9"; -} - -.ti-brand-youtube:before { - content: "\ec90"; -} - -.ti-brand-youtube-kids:before { - content: "\f214"; -} - -.ti-brand-zoom:before { - content: "\f215"; -} - -.ti-brand-zwift:before { - content: "\f216"; -} - -.ti-bread:before { - content: "\efa3"; -} - -.ti-briefcase:before { - content: "\ea46"; -} - -.ti-brightness:before { - content: "\eb7f"; -} - -.ti-brightness-2:before { - content: "\ee19"; -} - -.ti-brightness-down:before { - content: "\eb7d"; -} - -.ti-brightness-half:before { - content: "\ee1a"; -} - -.ti-brightness-up:before { - content: "\eb7e"; -} - -.ti-broadcast:before { - content: "\f1e9"; -} - -.ti-broadcast-off:before { - content: "\f1e8"; -} - -.ti-browser:before { - content: "\ebb7"; -} - -.ti-browser-check:before { - content: "\efd6"; -} - -.ti-browser-off:before { - content: "\f0c1"; -} - -.ti-browser-plus:before { - content: "\efd7"; -} - -.ti-browser-x:before { - content: "\efd8"; -} - -.ti-brush:before { - content: "\ebb8"; -} - -.ti-brush-off:before { - content: "\f0c2"; -} - -.ti-bucket:before { - content: "\ea47"; -} - -.ti-bucket-off:before { - content: "\f103"; -} - -.ti-bug:before { - content: "\ea48"; -} - -.ti-bug-off:before { - content: "\f0c3"; -} - -.ti-building:before { - content: "\ea4f"; -} - -.ti-building-arch:before { - content: "\ea49"; -} - -.ti-building-bank:before { - content: "\ebe2"; -} - -.ti-building-bridge:before { - content: "\ea4b"; -} - -.ti-building-bridge-2:before { - content: "\ea4a"; -} - -.ti-building-carousel:before { - content: "\ed87"; -} - -.ti-building-castle:before { - content: "\ed88"; -} - -.ti-building-church:before { - content: "\ea4c"; -} - -.ti-building-community:before { - content: "\ebf6"; -} - -.ti-building-cottage:before { - content: "\ee1b"; -} - -.ti-building-factory:before { - content: "\ee1c"; -} - -.ti-building-factory-2:before { - content: "\f082"; -} - -.ti-building-fortress:before { - content: "\ed89"; -} - -.ti-building-hospital:before { - content: "\ea4d"; -} - -.ti-building-lighthouse:before { - content: "\ed8a"; 
-} - -.ti-building-monument:before { - content: "\ed26"; -} - -.ti-building-pavilon:before { - content: "\ebf7"; -} - -.ti-building-skyscraper:before { - content: "\ec39"; -} - -.ti-building-store:before { - content: "\ea4e"; -} - -.ti-building-warehouse:before { - content: "\ebe3"; -} - -.ti-bulb:before { - content: "\ea51"; -} - -.ti-bulb-off:before { - content: "\ea50"; -} - -.ti-bulldozer:before { - content: "\ee1d"; -} - -.ti-bus:before { - content: "\ebe4"; -} - -.ti-businessplan:before { - content: "\ee1e"; -} - -.ti-butterfly:before { - content: "\efd9"; -} - -.ti-c-sharp:before { - content: "\f003"; -} - -.ti-cactus:before { - content: "\f21b"; -} - -.ti-cake:before { - content: "\f00f"; -} - -.ti-cake-off:before { - content: "\f104"; -} - -.ti-calculator:before { - content: "\eb80"; -} - -.ti-calculator-off:before { - content: "\f0c4"; -} - -.ti-calendar:before { - content: "\ea53"; -} - -.ti-calendar-event:before { - content: "\ea52"; -} - -.ti-calendar-minus:before { - content: "\ebb9"; -} - -.ti-calendar-off:before { - content: "\ee1f"; -} - -.ti-calendar-plus:before { - content: "\ebba"; -} - -.ti-calendar-stats:before { - content: "\ee20"; -} - -.ti-calendar-time:before { - content: "\ee21"; -} - -.ti-camera:before { - content: "\ea54"; -} - -.ti-camera-minus:before { - content: "\ec3a"; -} - -.ti-camera-off:before { - content: "\ecee"; -} - -.ti-camera-plus:before { - content: "\ec3b"; -} - -.ti-camera-rotate:before { - content: "\ee22"; -} - -.ti-camera-selfie:before { - content: "\ee23"; -} - -.ti-candle:before { - content: "\efc6"; -} - -.ti-candy:before { - content: "\ef0d"; -} - -.ti-candy-off:before { - content: "\f0c5"; -} - -.ti-capture:before { - content: "\ec3c"; -} - -.ti-capture-off:before { - content: "\f0c6"; -} - -.ti-car:before { - content: "\ebbb"; -} - -.ti-car-crane:before { - content: "\ef25"; -} - -.ti-car-crash:before { - content: "\efa4"; -} - -.ti-car-off:before { - content: "\f0c7"; -} - -.ti-caravan:before { - content: "\ec7c"; -} - -.ti-cardboards:before { - content: "\ed74"; -} - -.ti-cardboards-off:before { - content: "\f0c8"; -} - -.ti-caret-down:before { - content: "\eb5d"; -} - -.ti-caret-left:before { - content: "\eb5e"; -} - -.ti-caret-right:before { - content: "\eb5f"; -} - -.ti-caret-up:before { - content: "\eb60"; -} - -.ti-carrot:before { - content: "\f21c"; -} - -.ti-cash:before { - content: "\ea55"; -} - -.ti-cash-banknote:before { - content: "\ee25"; -} - -.ti-cash-banknote-off:before { - content: "\ee24"; -} - -.ti-cash-off:before { - content: "\f105"; -} - -.ti-cast:before { - content: "\ea56"; -} - -.ti-cast-off:before { - content: "\f0c9"; -} - -.ti-category:before { - content: "\f1f6"; -} - -.ti-category-2:before { - content: "\f1f5"; -} - -.ti-ce:before { - content: "\ed75"; -} - -.ti-ce-off:before { - content: "\f0ca"; -} - -.ti-cell:before { - content: "\f05f"; -} - -.ti-cell-signal-1:before { - content: "\f083"; -} - -.ti-cell-signal-2:before { - content: "\f084"; -} - -.ti-cell-signal-3:before { - content: "\f085"; -} - -.ti-cell-signal-4:before { - content: "\f086"; -} - -.ti-cell-signal-5:before { - content: "\f087"; -} - -.ti-cell-signal-off:before { - content: "\f088"; -} - -.ti-certificate:before { - content: "\ed76"; -} - -.ti-certificate-2:before { - content: "\f073"; -} - -.ti-certificate-2-off:before { - content: "\f0cb"; -} - -.ti-certificate-off:before { - content: "\f0cc"; -} - -.ti-charging-pile:before { - content: "\ee26"; -} - -.ti-chart-arcs:before { - content: "\ee28"; -} - -.ti-chart-arcs-3:before { - 
content: "\ee27"; -} - -.ti-chart-area:before { - content: "\ea58"; -} - -.ti-chart-area-line:before { - content: "\ea57"; -} - -.ti-chart-arrows:before { - content: "\ee2a"; -} - -.ti-chart-arrows-vertical:before { - content: "\ee29"; -} - -.ti-chart-bar:before { - content: "\ea59"; -} - -.ti-chart-bubble:before { - content: "\ec75"; -} - -.ti-chart-candle:before { - content: "\ea5a"; -} - -.ti-chart-circles:before { - content: "\ee2b"; -} - -.ti-chart-donut:before { - content: "\ea5b"; -} - -.ti-chart-donut-2:before { - content: "\ee2c"; -} - -.ti-chart-donut-3:before { - content: "\ee2d"; -} - -.ti-chart-donut-4:before { - content: "\ee2e"; -} - -.ti-chart-dots:before { - content: "\ee2f"; -} - -.ti-chart-dots-2:before { - content: "\f097"; -} - -.ti-chart-dots-3:before { - content: "\f098"; -} - -.ti-chart-infographic:before { - content: "\ee30"; -} - -.ti-chart-line:before { - content: "\ea5c"; -} - -.ti-chart-pie:before { - content: "\ea5d"; -} - -.ti-chart-pie-2:before { - content: "\ee31"; -} - -.ti-chart-pie-3:before { - content: "\ee32"; -} - -.ti-chart-pie-4:before { - content: "\ee33"; -} - -.ti-chart-radar:before { - content: "\ed77"; -} - -.ti-check:before { - content: "\ea5e"; -} - -.ti-checkbox:before { - content: "\eba6"; -} - -.ti-checklist:before { - content: "\f074"; -} - -.ti-checks:before { - content: "\ebaa"; -} - -.ti-checkup-list:before { - content: "\ef5a"; -} - -.ti-cheese:before { - content: "\ef26"; -} - -.ti-chef-hat:before { - content: "\f21d"; -} - -.ti-chevron-down:before { - content: "\ea5f"; -} - -.ti-chevron-down-left:before { - content: "\ed09"; -} - -.ti-chevron-down-right:before { - content: "\ed0a"; -} - -.ti-chevron-left:before { - content: "\ea60"; -} - -.ti-chevron-right:before { - content: "\ea61"; -} - -.ti-chevron-up:before { - content: "\ea62"; -} - -.ti-chevron-up-left:before { - content: "\ed0b"; -} - -.ti-chevron-up-right:before { - content: "\ed0c"; -} - -.ti-chevrons-down:before { - content: "\ea63"; -} - -.ti-chevrons-down-left:before { - content: "\ed0d"; -} - -.ti-chevrons-down-right:before { - content: "\ed0e"; -} - -.ti-chevrons-left:before { - content: "\ea64"; -} - -.ti-chevrons-right:before { - content: "\ea65"; -} - -.ti-chevrons-up:before { - content: "\ea66"; -} - -.ti-chevrons-up-left:before { - content: "\ed0f"; -} - -.ti-chevrons-up-right:before { - content: "\ed10"; -} - -.ti-christmas-tree:before { - content: "\ed78"; -} - -.ti-circle:before { - content: "\ea6b"; -} - -.ti-circle-0:before { - content: "\ee34"; -} - -.ti-circle-1:before { - content: "\ee35"; -} - -.ti-circle-2:before { - content: "\ee36"; -} - -.ti-circle-3:before { - content: "\ee37"; -} - -.ti-circle-4:before { - content: "\ee38"; -} - -.ti-circle-5:before { - content: "\ee39"; -} - -.ti-circle-6:before { - content: "\ee3a"; -} - -.ti-circle-7:before { - content: "\ee3b"; -} - -.ti-circle-8:before { - content: "\ee3c"; -} - -.ti-circle-9:before { - content: "\ee3d"; -} - -.ti-circle-check:before { - content: "\ea67"; -} - -.ti-circle-dashed:before { - content: "\ed27"; -} - -.ti-circle-dot:before { - content: "\efb1"; -} - -.ti-circle-dotted:before { - content: "\ed28"; -} - -.ti-circle-half:before { - content: "\ee3f"; -} - -.ti-circle-half-2:before { - content: "\eff3"; -} - -.ti-circle-half-vertical:before { - content: "\ee3e"; -} - -.ti-circle-minus:before { - content: "\ea68"; -} - -.ti-circle-off:before { - content: "\ee40"; -} - -.ti-circle-plus:before { - content: "\ea69"; -} - -.ti-circle-rectangle:before { - content: "\f010"; -} - 
-.ti-circle-rectangle-off:before { - content: "\f0cd"; -} - -.ti-circle-square:before { - content: "\ece4"; -} - -.ti-circle-triangle:before { - content: "\f011"; -} - -.ti-circle-x:before { - content: "\ea6a"; -} - -.ti-circles:before { - content: "\ece5"; -} - -.ti-clear-all:before { - content: "\ee41"; -} - -.ti-clear-formatting:before { - content: "\ebe5"; -} - -.ti-click:before { - content: "\ebbc"; -} - -.ti-clipboard:before { - content: "\ea6f"; -} - -.ti-clipboard-check:before { - content: "\ea6c"; -} - -.ti-clipboard-list:before { - content: "\ea6d"; -} - -.ti-clipboard-off:before { - content: "\f0ce"; -} - -.ti-clipboard-plus:before { - content: "\efb2"; -} - -.ti-clipboard-text:before { - content: "\f089"; -} - -.ti-clipboard-x:before { - content: "\ea6e"; -} - -.ti-clock:before { - content: "\ea70"; -} - -.ti-clock-2:before { - content: "\f099"; -} - -.ti-clock-off:before { - content: "\f0cf"; -} - -.ti-cloud:before { - content: "\ea76"; -} - -.ti-cloud-computing:before { - content: "\f1d0"; -} - -.ti-cloud-data-connection:before { - content: "\f1d1"; -} - -.ti-cloud-download:before { - content: "\ea71"; -} - -.ti-cloud-fog:before { - content: "\ecd9"; -} - -.ti-cloud-lock:before { - content: "\efdb"; -} - -.ti-cloud-lock-open:before { - content: "\efda"; -} - -.ti-cloud-off:before { - content: "\ed3e"; -} - -.ti-cloud-rain:before { - content: "\ea72"; -} - -.ti-cloud-snow:before { - content: "\ea73"; -} - -.ti-cloud-storm:before { - content: "\ea74"; -} - -.ti-cloud-upload:before { - content: "\ea75"; -} - -.ti-clover:before { - content: "\f1ea"; -} - -.ti-clover-2:before { - content: "\f21e"; -} - -.ti-clubs:before { - content: "\eff4"; -} - -.ti-code:before { - content: "\ea77"; -} - -.ti-code-minus:before { - content: "\ee42"; -} - -.ti-code-off:before { - content: "\f0d0"; -} - -.ti-code-plus:before { - content: "\ee43"; -} - -.ti-coffee:before { - content: "\ef0e"; -} - -.ti-coffee-off:before { - content: "\f106"; -} - -.ti-coin:before { - content: "\eb82"; -} - -.ti-coin-off:before { - content: "\f0d1"; -} - -.ti-color-picker:before { - content: "\ebe6"; -} - -.ti-color-picker-off:before { - content: "\f0d2"; -} - -.ti-color-swatch:before { - content: "\eb61"; -} - -.ti-color-swatch-off:before { - content: "\f0d3"; -} - -.ti-column-insert-left:before { - content: "\ee44"; -} - -.ti-column-insert-right:before { - content: "\ee45"; -} - -.ti-columns:before { - content: "\eb83"; -} - -.ti-columns-off:before { - content: "\f0d4"; -} - -.ti-comet:before { - content: "\ec76"; -} - -.ti-command:before { - content: "\ea78"; -} - -.ti-compass:before { - content: "\ea79"; -} - -.ti-compass-off:before { - content: "\f0d5"; -} - -.ti-components:before { - content: "\efa5"; -} - -.ti-components-off:before { - content: "\f0d6"; -} - -.ti-cone:before { - content: "\efdd"; -} - -.ti-cone-2:before { - content: "\efdc"; -} - -.ti-confetti:before { - content: "\ee46"; -} - -.ti-container:before { - content: "\ee47"; -} - -.ti-container-off:before { - content: "\f107"; -} - -.ti-contrast:before { - content: "\ec4e"; -} - -.ti-contrast-2:before { - content: "\efc7"; -} - -.ti-cookie:before { - content: "\ef0f"; -} - -.ti-cookie-off:before { - content: "\f0d7"; -} - -.ti-copy:before { - content: "\ea7a"; -} - -.ti-copy-off:before { - content: "\f0d8"; -} - -.ti-copyleft:before { - content: "\ec3d"; -} - -.ti-copyleft-off:before { - content: "\f0d9"; -} - -.ti-copyright:before { - content: "\ea7b"; -} - -.ti-copyright-off:before { - content: "\f0da"; -} - -.ti-corner-down-left:before { - 
content: "\ea7c"; -} - -.ti-corner-down-left-double:before { - content: "\ee48"; -} - -.ti-corner-down-right:before { - content: "\ea7d"; -} - -.ti-corner-down-right-double:before { - content: "\ee49"; -} - -.ti-corner-left-down:before { - content: "\ea7e"; -} - -.ti-corner-left-down-double:before { - content: "\ee4a"; -} - -.ti-corner-left-up:before { - content: "\ea7f"; -} - -.ti-corner-left-up-double:before { - content: "\ee4b"; -} - -.ti-corner-right-down:before { - content: "\ea80"; -} - -.ti-corner-right-down-double:before { - content: "\ee4c"; -} - -.ti-corner-right-up:before { - content: "\ea81"; -} - -.ti-corner-right-up-double:before { - content: "\ee4d"; -} - -.ti-corner-up-left:before { - content: "\ea82"; -} - -.ti-corner-up-left-double:before { - content: "\ee4e"; -} - -.ti-corner-up-right:before { - content: "\ea83"; -} - -.ti-corner-up-right-double:before { - content: "\ee4f"; -} - -.ti-cpu:before { - content: "\ef8e"; -} - -.ti-cpu-2:before { - content: "\f075"; -} - -.ti-cpu-off:before { - content: "\f108"; -} - -.ti-crane:before { - content: "\ef27"; -} - -.ti-crane-off:before { - content: "\f109"; -} - -.ti-creative-commons:before { - content: "\efb3"; -} - -.ti-creative-commons-by:before { - content: "\f21f"; -} - -.ti-creative-commons-nc:before { - content: "\f220"; -} - -.ti-creative-commons-nd:before { - content: "\f221"; -} - -.ti-creative-commons-off:before { - content: "\f10a"; -} - -.ti-creative-commons-sa:before { - content: "\f222"; -} - -.ti-creative-commons-zero:before { - content: "\f223"; -} - -.ti-credit-card:before { - content: "\ea84"; -} - -.ti-credit-card-off:before { - content: "\ed11"; -} - -.ti-cricket:before { - content: "\f09a"; -} - -.ti-crop:before { - content: "\ea85"; -} - -.ti-cross:before { - content: "\ef8f"; -} - -.ti-cross-off:before { - content: "\f10b"; -} - -.ti-crosshair:before { - content: "\ec3e"; -} - -.ti-crown:before { - content: "\ed12"; -} - -.ti-crown-off:before { - content: "\ee50"; -} - -.ti-crutches:before { - content: "\ef5b"; -} - -.ti-crutches-off:before { - content: "\f10c"; -} - -.ti-cup:before { - content: "\ef28"; -} - -.ti-cup-off:before { - content: "\f10d"; -} - -.ti-curling:before { - content: "\efc8"; -} - -.ti-curly-loop:before { - content: "\ecda"; -} - -.ti-currency:before { - content: "\efa6"; -} - -.ti-currency-bahraini:before { - content: "\ee51"; -} - -.ti-currency-baht:before { - content: "\f08a"; -} - -.ti-currency-bitcoin:before { - content: "\ebab"; -} - -.ti-currency-cent:before { - content: "\ee53"; -} - -.ti-currency-dinar:before { - content: "\ee54"; -} - -.ti-currency-dirham:before { - content: "\ee55"; -} - -.ti-currency-dogecoin:before { - content: "\ef4b"; -} - -.ti-currency-dollar:before { - content: "\eb84"; -} - -.ti-currency-dollar-australian:before { - content: "\ee56"; -} - -.ti-currency-dollar-canadian:before { - content: "\ee57"; -} - -.ti-currency-dollar-singapore:before { - content: "\ee58"; -} - -.ti-currency-ethereum:before { - content: "\ee59"; -} - -.ti-currency-euro:before { - content: "\eb85"; -} - -.ti-currency-forint:before { - content: "\ee5a"; -} - -.ti-currency-frank:before { - content: "\ee5b"; -} - -.ti-currency-krone-czech:before { - content: "\ee5c"; -} - -.ti-currency-krone-danish:before { - content: "\ee5d"; -} - -.ti-currency-krone-swedish:before { - content: "\ee5e"; -} - -.ti-currency-leu:before { - content: "\ee5f"; -} - -.ti-currency-lira:before { - content: "\ee60"; -} - -.ti-currency-litecoin:before { - content: "\ee61"; -} - -.ti-currency-naira:before { - 
content: "\ee62"; -} - -.ti-currency-pound:before { - content: "\ebac"; -} - -.ti-currency-real:before { - content: "\ee63"; -} - -.ti-currency-renminbi:before { - content: "\ee64"; -} - -.ti-currency-ripple:before { - content: "\ee65"; -} - -.ti-currency-riyal:before { - content: "\ee66"; -} - -.ti-currency-rubel:before { - content: "\ee67"; -} - -.ti-currency-rupee:before { - content: "\ebad"; -} - -.ti-currency-shekel:before { - content: "\ee68"; -} - -.ti-currency-taka:before { - content: "\ee69"; -} - -.ti-currency-tugrik:before { - content: "\ee6a"; -} - -.ti-currency-won:before { - content: "\ee6b"; -} - -.ti-currency-yen:before { - content: "\ebae"; -} - -.ti-currency-zloty:before { - content: "\ee6c"; -} - -.ti-current-location:before { - content: "\ecef"; -} - -.ti-current-location-off:before { - content: "\f10e"; -} - -.ti-cursor-off:before { - content: "\f10f"; -} - -.ti-cursor-text:before { - content: "\ee6d"; -} - -.ti-cut:before { - content: "\ea86"; -} - -.ti-dashboard:before { - content: "\ea87"; -} - -.ti-database:before { - content: "\ea88"; -} - -.ti-database-export:before { - content: "\ee6e"; -} - -.ti-database-import:before { - content: "\ee6f"; -} - -.ti-database-off:before { - content: "\ee70"; -} - -.ti-dental:before { - content: "\f025"; -} - -.ti-dental-off:before { - content: "\f110"; -} - -.ti-details:before { - content: "\ee71"; -} - -.ti-device-analytics:before { - content: "\ee72"; -} - -.ti-device-audio-tape:before { - content: "\ee73"; -} - -.ti-device-camera-phone:before { - content: "\f233"; -} - -.ti-device-cctv:before { - content: "\ee74"; -} - -.ti-device-computer-camera:before { - content: "\ee76"; -} - -.ti-device-computer-camera-off:before { - content: "\ee75"; -} - -.ti-device-desktop:before { - content: "\ea89"; -} - -.ti-device-desktop-analytics:before { - content: "\ee77"; -} - -.ti-device-desktop-off:before { - content: "\ee78"; -} - -.ti-device-floppy:before { - content: "\eb62"; -} - -.ti-device-gamepad:before { - content: "\eb63"; -} - -.ti-device-gamepad-2:before { - content: "\f1d2"; -} - -.ti-device-heart-monitor:before { - content: "\f060"; -} - -.ti-device-laptop:before { - content: "\eb64"; -} - -.ti-device-laptop-off:before { - content: "\f061"; -} - -.ti-device-mobile:before { - content: "\ea8a"; -} - -.ti-device-mobile-charging:before { - content: "\f224"; -} - -.ti-device-mobile-message:before { - content: "\ee79"; -} - -.ti-device-mobile-off:before { - content: "\f062"; -} - -.ti-device-mobile-rotated:before { - content: "\ecdb"; -} - -.ti-device-mobile-vibration:before { - content: "\eb86"; -} - -.ti-device-nintendo:before { - content: "\f026"; -} - -.ti-device-nintendo-off:before { - content: "\f111"; -} - -.ti-device-speaker:before { - content: "\ea8b"; -} - -.ti-device-speaker-off:before { - content: "\f112"; -} - -.ti-device-tablet:before { - content: "\ea8c"; -} - -.ti-device-tablet-off:before { - content: "\f063"; -} - -.ti-device-tv:before { - content: "\ea8d"; -} - -.ti-device-tv-off:before { - content: "\f064"; -} - -.ti-device-tv-old:before { - content: "\f1d3"; -} - -.ti-device-watch:before { - content: "\ebf9"; -} - -.ti-device-watch-off:before { - content: "\f065"; -} - -.ti-device-watch-stats:before { - content: "\ef7d"; -} - -.ti-device-watch-stats-2:before { - content: "\ef7c"; -} - -.ti-devices:before { - content: "\eb87"; -} - -.ti-devices-2:before { - content: "\ed29"; -} - -.ti-devices-off:before { - content: "\f066"; -} - -.ti-devices-pc:before { - content: "\ee7a"; -} - -.ti-devices-pc-off:before { - 
content: "\f113"; -} - -.ti-dialpad:before { - content: "\f067"; -} - -.ti-dialpad-off:before { - content: "\f114"; -} - -.ti-diamond:before { - content: "\eb65"; -} - -.ti-diamond-off:before { - content: "\f115"; -} - -.ti-diamonds:before { - content: "\eff5"; -} - -.ti-dice:before { - content: "\eb66"; -} - -.ti-dice-1:before { - content: "\f08b"; -} - -.ti-dice-2:before { - content: "\f08c"; -} - -.ti-dice-3:before { - content: "\f08d"; -} - -.ti-dice-4:before { - content: "\f08e"; -} - -.ti-dice-5:before { - content: "\f08f"; -} - -.ti-dice-6:before { - content: "\f090"; -} - -.ti-dimensions:before { - content: "\ee7b"; -} - -.ti-direction:before { - content: "\ebfb"; -} - -.ti-direction-horizontal:before { - content: "\ebfa"; -} - -.ti-direction-sign:before { - content: "\f1f7"; -} - -.ti-directions:before { - content: "\ea8e"; -} - -.ti-directions-off:before { - content: "\f116"; -} - -.ti-disabled:before { - content: "\ea8f"; -} - -.ti-disabled-2:before { - content: "\ebaf"; -} - -.ti-disabled-off:before { - content: "\f117"; -} - -.ti-disc:before { - content: "\ea90"; -} - -.ti-disc-off:before { - content: "\f118"; -} - -.ti-discount:before { - content: "\ebbd"; -} - -.ti-discount-2:before { - content: "\ee7c"; -} - -.ti-discount-check:before { - content: "\f1f8"; -} - -.ti-divide:before { - content: "\ed5c"; -} - -.ti-dna:before { - content: "\ee7d"; -} - -.ti-dna-2:before { - content: "\ef5c"; -} - -.ti-dna-2-off:before { - content: "\f119"; -} - -.ti-dna-off:before { - content: "\f11a"; -} - -.ti-dog-bowl:before { - content: "\ef29"; -} - -.ti-door:before { - content: "\ef4e"; -} - -.ti-door-enter:before { - content: "\ef4c"; -} - -.ti-door-exit:before { - content: "\ef4d"; -} - -.ti-door-off:before { - content: "\f11b"; -} - -.ti-dots:before { - content: "\ea95"; -} - -.ti-dots-circle-horizontal:before { - content: "\ea91"; -} - -.ti-dots-diagonal:before { - content: "\ea93"; -} - -.ti-dots-diagonal-2:before { - content: "\ea92"; -} - -.ti-dots-vertical:before { - content: "\ea94"; -} - -.ti-download:before { - content: "\ea96"; -} - -.ti-download-off:before { - content: "\f11c"; -} - -.ti-drag-drop:before { - content: "\eb89"; -} - -.ti-drag-drop-2:before { - content: "\eb88"; -} - -.ti-drone:before { - content: "\ed79"; -} - -.ti-drone-off:before { - content: "\ee7e"; -} - -.ti-drop-circle:before { - content: "\efde"; -} - -.ti-droplet:before { - content: "\ea97"; -} - -.ti-droplet-filled:before { - content: "\ee80"; -} - -.ti-droplet-filled-2:before { - content: "\ee7f"; -} - -.ti-droplet-half:before { - content: "\ee82"; -} - -.ti-droplet-half-2:before { - content: "\ee81"; -} - -.ti-droplet-off:before { - content: "\ee83"; -} - -.ti-ear:before { - content: "\ebce"; -} - -.ti-ear-off:before { - content: "\ee84"; -} - -.ti-edit:before { - content: "\ea98"; -} - -.ti-edit-circle:before { - content: "\ee85"; -} - -.ti-edit-circle-off:before { - content: "\f11d"; -} - -.ti-edit-off:before { - content: "\f11e"; -} - -.ti-egg:before { - content: "\eb8a"; -} - -.ti-egg-off:before { - content: "\f11f"; -} - -.ti-elevator:before { - content: "\efdf"; -} - -.ti-emergency-bed:before { - content: "\ef5d"; -} - -.ti-emphasis:before { - content: "\ebcf"; -} - -.ti-engine:before { - content: "\ef7e"; -} - -.ti-engine-off:before { - content: "\f120"; -} - -.ti-equal:before { - content: "\ee87"; -} - -.ti-equal-not:before { - content: "\ee86"; -} - -.ti-eraser:before { - content: "\eb8b"; -} - -.ti-eraser-off:before { - content: "\f121"; -} - -.ti-error-404:before { - content: "\f027"; -} - 
-.ti-error-404-off:before { - content: "\f122"; -} - -.ti-exchange:before { - content: "\ebe7"; -} - -.ti-exchange-off:before { - content: "\f123"; -} - -.ti-exclamation-mark:before { - content: "\efb4"; -} - -.ti-exclamation-mark-off:before { - content: "\f124"; -} - -.ti-explicit:before { - content: "\f256"; -} - -.ti-exposure:before { - content: "\eb8c"; -} - -.ti-external-link:before { - content: "\ea99"; -} - -.ti-external-link-off:before { - content: "\f125"; -} - -.ti-eye:before { - content: "\ea9a"; -} - -.ti-eye-check:before { - content: "\ee88"; -} - -.ti-eye-off:before { - content: "\ecf0"; -} - -.ti-eye-table:before { - content: "\ef5e"; -} - -.ti-eyeglass:before { - content: "\ee8a"; -} - -.ti-eyeglass-2:before { - content: "\ee89"; -} - -.ti-eyeglass-off:before { - content: "\f126"; -} - -.ti-face-id:before { - content: "\ea9b"; -} - -.ti-face-id-error:before { - content: "\efa7"; -} - -.ti-face-mask:before { - content: "\efb5"; -} - -.ti-face-mask-off:before { - content: "\f127"; -} - -.ti-fall:before { - content: "\ecb9"; -} - -.ti-feather:before { - content: "\ee8b"; -} - -.ti-feather-off:before { - content: "\f128"; -} - -.ti-fence:before { - content: "\ef2a"; -} - -.ti-fence-off:before { - content: "\f129"; -} - -.ti-fidget-spinner:before { - content: "\f068"; -} - -.ti-file:before { - content: "\eaa4"; -} - -.ti-file-3d:before { - content: "\f032"; -} - -.ti-file-alert:before { - content: "\ede6"; -} - -.ti-file-analytics:before { - content: "\ede7"; -} - -.ti-file-arrow-left:before { - content: "\f033"; -} - -.ti-file-arrow-right:before { - content: "\f034"; -} - -.ti-file-barcode:before { - content: "\f035"; -} - -.ti-file-certificate:before { - content: "\ed4d"; -} - -.ti-file-chart:before { - content: "\f036"; -} - -.ti-file-check:before { - content: "\ea9c"; -} - -.ti-file-code:before { - content: "\ebd0"; -} - -.ti-file-code-2:before { - content: "\ede8"; -} - -.ti-file-database:before { - content: "\f037"; -} - -.ti-file-description:before { - content: "\f028"; -} - -.ti-file-diff:before { - content: "\ecf1"; -} - -.ti-file-digit:before { - content: "\efa8"; -} - -.ti-file-dislike:before { - content: "\ed2a"; -} - -.ti-file-dollar:before { - content: "\efe0"; -} - -.ti-file-dots:before { - content: "\f038"; -} - -.ti-file-download:before { - content: "\ea9d"; -} - -.ti-file-euro:before { - content: "\efe1"; -} - -.ti-file-export:before { - content: "\ede9"; -} - -.ti-file-horizontal:before { - content: "\ebb0"; -} - -.ti-file-import:before { - content: "\edea"; -} - -.ti-file-info:before { - content: "\edec"; -} - -.ti-file-invoice:before { - content: "\eb67"; -} - -.ti-file-like:before { - content: "\ed2b"; -} - -.ti-file-minus:before { - content: "\ea9e"; -} - -.ti-file-music:before { - content: "\ea9f"; -} - -.ti-file-off:before { - content: "\ecf2"; -} - -.ti-file-pencil:before { - content: "\f039"; -} - -.ti-file-phone:before { - content: "\ecdc"; -} - -.ti-file-plus:before { - content: "\eaa0"; -} - -.ti-file-power:before { - content: "\f03a"; -} - -.ti-file-report:before { - content: "\eded"; -} - -.ti-file-rss:before { - content: "\f03b"; -} - -.ti-file-scissors:before { - content: "\f03c"; -} - -.ti-file-search:before { - content: "\ed5d"; -} - -.ti-file-settings:before { - content: "\f029"; -} - -.ti-file-shredder:before { - content: "\eaa1"; -} - -.ti-file-signal:before { - content: "\f03d"; -} - -.ti-file-spreadsheet:before { - content: "\f03e"; -} - -.ti-file-star:before { - content: "\f03f"; -} - -.ti-file-symlink:before { - content: "\ed53"; -} - 
-.ti-file-text:before { - content: "\eaa2"; -} - -.ti-file-time:before { - content: "\f040"; -} - -.ti-file-typography:before { - content: "\f041"; -} - -.ti-file-unknown:before { - content: "\f042"; -} - -.ti-file-upload:before { - content: "\ec91"; -} - -.ti-file-vector:before { - content: "\f043"; -} - -.ti-file-x:before { - content: "\eaa3"; -} - -.ti-file-zip:before { - content: "\ed4e"; -} - -.ti-files:before { - content: "\edef"; -} - -.ti-files-off:before { - content: "\edee"; -} - -.ti-filter:before { - content: "\eaa5"; -} - -.ti-filter-off:before { - content: "\ed2c"; -} - -.ti-fingerprint:before { - content: "\ebd1"; -} - -.ti-fingerprint-off:before { - content: "\f12a"; -} - -.ti-firetruck:before { - content: "\ebe8"; -} - -.ti-first-aid-kit:before { - content: "\ef5f"; -} - -.ti-fish:before { - content: "\ef2b"; -} - -.ti-fish-hook:before { - content: "\f1f9"; -} - -.ti-fish-off:before { - content: "\f12b"; -} - -.ti-flag:before { - content: "\eaa6"; -} - -.ti-flag-2:before { - content: "\ee8c"; -} - -.ti-flag-2-off:before { - content: "\f12c"; -} - -.ti-flag-3:before { - content: "\ee8d"; -} - -.ti-flag-off:before { - content: "\f12d"; -} - -.ti-flame:before { - content: "\ec2c"; -} - -.ti-flame-off:before { - content: "\f12e"; -} - -.ti-flare:before { - content: "\ee8e"; -} - -.ti-flask:before { - content: "\ebd2"; -} - -.ti-flask-2:before { - content: "\ef60"; -} - -.ti-flask-2-off:before { - content: "\f12f"; -} - -.ti-flask-off:before { - content: "\f130"; -} - -.ti-flip-horizontal:before { - content: "\eaa7"; -} - -.ti-flip-vertical:before { - content: "\eaa8"; -} - -.ti-float-center:before { - content: "\ebb1"; -} - -.ti-float-left:before { - content: "\ebb2"; -} - -.ti-float-none:before { - content: "\ed13"; -} - -.ti-float-right:before { - content: "\ebb3"; -} - -.ti-flower:before { - content: "\eff6"; -} - -.ti-flower-off:before { - content: "\f131"; -} - -.ti-focus:before { - content: "\eb8d"; -} - -.ti-focus-2:before { - content: "\ebd3"; -} - -.ti-focus-centered:before { - content: "\f02a"; -} - -.ti-fold:before { - content: "\ed56"; -} - -.ti-fold-down:before { - content: "\ed54"; -} - -.ti-fold-up:before { - content: "\ed55"; -} - -.ti-folder:before { - content: "\eaad"; -} - -.ti-folder-minus:before { - content: "\eaaa"; -} - -.ti-folder-off:before { - content: "\ed14"; -} - -.ti-folder-plus:before { - content: "\eaab"; -} - -.ti-folder-x:before { - content: "\eaac"; -} - -.ti-folders:before { - content: "\eaae"; -} - -.ti-folders-off:before { - content: "\f133"; -} - -.ti-forbid:before { - content: "\ebd5"; -} - -.ti-forbid-2:before { - content: "\ebd4"; -} - -.ti-forklift:before { - content: "\ebe9"; -} - -.ti-forms:before { - content: "\ee8f"; -} - -.ti-fountain:before { - content: "\f09b"; -} - -.ti-fountain-off:before { - content: "\f134"; -} - -.ti-frame:before { - content: "\eaaf"; -} - -.ti-frame-off:before { - content: "\f135"; -} - -.ti-free-rights:before { - content: "\efb6"; -} - -.ti-fridge:before { - content: "\f1fa"; -} - -.ti-friends:before { - content: "\eab0"; -} - -.ti-friends-off:before { - content: "\f136"; -} - -.ti-function:before { - content: "\f225"; -} - -.ti-garden-cart:before { - content: "\f23e"; -} - -.ti-gas-station:before { - content: "\ec7d"; -} - -.ti-gas-station-off:before { - content: "\f137"; -} - -.ti-gauge:before { - content: "\eab1"; -} - -.ti-gauge-off:before { - content: "\f138"; -} - -.ti-gavel:before { - content: "\ef90"; -} - -.ti-gender-agender:before { - content: "\f0e1"; -} - -.ti-gender-androgyne:before { - 
content: "\f0e2"; -} - -.ti-gender-bigender:before { - content: "\f0e3"; -} - -.ti-gender-demiboy:before { - content: "\f0e4"; -} - -.ti-gender-demigirl:before { - content: "\f0e5"; -} - -.ti-gender-epicene:before { - content: "\f0e6"; -} - -.ti-gender-female:before { - content: "\f0e7"; -} - -.ti-gender-femme:before { - content: "\f0e8"; -} - -.ti-gender-genderfluid:before { - content: "\f0e9"; -} - -.ti-gender-genderless:before { - content: "\f0ea"; -} - -.ti-gender-genderqueer:before { - content: "\f0eb"; -} - -.ti-gender-hermaphrodite:before { - content: "\f0ec"; -} - -.ti-gender-intergender:before { - content: "\f0ed"; -} - -.ti-gender-male:before { - content: "\f0ee"; -} - -.ti-gender-neutrois:before { - content: "\f0ef"; -} - -.ti-gender-third:before { - content: "\f0f0"; -} - -.ti-gender-transgender:before { - content: "\f0f1"; -} - -.ti-gender-trasvesti:before { - content: "\f0f2"; -} - -.ti-geometry:before { - content: "\ee90"; -} - -.ti-ghost:before { - content: "\eb8e"; -} - -.ti-gif:before { - content: "\f257"; -} - -.ti-gift:before { - content: "\eb68"; -} - -.ti-git-branch:before { - content: "\eab2"; -} - -.ti-git-commit:before { - content: "\eab3"; -} - -.ti-git-compare:before { - content: "\eab4"; -} - -.ti-git-fork:before { - content: "\eb8f"; -} - -.ti-git-merge:before { - content: "\eab5"; -} - -.ti-git-pull-request:before { - content: "\eab6"; -} - -.ti-git-pull-request-closed:before { - content: "\ef7f"; -} - -.ti-git-pull-request-draft:before { - content: "\efb7"; -} - -.ti-gizmo:before { - content: "\f02b"; -} - -.ti-glass:before { - content: "\eab8"; -} - -.ti-glass-full:before { - content: "\eab7"; -} - -.ti-glass-off:before { - content: "\ee91"; -} - -.ti-globe:before { - content: "\eab9"; -} - -.ti-globe-off:before { - content: "\f139"; -} - -.ti-golf:before { - content: "\ed8c"; -} - -.ti-golf-off:before { - content: "\f13a"; -} - -.ti-gps:before { - content: "\ed7a"; -} - -.ti-grain:before { - content: "\ee92"; -} - -.ti-grid-dots:before { - content: "\eaba"; -} - -.ti-grid-pattern:before { - content: "\efc9"; -} - -.ti-grill:before { - content: "\efa9"; -} - -.ti-grill-off:before { - content: "\f13b"; -} - -.ti-grip-horizontal:before { - content: "\ec00"; -} - -.ti-grip-vertical:before { - content: "\ec01"; -} - -.ti-ground:before { - content: "\f23f"; -} - -.ti-growth:before { - content: "\ee93"; -} - -.ti-h-1:before { - content: "\ec94"; -} - -.ti-h-2:before { - content: "\ec95"; -} - -.ti-h-3:before { - content: "\ec96"; -} - -.ti-h-4:before { - content: "\ec97"; -} - -.ti-h-5:before { - content: "\ec98"; -} - -.ti-h-6:before { - content: "\ec99"; -} - -.ti-hammer:before { - content: "\ef91"; -} - -.ti-hammer-off:before { - content: "\f13c"; -} - -.ti-hand-click:before { - content: "\ef4f"; -} - -.ti-hand-finger:before { - content: "\ee94"; -} - -.ti-hand-finger-off:before { - content: "\f13d"; -} - -.ti-hand-grab:before { - content: "\f091"; -} - -.ti-hand-little-finger:before { - content: "\ee95"; -} - -.ti-hand-middle-finger:before { - content: "\ec2d"; -} - -.ti-hand-move:before { - content: "\ef50"; -} - -.ti-hand-off:before { - content: "\ed15"; -} - -.ti-hand-ring-finger:before { - content: "\ee96"; -} - -.ti-hand-rock:before { - content: "\ee97"; -} - -.ti-hand-stop:before { - content: "\ec2e"; -} - -.ti-hand-three-fingers:before { - content: "\ee98"; -} - -.ti-hand-two-fingers:before { - content: "\ee99"; -} - -.ti-hanger:before { - content: "\ee9a"; -} - -.ti-hanger-2:before { - content: "\f09c"; -} - -.ti-hanger-off:before { - content: "\f13e"; 
-} - -.ti-hash:before { - content: "\eabc"; -} - -.ti-haze:before { - content: "\efaa"; -} - -.ti-heading:before { - content: "\ee9b"; -} - -.ti-heading-off:before { - content: "\f13f"; -} - -.ti-headphones:before { - content: "\eabd"; -} - -.ti-headphones-off:before { - content: "\ed1d"; -} - -.ti-headset:before { - content: "\eb90"; -} - -.ti-health-recognition:before { - content: "\f1fb"; -} - -.ti-heart:before { - content: "\eabe"; -} - -.ti-heart-broken:before { - content: "\ecba"; -} - -.ti-heart-handshake:before { - content: "\f0f3"; -} - -.ti-heart-minus:before { - content: "\f140"; -} - -.ti-heart-off:before { - content: "\f141"; -} - -.ti-heart-plus:before { - content: "\f142"; -} - -.ti-heart-rate-monitor:before { - content: "\ef61"; -} - -.ti-heartbeat:before { - content: "\ef92"; -} - -.ti-helicopter:before { - content: "\ed8e"; -} - -.ti-helicopter-landing:before { - content: "\ed8d"; -} - -.ti-helmet:before { - content: "\efca"; -} - -.ti-helmet-off:before { - content: "\f143"; -} - -.ti-help:before { - content: "\eabf"; -} - -.ti-hexagon:before { - content: "\ec02"; -} - -.ti-hexagon-off:before { - content: "\ee9c"; -} - -.ti-hexagons:before { - content: "\f09d"; -} - -.ti-hierarchy:before { - content: "\ee9e"; -} - -.ti-hierarchy-2:before { - content: "\ee9d"; -} - -.ti-highlight:before { - content: "\ef3f"; -} - -.ti-highlight-off:before { - content: "\f144"; -} - -.ti-history:before { - content: "\ebea"; -} - -.ti-history-toggle:before { - content: "\f1fc"; -} - -.ti-home:before { - content: "\eac1"; -} - -.ti-home-2:before { - content: "\eac0"; -} - -.ti-home-off:before { - content: "\f145"; -} - -.ti-hotel-service:before { - content: "\ef80"; -} - -.ti-hourglass:before { - content: "\ef93"; -} - -.ti-hourglass-empty:before { - content: "\f146"; -} - -.ti-hourglass-high:before { - content: "\f092"; -} - -.ti-hourglass-low:before { - content: "\f093"; -} - -.ti-hourglass-off:before { - content: "\f147"; -} - -.ti-ice-cream:before { - content: "\eac2"; -} - -.ti-ice-cream-2:before { - content: "\ee9f"; -} - -.ti-ice-cream-off:before { - content: "\f148"; -} - -.ti-ice-skating:before { - content: "\efcb"; -} - -.ti-icons:before { - content: "\f1d4"; -} - -.ti-id:before { - content: "\eac3"; -} - -.ti-id-badge:before { - content: "\eff7"; -} - -.ti-id-badge-2:before { - content: "\f076"; -} - -.ti-id-off:before { - content: "\f149"; -} - -.ti-inbox:before { - content: "\eac4"; -} - -.ti-inbox-off:before { - content: "\f14a"; -} - -.ti-indent-decrease:before { - content: "\eb91"; -} - -.ti-indent-increase:before { - content: "\eb92"; -} - -.ti-infinity:before { - content: "\eb69"; -} - -.ti-info-circle:before { - content: "\eac5"; -} - -.ti-info-square:before { - content: "\eac6"; -} - -.ti-italic:before { - content: "\eb93"; -} - -.ti-jewish-star:before { - content: "\f1d5"; -} - -.ti-jump-rope:before { - content: "\ed8f"; -} - -.ti-karate:before { - content: "\ed32"; -} - -.ti-kayak:before { - content: "\f1d6"; -} - -.ti-kering:before { - content: "\efb8"; -} - -.ti-key:before { - content: "\eac7"; -} - -.ti-key-off:before { - content: "\f14b"; -} - -.ti-keyboard:before { - content: "\ebd6"; -} - -.ti-keyboard-hide:before { - content: "\ec7e"; -} - -.ti-keyboard-off:before { - content: "\eea0"; -} - -.ti-keyboard-show:before { - content: "\ec7f"; -} - -.ti-ladder:before { - content: "\efe2"; -} - -.ti-ladder-off:before { - content: "\f14c"; -} - -.ti-lamp:before { - content: "\efab"; -} - -.ti-lamp-2:before { - content: "\f09e"; -} - -.ti-lamp-off:before { - content: 
"\f14d"; -} - -.ti-language:before { - content: "\ebbe"; -} - -.ti-language-hiragana:before { - content: "\ef77"; -} - -.ti-language-katakana:before { - content: "\ef78"; -} - -.ti-language-off:before { - content: "\f14e"; -} - -.ti-lasso:before { - content: "\efac"; -} - -.ti-lasso-off:before { - content: "\f14f"; -} - -.ti-layers-difference:before { - content: "\eac8"; -} - -.ti-layers-intersect:before { - content: "\eac9"; -} - -.ti-layers-intersect-2:before { - content: "\eff8"; -} - -.ti-layers-linked:before { - content: "\eea1"; -} - -.ti-layers-off:before { - content: "\f150"; -} - -.ti-layers-subtract:before { - content: "\eaca"; -} - -.ti-layers-union:before { - content: "\eacb"; -} - -.ti-layout:before { - content: "\eadb"; -} - -.ti-layout-2:before { - content: "\eacc"; -} - -.ti-layout-align-bottom:before { - content: "\eacd"; -} - -.ti-layout-align-center:before { - content: "\eace"; -} - -.ti-layout-align-left:before { - content: "\eacf"; -} - -.ti-layout-align-middle:before { - content: "\ead0"; -} - -.ti-layout-align-right:before { - content: "\ead1"; -} - -.ti-layout-align-top:before { - content: "\ead2"; -} - -.ti-layout-board:before { - content: "\ef95"; -} - -.ti-layout-board-split:before { - content: "\ef94"; -} - -.ti-layout-bottombar:before { - content: "\ead3"; -} - -.ti-layout-cards:before { - content: "\ec13"; -} - -.ti-layout-columns:before { - content: "\ead4"; -} - -.ti-layout-dashboard:before { - content: "\f02c"; -} - -.ti-layout-distribute-horizontal:before { - content: "\ead5"; -} - -.ti-layout-distribute-vertical:before { - content: "\ead6"; -} - -.ti-layout-grid:before { - content: "\edba"; -} - -.ti-layout-grid-add:before { - content: "\edb9"; -} - -.ti-layout-kanban:before { - content: "\ec3f"; -} - -.ti-layout-list:before { - content: "\ec14"; -} - -.ti-layout-navbar:before { - content: "\ead7"; -} - -.ti-layout-off:before { - content: "\f151"; -} - -.ti-layout-rows:before { - content: "\ead8"; -} - -.ti-layout-sidebar:before { - content: "\eada"; -} - -.ti-layout-sidebar-left-collapse:before { - content: "\f004"; -} - -.ti-layout-sidebar-left-expand:before { - content: "\f005"; -} - -.ti-layout-sidebar-right:before { - content: "\ead9"; -} - -.ti-layout-sidebar-right-collapse:before { - content: "\f006"; -} - -.ti-layout-sidebar-right-expand:before { - content: "\f007"; -} - -.ti-leaf:before { - content: "\ed4f"; -} - -.ti-leaf-off:before { - content: "\f152"; -} - -.ti-lego:before { - content: "\eadc"; -} - -.ti-lemon:before { - content: "\ef10"; -} - -.ti-lemon-2:before { - content: "\ef81"; -} - -.ti-letter-a:before { - content: "\ec50"; -} - -.ti-letter-b:before { - content: "\ec51"; -} - -.ti-letter-c:before { - content: "\ec52"; -} - -.ti-letter-case:before { - content: "\eea5"; -} - -.ti-letter-case-lower:before { - content: "\eea2"; -} - -.ti-letter-case-toggle:before { - content: "\eea3"; -} - -.ti-letter-case-upper:before { - content: "\eea4"; -} - -.ti-letter-d:before { - content: "\ec53"; -} - -.ti-letter-e:before { - content: "\ec54"; -} - -.ti-letter-f:before { - content: "\ec55"; -} - -.ti-letter-g:before { - content: "\ec56"; -} - -.ti-letter-h:before { - content: "\ec57"; -} - -.ti-letter-i:before { - content: "\ec58"; -} - -.ti-letter-j:before { - content: "\ec59"; -} - -.ti-letter-k:before { - content: "\ec5a"; -} - -.ti-letter-l:before { - content: "\ec5b"; -} - -.ti-letter-m:before { - content: "\ec5c"; -} - -.ti-letter-n:before { - content: "\ec5d"; -} - -.ti-letter-o:before { - content: "\ec5e"; -} - -.ti-letter-p:before { - 
content: "\ec5f"; -} - -.ti-letter-q:before { - content: "\ec60"; -} - -.ti-letter-r:before { - content: "\ec61"; -} - -.ti-letter-s:before { - content: "\ec62"; -} - -.ti-letter-spacing:before { - content: "\eea6"; -} - -.ti-letter-t:before { - content: "\ec63"; -} - -.ti-letter-u:before { - content: "\ec64"; -} - -.ti-letter-v:before { - content: "\ec65"; -} - -.ti-letter-w:before { - content: "\ec66"; -} - -.ti-letter-x:before { - content: "\ec67"; -} - -.ti-letter-y:before { - content: "\ec68"; -} - -.ti-letter-z:before { - content: "\ec69"; -} - -.ti-license:before { - content: "\ebc0"; -} - -.ti-license-off:before { - content: "\f153"; -} - -.ti-lifebuoy:before { - content: "\eadd"; -} - -.ti-lifebuoy-off:before { - content: "\f154"; -} - -.ti-line:before { - content: "\ec40"; -} - -.ti-line-dashed:before { - content: "\eea7"; -} - -.ti-line-dotted:before { - content: "\eea8"; -} - -.ti-line-height:before { - content: "\eb94"; -} - -.ti-link:before { - content: "\eade"; -} - -.ti-list:before { - content: "\eb6b"; -} - -.ti-list-check:before { - content: "\eb6a"; -} - -.ti-list-details:before { - content: "\ef40"; -} - -.ti-list-numbers:before { - content: "\ef11"; -} - -.ti-list-search:before { - content: "\eea9"; -} - -.ti-live-photo:before { - content: "\eadf"; -} - -.ti-live-view:before { - content: "\ec6b"; -} - -.ti-loader:before { - content: "\eca3"; -} - -.ti-loader-2:before { - content: "\f226"; -} - -.ti-loader-quarter:before { - content: "\eca2"; -} - -.ti-location:before { - content: "\eae0"; -} - -.ti-location-off:before { - content: "\f155"; -} - -.ti-lock:before { - content: "\eae2"; -} - -.ti-lock-access:before { - content: "\eeaa"; -} - -.ti-lock-off:before { - content: "\ed1e"; -} - -.ti-lock-open:before { - content: "\eae1"; -} - -.ti-lock-open-off:before { - content: "\f156"; -} - -.ti-lock-square:before { - content: "\ef51"; -} - -.ti-logic-and:before { - content: "\f240"; -} - -.ti-logic-buffer:before { - content: "\f241"; -} - -.ti-logic-nand:before { - content: "\f242"; -} - -.ti-logic-nor:before { - content: "\f243"; -} - -.ti-logic-not:before { - content: "\f244"; -} - -.ti-logic-or:before { - content: "\f245"; -} - -.ti-logic-xnor:before { - content: "\f246"; -} - -.ti-logic-xor:before { - content: "\f247"; -} - -.ti-login:before { - content: "\eba7"; -} - -.ti-logout:before { - content: "\eba8"; -} - -.ti-lollipop:before { - content: "\efcc"; -} - -.ti-lollipop-off:before { - content: "\f157"; -} - -.ti-luggage:before { - content: "\efad"; -} - -.ti-luggage-off:before { - content: "\f158"; -} - -.ti-lungs:before { - content: "\ef62"; -} - -.ti-macro:before { - content: "\eeab"; -} - -.ti-magnet:before { - content: "\eae3"; -} - -.ti-magnet-off:before { - content: "\f159"; -} - -.ti-mail:before { - content: "\eae5"; -} - -.ti-mail-fast:before { - content: "\f069"; -} - -.ti-mail-forward:before { - content: "\eeac"; -} - -.ti-mail-off:before { - content: "\f15a"; -} - -.ti-mail-opened:before { - content: "\eae4"; -} - -.ti-mailbox:before { - content: "\eead"; -} - -.ti-mailbox-off:before { - content: "\f15b"; -} - -.ti-man:before { - content: "\eae6"; -} - -.ti-manual-gearbox:before { - content: "\ed7b"; -} - -.ti-map:before { - content: "\eae9"; -} - -.ti-map-2:before { - content: "\eae7"; -} - -.ti-map-off:before { - content: "\f15c"; -} - -.ti-map-pin:before { - content: "\eae8"; -} - -.ti-map-pin-off:before { - content: "\ecf3"; -} - -.ti-map-pins:before { - content: "\ed5e"; -} - -.ti-map-search:before { - content: "\ef82"; -} - -.ti-markdown:before { - 
content: "\ec41"; -} - -.ti-marquee:before { - content: "\ec77"; -} - -.ti-marquee-2:before { - content: "\eeae"; -} - -.ti-marquee-off:before { - content: "\f15d"; -} - -.ti-mars:before { - content: "\ec80"; -} - -.ti-mask:before { - content: "\eeb0"; -} - -.ti-mask-off:before { - content: "\eeaf"; -} - -.ti-masks-theater:before { - content: "\f263"; -} - -.ti-massage:before { - content: "\eeb1"; -} - -.ti-math:before { - content: "\ebeb"; -} - -.ti-math-avg:before { - content: "\f0f4"; -} - -.ti-math-function:before { - content: "\eeb2"; -} - -.ti-math-function-off:before { - content: "\f15e"; -} - -.ti-math-max:before { - content: "\f0f5"; -} - -.ti-math-min:before { - content: "\f0f6"; -} - -.ti-math-symbols:before { - content: "\eeb3"; -} - -.ti-maximize:before { - content: "\eaea"; -} - -.ti-maximize-off:before { - content: "\f15f"; -} - -.ti-meat:before { - content: "\ef12"; -} - -.ti-medal:before { - content: "\ec78"; -} - -.ti-medal-2:before { - content: "\efcd"; -} - -.ti-medical-cross:before { - content: "\ec2f"; -} - -.ti-medical-cross-off:before { - content: "\f160"; -} - -.ti-medicine-syrup:before { - content: "\ef63"; -} - -.ti-menu:before { - content: "\eaeb"; -} - -.ti-menu-2:before { - content: "\ec42"; -} - -.ti-message:before { - content: "\eaef"; -} - -.ti-message-2:before { - content: "\eaec"; -} - -.ti-message-2-code:before { - content: "\f012"; -} - -.ti-message-2-share:before { - content: "\f077"; -} - -.ti-message-circle:before { - content: "\eaed"; -} - -.ti-message-circle-2:before { - content: "\ed3f"; -} - -.ti-message-circle-off:before { - content: "\ed40"; -} - -.ti-message-code:before { - content: "\f013"; -} - -.ti-message-dots:before { - content: "\eaee"; -} - -.ti-message-language:before { - content: "\efae"; -} - -.ti-message-off:before { - content: "\ed41"; -} - -.ti-message-plus:before { - content: "\ec9a"; -} - -.ti-message-report:before { - content: "\ec9b"; -} - -.ti-message-share:before { - content: "\f078"; -} - -.ti-messages:before { - content: "\eb6c"; -} - -.ti-messages-off:before { - content: "\ed42"; -} - -.ti-meteor:before { - content: "\f1fd"; -} - -.ti-microphone:before { - content: "\eaf0"; -} - -.ti-microphone-2:before { - content: "\ef2c"; -} - -.ti-microphone-off:before { - content: "\ed16"; -} - -.ti-microscope:before { - content: "\ef64"; -} - -.ti-microwave:before { - content: "\f248"; -} - -.ti-microwave-off:before { - content: "\f264"; -} - -.ti-military-award:before { - content: "\f079"; -} - -.ti-military-rank:before { - content: "\efcf"; -} - -.ti-milk:before { - content: "\ef13"; -} - -.ti-minimize:before { - content: "\eaf1"; -} - -.ti-minus:before { - content: "\eaf2"; -} - -.ti-minus-vertical:before { - content: "\eeb4"; -} - -.ti-mist:before { - content: "\ec30"; -} - -.ti-mood-boy:before { - content: "\ed2d"; -} - -.ti-mood-confuzed:before { - content: "\eaf3"; -} - -.ti-mood-crazy-happy:before { - content: "\ed90"; -} - -.ti-mood-cry:before { - content: "\ecbb"; -} - -.ti-mood-empty:before { - content: "\eeb5"; -} - -.ti-mood-happy:before { - content: "\eaf4"; -} - -.ti-mood-kid:before { - content: "\ec03"; -} - -.ti-mood-nervous:before { - content: "\ef96"; -} - -.ti-mood-neutral:before { - content: "\eaf5"; -} - -.ti-mood-off:before { - content: "\f161"; -} - -.ti-mood-sad:before { - content: "\eaf6"; -} - -.ti-mood-smile:before { - content: "\eaf7"; -} - -.ti-mood-suprised:before { - content: "\ec04"; -} - -.ti-mood-tongue:before { - content: "\eb95"; -} - -.ti-moon:before { - content: "\eaf8"; -} - -.ti-moon-2:before 
{ - content: "\ece6"; -} - -.ti-moon-off:before { - content: "\f162"; -} - -.ti-moon-stars:before { - content: "\ece7"; -} - -.ti-moped:before { - content: "\ecbc"; -} - -.ti-motorbike:before { - content: "\eeb6"; -} - -.ti-mountain:before { - content: "\ef97"; -} - -.ti-mouse:before { - content: "\eaf9"; -} - -.ti-mouse-2:before { - content: "\f1d7"; -} - -.ti-mouse-off:before { - content: "\f163"; -} - -.ti-movie:before { - content: "\eafa"; -} - -.ti-movie-off:before { - content: "\f164"; -} - -.ti-mug:before { - content: "\eafb"; -} - -.ti-mug-off:before { - content: "\f165"; -} - -.ti-multiplier-0-5x:before { - content: "\ef41"; -} - -.ti-multiplier-1-5x:before { - content: "\ef42"; -} - -.ti-multiplier-1x:before { - content: "\ef43"; -} - -.ti-multiplier-2x:before { - content: "\ef44"; -} - -.ti-mushroom:before { - content: "\ef14"; -} - -.ti-music:before { - content: "\eafc"; -} - -.ti-music-off:before { - content: "\f166"; -} - -.ti-network:before { - content: "\f09f"; -} - -.ti-new-section:before { - content: "\ebc1"; -} - -.ti-news:before { - content: "\eafd"; -} - -.ti-news-off:before { - content: "\f167"; -} - -.ti-nfc:before { - content: "\eeb7"; -} - -.ti-nfc-off:before { - content: "\f168"; -} - -.ti-no-copyright:before { - content: "\efb9"; -} - -.ti-no-creative-commons:before { - content: "\efba"; -} - -.ti-no-derivatives:before { - content: "\efbb"; -} - -.ti-north-star:before { - content: "\f014"; -} - -.ti-note:before { - content: "\eb6d"; -} - -.ti-note-off:before { - content: "\f169"; -} - -.ti-notebook:before { - content: "\eb96"; -} - -.ti-notes:before { - content: "\eb6e"; -} - -.ti-notes-off:before { - content: "\f16a"; -} - -.ti-notification:before { - content: "\eafe"; -} - -.ti-notification-off:before { - content: "\f16b"; -} - -.ti-number:before { - content: "\f1fe"; -} - -.ti-number-0:before { - content: "\edf0"; -} - -.ti-number-1:before { - content: "\edf1"; -} - -.ti-number-2:before { - content: "\edf2"; -} - -.ti-number-3:before { - content: "\edf3"; -} - -.ti-number-4:before { - content: "\edf4"; -} - -.ti-number-5:before { - content: "\edf5"; -} - -.ti-number-6:before { - content: "\edf6"; -} - -.ti-number-7:before { - content: "\edf7"; -} - -.ti-number-8:before { - content: "\edf8"; -} - -.ti-number-9:before { - content: "\edf9"; -} - -.ti-numbers:before { - content: "\f015"; -} - -.ti-nurse:before { - content: "\ef65"; -} - -.ti-octagon:before { - content: "\ecbd"; -} - -.ti-octagon-off:before { - content: "\eeb8"; -} - -.ti-old:before { - content: "\eeb9"; -} - -.ti-olympics:before { - content: "\eeba"; -} - -.ti-omega:before { - content: "\eb97"; -} - -.ti-outbound:before { - content: "\f249"; -} - -.ti-outlet:before { - content: "\ebd7"; -} - -.ti-oval:before { - content: "\f02e"; -} - -.ti-oval-vertical:before { - content: "\f02d"; -} - -.ti-overline:before { - content: "\eebb"; -} - -.ti-package:before { - content: "\eaff"; -} - -.ti-package-off:before { - content: "\f16c"; -} - -.ti-packge-export:before { - content: "\f07a"; -} - -.ti-packge-import:before { - content: "\f07b"; -} - -.ti-pacman:before { - content: "\eebc"; -} - -.ti-page-break:before { - content: "\ec81"; -} - -.ti-paint:before { - content: "\eb00"; -} - -.ti-paint-off:before { - content: "\f16d"; -} - -.ti-palette:before { - content: "\eb01"; -} - -.ti-palette-off:before { - content: "\f16e"; -} - -.ti-panorama-horizontal:before { - content: "\ed33"; -} - -.ti-panorama-vertical:before { - content: "\ed34"; -} - -.ti-paper-bag:before { - content: "\f02f"; -} - 
-.ti-paper-bag-off:before { - content: "\f16f"; -} - -.ti-paperclip:before { - content: "\eb02"; -} - -.ti-parachute:before { - content: "\ed7c"; -} - -.ti-parachute-off:before { - content: "\f170"; -} - -.ti-parentheses:before { - content: "\ebd8"; -} - -.ti-parentheses-off:before { - content: "\f171"; -} - -.ti-parking:before { - content: "\eb03"; -} - -.ti-parking-off:before { - content: "\f172"; -} - -.ti-paw:before { - content: "\eff9"; -} - -.ti-peace:before { - content: "\ecbe"; -} - -.ti-pencil:before { - content: "\eb04"; -} - -.ti-pencil-minus:before { - content: "\f1eb"; -} - -.ti-pencil-off:before { - content: "\f173"; -} - -.ti-pencil-plus:before { - content: "\f1ec"; -} - -.ti-pennant:before { - content: "\ed7d"; -} - -.ti-pennant-2:before { - content: "\f06a"; -} - -.ti-pennant-off:before { - content: "\f174"; -} - -.ti-pentagon:before { - content: "\efe3"; -} - -.ti-pepper:before { - content: "\ef15"; -} - -.ti-pepper-off:before { - content: "\f175"; -} - -.ti-percentage:before { - content: "\ecf4"; -} - -.ti-perspective:before { - content: "\eebd"; -} - -.ti-perspective-off:before { - content: "\f176"; -} - -.ti-phone:before { - content: "\eb09"; -} - -.ti-phone-call:before { - content: "\eb05"; -} - -.ti-phone-calling:before { - content: "\ec43"; -} - -.ti-phone-check:before { - content: "\ec05"; -} - -.ti-phone-incoming:before { - content: "\eb06"; -} - -.ti-phone-off:before { - content: "\ecf5"; -} - -.ti-phone-outgoing:before { - content: "\eb07"; -} - -.ti-phone-pause:before { - content: "\eb08"; -} - -.ti-phone-plus:before { - content: "\ec06"; -} - -.ti-phone-x:before { - content: "\ec07"; -} - -.ti-photo:before { - content: "\eb0a"; -} - -.ti-photo-off:before { - content: "\ecf6"; -} - -.ti-physotherapist:before { - content: "\eebe"; -} - -.ti-picture-in-picture:before { - content: "\ed35"; -} - -.ti-picture-in-picture-off:before { - content: "\ed43"; -} - -.ti-picture-in-picture-on:before { - content: "\ed44"; -} - -.ti-picture-in-picture-top:before { - content: "\efe4"; -} - -.ti-pig:before { - content: "\ef52"; -} - -.ti-pig-off:before { - content: "\f177"; -} - -.ti-pill:before { - content: "\ec44"; -} - -.ti-pill-off:before { - content: "\f178"; -} - -.ti-pills:before { - content: "\ef66"; -} - -.ti-pin:before { - content: "\ec9c"; -} - -.ti-pinned:before { - content: "\ed60"; -} - -.ti-pinned-off:before { - content: "\ed5f"; -} - -.ti-pizza:before { - content: "\edbb"; -} - -.ti-pizza-off:before { - content: "\f179"; -} - -.ti-plane:before { - content: "\eb6f"; -} - -.ti-plane-arrival:before { - content: "\eb99"; -} - -.ti-plane-departure:before { - content: "\eb9a"; -} - -.ti-plane-inflight:before { - content: "\ef98"; -} - -.ti-plane-off:before { - content: "\f17a"; -} - -.ti-plane-tilt:before { - content: "\f1ed"; -} - -.ti-planet:before { - content: "\ec08"; -} - -.ti-planet-off:before { - content: "\f17b"; -} - -.ti-plant:before { - content: "\ed50"; -} - -.ti-plant-2:before { - content: "\ed7e"; -} - -.ti-plant-2-off:before { - content: "\f17c"; -} - -.ti-plant-off:before { - content: "\f17d"; -} - -.ti-play-card:before { - content: "\eebf"; -} - -.ti-play-card-off:before { - content: "\f17e"; -} - -.ti-player-eject:before { - content: "\efbc"; -} - -.ti-player-pause:before { - content: "\ed45"; -} - -.ti-player-play:before { - content: "\ed46"; -} - -.ti-player-record:before { - content: "\ed47"; -} - -.ti-player-skip-back:before { - content: "\ed48"; -} - -.ti-player-skip-forward:before { - content: "\ed49"; -} - -.ti-player-stop:before { - content: 
"\ed4a"; -} - -.ti-player-track-next:before { - content: "\ed4b"; -} - -.ti-player-track-prev:before { - content: "\ed4c"; -} - -.ti-playlist:before { - content: "\eec0"; -} - -.ti-playlist-add:before { - content: "\f008"; -} - -.ti-playlist-off:before { - content: "\f17f"; -} - -.ti-playlist-x:before { - content: "\f009"; -} - -.ti-plug:before { - content: "\ebd9"; -} - -.ti-plug-connected:before { - content: "\f00a"; -} - -.ti-plug-connected-x:before { - content: "\f0a0"; -} - -.ti-plug-off:before { - content: "\f180"; -} - -.ti-plug-x:before { - content: "\f0a1"; -} - -.ti-plus:before { - content: "\eb0b"; -} - -.ti-podium:before { - content: "\f1d8"; -} - -.ti-point:before { - content: "\eb0c"; -} - -.ti-point-off:before { - content: "\f181"; -} - -.ti-pointer:before { - content: "\f265"; -} - -.ti-pokeball:before { - content: "\eec1"; -} - -.ti-polaroid:before { - content: "\eec2"; -} - -.ti-polygon:before { - content: "\efd0"; -} - -.ti-polygon-off:before { - content: "\f182"; -} - -.ti-poo:before { - content: "\f258"; -} - -.ti-pool:before { - content: "\ed91"; -} - -.ti-power:before { - content: "\eb0d"; -} - -.ti-pray:before { - content: "\ecbf"; -} - -.ti-premium-rights:before { - content: "\efbd"; -} - -.ti-prescription:before { - content: "\ef99"; -} - -.ti-presentation:before { - content: "\eb70"; -} - -.ti-presentation-analytics:before { - content: "\eec3"; -} - -.ti-presentation-off:before { - content: "\f183"; -} - -.ti-printer:before { - content: "\eb0e"; -} - -.ti-printer-off:before { - content: "\f184"; -} - -.ti-prison:before { - content: "\ef79"; -} - -.ti-prompt:before { - content: "\eb0f"; -} - -.ti-propeller:before { - content: "\eec4"; -} - -.ti-propeller-off:before { - content: "\f185"; -} - -.ti-puzzle:before { - content: "\eb10"; -} - -.ti-puzzle-2:before { - content: "\ef83"; -} - -.ti-puzzle-off:before { - content: "\f186"; -} - -.ti-pyramid:before { - content: "\eec5"; -} - -.ti-pyramid-off:before { - content: "\f187"; -} - -.ti-qrcode:before { - content: "\eb11"; -} - -.ti-question-mark:before { - content: "\ec9d"; -} - -.ti-quote:before { - content: "\efbe"; -} - -.ti-quote-off:before { - content: "\f188"; -} - -.ti-radar:before { - content: "\f017"; -} - -.ti-radar-2:before { - content: "\f016"; -} - -.ti-radio:before { - content: "\ef2d"; -} - -.ti-radioactive:before { - content: "\ecc0"; -} - -.ti-radioactive-off:before { - content: "\f189"; -} - -.ti-radius-bottom-left:before { - content: "\eec6"; -} - -.ti-radius-bottom-right:before { - content: "\eec7"; -} - -.ti-radius-top-left:before { - content: "\eec8"; -} - -.ti-radius-top-right:before { - content: "\eec9"; -} - -.ti-rainbow:before { - content: "\edbc"; -} - -.ti-rainbow-off:before { - content: "\f18a"; -} - -.ti-rating-12-plus:before { - content: "\f266"; -} - -.ti-rating-14-plus:before { - content: "\f267"; -} - -.ti-rating-16-plus:before { - content: "\f268"; -} - -.ti-rating-18-plus:before { - content: "\f269"; -} - -.ti-rating-21-plus:before { - content: "\f26a"; -} - -.ti-receipt:before { - content: "\edfd"; -} - -.ti-receipt-2:before { - content: "\edfa"; -} - -.ti-receipt-off:before { - content: "\edfb"; -} - -.ti-receipt-refund:before { - content: "\edfc"; -} - -.ti-receipt-tax:before { - content: "\edbd"; -} - -.ti-recharging:before { - content: "\eeca"; -} - -.ti-record-mail:before { - content: "\eb12"; -} - -.ti-record-mail-off:before { - content: "\f18b"; -} - -.ti-rectangle:before { - content: "\ed37"; -} - -.ti-rectangle-vertical:before { - content: "\ed36"; -} - 
-.ti-recycle:before { - content: "\eb9b"; -} - -.ti-recycle-off:before { - content: "\f18c"; -} - -.ti-refresh:before { - content: "\eb13"; -} - -.ti-refresh-alert:before { - content: "\ed57"; -} - -.ti-refresh-dot:before { - content: "\efbf"; -} - -.ti-refresh-off:before { - content: "\f18d"; -} - -.ti-registered:before { - content: "\eb14"; -} - -.ti-relation-many-to-many:before { - content: "\ed7f"; -} - -.ti-relation-one-to-many:before { - content: "\ed80"; -} - -.ti-relation-one-to-one:before { - content: "\ed81"; -} - -.ti-repeat:before { - content: "\eb72"; -} - -.ti-repeat-off:before { - content: "\f18e"; -} - -.ti-repeat-once:before { - content: "\eb71"; -} - -.ti-replace:before { - content: "\ebc7"; -} - -.ti-report:before { - content: "\eece"; -} - -.ti-report-analytics:before { - content: "\eecb"; -} - -.ti-report-medical:before { - content: "\eecc"; -} - -.ti-report-money:before { - content: "\eecd"; -} - -.ti-report-off:before { - content: "\f18f"; -} - -.ti-report-search:before { - content: "\ef84"; -} - -.ti-resize:before { - content: "\eecf"; -} - -.ti-ripple:before { - content: "\ed82"; -} - -.ti-ripple-off:before { - content: "\f190"; -} - -.ti-road:before { - content: "\f018"; -} - -.ti-road-off:before { - content: "\f191"; -} - -.ti-road-sign:before { - content: "\ecdd"; -} - -.ti-robot:before { - content: "\f00b"; -} - -.ti-robot-off:before { - content: "\f192"; -} - -.ti-rocket:before { - content: "\ec45"; -} - -.ti-rocket-off:before { - content: "\f193"; -} - -.ti-roller-skating:before { - content: "\efd1"; -} - -.ti-rollercoaster:before { - content: "\f0a2"; -} - -.ti-rotate:before { - content: "\eb16"; -} - -.ti-rotate-2:before { - content: "\ebb4"; -} - -.ti-rotate-360:before { - content: "\ef85"; -} - -.ti-rotate-clockwise:before { - content: "\eb15"; -} - -.ti-rotate-clockwise-2:before { - content: "\ebb5"; -} - -.ti-rotate-dot:before { - content: "\efe5"; -} - -.ti-rotate-rectangle:before { - content: "\ec15"; -} - -.ti-route:before { - content: "\eb17"; -} - -.ti-route-off:before { - content: "\f194"; -} - -.ti-router:before { - content: "\eb18"; -} - -.ti-row-insert-bottom:before { - content: "\eed0"; -} - -.ti-row-insert-top:before { - content: "\eed1"; -} - -.ti-rss:before { - content: "\eb19"; -} - -.ti-ruler:before { - content: "\eb1a"; -} - -.ti-ruler-2:before { - content: "\eed2"; -} - -.ti-ruler-2-off:before { - content: "\f195"; -} - -.ti-ruler-off:before { - content: "\f196"; -} - -.ti-run:before { - content: "\ec82"; -} - -.ti-sailboat:before { - content: "\ec83"; -} - -.ti-salt:before { - content: "\ef16"; -} - -.ti-satellite:before { - content: "\eed3"; -} - -.ti-satellite-off:before { - content: "\f197"; -} - -.ti-sausage:before { - content: "\ef17"; -} - -.ti-scale:before { - content: "\ebc2"; -} - -.ti-scale-off:before { - content: "\f198"; -} - -.ti-scale-outline:before { - content: "\ef53"; -} - -.ti-scale-outline-off:before { - content: "\f199"; -} - -.ti-scan:before { - content: "\ebc8"; -} - -.ti-scan-eye:before { - content: "\f1ff"; -} - -.ti-schema:before { - content: "\f200"; -} - -.ti-school:before { - content: "\ecf7"; -} - -.ti-school-off:before { - content: "\f19a"; -} - -.ti-scissors:before { - content: "\eb1b"; -} - -.ti-scissors-off:before { - content: "\f19b"; -} - -.ti-scooter:before { - content: "\ec6c"; -} - -.ti-scooter-electric:before { - content: "\ecc1"; -} - -.ti-screen-share:before { - content: "\ed18"; -} - -.ti-screen-share-off:before { - content: "\ed17"; -} - -.ti-screenshot:before { - content: "\f201"; -} - 
-.ti-scribble:before { - content: "\f0a3"; -} - -.ti-scuba-mask:before { - content: "\eed4"; -} - -.ti-search:before { - content: "\eb1c"; -} - -.ti-search-off:before { - content: "\f19c"; -} - -.ti-section:before { - content: "\eed5"; -} - -.ti-section-sign:before { - content: "\f019"; -} - -.ti-seeding:before { - content: "\ed51"; -} - -.ti-seeding-off:before { - content: "\f19d"; -} - -.ti-select:before { - content: "\ec9e"; -} - -.ti-selector:before { - content: "\eb1d"; -} - -.ti-send:before { - content: "\eb1e"; -} - -.ti-seo:before { - content: "\f26b"; -} - -.ti-separator:before { - content: "\ebda"; -} - -.ti-separator-horizontal:before { - content: "\ec79"; -} - -.ti-separator-vertical:before { - content: "\ec7a"; -} - -.ti-server:before { - content: "\eb1f"; -} - -.ti-server-2:before { - content: "\f07c"; -} - -.ti-server-off:before { - content: "\f19e"; -} - -.ti-servicemark:before { - content: "\ec09"; -} - -.ti-settings:before { - content: "\eb20"; -} - -.ti-settings-automation:before { - content: "\eed6"; -} - -.ti-settings-off:before { - content: "\f19f"; -} - -.ti-shadow:before { - content: "\eed8"; -} - -.ti-shadow-off:before { - content: "\eed7"; -} - -.ti-shape:before { - content: "\eb9c"; -} - -.ti-shape-2:before { - content: "\eed9"; -} - -.ti-shape-3:before { - content: "\eeda"; -} - -.ti-shape-off:before { - content: "\f1a0"; -} - -.ti-share:before { - content: "\eb21"; -} - -.ti-share-off:before { - content: "\f1a1"; -} - -.ti-shield:before { - content: "\eb24"; -} - -.ti-shield-check:before { - content: "\eb22"; -} - -.ti-shield-checkered:before { - content: "\ef9a"; -} - -.ti-shield-chevron:before { - content: "\ef9b"; -} - -.ti-shield-lock:before { - content: "\ed58"; -} - -.ti-shield-off:before { - content: "\ecf8"; -} - -.ti-shield-x:before { - content: "\eb23"; -} - -.ti-ship:before { - content: "\ec84"; -} - -.ti-shirt:before { - content: "\ec0a"; -} - -.ti-shirt-off:before { - content: "\f1a2"; -} - -.ti-shirt-sport:before { - content: "\f26c"; -} - -.ti-shoe:before { - content: "\efd2"; -} - -.ti-shoe-off:before { - content: "\f1a4"; -} - -.ti-shopping-cart:before { - content: "\eb25"; -} - -.ti-shopping-cart-discount:before { - content: "\eedb"; -} - -.ti-shopping-cart-off:before { - content: "\eedc"; -} - -.ti-shopping-cart-plus:before { - content: "\eedd"; -} - -.ti-shopping-cart-x:before { - content: "\eede"; -} - -.ti-shovel:before { - content: "\f1d9"; -} - -.ti-shredder:before { - content: "\eedf"; -} - -.ti-sign-left:before { - content: "\f06b"; -} - -.ti-sign-right:before { - content: "\f06c"; -} - -.ti-signal-3g:before { - content: "\f1ee"; -} - -.ti-signal-4g:before { - content: "\f1ef"; -} - -.ti-signal-4g-plus:before { - content: "\f259"; -} - -.ti-signal-5g:before { - content: "\f1f0"; -} - -.ti-signature:before { - content: "\eee0"; -} - -.ti-signature-off:before { - content: "\f1a5"; -} - -.ti-sitemap:before { - content: "\eb9d"; -} - -.ti-sitemap-off:before { - content: "\f1a6"; -} - -.ti-skateboard:before { - content: "\ecc2"; -} - -.ti-sleigh:before { - content: "\ef9c"; -} - -.ti-slice:before { - content: "\ebdb"; -} - -.ti-slideshow:before { - content: "\ebc9"; -} - -.ti-smart-home:before { - content: "\ecde"; -} - -.ti-smart-home-off:before { - content: "\f1a7"; -} - -.ti-smoking:before { - content: "\ecc4"; -} - -.ti-smoking-no:before { - content: "\ecc3"; -} - -.ti-snowflake:before { - content: "\ec0b"; -} - -.ti-snowflake-off:before { - content: "\f1a8"; -} - -.ti-snowman:before { - content: "\f26d"; -} - -.ti-soccer-field:before { 
- content: "\ed92"; -} - -.ti-social:before { - content: "\ebec"; -} - -.ti-social-off:before { - content: "\f1a9"; -} - -.ti-sock:before { - content: "\eee1"; -} - -.ti-sofa:before { - content: "\efaf"; -} - -.ti-sort-ascending:before { - content: "\eb26"; -} - -.ti-sort-ascending-2:before { - content: "\eee2"; -} - -.ti-sort-ascending-letters:before { - content: "\ef18"; -} - -.ti-sort-ascending-numbers:before { - content: "\ef19"; -} - -.ti-sort-descending:before { - content: "\eb27"; -} - -.ti-sort-descending-2:before { - content: "\eee3"; -} - -.ti-sort-descending-letters:before { - content: "\ef1a"; -} - -.ti-sort-descending-numbers:before { - content: "\ef1b"; -} - -.ti-sos:before { - content: "\f24a"; -} - -.ti-soup:before { - content: "\ef2e"; -} - -.ti-space:before { - content: "\ec0c"; -} - -.ti-space-off:before { - content: "\f1aa"; -} - -.ti-spacing-horizontal:before { - content: "\ef54"; -} - -.ti-spacing-vertical:before { - content: "\ef55"; -} - -.ti-spade:before { - content: "\effa"; -} - -.ti-speakerphone:before { - content: "\ed61"; -} - -.ti-speedboat:before { - content: "\ed93"; -} - -.ti-sport-billard:before { - content: "\eee4"; -} - -.ti-spy:before { - content: "\f227"; -} - -.ti-square:before { - content: "\eb2c"; -} - -.ti-square-0:before { - content: "\eee5"; -} - -.ti-square-1:before { - content: "\eee6"; -} - -.ti-square-2:before { - content: "\eee7"; -} - -.ti-square-3:before { - content: "\eee8"; -} - -.ti-square-4:before { - content: "\eee9"; -} - -.ti-square-5:before { - content: "\eeea"; -} - -.ti-square-6:before { - content: "\eeeb"; -} - -.ti-square-7:before { - content: "\eeec"; -} - -.ti-square-8:before { - content: "\eeed"; -} - -.ti-square-9:before { - content: "\eeee"; -} - -.ti-square-asterisk:before { - content: "\f01a"; -} - -.ti-square-check:before { - content: "\eb28"; -} - -.ti-square-dot:before { - content: "\ed59"; -} - -.ti-square-forbid:before { - content: "\ed5b"; -} - -.ti-square-forbid-2:before { - content: "\ed5a"; -} - -.ti-square-half:before { - content: "\effb"; -} - -.ti-square-minus:before { - content: "\eb29"; -} - -.ti-square-off:before { - content: "\eeef"; -} - -.ti-square-plus:before { - content: "\eb2a"; -} - -.ti-square-root:before { - content: "\eef1"; -} - -.ti-square-root-2:before { - content: "\eef0"; -} - -.ti-square-rotated:before { - content: "\ecdf"; -} - -.ti-square-rotated-forbid:before { - content: "\f01c"; -} - -.ti-square-rotated-forbid-2:before { - content: "\f01b"; -} - -.ti-square-rotated-off:before { - content: "\eef2"; -} - -.ti-square-toggle:before { - content: "\eef4"; -} - -.ti-square-toggle-horizontal:before { - content: "\eef3"; -} - -.ti-square-x:before { - content: "\eb2b"; -} - -.ti-squares-diagonal:before { - content: "\eef5"; -} - -.ti-squares-filled:before { - content: "\eef6"; -} - -.ti-stack:before { - content: "\eb2d"; -} - -.ti-stack-2:before { - content: "\eef7"; -} - -.ti-stack-3:before { - content: "\ef9d"; -} - -.ti-stack-pop:before { - content: "\f234"; -} - -.ti-stack-push:before { - content: "\f235"; -} - -.ti-stairs:before { - content: "\eca6"; -} - -.ti-stairs-down:before { - content: "\eca4"; -} - -.ti-stairs-up:before { - content: "\eca5"; -} - -.ti-star:before { - content: "\eb2e"; -} - -.ti-star-half:before { - content: "\ed19"; -} - -.ti-star-off:before { - content: "\ed62"; -} - -.ti-stars:before { - content: "\ed38"; -} - -.ti-steam:before { - content: "\f24b"; -} - -.ti-steering-wheel:before { - content: "\ec7b"; -} - -.ti-step-into:before { - content: "\ece0"; -} - 
-.ti-step-out:before { - content: "\ece1"; -} - -.ti-stethoscope:before { - content: "\edbe"; -} - -.ti-sticker:before { - content: "\eb2f"; -} - -.ti-storm:before { - content: "\f24c"; -} - -.ti-strikethrough:before { - content: "\eb9e"; -} - -.ti-submarine:before { - content: "\ed94"; -} - -.ti-subscript:before { - content: "\eb9f"; -} - -.ti-subtask:before { - content: "\ec9f"; -} - -.ti-sum:before { - content: "\eb73"; -} - -.ti-sum-off:before { - content: "\f1ab"; -} - -.ti-sun:before { - content: "\eb30"; -} - -.ti-sun-high:before { - content: "\f236"; -} - -.ti-sun-low:before { - content: "\f237"; -} - -.ti-sun-off:before { - content: "\ed63"; -} - -.ti-sun-wind:before { - content: "\f238"; -} - -.ti-sunglasses:before { - content: "\f239"; -} - -.ti-sunrise:before { - content: "\ef1c"; -} - -.ti-sunset:before { - content: "\ec31"; -} - -.ti-sunset-2:before { - content: "\f23a"; -} - -.ti-superscript:before { - content: "\eba0"; -} - -.ti-svg:before { - content: "\f25a"; -} - -.ti-swimming:before { - content: "\ec92"; -} - -.ti-switch:before { - content: "\eb33"; -} - -.ti-switch-2:before { - content: "\edbf"; -} - -.ti-switch-3:before { - content: "\edc0"; -} - -.ti-switch-horizontal:before { - content: "\eb31"; -} - -.ti-switch-vertical:before { - content: "\eb32"; -} - -.ti-sword:before { - content: "\f030"; -} - -.ti-sword-off:before { - content: "\f1ac"; -} - -.ti-swords:before { - content: "\f132"; -} - -.ti-table:before { - content: "\eba1"; -} - -.ti-table-alias:before { - content: "\f25b"; -} - -.ti-table-export:before { - content: "\eef8"; -} - -.ti-table-import:before { - content: "\eef9"; -} - -.ti-table-off:before { - content: "\eefa"; -} - -.ti-table-options:before { - content: "\f25c"; -} - -.ti-table-shortcut:before { - content: "\f25d"; -} - -.ti-tag:before { - content: "\eb34"; -} - -.ti-tag-off:before { - content: "\efc0"; -} - -.ti-tags:before { - content: "\ef86"; -} - -.ti-tags-off:before { - content: "\efc1"; -} - -.ti-tallymark-1:before { - content: "\ec46"; -} - -.ti-tallymark-2:before { - content: "\ec47"; -} - -.ti-tallymark-3:before { - content: "\ec48"; -} - -.ti-tallymark-4:before { - content: "\ec49"; -} - -.ti-tallymarks:before { - content: "\ec4a"; -} - -.ti-tank:before { - content: "\ed95"; -} - -.ti-target:before { - content: "\eb35"; -} - -.ti-target-off:before { - content: "\f1ad"; -} - -.ti-telescope:before { - content: "\f07d"; -} - -.ti-telescope-off:before { - content: "\f1ae"; -} - -.ti-temperature:before { - content: "\eb38"; -} - -.ti-temperature-celsius:before { - content: "\eb36"; -} - -.ti-temperature-fahrenheit:before { - content: "\eb37"; -} - -.ti-temperature-minus:before { - content: "\ebed"; -} - -.ti-temperature-off:before { - content: "\f1af"; -} - -.ti-temperature-plus:before { - content: "\ebee"; -} - -.ti-template:before { - content: "\eb39"; -} - -.ti-template-off:before { - content: "\f1b0"; -} - -.ti-tent:before { - content: "\eefb"; -} - -.ti-terminal:before { - content: "\ebdc"; -} - -.ti-terminal-2:before { - content: "\ebef"; -} - -.ti-test-pipe:before { - content: "\eb3a"; -} - -.ti-test-pipe-2:before { - content: "\f0a4"; -} - -.ti-test-pipe-off:before { - content: "\f1b1"; -} - -.ti-text-decrease:before { - content: "\f202"; -} - -.ti-text-direction-ltr:before { - content: "\eefc"; -} - -.ti-text-direction-rtl:before { - content: "\eefd"; -} - -.ti-text-increase:before { - content: "\f203"; -} - -.ti-text-recognition:before { - content: "\f204"; -} - -.ti-text-resize:before { - content: "\ef87"; -} - 
-.ti-text-wrap:before { - content: "\ebdd"; -} - -.ti-text-wrap-disabled:before { - content: "\eca7"; -} - -.ti-thermometer:before { - content: "\ef67"; -} - -.ti-thumb-down:before { - content: "\eb3b"; -} - -.ti-thumb-up:before { - content: "\eb3c"; -} - -.ti-ticket:before { - content: "\eb3d"; -} - -.ti-ticket-off:before { - content: "\f1b2"; -} - -.ti-tie:before { - content: "\f07e"; -} - -.ti-tilt-shift:before { - content: "\eefe"; -} - -.ti-tilt-shift-off:before { - content: "\f1b3"; -} - -.ti-timeline:before { - content: "\f031"; -} - -.ti-tir:before { - content: "\ebf0"; -} - -.ti-toggle-left:before { - content: "\eb3e"; -} - -.ti-toggle-right:before { - content: "\eb3f"; -} - -.ti-toilet-paper:before { - content: "\efd3"; -} - -.ti-toilet-paper-off:before { - content: "\f1b4"; -} - -.ti-tool:before { - content: "\eb40"; -} - -.ti-tools:before { - content: "\ebca"; -} - -.ti-tools-kitchen:before { - content: "\ed64"; -} - -.ti-tools-kitchen-2:before { - content: "\eeff"; -} - -.ti-tools-kitchen-2-off:before { - content: "\f1b5"; -} - -.ti-tools-kitchen-off:before { - content: "\f1b6"; -} - -.ti-tools-off:before { - content: "\f1b7"; -} - -.ti-tornado:before { - content: "\ece2"; -} - -.ti-tournament:before { - content: "\ecd0"; -} - -.ti-track:before { - content: "\ef00"; -} - -.ti-tractor:before { - content: "\ec0d"; -} - -.ti-trademark:before { - content: "\ec0e"; -} - -.ti-traffic-cone:before { - content: "\ec0f"; -} - -.ti-traffic-cone-off:before { - content: "\f1b8"; -} - -.ti-traffic-lights:before { - content: "\ed39"; -} - -.ti-traffic-lights-off:before { - content: "\f1b9"; -} - -.ti-train:before { - content: "\ed96"; -} - -.ti-transfer-in:before { - content: "\ef2f"; -} - -.ti-transfer-out:before { - content: "\ef30"; -} - -.ti-trash:before { - content: "\eb41"; -} - -.ti-trash-off:before { - content: "\ed65"; -} - -.ti-trash-x:before { - content: "\ef88"; -} - -.ti-tree:before { - content: "\ef01"; -} - -.ti-trees:before { - content: "\ec10"; -} - -.ti-trending-down:before { - content: "\eb42"; -} - -.ti-trending-down-2:before { - content: "\edc1"; -} - -.ti-trending-down-3:before { - content: "\edc2"; -} - -.ti-trending-up:before { - content: "\eb43"; -} - -.ti-trending-up-2:before { - content: "\edc3"; -} - -.ti-trending-up-3:before { - content: "\edc4"; -} - -.ti-triangle:before { - content: "\eb44"; -} - -.ti-triangle-inverted:before { - content: "\f01d"; -} - -.ti-triangle-off:before { - content: "\ef02"; -} - -.ti-triangle-square-circle:before { - content: "\ece8"; -} - -.ti-triangles:before { - content: "\f0a5"; -} - -.ti-trident:before { - content: "\ecc5"; -} - -.ti-trophy:before { - content: "\eb45"; -} - -.ti-truck:before { - content: "\ebc4"; -} - -.ti-truck-delivery:before { - content: "\ec4b"; -} - -.ti-truck-loading:before { - content: "\f1da"; -} - -.ti-truck-off:before { - content: "\ef03"; -} - -.ti-truck-return:before { - content: "\ec4c"; -} - -.ti-typography:before { - content: "\ebc5"; -} - -.ti-typography-off:before { - content: "\f1ba"; -} - -.ti-uf-off:before { - content: "\f26e"; -} - -.ti-ufo:before { - content: "\f26f"; -} - -.ti-umbrella:before { - content: "\ebf1"; -} - -.ti-umbrella-off:before { - content: "\f1bb"; -} - -.ti-underline:before { - content: "\eba2"; -} - -.ti-unlink:before { - content: "\eb46"; -} - -.ti-upload:before { - content: "\eb47"; -} - -.ti-urgent:before { - content: "\eb48"; -} - -.ti-usb:before { - content: "\f00c"; -} - -.ti-user:before { - content: "\eb4d"; -} - -.ti-user-check:before { - content: "\eb49"; -} - 
-.ti-user-circle:before { - content: "\ef68"; -} - -.ti-user-exclamation:before { - content: "\ec12"; -} - -.ti-user-minus:before { - content: "\eb4a"; -} - -.ti-user-off:before { - content: "\ecf9"; -} - -.ti-user-plus:before { - content: "\eb4b"; -} - -.ti-user-search:before { - content: "\ef89"; -} - -.ti-user-x:before { - content: "\eb4c"; -} - -.ti-users:before { - content: "\ebf2"; -} - -.ti-vaccine:before { - content: "\ef04"; -} - -.ti-vaccine-bottle:before { - content: "\ef69"; -} - -.ti-vaccine-off:before { - content: "\f1bc"; -} - -.ti-variable:before { - content: "\ef05"; -} - -.ti-variable-off:before { - content: "\f1bd"; -} - -.ti-vector:before { - content: "\eca9"; -} - -.ti-vector-bezier:before { - content: "\ef1d"; -} - -.ti-vector-bezier-2:before { - content: "\f1a3"; -} - -.ti-vector-off:before { - content: "\f1be"; -} - -.ti-vector-triangle:before { - content: "\eca8"; -} - -.ti-vector-triangle-off:before { - content: "\f1bf"; -} - -.ti-venus:before { - content: "\ec86"; -} - -.ti-versions:before { - content: "\ed52"; -} - -.ti-versions-off:before { - content: "\f1c0"; -} - -.ti-video:before { - content: "\ed22"; -} - -.ti-video-minus:before { - content: "\ed1f"; -} - -.ti-video-off:before { - content: "\ed20"; -} - -.ti-video-plus:before { - content: "\ed21"; -} - -.ti-view-360:before { - content: "\ed84"; -} - -.ti-view-360-off:before { - content: "\f1c1"; -} - -.ti-viewfinder:before { - content: "\eb4e"; -} - -.ti-viewfinder-off:before { - content: "\f1c2"; -} - -.ti-viewport-narrow:before { - content: "\ebf3"; -} - -.ti-viewport-wide:before { - content: "\ebf4"; -} - -.ti-vinyl:before { - content: "\f00d"; -} - -.ti-virus:before { - content: "\eb74"; -} - -.ti-virus-off:before { - content: "\ed66"; -} - -.ti-virus-search:before { - content: "\ed67"; -} - -.ti-vocabulary:before { - content: "\ef1e"; -} - -.ti-volume:before { - content: "\eb51"; -} - -.ti-volume-2:before { - content: "\eb4f"; -} - -.ti-volume-3:before { - content: "\eb50"; -} - -.ti-volume-off:before { - content: "\f1c3"; -} - -.ti-walk:before { - content: "\ec87"; -} - -.ti-wall:before { - content: "\ef7a"; -} - -.ti-wallet:before { - content: "\eb75"; -} - -.ti-wallet-off:before { - content: "\f1c4"; -} - -.ti-wallpaper:before { - content: "\ef56"; -} - -.ti-wallpaper-off:before { - content: "\f1c5"; -} - -.ti-wand:before { - content: "\ebcb"; -} - -.ti-wand-off:before { - content: "\f1c6"; -} - -.ti-wash-machine:before { - content: "\f25e"; -} - -.ti-wave-saw-tool:before { - content: "\ecd3"; -} - -.ti-wave-sine:before { - content: "\ecd4"; -} - -.ti-wave-square:before { - content: "\ecd5"; -} - -.ti-webhook:before { - content: "\f01e"; -} - -.ti-wheelchair:before { - content: "\f1db"; -} - -.ti-wifi:before { - content: "\eb52"; -} - -.ti-wifi-0:before { - content: "\eba3"; -} - -.ti-wifi-1:before { - content: "\eba4"; -} - -.ti-wifi-2:before { - content: "\eba5"; -} - -.ti-wifi-off:before { - content: "\ecfa"; -} - -.ti-wind:before { - content: "\ec34"; -} - -.ti-wind-off:before { - content: "\f1c7"; -} - -.ti-windmill:before { - content: "\ed85"; -} - -.ti-windmill-off:before { - content: "\f1c8"; -} - -.ti-window:before { - content: "\ef06"; -} - -.ti-window-maximize:before { - content: "\f1f1"; -} - -.ti-window-minimize:before { - content: "\f1f2"; -} - -.ti-window-off:before { - content: "\f1c9"; -} - -.ti-windsock:before { - content: "\f06d"; -} - -.ti-wiper:before { - content: "\ecab"; -} - -.ti-wiper-wash:before { - content: "\ecaa"; -} - -.ti-woman:before { - content: "\eb53"; -} - 
-.ti-world:before { - content: "\eb54"; -} - -.ti-world-download:before { - content: "\ef8a"; -} - -.ti-world-latitude:before { - content: "\ed2e"; -} - -.ti-world-longitude:before { - content: "\ed2f"; -} - -.ti-world-off:before { - content: "\f1ca"; -} - -.ti-world-upload:before { - content: "\ef8b"; -} - -.ti-wrecking-ball:before { - content: "\ed97"; -} - -.ti-writing:before { - content: "\ef08"; -} - -.ti-writing-off:before { - content: "\f1cb"; -} - -.ti-writing-sign:before { - content: "\ef07"; -} - -.ti-writing-sign-off:before { - content: "\f1cc"; -} - -.ti-x:before { - content: "\eb55"; -} - -.ti-yin-yang:before { - content: "\ec35"; -} - -.ti-yoga:before { - content: "\f01f"; -} - -.ti-zeppelin:before { - content: "\f270"; -} - -.ti-zodiac-aquarius:before { - content: "\ecac"; -} - -.ti-zodiac-aries:before { - content: "\ecad"; -} - -.ti-zodiac-cancer:before { - content: "\ecae"; -} - -.ti-zodiac-capricorn:before { - content: "\ecaf"; -} - -.ti-zodiac-gemini:before { - content: "\ecb0"; -} - -.ti-zodiac-leo:before { - content: "\ecb1"; -} - -.ti-zodiac-libra:before { - content: "\ecb2"; -} - -.ti-zodiac-pisces:before { - content: "\ecb3"; -} - -.ti-zodiac-sagittarius:before { - content: "\ecb4"; -} - -.ti-zodiac-scorpio:before { - content: "\ecb5"; -} - -.ti-zodiac-taurus:before { - content: "\ecb6"; -} - -.ti-zodiac-virgo:before { - content: "\ecb7"; -} - -.ti-zoom-cancel:before { - content: "\ec4d"; -} - -.ti-zoom-check:before { - content: "\ef09"; -} - -.ti-zoom-code:before { - content: "\f07f"; -} - -.ti-zoom-exclamation:before { - content: "\f080"; -} - -.ti-zoom-in:before { - content: "\eb56"; -} - -.ti-zoom-in-area:before { - content: "\f1dc"; -} - -.ti-zoom-money:before { - content: "\ef0a"; -} - -.ti-zoom-out:before { - content: "\eb57"; -} - -.ti-zoom-out-area:before { - content: "\f1dd"; -} - -.ti-zoom-pan:before { - content: "\f1de"; -} - -.ti-zoom-question:before { - content: "\edeb"; -} - -.ti-zzz:before { - content: "\f228"; -} diff --git a/spaces/ysharma/Chat_With_Blip2/app.py b/spaces/ysharma/Chat_With_Blip2/app.py deleted file mode 100644 index 6bbdfe2d3bded7179e2eb294d0b44cc61e2668bf..0000000000000000000000000000000000000000 --- a/spaces/ysharma/Chat_With_Blip2/app.py +++ /dev/null @@ -1,132 +0,0 @@ -import requests -from PIL import Image -import gradio as gr -from transformers import AutoProcessor, Blip2ForConditionalGeneration -import torch - - -css = """ -#column_container { - position: relative; - height: 800px; - max-width: 700px; - display: flex; - flex-direction: column; - background-color: lightgray; - border: 1px solid gray; - border-radius: 5px; - padding: 10px; - box-shadow: 2px 2px 5px gray; - margin-left: auto; - margin-right: auto; -} -#input_prompt { - position: fixed; - bottom: 0; - max-width: 680px; -} -#chatbot-component { - overflow: auto; -} -""" - -processor = AutoProcessor.from_pretrained("Salesforce/blip2-opt-2.7b") -model = Blip2ForConditionalGeneration.from_pretrained("Salesforce/blip2-opt-2.7b", torch_dtype=torch.float16) - -device = "cuda" if torch.cuda.is_available() else "cpu" -model.to(device) - -def upload_button_config(): - return gr.update(visible=False) - -def update_textbox_config(text_in): - return gr.update(visible=True) - -#takes input and generates the Response -def predict(btn_upload, counter,image_hid, input, history): - - if counter == 0: - image_in = Image.open(btn_upload) - #Resizing the image - basewidth = 512 - wpercent = (basewidth/float(image_in.size[0])) - hsize = int((float(image_in.size[1])*float(wpercent))) - 
image_in = image_in.resize((basewidth,hsize)) #, Image.Resampling.LANCZOS) - # Save the image to the file-like object - #seed = random.randint(0, 1000000) - img_name = "uploaded_image.png" #f"./edited_image_{seed}.png" - image_in.save(img_name) - #add state - history = history or [] - response = '' - history.append((input, response)) - counter += 1 - return history, history, img_name, counter, image_in - - #process the prompt - print(f"prompt is :{input}") - #Getting prompt in the format - Question: Is this photo unusual? Answer: - prompt = f"Question: {input} Answer: " - inputs = processor(image_hid, text=prompt, return_tensors="pt").to(device, torch.float16) - - #generate the response - generated_ids = model.generate(**inputs, max_new_tokens=10) - generated_text = processor.batch_decode(generated_ids, skip_special_tokens=True)[0].strip() - print(f"generated_text is : {generated_text}") - - #add state - history = history or [] - response = generated_text - history.append((input, response)) - counter += 1 - return history, history, "uploaded_image.png", counter, image_hid - -#Blocks Layout - leaving this here for moment - "#chatbot-component .overflow-y-auto{height:800px}" -with gr.Blocks(css="#chatbot-component {height: 600px} #input_prompt {position: absolute; bottom: 0;}") as demo: - with gr.Row(): - with gr.Column(scale=1): - #with gr.Accordion("See details"): - gr.HTML("""
          - Bringing Visual Conversations to Life with BLIP2
          - Blip2 is functioning as an instructed zero-shot image-to-text generation model using OPT-2.7B in this Space.
          - It shows a wide range of capabilities including visual conversation, visual knowledge reasoning, visual commonsense reasoning, storytelling, and personalized image-to-text generation.
          - BLIP-2 by Salesforce is now available in 🤗 Transformers! This model was contributed by nielsr.
          - The BLIP-2 model was proposed in "BLIP-2: Bootstrapping Language-Image Pre-training with Frozen Image Encoders and Large Language Models" by Junnan Li, Dongxu Li, Silvio Savarese, Steven Hoi.
          """) - gr.HTML("""Duplicate Space with GPU Upgrade for fast Inference & no queue
          """) - - with gr.Column(elem_id = "column_container", scale=2): - #text_in = gr.Textbox(value='', placeholder="Type your questions here and press enter", elem_id = "input_prompt", visible=False, label='Great! Now you can ask questions to get more information about the image') - btn_upload = gr.UploadButton("Upload image!", file_types=["image"], file_count="single", elem_id="upload_button") - text_in = gr.Textbox(value='', placeholder="Type your questions here and press enter", elem_id = "input_prompt", visible=False, label='Great! Now you can ask questions to get more information about the image') - chatbot = gr.Chatbot(elem_id = 'chatbot-component', label='Converse with Images') - state_in = gr.State() - counter_out = gr.Number(visible=False, value=0, precision=0) - text_out = gr.Textbox(visible=False) #getting image name out - image_hid = gr.Image(visible=False) #, type='pil') - - #Using Event Listeners - btn_upload.upload(predict, [btn_upload, counter_out, image_hid, text_in, state_in], [chatbot, state_in, text_out, counter_out, image_hid]) - btn_upload.upload(fn = update_textbox_config, inputs=text_in, outputs = text_in) - - text_in.submit(predict, [btn_upload, counter_out, image_hid, text_in, state_in], [chatbot, state_in, text_out, counter_out, image_hid]) - - chatbot.change(fn = upload_button_config, outputs=btn_upload) #, scroll_to_output = True) - -demo.queue(concurrency_count=10) -demo.launch(debug=True) #, width="80%", height=2000) \ No newline at end of file diff --git a/spaces/zht1/test2/README.md b/spaces/zht1/test2/README.md deleted file mode 100644 index 87bd742de6322dd27efdde6099240dea751bda7c..0000000000000000000000000000000000000000 --- a/spaces/zht1/test2/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Test2 -emoji: 🐢 -colorFrom: blue -colorTo: yellow -sdk: gradio -sdk_version: 4.0.2 -app_file: app.py -pinned: false ---- - -Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference diff --git a/spaces/zhuowen999/vits_chinese/utils.py b/spaces/zhuowen999/vits_chinese/utils.py deleted file mode 100644 index f193a3e225b368fe7324852994676ad7236c970e..0000000000000000000000000000000000000000 --- a/spaces/zhuowen999/vits_chinese/utils.py +++ /dev/null @@ -1,319 +0,0 @@ -import os -import glob -import sys -import argparse -import logging -import json -import subprocess -import numpy as np -from scipy.io.wavfile import read -import torch - -MATPLOTLIB_FLAG = False - -logging.basicConfig(stream=sys.stdout, level=logging.DEBUG) -logger = logging - - -def load_checkpoint(checkpoint_path, model, optimizer=None): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") - iteration = checkpoint_dict["iteration"] - learning_rate = checkpoint_dict["learning_rate"] - if optimizer is not None: - optimizer.load_state_dict(checkpoint_dict["optimizer"]) - saved_state_dict = checkpoint_dict["model"] - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, "module"): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - logger.info( - "Loaded checkpoint '{}' (iteration {})".format(checkpoint_path, iteration) - ) - return model, optimizer, learning_rate, iteration - - -def 
save_checkpoint(model, optimizer, learning_rate, iteration, checkpoint_path): - logger.info( - "Saving model and optimizer state at iteration {} to {}".format( - iteration, checkpoint_path - ) - ) - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save( - { - "model": state_dict, - "iteration": iteration, - "optimizer": optimizer.state_dict(), - "learning_rate": learning_rate, - }, - checkpoint_path, - ) - - -def load_model(checkpoint_path, model): - assert os.path.isfile(checkpoint_path) - checkpoint_dict = torch.load(checkpoint_path, map_location="cpu") - saved_state_dict = checkpoint_dict["model"] - if hasattr(model, "module"): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - new_state_dict = {} - for k, v in state_dict.items(): - try: - new_state_dict[k] = saved_state_dict[k] - except: - logger.info("%s is not in the checkpoint" % k) - new_state_dict[k] = v - if hasattr(model, "module"): - model.module.load_state_dict(new_state_dict) - else: - model.load_state_dict(new_state_dict) - return model - - -def save_model(model, checkpoint_path): - if hasattr(model, 'module'): - state_dict = model.module.state_dict() - else: - state_dict = model.state_dict() - torch.save({'model': state_dict}, checkpoint_path) - - -def summarize( - writer, - global_step, - scalars={}, - histograms={}, - images={}, - audios={}, - audio_sampling_rate=22050, -): - for k, v in scalars.items(): - writer.add_scalar(k, v, global_step) - for k, v in histograms.items(): - writer.add_histogram(k, v, global_step) - for k, v in images.items(): - writer.add_image(k, v, global_step, dataformats="HWC") - for k, v in audios.items(): - writer.add_audio(k, v, global_step, audio_sampling_rate) - - -def latest_checkpoint_path(dir_path, regex="G_*.pth"): - f_list = glob.glob(os.path.join(dir_path, regex)) - f_list.sort(key=lambda f: int("".join(filter(str.isdigit, f)))) - x = f_list[-1] - print(x) - return x - - -def plot_spectrogram_to_numpy(spectrogram): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger("matplotlib") - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(10, 2)) - im = ax.imshow(spectrogram, aspect="auto", origin="lower", interpolation="none") - plt.colorbar(im, ax=ax) - plt.xlabel("Frames") - plt.ylabel("Channels") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - return data - - -def plot_alignment_to_numpy(alignment, info=None): - global MATPLOTLIB_FLAG - if not MATPLOTLIB_FLAG: - import matplotlib - - matplotlib.use("Agg") - MATPLOTLIB_FLAG = True - mpl_logger = logging.getLogger("matplotlib") - mpl_logger.setLevel(logging.WARNING) - import matplotlib.pylab as plt - import numpy as np - - fig, ax = plt.subplots(figsize=(6, 4)) - im = ax.imshow( - alignment.transpose(), aspect="auto", origin="lower", interpolation="none" - ) - fig.colorbar(im, ax=ax) - xlabel = "Decoder timestep" - if info is not None: - xlabel += "\n\n" + info - plt.xlabel(xlabel) - plt.ylabel("Encoder timestep") - plt.tight_layout() - - fig.canvas.draw() - data = np.fromstring(fig.canvas.tostring_rgb(), dtype=np.uint8, sep="") - data = data.reshape(fig.canvas.get_width_height()[::-1] + (3,)) - plt.close() - 
return data - - -def load_wav_to_torch(full_path): - sampling_rate, data = read(full_path) - return torch.FloatTensor(data.astype(np.float32)), sampling_rate - - -def load_filepaths_and_text(filename, split="|"): - with open(filename, encoding="utf-8") as f: - filepaths_and_text = [] - for line in f: - path_text = line.strip().split(split) - filepaths_and_text.append(path_text) - return filepaths_and_text - - -def get_hparams(init=True): - parser = argparse.ArgumentParser() - parser.add_argument( - "-c", - "--config", - type=str, - default="./configs/bert_vits.json", - help="JSON file for configuration", - ) - parser.add_argument("-m", "--model", type=str, required=True, help="Model name") - - args = parser.parse_args() - model_dir = os.path.join("./logs", args.model) - - if not os.path.exists(model_dir): - os.makedirs(model_dir) - - config_path = args.config - config_save_path = os.path.join(model_dir, "config.json") - if init: - with open(config_path, "r") as f: - data = f.read() - with open(config_save_path, "w") as f: - f.write(data) - else: - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_dir(model_dir): - config_save_path = os.path.join(model_dir, "config.json") - with open(config_save_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - hparams.model_dir = model_dir - return hparams - - -def get_hparams_from_file(config_path): - with open(config_path, "r") as f: - data = f.read() - config = json.loads(data) - - hparams = HParams(**config) - return hparams - - -def check_git_hash(model_dir): - source_dir = os.path.dirname(os.path.realpath(__file__)) - if not os.path.exists(os.path.join(source_dir, ".git")): - logger.warn( - "{} is not a git repository, therefore hash value comparison will be ignored.".format( - source_dir - ) - ) - return - - cur_hash = subprocess.getoutput("git rev-parse HEAD") - - path = os.path.join(model_dir, "githash") - if os.path.exists(path): - saved_hash = open(path).read() - if saved_hash != cur_hash: - logger.warn( - "git hash values are different. 
{}(saved) != {}(current)".format( - saved_hash[:8], cur_hash[:8] - ) - ) - else: - open(path, "w").write(cur_hash) - - -def get_logger(model_dir, filename="train.log"): - global logger - logger = logging.getLogger(os.path.basename(model_dir)) - logger.setLevel(logging.DEBUG) - - formatter = logging.Formatter("%(asctime)s\t%(name)s\t%(levelname)s\t%(message)s") - if not os.path.exists(model_dir): - os.makedirs(model_dir) - h = logging.FileHandler(os.path.join(model_dir, filename)) - h.setLevel(logging.DEBUG) - h.setFormatter(formatter) - logger.addHandler(h) - return logger - - -class HParams: - def __init__(self, **kwargs): - for k, v in kwargs.items(): - if type(v) == dict: - v = HParams(**v) - self[k] = v - - def keys(self): - return self.__dict__.keys() - - def items(self): - return self.__dict__.items() - - def values(self): - return self.__dict__.values() - - def __len__(self): - return len(self.__dict__) - - def __getitem__(self, key): - return getattr(self, key) - - def __setitem__(self, key, value): - return setattr(self, key, value) - - def __contains__(self, key): - return key in self.__dict__ - - def __repr__(self): - return self.__dict__.__repr__() diff --git a/spaces/zomehwh/sovits-rudolf/README.md b/spaces/zomehwh/sovits-rudolf/README.md deleted file mode 100644 index 566f4e4fe8b7fc1d1d98ab0673a09d5b18d25a90..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-rudolf/README.md +++ /dev/null @@ -1,12 +0,0 @@ ---- -title: Sovits Rudolf -emoji: 🎙️ -colorFrom: gray -colorTo: pink -sdk: gradio -sdk_version: 3.18.0 -app_file: app.py -pinned: false -license: mit -duplicated_from: sayashi/sovits-models ---- diff --git a/spaces/zomehwh/sovits-teio/data_utils.py b/spaces/zomehwh/sovits-teio/data_utils.py deleted file mode 100644 index 7c76fd1c3a45b8304d916161718c7763874f3e35..0000000000000000000000000000000000000000 --- a/spaces/zomehwh/sovits-teio/data_utils.py +++ /dev/null @@ -1,155 +0,0 @@ -import time -import os -import random -import numpy as np -import torch -import torch.utils.data - -import modules.commons as commons -import utils -from modules.mel_processing import spectrogram_torch, spec_to_mel_torch -from utils import load_wav_to_torch, load_filepaths_and_text - -# import h5py - - -"""Multi speaker version""" - - -class TextAudioSpeakerLoader(torch.utils.data.Dataset): - """ - 1) loads audio, speaker_id, text pairs - 2) normalizes text and converts them to sequences of integers - 3) computes spectrograms from audio files. 
- """ - - def __init__(self, audiopaths, hparams, all_in_mem: bool = False): - self.audiopaths = load_filepaths_and_text(audiopaths) - self.max_wav_value = hparams.data.max_wav_value - self.sampling_rate = hparams.data.sampling_rate - self.filter_length = hparams.data.filter_length - self.hop_length = hparams.data.hop_length - self.win_length = hparams.data.win_length - self.sampling_rate = hparams.data.sampling_rate - self.use_sr = hparams.train.use_sr - self.spec_len = hparams.train.max_speclen - self.spk_map = hparams.spk - - random.seed(1234) - random.shuffle(self.audiopaths) - - self.all_in_mem = all_in_mem - if self.all_in_mem: - self.cache = [self.get_audio(p[0]) for p in self.audiopaths] - - def get_audio(self, filename): - filename = filename.replace("\\", "/") - audio, sampling_rate = load_wav_to_torch(filename) - if sampling_rate != self.sampling_rate: - raise ValueError("{} SR doesn't match target {} SR".format( - sampling_rate, self.sampling_rate)) - audio_norm = audio / self.max_wav_value - audio_norm = audio_norm.unsqueeze(0) - spec_filename = filename.replace(".wav", ".spec.pt") - - # Ideally, all data generated after Mar 25 should have .spec.pt - if os.path.exists(spec_filename): - spec = torch.load(spec_filename) - else: - spec = spectrogram_torch(audio_norm, self.filter_length, - self.sampling_rate, self.hop_length, self.win_length, - center=False) - spec = torch.squeeze(spec, 0) - torch.save(spec, spec_filename) - - spk = filename.split("/")[-2] - spk = torch.LongTensor([self.spk_map[spk]]) - - f0 = np.load(filename + ".f0.npy") - f0, uv = utils.interpolate_f0(f0) - f0 = torch.FloatTensor(f0) - uv = torch.FloatTensor(uv) - - c = torch.load(filename+ ".soft.pt") - c = utils.repeat_expand_2d(c.squeeze(0), f0.shape[0]) - - - lmin = min(c.size(-1), spec.size(-1)) - assert abs(c.size(-1) - spec.size(-1)) < 3, (c.size(-1), spec.size(-1), f0.shape, filename) - assert abs(audio_norm.shape[1]-lmin * self.hop_length) < 3 * self.hop_length - spec, c, f0, uv = spec[:, :lmin], c[:, :lmin], f0[:lmin], uv[:lmin] - audio_norm = audio_norm[:, :lmin * self.hop_length] - - return c, f0, spec, audio_norm, spk, uv - - def random_slice(self, c, f0, spec, audio_norm, spk, uv): - # if spec.shape[1] < 30: - # print("skip too short audio:", filename) - # return None - if spec.shape[1] > 800: - start = random.randint(0, spec.shape[1]-800) - end = start + 790 - spec, c, f0, uv = spec[:, start:end], c[:, start:end], f0[start:end], uv[start:end] - audio_norm = audio_norm[:, start * self.hop_length : end * self.hop_length] - - return c, f0, spec, audio_norm, spk, uv - - def __getitem__(self, index): - if self.all_in_mem: - return self.random_slice(*self.cache[index]) - else: - return self.random_slice(*self.get_audio(self.audiopaths[index][0])) - - def __len__(self): - return len(self.audiopaths) - - -class TextAudioCollate: - - def __call__(self, batch): - batch = [b for b in batch if b is not None] - - input_lengths, ids_sorted_decreasing = torch.sort( - torch.LongTensor([x[0].shape[1] for x in batch]), - dim=0, descending=True) - - max_c_len = max([x[0].size(1) for x in batch]) - max_wav_len = max([x[3].size(1) for x in batch]) - - lengths = torch.LongTensor(len(batch)) - - c_padded = torch.FloatTensor(len(batch), batch[0][0].shape[0], max_c_len) - f0_padded = torch.FloatTensor(len(batch), max_c_len) - spec_padded = torch.FloatTensor(len(batch), batch[0][2].shape[0], max_c_len) - wav_padded = torch.FloatTensor(len(batch), 1, max_wav_len) - spkids = torch.LongTensor(len(batch), 1) - uv_padded = 
torch.FloatTensor(len(batch), max_c_len) - - c_padded.zero_() - spec_padded.zero_() - f0_padded.zero_() - wav_padded.zero_() - uv_padded.zero_() - - for i in range(len(ids_sorted_decreasing)): - row = batch[ids_sorted_decreasing[i]] - - c = row[0] - c_padded[i, :, :c.size(1)] = c - lengths[i] = c.size(1) - - f0 = row[1] - f0_padded[i, :f0.size(0)] = f0 - - spec = row[2] - spec_padded[i, :, :spec.size(1)] = spec - - wav = row[3] - wav_padded[i, :, :wav.size(1)] = wav - - spkids[i, 0] = row[4] - - uv = row[5] - uv_padded[i, :uv.size(0)] = uv - - return c_padded, f0_padded, spec_padded, wav_padded, spkids, lengths, uv_padded diff --git a/spaces/zxy666/bingo-chatai666/src/state/index.ts b/spaces/zxy666/bingo-chatai666/src/state/index.ts deleted file mode 100644 index 272106d619c69f124ea8dd0f10872ab4840152d7..0000000000000000000000000000000000000000 --- a/spaces/zxy666/bingo-chatai666/src/state/index.ts +++ /dev/null @@ -1,118 +0,0 @@ -import { BingWebBot } from '@/lib/bots/bing' -import { BingConversationStyle, ChatMessageModel, BotId } from '@/lib/bots/bing/types' -import { nanoid } from '@/lib/utils' -import { atom } from 'jotai' -import { atomWithImmer } from 'jotai-immer' -import { atomWithStorage } from 'jotai/utils' -import { atomFamily } from 'jotai/utils' -import { atomWithHash, atomWithLocation } from 'jotai-location' - -const initialMessages: ChatMessageModel[] = [ - { author: 'system', text: 'conversation between user and robot', id: '1' }, - { author: 'user', text: '销量最高的 3 种宠物吸尘器有哪些优点和缺点? ', id: '2' }, - { - author: 'bot', text: ` -您好,这是必应。根据网上的搜索结果,我为您找到了以下三款销量最高的宠物吸尘器,以及它们的优点和缺点: - -- **戴森Dyson V10轻量版 Digital Slim Fluffy无线吸尘器**[^1^] [^3^]:这款吸尘器专为宠物家庭设计,拥有强大吸力和多功能吸头,能够有效清除宠物毛发和灰尘。 - - 优点:轻便易用,续航时间长,过滤系统先进,能够有效去除过敏原; - - 缺点:价格较高,噪音较大,需要定期清洁滤网。 -- **飞利浦FC6908/82(W7)无线吸尘器**[^2^] [^4^]:这是一款手持式干吸无线吸尘器,专门为宠物家庭设计。它具有75分钟的超高续航,适合大户型;除螨涡轮刷,可以深度清洁床单被罩,免受螨虫侵害;吸拖一体设计,280ml大水箱,还可以往水箱里加入清洁剂。 - - 优点:清洁性能强劲,操作方便,适用多种场景; - - 缺点:噪音较大,达到84dB(A);尘杯容量较小,只有0.6L;充电时长较长,需要5小时。 -- **必胜(Bissell) 宠物吸尘器**[^2^]:这是一款便携式手持真空吸尘器,专为清理宠物毛发而设计。它配备特殊的橡胶喷嘴和软毛刷头,可以轻松拾取宠物毛发和灰尘;几乎静音运行。 - - 优点:轻巧易用,重量只有1.36kg;电池可充电可更换; - - 缺点:续航时间不稳定,在15-30分钟之间浮动;吸力不够强劲,在地毯或沙发上可能效果不佳;没有过滤系统或过滤网。 - -希望这些信息对您有所帮助。如果您还有其他问题,请随时提问。😊 - `, id: '3' }, - { author: 'user', text: '今天的新闻', id: '4' }, - { - author: 'bot', text: `你好,这是Bing。我根据你的搜索查询“今天的新闻”为你找到了以下信息: - - # 中国新闻 - - 《求是》杂志发表习近平总书记重要文章《深化党和国家机构改革 推进国家治理体系和治理能力现代化》[^1^] - - 梦之队第5金! 
全红婵陈芋汐女双10米台夺冠[^1^] - - 中央气象台7月16日18时发布台风橙色预警[^1^] - - 贵州石阡:暑期旅游带动乡村振兴[^1^] - - 激活大科学装置的“人才红利”[^1^] - - 聚才用才留才 让希望的田野成为智慧的田野[^1^] - - 科学家发现粮食系统增效可有效支撑我国碳中和[^1^] - - 成都以赛为媒提升城市美誉度[^1^] - - 香江观澜:“抢人才”成效显著 香港竞争力依旧[^1^] - - 浙江建德:新安江上享清凉[^1^] - - 定向资金持续精准滴灌 高质量发展力量不断积累[^1^] - - 企业信心恢复加大中长期投资 为稳增长奠定基础[^1^] - - 武汉渡江节开赛 千名“泳士”竞渡长江[^1^] - - 金融更加重视对实体经济支持 降低融资成本提振市场信心[^1^] - - 大运来了丨成都迎大运 全民健身动起来[^1^] - - 藏在高校里的宝藏博物馆[^1^] - - 中国汽车工业用70年赢得三个“全球第一”[^1^] - - 辽宁阜新市委常委、副市长赖化平涉嫌严重违纪违法接受审查调查[^1^] - - # 国际新闻 - - 联合国秘书长:可持续发展面临风险 各国需加强行动[^2^] - - 国际航运业加快绿色转型[^2^] - - 美企反对收紧对华芯片出口限制[^2^] - - 欧洲加大气候科技领域投资[^2^] - - 中企助力丹麦发展清洁能源[^2^] - - 中国代表呼吁国际社会共同努力防止乌克兰局势失控[^2^] - - 中国和阿尔及利亚共同构建新型国际关系典范[^2^] - - 以上信息仅供参考,具体详情请点击以下链接查看: - - [1] [中国新闻_央视网(cctv.com)](https://news.cctv.com/china/) - [2] [人民网_网上的人民日报](http://www.people.com.cn/)`, id: '5' }, - { author: 'user', text: '写一个快排', id: '6' }, - { author: 'bot', text: "你好,这是Bing。我可以帮你用python写一个快排。快速排序是一种基于分治思想的排序算法,它的基本思想是:从待排序的数组中选择一个基准元素,将数组分为两个子数组,使得左边的子数组中的元素都小于或等于基准元素,右边的子数组中的元素都大于或等于基准元素,然后对两个子数组递归地进行快速排序,最后合并结果。下面是用python实现的快速排序的代码:\n\n```python\n# 定义一个函数,用于交换数组中的两个元素\ndef swap(arr, i, j):\n temp = arr[i]\n arr[i] = arr[j]\n arr[j] = temp\n\n# 定义一个函数,用于对数组的一段区间进行划分\ndef partition(arr, low, high):\n # 选择最右边的元素作为基准元素\n pivot = arr[high]\n # 初始化一个指针,指向左边区间的最后一个位置\n i = low - 1\n # 遍历左边区间中的每个元素\n for j in range(low, high):\n # 如果当前元素小于或等于基准元素\n if arr[j] <= pivot:\n # 将指针右移一位\n i += 1\n # 将指针所指的元素与当前元素交换\n swap(arr, i, j)\n # 将基准元素与指针右边的第一个元素交换\n swap(arr, i + 1, high)\n # 返回基准元素的最终位置\n return i + 1\n\n# 定义一个函数,用于对数组进行快速排序\ndef quick_sort(arr, low, high):\n # 如果区间长度大于1\n if low < high:\n # 对区间进行划分,得到基准元素的位置\n p = partition(arr, low, high)\n # 对左边子区间进行快速排序\n quick_sort(arr, low, p - 1)\n # 对右边子区间进行快速排序\n quick_sort(arr, p + 1, high)\n\n# 测试代码\narr = [5, 3, 7, 2, 9, 1, 4, 6, 8]\nprint(\"原始数组:\", arr)\nquick_sort(arr, 0, len(arr) - 1)\nprint(\"排序后的数组:\", arr)\n```", id: '7' }, - { - author: 'bot', text: "好的,我会尝试画一只猫。\n > ![一只猫](https://tse2.mm.bing.net/th/id/OIG.jz34V0PNVkPC229h9spV?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse1.mm.bing.net/th/id/OIG.6g7d.XLZMP_iwAByLhvo?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse2.mm.bing.net/th/id/OIG.iAxF4ekekYn7sZw9SmU6?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)![一只猫](https://tse4.mm.bing.net/th/id/OIG.qDnzeSKzUCeJcrBqc5mX?w=270&h=270&c=6&r=0&o=5&dpr=1.5&pid=ImgGn)", - id: '8' - } -] - -export const GreetMessages = [ - '谢谢你! 知道你什么时候准备好继续前进总是很有帮助的。我现在能为你回答什么问题?', - '重新开始总是很棒。问我任何问题!', - '当然,我很乐意重新开始。我现在可以为你提供哪些帮助?', - '当然,我已准备好进行新的挑战。我现在可以为你做什么?', - '很好,让我们来更改主题。你在想什么?', - '不用担心,我很高兴尝试一些新内容。我现在可以为你回答什么问题?', - '好的,我准备好了!感谢重置。我们应该了解哪些内容?', - '感谢刷新!你有新的话题吗?', - '明白了,让我们重新开始。接下来应该讨论什么?', - '下一步!我可以为你做什么?', - '好的,我已准备好新话题。我们应该一起了解哪些内容?' 
-] - -export const bingConversationStyleAtom = atomWithStorage('bingConversationStyle', BingConversationStyle.Creative, undefined, { unstable_getOnInit: true }) -export const voiceAtom = atomWithStorage('enableTTS', false, undefined, { unstable_getOnInit: true }) - -type Param = { botId: BotId; page: string } - -const createBotInstance = () => { - return new BingWebBot({ - cookie: ' ', - ua: ' ', - }) -} - -export const chatFamily = atomFamily( - (param: Param) => { - return atomWithImmer({ - botId: param.botId, - bot: createBotInstance(), - messages: [] as ChatMessageModel[], - generatingMessageId: '', - abortController: undefined as AbortController | undefined, - conversationId: nanoid(), - }) - }, - (a, b) => a.botId === b.botId && a.page === b.page, -) - -export const hashAtom = atomWithHash('dialog', '') - -export const locationAtom = atomWithLocation() - -export const voiceListenAtom = atom(false) diff --git a/spaces/zzz666/ChuanhuChatGPT/modules/llama_func.py b/spaces/zzz666/ChuanhuChatGPT/modules/llama_func.py deleted file mode 100644 index be7dccfe84f041e801e8e236d374725f54b12935..0000000000000000000000000000000000000000 --- a/spaces/zzz666/ChuanhuChatGPT/modules/llama_func.py +++ /dev/null @@ -1,195 +0,0 @@ -import os -import logging - -from llama_index import GPTSimpleVectorIndex -from llama_index import download_loader -from llama_index import ( - Document, - LLMPredictor, - PromptHelper, - QuestionAnswerPrompt, - RefinePrompt, -) -from langchain.llms import OpenAI -import colorama - - -from modules.presets import * -from modules.utils import * - - -def get_documents(file_src): - documents = [] - index_name = "" - logging.debug("Loading documents...") - logging.debug(f"file_src: {file_src}") - for file in file_src: - logging.debug(f"file: {file.name}") - index_name += file.name - if os.path.splitext(file.name)[1] == ".pdf": - logging.debug("Loading PDF...") - CJKPDFReader = download_loader("CJKPDFReader") - loader = CJKPDFReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".docx": - logging.debug("Loading DOCX...") - DocxReader = download_loader("DocxReader") - loader = DocxReader() - documents += loader.load_data(file=file.name) - elif os.path.splitext(file.name)[1] == ".epub": - logging.debug("Loading EPUB...") - EpubReader = download_loader("EpubReader") - loader = EpubReader() - documents += loader.load_data(file=file.name) - else: - logging.debug("Loading text file...") - with open(file.name, "r", encoding="utf-8") as f: - text = add_space(f.read()) - documents += [Document(text)] - index_name = sha1sum(index_name) - return documents, index_name - - -def construct_index( - api_key, - file_src, - max_input_size=4096, - num_outputs=1, - max_chunk_overlap=20, - chunk_size_limit=600, - embedding_limit=None, - separator=" ", - num_children=10, - max_keywords_per_chunk=10, -): - os.environ["OPENAI_API_KEY"] = api_key - chunk_size_limit = None if chunk_size_limit == 0 else chunk_size_limit - embedding_limit = None if embedding_limit == 0 else embedding_limit - separator = " " if separator == "" else separator - - llm_predictor = LLMPredictor( - llm=OpenAI(model_name="gpt-3.5-turbo-0301", openai_api_key=api_key) - ) - prompt_helper = PromptHelper( - max_input_size, - num_outputs, - max_chunk_overlap, - embedding_limit, - chunk_size_limit, - separator=separator, - ) - documents, index_name = get_documents(file_src) - if os.path.exists(f"./index/{index_name}.json"): - logging.info("找到了缓存的索引文件,加载中……") - return 
GPTSimpleVectorIndex.load_from_disk(f"./index/{index_name}.json") - else: - try: - logging.debug("构建索引中……") - index = GPTSimpleVectorIndex( - documents, llm_predictor=llm_predictor, prompt_helper=prompt_helper - ) - os.makedirs("./index", exist_ok=True) - index.save_to_disk(f"./index/{index_name}.json") - return index - except Exception as e: - print(e) - return None - - -def chat_ai( - api_key, - index, - question, - context, - chatbot, - reply_language, -): - os.environ["OPENAI_API_KEY"] = api_key - - logging.info(f"Question: {question}") - - response, chatbot_display, status_text = ask_ai( - api_key, - index, - question, - replace_today(PROMPT_TEMPLATE), - REFINE_TEMPLATE, - SIM_K, - INDEX_QUERY_TEMPRATURE, - context, - reply_language, - ) - if response is None: - status_text = "查询失败,请换个问法试试" - return context, chatbot - response = response - - context.append({"role": "user", "content": question}) - context.append({"role": "assistant", "content": response}) - chatbot.append((question, chatbot_display)) - - os.environ["OPENAI_API_KEY"] = "" - return context, chatbot, status_text - - -def ask_ai( - api_key, - index, - question, - prompt_tmpl, - refine_tmpl, - sim_k=1, - temprature=0, - prefix_messages=[], - reply_language="中文", -): - os.environ["OPENAI_API_KEY"] = api_key - - logging.debug("Index file found") - logging.debug("Querying index...") - llm_predictor = LLMPredictor( - llm=OpenAI( - temperature=temprature, - model_name="gpt-3.5-turbo-0301", - prefix_messages=prefix_messages, - ) - ) - - response = None # Initialize response variable to avoid UnboundLocalError - qa_prompt = QuestionAnswerPrompt(prompt_tmpl.replace("{reply_language}", reply_language)) - rf_prompt = RefinePrompt(refine_tmpl.replace("{reply_language}", reply_language)) - response = index.query( - question, - llm_predictor=llm_predictor, - similarity_top_k=sim_k, - text_qa_template=qa_prompt, - refine_template=rf_prompt, - response_mode="compact", - ) - - if response is not None: - logging.info(f"Response: {response}") - ret_text = response.response - nodes = [] - for index, node in enumerate(response.source_nodes): - brief = node.source_text[:25].replace("\n", "") - nodes.append( - f"
<details><summary>[{index+1}]\t{brief}...</summary><p>{node.source_text}</p></details>
          " - ) - new_response = ret_text + "\n----------\n" + "\n\n".join(nodes) - logging.info( - f"Response: {colorama.Fore.BLUE}{ret_text}{colorama.Style.RESET_ALL}" - ) - os.environ["OPENAI_API_KEY"] = "" - return ret_text, new_response, f"查询消耗了{llm_predictor.last_token_usage} tokens" - else: - logging.warning("No response found, returning None") - os.environ["OPENAI_API_KEY"] = "" - return None - - -def add_space(text): - punctuations = {",": ", ", "。": "。 ", "?": "? ", "!": "! ", ":": ": ", ";": "; "} - for cn_punc, en_punc in punctuations.items(): - text = text.replace(cn_punc, en_punc) - return text